xref: /linux/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c (revision 37bb2e7217b01404e2abf9d90d8e5705a5603b52)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *		(now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/raspberrypi/vchiq.h>
26 #include <linux/vmalloc.h>
27 #include <media/videobuf2-vmalloc.h>
28 
29 #include <linux/raspberrypi/vchiq_arm.h>
30 
31 #include "mmal-common.h"
32 #include "mmal-vchiq.h"
33 #include "mmal-msg.h"
34 
35 /*
36  * maximum number of components supported.
37  * This matches the maximum permitted by default on the VPU
38  */
39 #define VCHIQ_MMAL_MAX_COMPONENTS 64
40 
41 /*
42  * Timeout for synchronous msg responses in seconds.
43  * Helpful to increase this if stopping in the VPU debugger.
44  */
45 #define SYNC_MSG_TIMEOUT       3
46 
47 /*#define FULL_MSG_DUMP 1*/
48 
#ifdef DEBUG
/* Human-readable message type names, indexed by the mmal message
 * header's type field; used only by DBG_DUMP_MSG below.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
80 
/* Human-readable names for MMAL port actions.
 * NOTE(review): presumably indexed by the port action type enum for
 * debug output elsewhere in this file - confirm against the users.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
90 
/*
 * DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) - log an mmal message for debug.
 * With FULL_MSG_DUMP also hex-dumps the message header and payload;
 * without DEBUG it expands to nothing.
 */
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/* do { } while (0) (not a bare block) so the macro is a single
 * statement and "if (x) DBG_DUMP_MSG(...); else ..." compiles.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
117 
118 struct vchiq_mmal_instance;
119 
/* Per-message state: either a bulk buffer transfer or a synchronous
 * request/reply exchange.  The two uses share a union, so only one
 * arm is live for a given context at a time.
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchiq_header *msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
166 
/* Per-connection state for one open MMAL service over vchiq. */
struct vchiq_mmal_instance {
	unsigned int service_handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* maps the integer handle carried in message headers back to
	 * the in-flight mmal_msg_context
	 */
	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* handle for a vchiq instance */
	struct vchiq_instance *vchiq_instance;
};
185 
186 static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance * instance)187 get_msg_context(struct vchiq_mmal_instance *instance)
188 {
189 	struct mmal_msg_context *msg_context;
190 	int handle;
191 
192 	/* todo: should this be allocated from a pool to avoid kzalloc */
193 	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
194 
195 	if (!msg_context)
196 		return ERR_PTR(-ENOMEM);
197 
198 	/* Create an ID that will be passed along with our message so
199 	 * that when we service the VCHI reply, we can look up what
200 	 * message is being replied to.
201 	 */
202 	mutex_lock(&instance->context_map_lock);
203 	handle = idr_alloc(&instance->context_map, msg_context,
204 			   0, 0, GFP_KERNEL);
205 	mutex_unlock(&instance->context_map_lock);
206 
207 	if (handle < 0) {
208 		kfree(msg_context);
209 		return ERR_PTR(handle);
210 	}
211 
212 	msg_context->instance = instance;
213 	msg_context->handle = handle;
214 
215 	return msg_context;
216 }
217 
/* Map an idr handle (as carried in a message header) back to its
 * mmal_msg_context; returns NULL for an unregistered handle.
 * NOTE(review): reads the idr without context_map_lock - presumably
 * relying on idr_find's RCU safety against concurrent alloc/remove;
 * confirm.
 */
static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
	return idr_find(&instance->context_map, handle);
}
223 
/* Unregister a message context from the instance's idr and free it.
 * After this its handle must no longer appear in any in-flight
 * message, as replies can no longer be matched to it.
 */
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
	struct vchiq_mmal_instance *instance = msg_context->instance;

	mutex_lock(&instance->context_map_lock);
	idr_remove(&instance->context_map, msg_context->handle);
	mutex_unlock(&instance->context_map_lock);
	kfree(msg_context);
}
234 
/* deals with receipt of event to host message */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	/* Events are not acted upon here; just log the salient fields. */
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
246 
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	if (!buffer) {
		/* Should not happen: buffer_from_host() sets
		 * u.bulk.buffer before this work can be scheduled.
		 */
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	/* Copy the transfer results recorded by the reply/bulk path
	 * into the buffer handed back to the client.
	 */
	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;

	/* The VPU has returned this buffer to the host. */
	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	/* Hand the completed buffer to the port's completion callback. */
	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);
}
276 
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchiq_bulk_receive() call to complete.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;
	/* queue the bulk submission; completion is signalled back to
	 * mmal_service_callback() as VCHIQ_BULK_RECEIVE_DONE/ABORTED
	 * with msg_context as the callback data
	 */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
				 msg_context->u.bulk.buffer->buffer,
				 /* Actual receive needs to be a multiple
				  * of 4 bytes
				  */
				(len + 3) & ~3,
				msg_context,
				VCHIQ_BULK_MODE_CALLBACK);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret != 0)
		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
		       __func__, msg_context, ret);
}
313 
/* enqueue a bulk receive for a given message context
 *
 * Records the expected transfer length/timestamps in the context and
 * defers the actual vchiq_bulk_receive() to the ordered bulk_wq
 * (see buffer_to_host_work_cb).  Returns 0 on queue, -EINVAL if no
 * client buffer was attached to the context.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal service going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	/* bulk_wq is ordered, so receives complete in submission order */
	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
358 
359 /* data in message, memcpy from packet into output buffer */
inline_receive(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,struct mmal_msg_context * msg_context)360 static int inline_receive(struct vchiq_mmal_instance *instance,
361 			  struct mmal_msg *msg,
362 			  struct mmal_msg_context *msg_context)
363 {
364 	memcpy(msg_context->u.bulk.buffer->buffer,
365 	       msg->u.buffer_from_host.short_data,
366 	       msg->u.buffer_from_host.payload_in_message);
367 
368 	msg_context->u.bulk.buffer_used =
369 	    msg->u.buffer_from_host.payload_in_message;
370 
371 	return 0;
372 }
373 
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands buf to the VPU to fill; the buffer comes back via
 * buffer_to_host_cb() and ultimately the port's buffer_cb.
 * Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	/* buffer is on loan to the VPU until it comes back through
	 * buffer_to_host_cb()/buffer_work_cb()
	 */
	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	if (ret)
		/* submission failed - the buffer never reached the VPU */
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	return ret;
}
453 
/* deals with receipt of buffer to host message
 *
 * Looks up the originating msg_context from drvbuf.client_context,
 * then either completes immediately (error / empty / inline payload)
 * by scheduling buffer_work_cb, or queues a bulk receive in which
 * case the bulk completion triggers the callback instead.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS still needs a (zero-length) bulk receive
			 * to keep buffer ordering intact
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
532 
/* bulk receive completed successfully - mark the context OK and hand
 * the buffer back to the client via the workqueue.
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
541 
/* bulk receive was aborted - complete the buffer with -EINTR so the
 * client still gets its buffer back.
 */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
551 
/* incoming event service callback
 *
 * Dispatches vchiq events for the MMAL service.  Buffer and event
 * messages are handled (and their headers released) directly; all
 * other message types are matched to a waiting synchronous sender
 * via the context handle, which then owns releasing the header.
 */
static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
				 enum vchiq_reason reason, struct vchiq_header *header,
				 unsigned int handle, void *cb_data,
				 void __user *cb_userdata)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return 0;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* ack of our own submission - nothing to do */
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			/* fill in context values; the header is NOT
			 * released here - send_synchronous_mmal_msg's
			 * caller releases it via sync.msg_handle
			 */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		/* cb_data is the msg_context passed to vchiq_bulk_receive */
		bulk_receive_cb(instance, cb_data);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, cb_data);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return 0;
}
651 
/* Send msg and block (bounded by SYNC_MSG_TIMEOUT) until the VPU's
 * reply is delivered via mmal_service_callback().
 *
 * On success (*msg_out, *msg_handle) refer to the reply; the caller
 * must release *msg_handle with vchiq_release_message() when done.
 * Returns 0 or a negative errno (-EINVAL oversize, -ETIME timeout,
 * or the queueing error).
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long time_left;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	/* the context handle travels in the header so the reply can
	 * be matched back to us
	 */
	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	time_left = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
						SYNC_MSG_TIMEOUT * HZ);
	if (time_left == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
714 
/* Debug-log the full cached state of a port: buffer requirements,
 * stream format and (for video) frame geometry/rate.
 */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d  aspect %d/%d\n",
			 port->es.video.frame_rate.numerator,
			 port->es.video.frame_rate.denominator,
			 port->es.video.par.numerator, port->es.video.par.denominator);
	}
}
755 
/* Populate the wire-format mmal_port *p from our locally cached
 * vchiq_mmal_port state, ready for a PORT_INFO_SET message.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	p->userdata = (u32)(unsigned long)port;
}
774 
port_info_set(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)775 static int port_info_set(struct vchiq_mmal_instance *instance,
776 			 struct vchiq_mmal_port *port)
777 {
778 	int ret;
779 	struct mmal_msg m;
780 	struct mmal_msg *rmsg;
781 	struct vchiq_header *rmsg_handle;
782 
783 	pr_debug("setting port info port %p\n", port);
784 	if (!port)
785 		return -1;
786 	dump_port_info(port);
787 
788 	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
789 
790 	m.u.port_info_set.component_handle = port->component->handle;
791 	m.u.port_info_set.port_type = port->type;
792 	m.u.port_info_set.port_index = port->index;
793 
794 	port_to_mmal_msg(port, &m.u.port_info_set.port);
795 
796 	/* elementary stream format setup */
797 	m.u.port_info_set.format.type = port->format.type;
798 	m.u.port_info_set.format.encoding = port->format.encoding;
799 	m.u.port_info_set.format.encoding_variant =
800 	    port->format.encoding_variant;
801 	m.u.port_info_set.format.bitrate = port->format.bitrate;
802 	m.u.port_info_set.format.flags = port->format.flags;
803 
804 	memcpy(&m.u.port_info_set.es, &port->es,
805 	       sizeof(union mmal_es_specific_format));
806 
807 	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
808 	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
809 	       port->format.extradata_size);
810 
811 	ret = send_synchronous_mmal_msg(instance, &m,
812 					sizeof(m.u.port_info_set),
813 					&rmsg, &rmsg_handle);
814 	if (ret)
815 		return ret;
816 
817 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
818 		/* got an unexpected message type in reply */
819 		ret = -EINVAL;
820 		goto release_msg;
821 	}
822 
823 	/* return operation status */
824 	ret = -rmsg->u.port_info_get_reply.status;
825 
826 	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
827 		 port->component->handle, port->handle);
828 
829 release_msg:
830 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
831 
832 	return ret;
833 }
834 
/* use port info get message to retrieve port information
 *
 * On success copies the VPU's view of the port (handle, buffer
 * requirements, stream format, extradata) into *port.  Returns 0 or
 * a negative errno / negated MMAL status.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;
	/* NOTE(review): compares the negated status against the status
	 * constant - presumably correct only because
	 * MMAL_MSG_STATUS_SUCCESS is 0; confirm against mmal-msg.h
	 */
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
929 
930 /* create component on vc */
create_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component,const char * name)931 static int create_component(struct vchiq_mmal_instance *instance,
932 			    struct vchiq_mmal_component *component,
933 			    const char *name)
934 {
935 	int ret;
936 	struct mmal_msg m;
937 	struct mmal_msg *rmsg;
938 	struct vchiq_header *rmsg_handle;
939 
940 	/* build component create message */
941 	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
942 	m.u.component_create.client_component = component->client_component;
943 	strscpy_pad(m.u.component_create.name, name,
944 		    sizeof(m.u.component_create.name));
945 	m.u.component_create.pid = 0;
946 
947 	ret = send_synchronous_mmal_msg(instance, &m,
948 					sizeof(m.u.component_create),
949 					&rmsg, &rmsg_handle);
950 	if (ret)
951 		return ret;
952 
953 	if (rmsg->h.type != m.h.type) {
954 		/* got an unexpected message type in reply */
955 		ret = -EINVAL;
956 		goto release_msg;
957 	}
958 
959 	ret = -rmsg->u.component_create_reply.status;
960 	if (ret != MMAL_MSG_STATUS_SUCCESS)
961 		goto release_msg;
962 
963 	/* a valid component response received */
964 	component->handle = rmsg->u.component_create_reply.component_handle;
965 	component->inputs = rmsg->u.component_create_reply.input_num;
966 	component->outputs = rmsg->u.component_create_reply.output_num;
967 	component->clocks = rmsg->u.component_create_reply.clock_num;
968 
969 	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
970 		 component->handle,
971 		 component->inputs, component->outputs, component->clocks);
972 
973 release_msg:
974 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
975 
976 	return ret;
977 }
978 
979 /* destroys a component on vc */
destroy_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)980 static int destroy_component(struct vchiq_mmal_instance *instance,
981 			     struct vchiq_mmal_component *component)
982 {
983 	int ret;
984 	struct mmal_msg m;
985 	struct mmal_msg *rmsg;
986 	struct vchiq_header *rmsg_handle;
987 
988 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
989 	m.u.component_destroy.component_handle = component->handle;
990 
991 	ret = send_synchronous_mmal_msg(instance, &m,
992 					sizeof(m.u.component_destroy),
993 					&rmsg, &rmsg_handle);
994 	if (ret)
995 		return ret;
996 
997 	if (rmsg->h.type != m.h.type) {
998 		/* got an unexpected message type in reply */
999 		ret = -EINVAL;
1000 		goto release_msg;
1001 	}
1002 
1003 	ret = -rmsg->u.component_destroy_reply.status;
1004 
1005 release_msg:
1006 
1007 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1008 
1009 	return ret;
1010 }
1011 
1012 /* enable a component on vc */
enable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1013 static int enable_component(struct vchiq_mmal_instance *instance,
1014 			    struct vchiq_mmal_component *component)
1015 {
1016 	int ret;
1017 	struct mmal_msg m;
1018 	struct mmal_msg *rmsg;
1019 	struct vchiq_header *rmsg_handle;
1020 
1021 	m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1022 	m.u.component_enable.component_handle = component->handle;
1023 
1024 	ret = send_synchronous_mmal_msg(instance, &m,
1025 					sizeof(m.u.component_enable),
1026 					&rmsg, &rmsg_handle);
1027 	if (ret)
1028 		return ret;
1029 
1030 	if (rmsg->h.type != m.h.type) {
1031 		/* got an unexpected message type in reply */
1032 		ret = -EINVAL;
1033 		goto release_msg;
1034 	}
1035 
1036 	ret = -rmsg->u.component_enable_reply.status;
1037 
1038 release_msg:
1039 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1040 
1041 	return ret;
1042 }
1043 
1044 /* disable a component on vc */
disable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1045 static int disable_component(struct vchiq_mmal_instance *instance,
1046 			     struct vchiq_mmal_component *component)
1047 {
1048 	int ret;
1049 	struct mmal_msg m;
1050 	struct mmal_msg *rmsg;
1051 	struct vchiq_header *rmsg_handle;
1052 
1053 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1054 	m.u.component_disable.component_handle = component->handle;
1055 
1056 	ret = send_synchronous_mmal_msg(instance, &m,
1057 					sizeof(m.u.component_disable),
1058 					&rmsg, &rmsg_handle);
1059 	if (ret)
1060 		return ret;
1061 
1062 	if (rmsg->h.type != m.h.type) {
1063 		/* got an unexpected message type in reply */
1064 		ret = -EINVAL;
1065 		goto release_msg;
1066 	}
1067 
1068 	ret = -rmsg->u.component_disable_reply.status;
1069 
1070 release_msg:
1071 
1072 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1073 
1074 	return ret;
1075 }
1076 
1077 /* get version of mmal implementation */
get_version(struct vchiq_mmal_instance * instance,u32 * major_out,u32 * minor_out)1078 static int get_version(struct vchiq_mmal_instance *instance,
1079 		       u32 *major_out, u32 *minor_out)
1080 {
1081 	int ret;
1082 	struct mmal_msg m;
1083 	struct mmal_msg *rmsg;
1084 	struct vchiq_header *rmsg_handle;
1085 
1086 	m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1087 
1088 	ret = send_synchronous_mmal_msg(instance, &m,
1089 					sizeof(m.u.version),
1090 					&rmsg, &rmsg_handle);
1091 	if (ret)
1092 		return ret;
1093 
1094 	if (rmsg->h.type != m.h.type) {
1095 		/* got an unexpected message type in reply */
1096 		ret = -EINVAL;
1097 		goto release_msg;
1098 	}
1099 
1100 	*major_out = rmsg->u.version.major;
1101 	*minor_out = rmsg->u.version.minor;
1102 
1103 release_msg:
1104 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1105 
1106 	return ret;
1107 }
1108 
1109 /* do a port action with a port as a parameter */
port_action_port(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type)1110 static int port_action_port(struct vchiq_mmal_instance *instance,
1111 			    struct vchiq_mmal_port *port,
1112 			    enum mmal_msg_port_action_type action_type)
1113 {
1114 	int ret;
1115 	struct mmal_msg m;
1116 	struct mmal_msg *rmsg;
1117 	struct vchiq_header *rmsg_handle;
1118 
1119 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1120 	m.u.port_action_port.component_handle = port->component->handle;
1121 	m.u.port_action_port.port_handle = port->handle;
1122 	m.u.port_action_port.action = action_type;
1123 
1124 	port_to_mmal_msg(port, &m.u.port_action_port.port);
1125 
1126 	ret = send_synchronous_mmal_msg(instance, &m,
1127 					sizeof(m.u.port_action_port),
1128 					&rmsg, &rmsg_handle);
1129 	if (ret)
1130 		return ret;
1131 
1132 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1133 		/* got an unexpected message type in reply */
1134 		ret = -EINVAL;
1135 		goto release_msg;
1136 	}
1137 
1138 	ret = -rmsg->u.port_action_reply.status;
1139 
1140 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1141 		 __func__,
1142 		 ret, port->component->handle, port->handle,
1143 		 port_action_type_names[action_type], action_type);
1144 
1145 release_msg:
1146 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1147 
1148 	return ret;
1149 }
1150 
1151 /* do a port action with handles as parameters */
port_action_handle(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type,u32 connect_component_handle,u32 connect_port_handle)1152 static int port_action_handle(struct vchiq_mmal_instance *instance,
1153 			      struct vchiq_mmal_port *port,
1154 			      enum mmal_msg_port_action_type action_type,
1155 			      u32 connect_component_handle,
1156 			      u32 connect_port_handle)
1157 {
1158 	int ret;
1159 	struct mmal_msg m;
1160 	struct mmal_msg *rmsg;
1161 	struct vchiq_header *rmsg_handle;
1162 
1163 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1164 
1165 	m.u.port_action_handle.component_handle = port->component->handle;
1166 	m.u.port_action_handle.port_handle = port->handle;
1167 	m.u.port_action_handle.action = action_type;
1168 
1169 	m.u.port_action_handle.connect_component_handle =
1170 	    connect_component_handle;
1171 	m.u.port_action_handle.connect_port_handle = connect_port_handle;
1172 
1173 	ret = send_synchronous_mmal_msg(instance, &m,
1174 					sizeof(m.u.port_action_handle),
1175 					&rmsg, &rmsg_handle);
1176 	if (ret)
1177 		return ret;
1178 
1179 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1180 		/* got an unexpected message type in reply */
1181 		ret = -EINVAL;
1182 		goto release_msg;
1183 	}
1184 
1185 	ret = -rmsg->u.port_action_reply.status;
1186 
1187 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1188 		 __func__,
1189 		 ret, port->component->handle, port->handle,
1190 		 port_action_type_names[action_type],
1191 		 action_type, connect_component_handle, connect_port_handle);
1192 
1193 release_msg:
1194 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1195 
1196 	return ret;
1197 }
1198 
/* set a parameter on a port
 *
 * @value: parameter payload, copied into the request message.
 * @value_size: size of @value in bytes.
 *
 * Returns 0 on success, negated MMAL status on VPU failure, or a
 * negative errno on transport/protocol failure or oversized payload.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* Guard the memcpy below: the value buffer embedded in the
	 * message is fixed-size, so an oversized parameter would
	 * overflow the stack-allocated message.
	 */
	if (value_size > sizeof(m.u.port_parameter_set.value))
		return -EINVAL;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	/* wire size includes the two u32 id+size header words */
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&m.u.port_parameter_set.value, value, value_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1239 
/* get a parameter from a port
 *
 * @value: buffer the parameter payload is copied into.
 * @value_size: in: capacity of @value in bytes;
 *              out: true size of the parameter as reported by the VPU
 *              (may exceed the capacity, in which case the copy was
 *              truncated to the capacity).
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* wire size includes the two u32 id+size header words */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	/* NOTE(review): unlike port_parameter_set() the VPU status is
	 * returned un-negated here, so callers see a positive MMAL status
	 * on failure - confirm this asymmetry is intended.
	 */
	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1298 
/* disables a port and drains buffers from it
 *
 * Caller must hold instance->vchiq_mutex. Returns 0 if the port was
 * already disabled, otherwise the result of the DISABLE action /
 * subsequent port info refresh.
 */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	/* clear the flag before issuing the action so no further buffers
	 * are submitted while the disable is in flight
	 */
	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				/* hand the buffer back empty */
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				/* NOTE(review): callback is invoked with
				 * slock held and interrupts disabled -
				 * confirm all buffer_cb implementations
				 * are safe in that context.
				 */
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* refresh the cached port state from the VPU */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1348 
/* enable a port
 *
 * Issues the ENABLE action and, when a buffer callback is registered,
 * submits buffers queued on the port (at most current_buffer.num of
 * them) to the VPU. Caller must hold instance->vchiq_mutex.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			/* only unlink the buffer once the VPU accepted it */
			list_del(buf_head);
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* read back the port state as now seen by the VPU */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1391 
1392 /* ------------------------------------------------------------------
1393  * Exported API
1394  *------------------------------------------------------------------
1395  */
1396 
vchiq_mmal_port_set_format(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1397 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1398 			       struct vchiq_mmal_port *port)
1399 {
1400 	int ret;
1401 
1402 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1403 		return -EINTR;
1404 
1405 	ret = port_info_set(instance, port);
1406 	if (ret)
1407 		goto release_unlock;
1408 
1409 	/* read what has actually been set */
1410 	ret = port_info_get(instance, port);
1411 
1412 release_unlock:
1413 	mutex_unlock(&instance->vchiq_mutex);
1414 
1415 	return ret;
1416 }
1417 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1418 
/* set a port parameter, serialised against other MMAL operations */
int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = port_parameter_set(instance, port, parameter,
				    value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1435 
/* get a port parameter, serialised against other MMAL operations.
 * @value_size is in/out: capacity in, true parameter size out.
 */
int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = port_parameter_get(instance, port, parameter,
				    value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1452 
1453 /* enable a port
1454  *
1455  * enables a port and queues buffers for satisfying callbacks if we
1456  * provide a callback handler
1457  */
vchiq_mmal_port_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,vchiq_mmal_buffer_cb buffer_cb)1458 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1459 			   struct vchiq_mmal_port *port,
1460 			   vchiq_mmal_buffer_cb buffer_cb)
1461 {
1462 	int ret;
1463 
1464 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1465 		return -EINTR;
1466 
1467 	/* already enabled - noop */
1468 	if (port->enabled) {
1469 		ret = 0;
1470 		goto unlock;
1471 	}
1472 
1473 	port->buffer_cb = buffer_cb;
1474 
1475 	ret = port_enable(instance, port);
1476 
1477 unlock:
1478 	mutex_unlock(&instance->vchiq_mutex);
1479 
1480 	return ret;
1481 }
1482 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1483 
vchiq_mmal_port_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1484 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1485 			    struct vchiq_mmal_port *port)
1486 {
1487 	int ret;
1488 
1489 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1490 		return -EINTR;
1491 
1492 	if (!port->enabled) {
1493 		mutex_unlock(&instance->vchiq_mutex);
1494 		return 0;
1495 	}
1496 
1497 	ret = port_disable(instance, port);
1498 
1499 	mutex_unlock(&instance->vchiq_mutex);
1500 
1501 	return ret;
1502 }
1503 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1504 
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Disconnects @src from any existing peer first; when @dst is NULL the
 * function stops there (pure disconnect). Otherwise @src's format is
 * copied to @dst, applied, and the two ports are tunnel-connected.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* the peer was implicitly disabled by the disconnect */
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst
	 * NOTE(review): only video elementary-stream fields are copied -
	 * presumably tunnels are always video ports; confirm for other
	 * stream types.
	 */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
	dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1592 
/* submit a buffer to a port; if the port is not yet enabled the buffer
 * is queued on the port and submitted when port_enable() runs.
 */
int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_port *port,
			     struct mmal_buffer *buffer)
{
	unsigned long flags = 0;
	int ret;

	ret = buffer_from_host(instance, port, buffer);
	if (ret == -EINVAL) {
		/* Port is disabled. Queue for when it is enabled. */
		spin_lock_irqsave(&port->slock, flags);
		list_add_tail(&buffer->list, &port->buffers);
		spin_unlock_irqrestore(&port->slock, flags);
	}

	/* NOTE(review): errors other than -EINVAL from buffer_from_host()
	 * are discarded and 0 is returned - confirm callers never need to
	 * see them.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1611 
mmal_vchi_buffer_init(struct vchiq_mmal_instance * instance,struct mmal_buffer * buf)1612 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1613 			  struct mmal_buffer *buf)
1614 {
1615 	struct mmal_msg_context *msg_context = get_msg_context(instance);
1616 
1617 	if (IS_ERR(msg_context))
1618 		return (PTR_ERR(msg_context));
1619 
1620 	buf->msg_context = msg_context;
1621 	return 0;
1622 }
1623 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1624 
mmal_vchi_buffer_cleanup(struct mmal_buffer * buf)1625 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1626 {
1627 	struct mmal_msg_context *msg_context = buf->msg_context;
1628 
1629 	if (msg_context)
1630 		release_msg_context(msg_context);
1631 	buf->msg_context = NULL;
1632 
1633 	return 0;
1634 }
1635 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1636 
/* Initialise a mmal component and its ports
 *
 * Claims a slot in the instance's fixed component array, creates the
 * component on the VPU, then gathers port info for the control port and
 * every input/output/clock port. On any failure after creation the VPU
 * component is destroyed and the slot released.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;		/* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* claim the first unused slot */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = true;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* idx is reused as a port counter below; the slot index was
	 * already saved in client_component above
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	destroy_component(instance, component);
unlock:
	if (component)
		component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1736 
1737 /*
1738  * cause a mmal component to be destroyed
1739  */
vchiq_mmal_component_finalise(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1740 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1741 				  struct vchiq_mmal_component *component)
1742 {
1743 	int ret;
1744 
1745 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1746 		return -EINTR;
1747 
1748 	if (component->enabled)
1749 		ret = disable_component(instance, component);
1750 
1751 	ret = destroy_component(instance, component);
1752 
1753 	component->in_use = false;
1754 
1755 	mutex_unlock(&instance->vchiq_mutex);
1756 
1757 	return ret;
1758 }
1759 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1760 
1761 /*
1762  * cause a mmal component to be enabled
1763  */
vchiq_mmal_component_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1764 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1765 				struct vchiq_mmal_component *component)
1766 {
1767 	int ret;
1768 
1769 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1770 		return -EINTR;
1771 
1772 	if (component->enabled) {
1773 		mutex_unlock(&instance->vchiq_mutex);
1774 		return 0;
1775 	}
1776 
1777 	ret = enable_component(instance, component);
1778 	if (ret == 0)
1779 		component->enabled = true;
1780 
1781 	mutex_unlock(&instance->vchiq_mutex);
1782 
1783 	return ret;
1784 }
1785 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1786 
1787 /*
1788  * cause a mmal component to be enabled
1789  */
vchiq_mmal_component_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1790 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1791 				 struct vchiq_mmal_component *component)
1792 {
1793 	int ret;
1794 
1795 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1796 		return -EINTR;
1797 
1798 	if (!component->enabled) {
1799 		mutex_unlock(&instance->vchiq_mutex);
1800 		return 0;
1801 	}
1802 
1803 	ret = disable_component(instance, component);
1804 	if (ret == 0)
1805 		component->enabled = false;
1806 
1807 	mutex_unlock(&instance->vchiq_mutex);
1808 
1809 	return ret;
1810 }
1811 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1812 
/* query the MMAL protocol version implemented by the VPU */
int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = get_version(instance, major_out, minor_out);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_version);
1828 
/* tear down the MMAL service connection and free the instance
 * created by vchiq_mmal_init(). Returns the vchiq close status.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* take a use count so the service is active for the close */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	vchiq_shutdown(instance->vchiq_instance);
	/* flush outstanding bulk-receive work before freeing */
	destroy_workqueue(instance->bulk_wq);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1857 
vchiq_mmal_init(struct device * dev,struct vchiq_mmal_instance ** out_instance)1858 int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance)
1859 {
1860 	int status;
1861 	int err = -ENODEV;
1862 	struct vchiq_mmal_instance *instance;
1863 	struct vchiq_instance *vchiq_instance;
1864 	struct vchiq_service_params_kernel params = {
1865 		.version		= VC_MMAL_VER,
1866 		.version_min		= VC_MMAL_MIN_VER,
1867 		.fourcc			= VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1868 		.callback		= mmal_service_callback,
1869 		.userdata		= NULL,
1870 	};
1871 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(dev->parent);
1872 
1873 	/* compile time checks to ensure structure size as they are
1874 	 * directly (de)serialised from memory.
1875 	 */
1876 
1877 	/* ensure the header structure has packed to the correct size */
1878 	BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1879 
1880 	/* ensure message structure does not exceed maximum length */
1881 	BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1882 
1883 	/* mmal port struct is correct size */
1884 	BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1885 
1886 	/* create a vchi instance */
1887 	status = vchiq_initialise(&mgmt->state, &vchiq_instance);
1888 	if (status) {
1889 		pr_err("Failed to initialise VCHI instance (status=%d)\n",
1890 		       status);
1891 		return -EIO;
1892 	}
1893 
1894 	status = vchiq_connect(vchiq_instance);
1895 	if (status) {
1896 		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1897 		err = -EIO;
1898 		goto err_shutdown_vchiq;
1899 	}
1900 
1901 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1902 
1903 	if (!instance) {
1904 		err = -ENOMEM;
1905 		goto err_shutdown_vchiq;
1906 	}
1907 
1908 	mutex_init(&instance->vchiq_mutex);
1909 
1910 	instance->vchiq_instance = vchiq_instance;
1911 
1912 	mutex_init(&instance->context_map_lock);
1913 	idr_init_base(&instance->context_map, 1);
1914 
1915 	params.userdata = instance;
1916 
1917 	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1918 						    WQ_MEM_RECLAIM);
1919 	if (!instance->bulk_wq)
1920 		goto err_free;
1921 
1922 	status = vchiq_open_service(vchiq_instance, &params,
1923 				    &instance->service_handle);
1924 	if (status) {
1925 		pr_err("Failed to open VCHI service connection (status=%d)\n",
1926 		       status);
1927 		goto err_close_services;
1928 	}
1929 
1930 	vchiq_release_service(instance->vchiq_instance, instance->service_handle);
1931 
1932 	*out_instance = instance;
1933 
1934 	return 0;
1935 
1936 err_close_services:
1937 	vchiq_close_service(instance->vchiq_instance, instance->service_handle);
1938 	destroy_workqueue(instance->bulk_wq);
1939 err_free:
1940 	kfree(instance);
1941 err_shutdown_vchiq:
1942 	vchiq_shutdown(vchiq_instance);
1943 	return err;
1944 }
1945 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
1946 
/* Module metadata for the MMAL VCHIQ interface driver. */
MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
MODULE_LICENSE("GPL");
1950