/* xref: /linux/drivers/media/platform/xilinx/xilinx-dma.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT		V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH		1920
#define XVIP_DMA_DEF_HEIGHT		1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH		1U
#define XVIP_DMA_MAX_WIDTH		65535U
#define XVIP_DMA_MIN_HEIGHT		1U
#define XVIP_DMA_MAX_HEIGHT		8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

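/*
 * Return the subdev connected to the given pad, or NULL if the remote entity
 * is not a V4L2 subdevice. When @pad is non-NULL it is set to the index of
 * the remote pad.
 */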
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(local);
	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

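/*
 * Check that the format configured on the video node matches the active
 * format on the connected subdev pad. Called before streaming starts.
 */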
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	int ret;

	subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
	if (subdev == NULL)
		return -EPIPE;

	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	if (dma->fmtinfo->code != fmt.format.code ||
	    dma->format.height != fmt.format.height ||
	    dma->format.width != fmt.format.width ||
	    dma->format.colorspace != fmt.format.colorspace)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the chain of entities starting at the pipeline output video node and
 * start or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
	struct xvip_dma *dma = pipe->output;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	entity = &dma->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_entity_remote_pad(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

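		/*
		 * Errors are only fatal when starting the pipeline; when
		 * stopping, keep walking so the remaining entities are still
		 * stopped. -ENOIOCTLCMD just means the subdev has no s_stream
		 * operation and is not treated as an error.
		 */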
		ret = v4l2_subdev_call(subdev, video, s_stream, start);
		if (start && ret < 0 && ret != -ENOIOCTLCMD)
			return ret;
	}

	return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected to its inputs and
 * outputs. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline, it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
	int ret = 0;

	mutex_lock(&pipe->lock);

	if (on) {
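		/*
		 * The last DMA engine to request streaming starts the whole
		 * pipeline; earlier callers only increment the count.
		 */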
		if (pipe->stream_count == pipe->num_dmas - 1) {
			ret = xvip_pipeline_start_stop(pipe, true);
			if (ret < 0)
				goto done;
		}
		pipe->stream_count++;
	} else {
		if (--pipe->stream_count == 0)
			xvip_pipeline_start_stop(pipe, false);
	}

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
				  struct xvip_dma *start)
{
	struct media_graph graph;
	struct media_entity *entity = &start->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	unsigned int num_inputs = 0;
	unsigned int num_outputs = 0;
	int ret;

	mutex_lock(&mdev->graph_mutex);

	/* Walk the graph to locate the video nodes. */
	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct xvip_dma *dma;

		if (entity->function != MEDIA_ENT_F_IO_V4L)
			continue;

		dma = to_xvip_dma(media_entity_to_video_device(entity));

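		/*
		 * A video node with a sink pad receives data from the pipeline
		 * and is thus the pipeline output (capture) DMA engine; source
		 * pads correspond to pipeline inputs.
		 */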
		if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
			pipe->output = dma;
			num_outputs++;
		} else {
			num_inputs++;
		}
	}

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	/* We need exactly one output and zero or one input. */
	if (num_outputs != 1 || num_inputs > 1)
		return -EPIPE;

	pipe->num_dmas = num_inputs + num_outputs;

	return 0;
}

static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	pipe->num_dmas = 0;
	pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
	mutex_lock(&pipe->lock);

	/* If we're the last user clean up the pipeline. */
	if (--pipe->use_count == 0)
		__xvip_pipeline_cleanup(pipe);

	mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
				 struct xvip_dma *dma)
{
	int ret;

	mutex_lock(&pipe->lock);

	/* If we're the first user validate and initialize the pipeline. */
	if (pipe->use_count == 0) {
		ret = xvip_pipeline_validate(pipe, dma);
		if (ret < 0) {
			__xvip_pipeline_cleanup(pipe);
			goto done;
		}
	}

	pipe->use_count++;
	ret = 0;

done:
	mutex_unlock(&pipe->lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
	struct vb2_v4l2_buffer buf;
	struct list_head queue;
	struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)	container_of(vb, struct xvip_dma_buffer, buf)

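/*
 * DMA engine completion callback: remove the buffer from the queued list and
 * return it to videobuf2 marked as done.
 */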
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	spin_lock(&dma->queued_lock);
	list_del(&buf->queue);
	spin_unlock(&dma->queued_lock);

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = dma->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

static int
xvip_dma_queue_setup(struct vb2_queue *vq,
		     unsigned int *nbuffers, unsigned int *nplanes,
		     unsigned int sizes[], struct device *alloc_devs[])
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);

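	/*
	 * A non-zero *nplanes means the plane sizes were supplied by the
	 * caller (VIDIOC_CREATE_BUFS); in that case only validate them.
	 */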
	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = dma->format.sizeimage;

	return 0;
}

static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

	buf->dma = dma;

	return 0;
}

static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
	struct dma_async_tx_descriptor *desc;
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	u32 flags;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_DEV_TO_MEM;
		dma->xt.src_sgl = false;
		dma->xt.dst_sgl = true;
		dma->xt.dst_start = addr;
	} else {
		flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
		dma->xt.dir = DMA_MEM_TO_DEV;
		dma->xt.src_sgl = true;
		dma->xt.dst_sgl = false;
		dma->xt.src_start = addr;
	}

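	/*
	 * Describe the frame as a single interleaved template: numf lines of
	 * one chunk each, where sgl[0].size is the active line length in bytes
	 * and sgl[0].icg is the gap needed to pad each line to bytesperline.
	 */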
	dma->xt.frame_size = 1;
	dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
	dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
	dma->xt.numf = dma->format.height;

	desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
	if (!desc) {
		dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}
	desc->callback = xvip_dma_complete;
	desc->callback_param = buf;

	spin_lock_irq(&dma->queued_lock);
	list_add_tail(&buf->queue, &dma->queued_bufs);
	spin_unlock_irq(&dma->queued_lock);

	dmaengine_submit(desc);

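	/*
	 * Only kick the engine if streaming has already started; otherwise the
	 * pending descriptors are issued by xvip_dma_start_streaming().
	 */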
	if (vb2_is_streaming(&dma->queue))
		dma_async_issue_pending(dma->dma);
}

static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_dma_buffer *buf, *nbuf;
	struct xvip_pipeline *pipe;
	int ret;

	dma->sequence = 0;

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 *
	 * Use the pipeline object embedded in the first DMA object that starts
	 * streaming.
	 */
	pipe = dma->video.entity.pipe
	     ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

	ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
	if (ret < 0)
		goto error;

	/* Verify that the configured format matches the output of the
	 * connected subdev.
	 */
	ret = xvip_dma_verify_format(dma);
	if (ret < 0)
		goto error_stop;

	ret = xvip_pipeline_prepare(pipe, dma);
	if (ret < 0)
		goto error_stop;

	/* Start the DMA engine. This must be done before starting the blocks
	 * in the pipeline to avoid DMA synchronization issues.
	 */
	dma_async_issue_pending(dma->dma);

	/* Start the pipeline. */
	xvip_pipeline_set_stream(pipe, true);

	return 0;

error_stop:
	media_pipeline_stop(&dma->video.entity);

error:
	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);

	return ret;
}

static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
	struct xvip_dma *dma = vb2_get_drv_priv(vq);
	struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
	struct xvip_dma_buffer *buf, *nbuf;

	/* Stop the pipeline. */
	xvip_pipeline_set_stream(pipe, false);

	/* Stop and reset the DMA engine. */
	dmaengine_terminate_all(dma->dma);

	/* Cleanup the pipeline and mark it as being stopped. */
	xvip_pipeline_cleanup(pipe);
	media_pipeline_stop(&dma->video.entity);

	/* Give back all queued buffers to videobuf2. */
	spin_lock_irq(&dma->queued_lock);
	list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->queue);
	}
	spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
	.queue_setup = xvip_dma_queue_setup,
	.buf_prepare = xvip_dma_buffer_prepare,
	.buf_queue = xvip_dma_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = xvip_dma_start_streaming,
	.stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | dma->xdev->v4l2_caps;

	if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
	strlcpy(cap->card, dma->video.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
		 dma->xdev->dev->of_node->name, dma->port);

	return 0;
}

/* FIXME: Without this callback function, some applications are not configured
 * with the correct format, resulting in frames in the wrong format. Whether
 * this callback is required is not clearly defined, so it should be clarified
 * through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	if (f->index > 0)
		return -EINVAL;

	f->pixelformat = dma->format.pixelformat;
	strlcpy(f->description, dma->fmtinfo->description,
		sizeof(f->description));

	return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	format->fmt.pix = dma->format;

	return 0;
}

static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
		      const struct xvip_video_format **fmtinfo)
{
	const struct xvip_video_format *info;
	unsigned int min_width;
	unsigned int max_width;
	unsigned int min_bpl;
	unsigned int max_bpl;
	unsigned int width;
	unsigned int align;
	unsigned int bpl;

	/* Retrieve format information and select the default format if the
	 * requested format isn't supported.
	 */
	info = xvip_get_format_by_fourcc(pix->pixelformat);
	if (IS_ERR(info))
		info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

	pix->pixelformat = info->fourcc;
	pix->field = V4L2_FIELD_NONE;

	/* The transfer alignment requirements are expressed in bytes. Compute
	 * the minimum and maximum values, clamp the requested width and convert
	 * it back to pixels.
	 */
	align = lcm(dma->align, info->bpp);
	min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
	max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
	width = rounddown(pix->width * info->bpp, align);

	pix->width = clamp(width, min_width, max_width) / info->bpp;
	pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
			    XVIP_DMA_MAX_HEIGHT);

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	min_bpl = pix->width * info->bpp;
	max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
	bpl = rounddown(pix->bytesperline, dma->align);

	pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
	pix->sizeimage = pix->bytesperline * pix->height;

	if (fmtinfo)
		*fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

	__xvip_dma_try_format(dma, &format->fmt.pix, NULL);
	return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
	const struct xvip_video_format *info;

	__xvip_dma_try_format(dma, &format->fmt.pix, &info);

	if (vb2_is_busy(&dma->queue))
		return -EBUSY;

	dma->format = format->fmt.pix;
	dma->fmtinfo = info;

	return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
	.vidioc_querycap		= xvip_dma_querycap,
	.vidioc_enum_fmt_vid_cap	= xvip_dma_enum_format,
	.vidioc_g_fmt_vid_cap		= xvip_dma_get_format,
	.vidioc_g_fmt_vid_out		= xvip_dma_get_format,
	.vidioc_s_fmt_vid_cap		= xvip_dma_set_format,
	.vidioc_s_fmt_vid_out		= xvip_dma_set_format,
	.vidioc_try_fmt_vid_cap		= xvip_dma_try_format,
	.vidioc_try_fmt_vid_out		= xvip_dma_try_format,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
		  enum v4l2_buf_type type, unsigned int port)
{
	char name[16];
	int ret;

	dma->xdev = xdev;
	dma->port = port;
	mutex_init(&dma->lock);
	mutex_init(&dma->pipe.lock);
	INIT_LIST_HEAD(&dma->queued_bufs);
	spin_lock_init(&dma->queued_lock);

	dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
	dma->format.pixelformat = dma->fmtinfo->fourcc;
	dma->format.colorspace = V4L2_COLORSPACE_SRGB;
	dma->format.field = V4L2_FIELD_NONE;
	dma->format.width = XVIP_DMA_DEF_WIDTH;
	dma->format.height = XVIP_DMA_DEF_HEIGHT;
	dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
	dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

	/* Initialize the media entity... */
	dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
		       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
	if (ret < 0)
		goto error;

	/* ... and the video node... */
	dma->video.fops = &xvip_dma_fops;
	dma->video.v4l2_dev = &xdev->v4l2_dev;
	dma->video.queue = &dma->queue;
	snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
		 xdev->dev->of_node->name,
		 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
		 port);
	dma->video.vfl_type = VFL_TYPE_GRABBER;
	dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
			   ? VFL_DIR_RX : VFL_DIR_TX;
	dma->video.release = video_device_release_empty;
	dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
	dma->video.lock = &dma->lock;

	video_set_drvdata(&dma->video, dma);

	/* ... and the buffers queue... */
	/* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
	 * V4L2 APIs would be inefficient. Testing on the command line with
	 * 'cat /dev/video?' thus won't be possible, but given that the driver
	 * requires a test tool to set up the pipeline before any video stream
	 * can be started anyway, requiring a specific V4L2 test tool instead
	 * of 'cat' isn't really a drawback.
	 */
	dma->queue.type = type;
	dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dma->queue.lock = &dma->lock;
	dma->queue.drv_priv = dma;
	dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
	dma->queue.ops = &xvip_dma_queue_qops;
	dma->queue.mem_ops = &vb2_dma_contig_memops;
	dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
				   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	dma->queue.dev = dma->xdev->dev;
	ret = vb2_queue_init(&dma->queue);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
		goto error;
	}

	/* ... and the DMA channel. */
	snprintf(name, sizeof(name), "port%u", port);
	dma->dma = dma_request_slave_channel(dma->xdev->dev, name);
	if (dma->dma == NULL) {
		dev_err(dma->xdev->dev, "no VDMA channel found\n");
		ret = -ENODEV;
		goto error;
	}

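	/*
	 * copy_align is expressed by the DMA engine as a log2 value; convert
	 * it to an alignment in bytes.
	 */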
	dma->align = 1 << dma->dma->device->copy_align;

	ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0) {
		dev_err(dma->xdev->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	xvip_dma_cleanup(dma);
	return ret;
}

void xvip_dma_cleanup(struct xvip_dma *dma)
{
	if (video_is_registered(&dma->video))
		video_unregister_device(&dma->video);

	if (dma->dma)
		dma_release_channel(dma->dma);

	media_entity_cleanup(&dma->video.entity);

	mutex_destroy(&dma->lock);
	mutex_destroy(&dma->pipe.lock);
}
768