1 /*
2  * videobuf2-v4l2.c - V4L2 driver helper framework
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Pawel Osciak <pawel@osciak.com>
7  *	   Marek Szyprowski <m.szyprowski@samsung.com>
8  *
9  * The vb2_thread implementation was based on code from videobuf-dvb.c:
10  *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation.
15  */
16 
17 #include <linux/err.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/mm.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/freezer.h>
25 #include <linux/kthread.h>
26 
27 #include <media/v4l2-dev.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-fh.h>
30 #include <media/v4l2-event.h>
31 #include <media/v4l2-common.h>
32 
33 #include <media/videobuf2-v4l2.h>
34 
35 static int debug;
36 module_param(debug, int, 0644);
37 
38 #define dprintk(level, fmt, arg...)					      \
39 	do {								      \
40 		if (debug >= level)					      \
41 			pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \
42 	} while (0)
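
/*
 * Note: the dprintk() messages below are silent by default; they can be
 * enabled at runtime by writing a debug level to the "debug" module
 * parameter, e.g. via /sys/module/videobuf2_v4l2/parameters/debug
 * (assuming this file is built as the videobuf2-v4l2 module).
 */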
43 
44 /* Flags that are set by us */
45 #define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
46 				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
47 				 V4L2_BUF_FLAG_PREPARED | \
48 				 V4L2_BUF_FLAG_IN_REQUEST | \
49 				 V4L2_BUF_FLAG_REQUEST_FD | \
50 				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
51 /* Output buffer flags that should be passed on to the driver */
52 #define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
53 				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
54 
55 /*
56  * __verify_planes_array() - verify that the planes array passed in struct
57  * v4l2_buffer from userspace can be safely used
58  */
59 static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
60 {
61 	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
62 		return 0;
63 
64 	/* Is memory for copying plane information present? */
65 	if (b->m.planes == NULL) {
66 		dprintk(1, "multi-planar buffer passed but planes array not provided\n");
67 		return -EINVAL;
68 	}
69 
70 	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
71 		dprintk(1, "incorrect planes array length, expected %d, got %d\n",
72 			vb->num_planes, b->length);
73 		return -EINVAL;
74 	}
75 
76 	return 0;
77 }
78 
79 static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
80 {
81 	return __verify_planes_array(vb, pb);
82 }
83 
84 /*
85  * __verify_length() - Verify that the bytesused value for each plane fits in
86  * the plane length and that the data offset doesn't exceed the bytesused value.
87  */
88 static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
89 {
90 	unsigned int length;
91 	unsigned int bytesused;
92 	unsigned int plane;
93 
94 	if (!V4L2_TYPE_IS_OUTPUT(b->type))
95 		return 0;
96 
97 	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
98 		for (plane = 0; plane < vb->num_planes; ++plane) {
99 			length = (b->memory == VB2_MEMORY_USERPTR ||
100 				  b->memory == VB2_MEMORY_DMABUF)
101 			       ? b->m.planes[plane].length
102 				: vb->planes[plane].length;
103 			bytesused = b->m.planes[plane].bytesused
104 				  ? b->m.planes[plane].bytesused : length;
105 
106 			if (b->m.planes[plane].bytesused > length)
107 				return -EINVAL;
108 
109 			if (b->m.planes[plane].data_offset > 0 &&
110 			    b->m.planes[plane].data_offset >= bytesused)
111 				return -EINVAL;
112 		}
113 	} else {
114 		length = (b->memory == VB2_MEMORY_USERPTR)
115 			? b->length : vb->planes[0].length;
116 
117 		if (b->bytesused > length)
118 			return -EINVAL;
119 	}
120 
121 	return 0;
122 }
123 
124 /*
125  * __init_v4l2_vb2_buffer() - initialize the v4l2_vb2_buffer struct
126  */
127 static void __init_v4l2_vb2_buffer(struct vb2_buffer *vb)
128 {
129 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
130 
131 	vbuf->request_fd = -1;
132 }
133 
134 static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
135 {
136 	const struct v4l2_buffer *b = pb;
137 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
138 	struct vb2_queue *q = vb->vb2_queue;
139 
140 	if (q->is_output) {
141 		/*
142 		 * For output buffers copy the timestamp if needed,
143 		 * and the timecode field and flag if needed.
144 		 */
145 		if (q->copy_timestamp)
146 			vb->timestamp = timeval_to_ns(&b->timestamp);
147 		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
148 		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
149 			vbuf->timecode = b->timecode;
150 	}
151 }
152 
153 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
154 {
155 	static bool check_once;
156 
157 	if (check_once)
158 		return;
159 
160 	check_once = true;
161 	WARN_ON(1);
162 
163 	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
164 	if (vb->vb2_queue->allow_zero_bytesused)
165 		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
166 	else
167 		pr_warn("use the actual size instead.\n");
168 }
169 
170 static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
171 {
172 	struct vb2_queue *q = vb->vb2_queue;
173 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
174 	struct vb2_plane *planes = vbuf->planes;
175 	unsigned int plane;
176 	int ret;
177 
178 	ret = __verify_length(vb, b);
179 	if (ret < 0) {
180 		dprintk(1, "plane parameters verification failed: %d\n", ret);
181 		return ret;
182 	}
183 	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
184 		/*
185 		 * If the format's field is ALTERNATE, then the buffer's field
186 		 * should be either TOP or BOTTOM, not ALTERNATE since that
187 		 * makes no sense. The driver has to know whether the
188 		 * buffer represents a top or a bottom field in order to
189 		 * program any DMA correctly. Using ALTERNATE is wrong, since
190 		 * that just says that it is either a top or a bottom field,
191 		 * but not which of the two it is.
192 		 */
193 		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
194 		return -EINVAL;
195 	}
196 	vbuf->sequence = 0;
197 	vbuf->request_fd = -1;
198 
199 	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
200 		switch (b->memory) {
201 		case VB2_MEMORY_USERPTR:
202 			for (plane = 0; plane < vb->num_planes; ++plane) {
203 				planes[plane].m.userptr =
204 					b->m.planes[plane].m.userptr;
205 				planes[plane].length =
206 					b->m.planes[plane].length;
207 			}
208 			break;
209 		case VB2_MEMORY_DMABUF:
210 			for (plane = 0; plane < vb->num_planes; ++plane) {
211 				planes[plane].m.fd =
212 					b->m.planes[plane].m.fd;
213 				planes[plane].length =
214 					b->m.planes[plane].length;
215 			}
216 			break;
217 		default:
218 			for (plane = 0; plane < vb->num_planes; ++plane) {
219 				planes[plane].m.offset =
220 					vb->planes[plane].m.offset;
221 				planes[plane].length =
222 					vb->planes[plane].length;
223 			}
224 			break;
225 		}
226 
227 		/* Fill in driver-provided information for OUTPUT types */
228 		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
229 			/*
230 			 * Will have to go up to b->length when the API starts
231 			 * accepting a variable number of planes.
232 			 *
233 			 * If bytesused == 0 for the output buffer, then fall
234 			 * back to the full buffer size. In that case
235 			 * userspace clearly never bothered to set it and
236 			 * it's a safe assumption that they really meant to
237 			 * use the full plane sizes.
238 			 *
239 			 * Some drivers, e.g. old codec drivers, use bytesused == 0
240 			 * as a way to indicate that streaming is finished.
241 			 * In that case, the driver should use the
242 			 * allow_zero_bytesused flag to keep old userspace
243 			 * applications working.
244 			 */
245 			for (plane = 0; plane < vb->num_planes; ++plane) {
246 				struct vb2_plane *pdst = &planes[plane];
247 				struct v4l2_plane *psrc = &b->m.planes[plane];
248 
249 				if (psrc->bytesused == 0)
250 					vb2_warn_zero_bytesused(vb);
251 
252 				if (vb->vb2_queue->allow_zero_bytesused)
253 					pdst->bytesused = psrc->bytesused;
254 				else
255 					pdst->bytesused = psrc->bytesused ?
256 						psrc->bytesused : pdst->length;
257 				pdst->data_offset = psrc->data_offset;
258 			}
259 		}
260 	} else {
261 		/*
262 		 * Single-planar buffers do not use planes array,
263 		 * so fill in the relevant v4l2_buffer struct fields instead.
264 		 * In videobuf we use our internal v4l2_planes struct for
265 		 * single-planar buffers as well, for simplicity.
266 		 *
267 		 * If bytesused == 0 for the output buffer, then fall back
268 		 * to the full buffer size as that's a sensible default.
269 		 *
270 		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
271 		 * a way to indicate that streaming is finished. In that case,
272 		 * the driver should use the allow_zero_bytesused flag to keep
273 		 * old userspace applications working.
274 		 */
275 		switch (b->memory) {
276 		case VB2_MEMORY_USERPTR:
277 			planes[0].m.userptr = b->m.userptr;
278 			planes[0].length = b->length;
279 			break;
280 		case VB2_MEMORY_DMABUF:
281 			planes[0].m.fd = b->m.fd;
282 			planes[0].length = b->length;
283 			break;
284 		default:
285 			planes[0].m.offset = vb->planes[0].m.offset;
286 			planes[0].length = vb->planes[0].length;
287 			break;
288 		}
289 
290 		planes[0].data_offset = 0;
291 		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
292 			if (b->bytesused == 0)
293 				vb2_warn_zero_bytesused(vb);
294 
295 			if (vb->vb2_queue->allow_zero_bytesused)
296 				planes[0].bytesused = b->bytesused;
297 			else
298 				planes[0].bytesused = b->bytesused ?
299 					b->bytesused : planes[0].length;
300 		} else
301 			planes[0].bytesused = 0;
302 
303 	}
304 
305 	/* Zero flags that we handle */
306 	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
307 	if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
308 		/*
309 		 * Non-COPY timestamps and non-OUTPUT queues will get
310 		 * their timestamp and timestamp source flags from the
311 		 * queue.
312 		 */
313 		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
314 	}
315 
316 	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
317 		/*
318 		 * For output buffers mask out the timecode flag:
319 		 * this will be handled later in vb2_qbuf().
320 		 * The 'field' is valid metadata for this output buffer
321 		 * and so that needs to be copied here.
322 		 */
323 		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
324 		vbuf->field = b->field;
325 	} else {
326 		/* Zero any output buffer flags as this is a capture buffer */
327 		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
328 		/* Zero the last flag; this is a signal from the driver to userspace */
329 		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
330 	}
331 
332 	return 0;
333 }
334 
335 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
336 				    struct v4l2_buffer *b,
337 				    const char *opname,
338 				    struct media_request **p_req)
339 {
340 	struct media_request *req;
341 	struct vb2_v4l2_buffer *vbuf;
342 	struct vb2_buffer *vb;
343 	int ret;
344 
345 	if (b->type != q->type) {
346 		dprintk(1, "%s: invalid buffer type\n", opname);
347 		return -EINVAL;
348 	}
349 
350 	if (b->index >= q->num_buffers) {
351 		dprintk(1, "%s: buffer index out of range\n", opname);
352 		return -EINVAL;
353 	}
354 
355 	if (q->bufs[b->index] == NULL) {
356 		/* Should never happen */
357 		dprintk(1, "%s: buffer is NULL\n", opname);
358 		return -EINVAL;
359 	}
360 
361 	if (b->memory != q->memory) {
362 		dprintk(1, "%s: invalid memory type\n", opname);
363 		return -EINVAL;
364 	}
365 
366 	vb = q->bufs[b->index];
367 	vbuf = to_vb2_v4l2_buffer(vb);
368 	ret = __verify_planes_array(vb, b);
369 	if (ret)
370 		return ret;
371 
372 	if (!vb->prepared) {
373 		/* Copy relevant information provided by the userspace */
374 		memset(vbuf->planes, 0,
375 		       sizeof(vbuf->planes[0]) * vb->num_planes);
376 		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
377 		if (ret)
378 			return ret;
379 	}
380 
381 	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
382 		if (q->uses_requests) {
383 			dprintk(1, "%s: queue uses requests\n", opname);
384 			return -EPERM;
385 		}
386 		return 0;
387 	} else if (q->uses_qbuf) {
388 		dprintk(1, "%s: queue does not use requests\n", opname);
389 		return -EPERM;
390 	}
391 
392 	/*
393 	 * For proper locking when queueing a request you need to be able
394 	 * to lock access to the vb2 queue, so check that there is a lock
395 	 * that we can use. In addition p_req must be non-NULL.
396 	 */
397 	if (WARN_ON(!q->lock || !p_req))
398 		return -EINVAL;
399 
400 	/*
401 	 * Make sure this op is implemented by the driver. It's easy to forget
402 	 * this callback, but it is important when canceling a buffer in a
403 	 * queued request.
404 	 */
405 	if (WARN_ON(!q->ops->buf_request_complete))
406 		return -EINVAL;
407 
408 	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
409 		dprintk(1, "%s: buffer is not in dequeued state\n", opname);
410 		return -EINVAL;
411 	}
412 
413 	if (b->request_fd < 0) {
414 		dprintk(1, "%s: request_fd < 0\n", opname);
415 		return -EINVAL;
416 	}
417 
418 	req = media_request_get_by_fd(mdev, b->request_fd);
419 	if (IS_ERR(req)) {
420 		dprintk(1, "%s: invalid request_fd\n", opname);
421 		return PTR_ERR(req);
422 	}
423 
424 	/*
425 	 * Early sanity check. This is checked again when the buffer
426 	 * is bound to the request in vb2_core_qbuf().
427 	 */
428 	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
429 	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
430 		dprintk(1, "%s: request is not idle\n", opname);
431 		media_request_put(req);
432 		return -EBUSY;
433 	}
434 
435 	*p_req = req;
436 	vbuf->request_fd = b->request_fd;
437 
438 	return 0;
439 }
440 
441 /*
442  * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
443  * returned to userspace
444  */
445 static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
446 {
447 	struct v4l2_buffer *b = pb;
448 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
449 	struct vb2_queue *q = vb->vb2_queue;
450 	unsigned int plane;
451 
452 	/* Copy back data such as timestamp, flags, etc. */
453 	b->index = vb->index;
454 	b->type = vb->type;
455 	b->memory = vb->memory;
456 	b->bytesused = 0;
457 
458 	b->flags = vbuf->flags;
459 	b->field = vbuf->field;
460 	b->timestamp = ns_to_timeval(vb->timestamp);
461 	b->timecode = vbuf->timecode;
462 	b->sequence = vbuf->sequence;
463 	b->reserved2 = 0;
464 	b->request_fd = 0;
465 
466 	if (q->is_multiplanar) {
467 		/*
468 		 * Fill in plane-related data if userspace provided an array
469 		 * for it. The caller has already verified memory and size.
470 		 */
471 		b->length = vb->num_planes;
472 		for (plane = 0; plane < vb->num_planes; ++plane) {
473 			struct v4l2_plane *pdst = &b->m.planes[plane];
474 			struct vb2_plane *psrc = &vb->planes[plane];
475 
476 			pdst->bytesused = psrc->bytesused;
477 			pdst->length = psrc->length;
478 			if (q->memory == VB2_MEMORY_MMAP)
479 				pdst->m.mem_offset = psrc->m.offset;
480 			else if (q->memory == VB2_MEMORY_USERPTR)
481 				pdst->m.userptr = psrc->m.userptr;
482 			else if (q->memory == VB2_MEMORY_DMABUF)
483 				pdst->m.fd = psrc->m.fd;
484 			pdst->data_offset = psrc->data_offset;
485 			memset(pdst->reserved, 0, sizeof(pdst->reserved));
486 		}
487 	} else {
488 		/*
489 		 * We use length and offset in the v4l2_planes array even for
490 		 * single-planar buffers, but userspace does not.
491 		 */
492 		b->length = vb->planes[0].length;
493 		b->bytesused = vb->planes[0].bytesused;
494 		if (q->memory == VB2_MEMORY_MMAP)
495 			b->m.offset = vb->planes[0].m.offset;
496 		else if (q->memory == VB2_MEMORY_USERPTR)
497 			b->m.userptr = vb->planes[0].m.userptr;
498 		else if (q->memory == VB2_MEMORY_DMABUF)
499 			b->m.fd = vb->planes[0].m.fd;
500 	}
501 
502 	/*
503 	 * Clear any buffer state related flags.
504 	 */
505 	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
506 	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
507 	if (!q->copy_timestamp) {
508 		/*
509 		 * For non-COPY timestamps, drop timestamp source bits
510 		 * and obtain the timestamp source from the queue.
511 		 */
512 		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
513 		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
514 	}
515 
516 	switch (vb->state) {
517 	case VB2_BUF_STATE_QUEUED:
518 	case VB2_BUF_STATE_ACTIVE:
519 		b->flags |= V4L2_BUF_FLAG_QUEUED;
520 		break;
521 	case VB2_BUF_STATE_IN_REQUEST:
522 		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
523 		break;
524 	case VB2_BUF_STATE_ERROR:
525 		b->flags |= V4L2_BUF_FLAG_ERROR;
526 		/* fall through */
527 	case VB2_BUF_STATE_DONE:
528 		b->flags |= V4L2_BUF_FLAG_DONE;
529 		break;
530 	case VB2_BUF_STATE_PREPARING:
531 	case VB2_BUF_STATE_DEQUEUED:
532 	case VB2_BUF_STATE_REQUEUEING:
533 		/* nothing */
534 		break;
535 	}
536 
537 	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
538 	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
539 	    vb->synced && vb->prepared)
540 		b->flags |= V4L2_BUF_FLAG_PREPARED;
541 
542 	if (vb2_buffer_in_use(q, vb))
543 		b->flags |= V4L2_BUF_FLAG_MAPPED;
544 	if (vbuf->request_fd >= 0) {
545 		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
546 		b->request_fd = vbuf->request_fd;
547 	}
548 
549 	if (!q->is_output &&
550 		b->flags & V4L2_BUF_FLAG_DONE &&
551 		b->flags & V4L2_BUF_FLAG_LAST)
552 		q->last_buffer_dequeued = true;
553 }
554 
555 /*
556  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
557  * v4l2_buffer by the userspace. It also verifies that struct
558  * v4l2_buffer has a valid number of planes.
559  */
560 static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
561 {
562 	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
563 	unsigned int plane;
564 
565 	if (!vb->vb2_queue->is_output || !vb->vb2_queue->copy_timestamp)
566 		vb->timestamp = 0;
567 
568 	for (plane = 0; plane < vb->num_planes; ++plane) {
569 		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
570 			planes[plane].m = vbuf->planes[plane].m;
571 			planes[plane].length = vbuf->planes[plane].length;
572 		}
573 		planes[plane].bytesused = vbuf->planes[plane].bytesused;
574 		planes[plane].data_offset = vbuf->planes[plane].data_offset;
575 	}
576 	return 0;
577 }
578 
579 static const struct vb2_buf_ops v4l2_buf_ops = {
580 	.verify_planes_array	= __verify_planes_array_core,
581 	.init_buffer		= __init_v4l2_vb2_buffer,
582 	.fill_user_buffer	= __fill_v4l2_buffer,
583 	.fill_vb2_buffer	= __fill_vb2_buffer,
584 	.copy_timestamp		= __copy_timestamp,
585 };
586 
587 /*
588  * vb2_querybuf() - query video buffer information
589  * @q:		videobuf queue
590  * @b:		buffer struct passed from userspace to vidioc_querybuf handler
591  *		in driver
592  *
593  * Should be called from vidioc_querybuf ioctl handler in driver.
594  * This function will verify the passed v4l2_buffer structure and fill the
595  * relevant information for the userspace.
596  *
597  * The return values from this function are intended to be directly returned
598  * from vidioc_querybuf handler in driver.
599  */
600 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
601 {
602 	struct vb2_buffer *vb;
603 	int ret;
604 
605 	if (b->type != q->type) {
606 		dprintk(1, "wrong buffer type\n");
607 		return -EINVAL;
608 	}
609 
610 	if (b->index >= q->num_buffers) {
611 		dprintk(1, "buffer index out of range\n");
612 		return -EINVAL;
613 	}
614 	vb = q->bufs[b->index];
615 	ret = __verify_planes_array(vb, b);
616 	if (!ret)
617 		vb2_core_querybuf(q, b->index, b);
618 	return ret;
619 }
620 EXPORT_SYMBOL(vb2_querybuf);
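
/*
 * Example of the intended use: a driver's vidioc_querybuf handler that just
 * forwards to vb2_querybuf(). This is only a sketch; "struct my_dev" and
 * "my_vidioc_querybuf" are placeholder names, and most drivers can simply
 * use the vb2_ioctl_querybuf() helper further down in this file instead.
 *
 *	static int my_vidioc_querybuf(struct file *file, void *priv,
 *				      struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_querybuf(&dev->queue, b);
 *	}
 */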
621 
622 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
623 {
624 	int ret = vb2_verify_memory_type(q, req->memory, req->type);
625 
626 	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
627 }
628 EXPORT_SYMBOL_GPL(vb2_reqbufs);
629 
630 int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
631 		    struct v4l2_buffer *b)
632 {
633 	int ret;
634 
635 	if (vb2_fileio_is_active(q)) {
636 		dprintk(1, "file io in progress\n");
637 		return -EBUSY;
638 	}
639 
640 	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
641 		return -EINVAL;
642 
643 	ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
644 
645 	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
646 }
647 EXPORT_SYMBOL_GPL(vb2_prepare_buf);
648 
649 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
650 {
651 	unsigned requested_planes = 1;
652 	unsigned requested_sizes[VIDEO_MAX_PLANES];
653 	struct v4l2_format *f = &create->format;
654 	int ret = vb2_verify_memory_type(q, create->memory, f->type);
655 	unsigned i;
656 
657 	create->index = q->num_buffers;
658 	if (create->count == 0)
659 		return ret != -EBUSY ? ret : 0;
660 
661 	switch (f->type) {
662 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
663 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
664 		requested_planes = f->fmt.pix_mp.num_planes;
665 		if (requested_planes == 0 ||
666 		    requested_planes > VIDEO_MAX_PLANES)
667 			return -EINVAL;
668 		for (i = 0; i < requested_planes; i++)
669 			requested_sizes[i] =
670 				f->fmt.pix_mp.plane_fmt[i].sizeimage;
671 		break;
672 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
673 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
674 		requested_sizes[0] = f->fmt.pix.sizeimage;
675 		break;
676 	case V4L2_BUF_TYPE_VBI_CAPTURE:
677 	case V4L2_BUF_TYPE_VBI_OUTPUT:
678 		requested_sizes[0] = f->fmt.vbi.samples_per_line *
679 			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
680 		break;
681 	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
682 	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
683 		requested_sizes[0] = f->fmt.sliced.io_size;
684 		break;
685 	case V4L2_BUF_TYPE_SDR_CAPTURE:
686 	case V4L2_BUF_TYPE_SDR_OUTPUT:
687 		requested_sizes[0] = f->fmt.sdr.buffersize;
688 		break;
689 	case V4L2_BUF_TYPE_META_CAPTURE:
690 		requested_sizes[0] = f->fmt.meta.buffersize;
691 		break;
692 	default:
693 		return -EINVAL;
694 	}
695 	for (i = 0; i < requested_planes; i++)
696 		if (requested_sizes[i] == 0)
697 			return -EINVAL;
698 	return ret ? ret : vb2_core_create_bufs(q, create->memory,
699 		&create->count, requested_planes, requested_sizes);
700 }
701 EXPORT_SYMBOL_GPL(vb2_create_bufs);
702 
703 int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
704 	     struct v4l2_buffer *b)
705 {
706 	struct media_request *req = NULL;
707 	int ret;
708 
709 	if (vb2_fileio_is_active(q)) {
710 		dprintk(1, "file io in progress\n");
711 		return -EBUSY;
712 	}
713 
714 	ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
715 	if (ret)
716 		return ret;
717 	ret = vb2_core_qbuf(q, b->index, b, req);
718 	if (req)
719 		media_request_put(req);
720 	return ret;
721 }
722 EXPORT_SYMBOL_GPL(vb2_qbuf);
723 
724 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
725 {
726 	int ret;
727 
728 	if (vb2_fileio_is_active(q)) {
729 		dprintk(1, "file io in progress\n");
730 		return -EBUSY;
731 	}
732 
733 	if (b->type != q->type) {
734 		dprintk(1, "invalid buffer type\n");
735 		return -EINVAL;
736 	}
737 
738 	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
739 
740 	/*
741 	 *  After calling VIDIOC_DQBUF, V4L2_BUF_FLAG_DONE must be
742 	 *  cleared.
743 	 */
744 	b->flags &= ~V4L2_BUF_FLAG_DONE;
745 
746 	return ret;
747 }
748 EXPORT_SYMBOL_GPL(vb2_dqbuf);
749 
750 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
751 {
752 	if (vb2_fileio_is_active(q)) {
753 		dprintk(1, "file io in progress\n");
754 		return -EBUSY;
755 	}
756 	return vb2_core_streamon(q, type);
757 }
758 EXPORT_SYMBOL_GPL(vb2_streamon);
759 
760 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
761 {
762 	if (vb2_fileio_is_active(q)) {
763 		dprintk(1, "file io in progress\n");
764 		return -EBUSY;
765 	}
766 	return vb2_core_streamoff(q, type);
767 }
768 EXPORT_SYMBOL_GPL(vb2_streamoff);
769 
770 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
771 {
772 	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
773 				eb->plane, eb->flags);
774 }
775 EXPORT_SYMBOL_GPL(vb2_expbuf);
776 
777 int vb2_queue_init(struct vb2_queue *q)
778 {
779 	/*
780 	 * Sanity check
781 	 */
782 	if (WARN_ON(!q)			  ||
783 	    WARN_ON(q->timestamp_flags &
784 		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
785 		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
786 		return -EINVAL;
787 
788 	/* Warn that the driver should choose an appropriate timestamp type */
789 	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
790 		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
791 
792 	/* Warn that vb2_memory should match v4l2_memory */
793 	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
794 		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
795 		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
796 		return -EINVAL;
797 
798 	if (q->buf_struct_size == 0)
799 		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
800 
801 	q->buf_ops = &v4l2_buf_ops;
802 	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
803 	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
804 	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
805 			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
806 	/*
807 	 * For compatibility with vb1: if QBUF hasn't been called yet, then
808 	 * return EPOLLERR as well. This only affects capture queues; output
809 	 * queues will always initialize waiting_for_buffers to false.
810 	 */
811 	q->quirk_poll_must_check_waiting_for_buffers = true;
812 
813 	return vb2_core_queue_init(q);
814 }
815 EXPORT_SYMBOL_GPL(vb2_queue_init);
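
/*
 * Example: typical vb2_queue setup before calling vb2_queue_init(), usually
 * done at probe or open time. This is only a sketch: "dev", "struct
 * my_buffer" (which must embed struct vb2_v4l2_buffer as its first member),
 * "my_qops" and the vmalloc mem_ops are placeholders for driver-specific
 * choices.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = dev;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &dev->mutex;
 *	ret = vb2_queue_init(q);
 */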
816 
817 void vb2_queue_release(struct vb2_queue *q)
818 {
819 	vb2_core_queue_release(q);
820 }
821 EXPORT_SYMBOL_GPL(vb2_queue_release);
822 
823 __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
824 {
825 	struct video_device *vfd = video_devdata(file);
826 	__poll_t req_events = poll_requested_events(wait);
827 	__poll_t res = 0;
828 
829 	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
830 		struct v4l2_fh *fh = file->private_data;
831 
832 		if (v4l2_event_pending(fh))
833 			res = EPOLLPRI;
834 		else if (req_events & EPOLLPRI)
835 			poll_wait(file, &fh->wait, wait);
836 	}
837 
838 	return res | vb2_core_poll(q, file, wait);
839 }
840 EXPORT_SYMBOL_GPL(vb2_poll);
841 
842 /*
843  * The following functions are not part of the vb2 core API, but are helper
844  * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
845  * and struct vb2_ops.
846  * They contain boilerplate code that most if not all drivers would
847  * otherwise have to write, and so they simplify the driver code.
848  */
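
/*
 * Example: how the vb2_ioctl_*() helpers below typically plug into a
 * driver's struct v4l2_ioctl_ops. This is only a sketch; "my_ioctl_ops" is
 * a placeholder name and drivers hook up only the ops they support.
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		...
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *		...
 *	};
 */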
849 
850 /* The queue is busy if there is an owner and you are not that owner. */
851 static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
852 {
853 	return vdev->queue->owner && vdev->queue->owner != file->private_data;
854 }
855 
856 /* vb2 ioctl helpers */
857 
858 int vb2_ioctl_reqbufs(struct file *file, void *priv,
859 			  struct v4l2_requestbuffers *p)
860 {
861 	struct video_device *vdev = video_devdata(file);
862 	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
863 
864 	if (res)
865 		return res;
866 	if (vb2_queue_is_busy(vdev, file))
867 		return -EBUSY;
868 	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
869 	/* If count == 0, then the owner has released all buffers and is no
870 	   longer the owner of the queue. Otherwise we have a new owner. */
871 	if (res == 0)
872 		vdev->queue->owner = p->count ? file->private_data : NULL;
873 	return res;
874 }
875 EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
876 
877 int vb2_ioctl_create_bufs(struct file *file, void *priv,
878 			  struct v4l2_create_buffers *p)
879 {
880 	struct video_device *vdev = video_devdata(file);
881 	int res = vb2_verify_memory_type(vdev->queue, p->memory,
882 			p->format.type);
883 
884 	p->index = vdev->queue->num_buffers;
885 	/*
886 	 * If count == 0, then just check if memory and type are valid.
887 	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
888 	 */
889 	if (p->count == 0)
890 		return res != -EBUSY ? res : 0;
891 	if (res)
892 		return res;
893 	if (vb2_queue_is_busy(vdev, file))
894 		return -EBUSY;
895 
896 	res = vb2_create_bufs(vdev->queue, p);
897 	if (res == 0)
898 		vdev->queue->owner = file->private_data;
899 	return res;
900 }
901 EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
902 
903 int vb2_ioctl_prepare_buf(struct file *file, void *priv,
904 			  struct v4l2_buffer *p)
905 {
906 	struct video_device *vdev = video_devdata(file);
907 
908 	if (vb2_queue_is_busy(vdev, file))
909 		return -EBUSY;
910 	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
911 }
912 EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
913 
914 int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
915 {
916 	struct video_device *vdev = video_devdata(file);
917 
918 	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
919 	return vb2_querybuf(vdev->queue, p);
920 }
921 EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
922 
923 int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
924 {
925 	struct video_device *vdev = video_devdata(file);
926 
927 	if (vb2_queue_is_busy(vdev, file))
928 		return -EBUSY;
929 	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
930 }
931 EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
932 
933 int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
934 {
935 	struct video_device *vdev = video_devdata(file);
936 
937 	if (vb2_queue_is_busy(vdev, file))
938 		return -EBUSY;
939 	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
940 }
941 EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
942 
943 int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
944 {
945 	struct video_device *vdev = video_devdata(file);
946 
947 	if (vb2_queue_is_busy(vdev, file))
948 		return -EBUSY;
949 	return vb2_streamon(vdev->queue, i);
950 }
951 EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
952 
953 int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
954 {
955 	struct video_device *vdev = video_devdata(file);
956 
957 	if (vb2_queue_is_busy(vdev, file))
958 		return -EBUSY;
959 	return vb2_streamoff(vdev->queue, i);
960 }
961 EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
962 
963 int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
964 {
965 	struct video_device *vdev = video_devdata(file);
966 
967 	if (vb2_queue_is_busy(vdev, file))
968 		return -EBUSY;
969 	return vb2_expbuf(vdev->queue, p);
970 }
971 EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
972 
973 /* v4l2_file_operations helpers */
974 
975 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
976 {
977 	struct video_device *vdev = video_devdata(file);
978 
979 	return vb2_mmap(vdev->queue, vma);
980 }
981 EXPORT_SYMBOL_GPL(vb2_fop_mmap);
982 
983 int _vb2_fop_release(struct file *file, struct mutex *lock)
984 {
985 	struct video_device *vdev = video_devdata(file);
986 
987 	if (lock)
988 		mutex_lock(lock);
989 	if (file->private_data == vdev->queue->owner) {
990 		vb2_queue_release(vdev->queue);
991 		vdev->queue->owner = NULL;
992 	}
993 	if (lock)
994 		mutex_unlock(lock);
995 	return v4l2_fh_release(file);
996 }
997 EXPORT_SYMBOL_GPL(_vb2_fop_release);
998 
999 int vb2_fop_release(struct file *file)
1000 {
1001 	struct video_device *vdev = video_devdata(file);
1002 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1003 
1004 	return _vb2_fop_release(file, lock);
1005 }
1006 EXPORT_SYMBOL_GPL(vb2_fop_release);
1007 
1008 ssize_t vb2_fop_write(struct file *file, const char __user *buf,
1009 		size_t count, loff_t *ppos)
1010 {
1011 	struct video_device *vdev = video_devdata(file);
1012 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1013 	int err = -EBUSY;
1014 
1015 	if (!(vdev->queue->io_modes & VB2_WRITE))
1016 		return -EINVAL;
1017 	if (lock && mutex_lock_interruptible(lock))
1018 		return -ERESTARTSYS;
1019 	if (vb2_queue_is_busy(vdev, file))
1020 		goto exit;
1021 	err = vb2_write(vdev->queue, buf, count, ppos,
1022 		       file->f_flags & O_NONBLOCK);
1023 	if (vdev->queue->fileio)
1024 		vdev->queue->owner = file->private_data;
1025 exit:
1026 	if (lock)
1027 		mutex_unlock(lock);
1028 	return err;
1029 }
1030 EXPORT_SYMBOL_GPL(vb2_fop_write);
1031 
1032 ssize_t vb2_fop_read(struct file *file, char __user *buf,
1033 		size_t count, loff_t *ppos)
1034 {
1035 	struct video_device *vdev = video_devdata(file);
1036 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1037 	int err = -EBUSY;
1038 
1039 	if (!(vdev->queue->io_modes & VB2_READ))
1040 		return -EINVAL;
1041 	if (lock && mutex_lock_interruptible(lock))
1042 		return -ERESTARTSYS;
1043 	if (vb2_queue_is_busy(vdev, file))
1044 		goto exit;
1045 	err = vb2_read(vdev->queue, buf, count, ppos,
1046 		       file->f_flags & O_NONBLOCK);
1047 	if (vdev->queue->fileio)
1048 		vdev->queue->owner = file->private_data;
1049 exit:
1050 	if (lock)
1051 		mutex_unlock(lock);
1052 	return err;
1053 }
1054 EXPORT_SYMBOL_GPL(vb2_fop_read);
1055 
1056 __poll_t vb2_fop_poll(struct file *file, poll_table *wait)
1057 {
1058 	struct video_device *vdev = video_devdata(file);
1059 	struct vb2_queue *q = vdev->queue;
1060 	struct mutex *lock = q->lock ? q->lock : vdev->lock;
1061 	__poll_t res;
1062 	void *fileio;
1063 
1064 	/*
1065 	 * If this helper doesn't know how to lock, then you shouldn't be using
1066 	 * it; write your own instead.
1067 	 */
1068 	WARN_ON(!lock);
1069 
1070 	if (lock && mutex_lock_interruptible(lock))
1071 		return EPOLLERR;
1072 
1073 	fileio = q->fileio;
1074 
1075 	res = vb2_poll(vdev->queue, file, wait);
1076 
1077 	/* If fileio was started, then we have a new queue owner. */
1078 	if (!fileio && q->fileio)
1079 		q->owner = file->private_data;
1080 	if (lock)
1081 		mutex_unlock(lock);
1082 	return res;
1083 }
1084 EXPORT_SYMBOL_GPL(vb2_fop_poll);
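
/*
 * Example: a typical struct v4l2_file_operations built on the vb2_fop_*()
 * helpers above. This is only a sketch; "my_fops" is a placeholder name and
 * it assumes the queue's lock (or vdev->lock) is set so the helpers can
 * serialize access.
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= vb2_fop_mmap,
 *	};
 */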
1085 
1086 #ifndef CONFIG_MMU
1087 unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
1088 		unsigned long len, unsigned long pgoff, unsigned long flags)
1089 {
1090 	struct video_device *vdev = video_devdata(file);
1091 
1092 	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
1093 }
1094 EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
1095 #endif
1096 
1097 /* vb2_ops helpers. Only use if vq->lock is non-NULL. */
1098 
1099 void vb2_ops_wait_prepare(struct vb2_queue *vq)
1100 {
1101 	mutex_unlock(vq->lock);
1102 }
1103 EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
1104 
1105 void vb2_ops_wait_finish(struct vb2_queue *vq)
1106 {
1107 	mutex_lock(vq->lock);
1108 }
1109 EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
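
/*
 * Example: plugging the two helpers above into a driver's struct vb2_ops
 * when vq->lock is set. This is only a sketch; "my_queue_setup" and
 * "my_buf_queue" stand in for the driver's own callbacks.
 *
 *	static const struct vb2_ops my_qops = {
 *		.queue_setup	= my_queue_setup,
 *		.buf_queue	= my_buf_queue,
 *		.wait_prepare	= vb2_ops_wait_prepare,
 *		.wait_finish	= vb2_ops_wait_finish,
 *	};
 */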
1110 
1111 /*
1112  * Note that this function is called during validation time and
1113  * thus the req_queue_mutex is held to ensure no request objects
1114  * can be added or deleted while validating. So there is no need
1115  * to protect the objects list.
1116  */
1117 int vb2_request_validate(struct media_request *req)
1118 {
1119 	struct media_request_object *obj;
1120 	int ret = 0;
1121 
1122 	if (!vb2_request_has_buffers(req))
1123 		return -ENOENT;
1124 
1125 	list_for_each_entry(obj, &req->objects, list) {
1126 		if (!obj->ops->prepare)
1127 			continue;
1128 
1129 		ret = obj->ops->prepare(obj);
1130 		if (ret)
1131 			break;
1132 	}
1133 
1134 	if (ret) {
1135 		list_for_each_entry_continue_reverse(obj, &req->objects, list)
1136 			if (obj->ops->unprepare)
1137 				obj->ops->unprepare(obj);
1138 		return ret;
1139 	}
1140 	return 0;
1141 }
1142 EXPORT_SYMBOL_GPL(vb2_request_validate);
1143 
1144 void vb2_request_queue(struct media_request *req)
1145 {
1146 	struct media_request_object *obj, *obj_safe;
1147 
1148 	/*
1149 	 * Queue all objects. Note that buffer objects are at the end of the
1150 	 * objects list, after all other object types. Once buffer objects
1151 	 * are queued, the driver might delete them immediately (if the driver
1152 	 * processes the buffer at once), so we have to use
1153 	 * list_for_each_entry_safe() to handle the case where the object we
1154 	 * queue is deleted.
1155 	 */
1156 	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
1157 		if (obj->ops->queue)
1158 			obj->ops->queue(obj);
1159 }
1160 EXPORT_SYMBOL_GPL(vb2_request_queue);
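
/*
 * Example: request-capable drivers typically hook the two helpers above
 * into their struct media_device_ops. This is only a sketch; "my_media_ops"
 * is a placeholder name, and drivers may wrap these helpers if they need
 * extra validation of their own.
 *
 *	static const struct media_device_ops my_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 */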
1161 
1162 MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
1163 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
1164 MODULE_LICENSE("GPL");
1165