/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-v4l2: [%s] %s: " fmt,		      \
				(q)->name, __func__, ## arg);		      \
	} while (0)
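
/*
 * Usage note (an assumption based on the usual module_param() sysfs layout,
 * not something defined in this file): the debug level above can typically
 * be raised at load time or at runtime, e.g.
 *
 *	modprobe videobuf2_v4l2 debug=2
 *	echo 2 > /sys/module/videobuf2_v4l2/parameters/debug
 *
 * Higher values make the dprintk() calls below progressively more verbose.
 */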

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
			       ? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In vb2 we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->skip_cache_sync_on_finish = 1;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->skip_cache_sync_on_prepare = 1;
}
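
/*
 * A hedged sketch (not part of this driver) of how userspace might use the
 * cache hints handled above: when the queue advertises
 * V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS, an application that knows it will
 * not touch a CAPTURE buffer through the CPU can ask vb2 to skip the cache
 * sync when the buffer finishes, e.g.:
 *
 *	struct v4l2_buffer buf = {
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *		.index  = 0,
 *		.flags  = V4L2_BUF_FLAG_NO_CACHE_INVALIDATE,
 *	};
 *
 *	ioctl(fd, VIDIOC_QBUF, &buf);
 *
 * If the queue does not allow cache hints, the code above clears these flags
 * so that userspace can see that the hint was ignored.
 */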

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct vb2_buffer *vb, struct v4l2_buffer *b,
				    bool is_prepare, struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
{
	unsigned int i;
	struct vb2_buffer *vb2;

	/*
	 * This loop doesn't scale if there is a really large number of buffers.
	 * Maybe something more efficient will be needed in this case.
	 */
	for (i = 0; i < q->max_num_buffers; i++) {
		vb2 = vb2_get_buffer(q, i);

		if (!vb2)
			continue;

		if (vb2->copied_timestamp &&
		    vb2->timestamp == timestamp)
			return vb2;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(vb2_find_buffer);
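
/*
 * Illustrative sketch only (not from this file): a typical user of
 * vb2_find_buffer() is a stateless codec driver that has to translate a
 * reference timestamp supplied by userspace back into the CAPTURE buffer
 * that carries that frame. The names below are hypothetical:
 *
 *	u64 ref_ts = ...;	(timestamp of the reference frame)
 *	struct vb2_buffer *ref_vb = vb2_find_buffer(cap_q, ref_ts);
 *
 *	if (!ref_vb)
 *		(no buffer on the queue carries that timestamp)
 *
 * As the loop above shows, only buffers whose timestamp was copied from
 * userspace (copied_timestamp) are considered.
 */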

/*
 * vb2_querybuf() - query video buffer information
 * @q:		vb2 queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	vb = vb2_get_buffer(q, b->index);
	if (!vb) {
		dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
		return -EINVAL;
	}

	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, vb, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
				   u32 *flags, u32 *caps, u32 *max_num_bufs)
{
	if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
		/*
		 * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
		 * but in order to avoid bugs we zero out all bits.
		 */
		*flags = 0;
	} else {
		/* Clear all unknown flags. */
		*flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
	}

	*caps |= V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
	if (max_num_bufs) {
		*max_num_bufs = q->max_num_buffers;
		*caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
	}
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);
	u32 flags = req->flags;

	vb2_set_flags_and_caps(q, req->memory, &flags,
			       &req->capabilities, NULL);
	req->flags = flags;
	return ret ? ret : vb2_core_reqbufs(q, req->memory,
					    req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	vb = vb2_get_buffer(q, b->index);
	if (!vb) {
		dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
		return -EINVAL;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, vb, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	create->index = vb2_get_num_buffers(q);
	vb2_set_flags_and_caps(q, create->memory, &create->flags,
			       &create->capabilities, &create->max_num_buffers);
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	if (ret)
		return ret;

	return vb2_core_create_bufs(q, create->memory,
				    create->flags,
				    &create->count,
				    requested_planes,
				    requested_sizes,
				    &create->index);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	struct vb2_buffer *vb;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	vb = vb2_get_buffer(q, b->index);
	if (!vb) {
		dprintk(q, 1, "can't find the requested buffer %u\n", b->index);
		return -EINVAL;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, vb, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, vb, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling VIDIOC_DQBUF, V4L2_BUF_FLAG_DONE must be cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	struct vb2_buffer *vb;

	vb = vb2_get_buffer(q, eb->index);
	if (!vb) {
		dprintk(q, 1, "can't find the requested buffer %u\n", eb->index);
		return -EINVAL;
	}

	return vb2_core_expbuf(q, &eb->fd, eb->type, vb,
				eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
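
/*
 * A minimal sketch of how a driver typically fills in a vb2_queue before
 * calling vb2_queue_init(); the my_* names and the choice of mem_ops are
 * hypothetical and only illustrate the fields checked above:
 *
 *	static int my_init_queue(struct my_dev *dev)
 *	{
 *		struct vb2_queue *q = &dev->queue;
 *
 *		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *		q->drv_priv = dev;
 *		q->buf_struct_size = sizeof(struct my_buffer);
 *		q->ops = &my_vb2_ops;
 *		q->mem_ops = &vb2_dma_contig_memops;
 *		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *		q->lock = &dev->mutex;
 *
 *		return vb2_queue_init(q);
 *	}
 *
 * struct my_buffer is assumed to embed struct vb2_v4l2_buffer as its first
 * member, which is why buf_struct_size may be larger than (but never smaller
 * than) sizeof(struct vb2_v4l2_buffer). vb2_queue_init_name() can be used
 * instead to give the queue a name that shows up in the dprintk() output.
 */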

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
{
	if (type == q->type)
		return 0;

	if (vb2_is_busy(q))
		return -EBUSY;

	q->type = type;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_change_type);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */
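
/*
 * A condensed sketch of how a driver normally plugs these helpers in; the
 * my_* structures are hypothetical and only the vb2 helper names come from
 * this file:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 *
 * All of these helpers assume that video_devdata(file)->queue points at the
 * driver's vb2_queue, i.e. that the driver set vdev->queue before
 * registering the video device.
 */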

/* vb2 ioctl helpers */

int vb2_ioctl_remove_bufs(struct file *file, void *priv,
			  struct v4l2_remove_buffers *d)
{
	struct video_device *vdev = video_devdata(file);

	if (vdev->queue->type != d->type)
		return -EINVAL;

	if (d->count == 0)
		return 0;

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;

	return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);

int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
	u32 flags = p->flags;

	vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
			       &p->capabilities, NULL);
	p->flags = flags;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer the owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);

	p->index = vb2_get_num_buffers(vdev->queue);
	vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
			       &p->capabilities, &p->max_num_buffers);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev->queue, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (!vdev->queue->owner || file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev->queue, file))
		goto exit;
	vdev->queue->owner = file->private_data;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (!vdev->queue->fileio)
		vdev->queue->owner = NULL;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
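
/*
 * Sketch of a driver vb2_ops table using the two helpers above; all of the
 * my_* callbacks are hypothetical driver functions:
 *
 *	static const struct vb2_ops my_vb2_ops = {
 *		.queue_setup		= my_queue_setup,
 *		.buf_prepare		= my_buf_prepare,
 *		.buf_queue		= my_buf_queue,
 *		.start_streaming	= my_start_streaming,
 *		.stop_streaming		= my_stop_streaming,
 *		.wait_prepare		= vb2_ops_wait_prepare,
 *		.wait_finish		= vb2_ops_wait_finish,
 *	};
 *
 * As noted above, this only works when the driver assigned vq->lock, since
 * the helpers simply drop and retake that mutex around blocking waits.
 */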

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);
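
/*
 * For request support, these two functions are normally wired into the
 * media_device ops by the driver (a sketch; the device names are
 * assumptions):
 *
 *	static const struct media_device_ops my_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 *
 *	dev->mdev.ops = &my_media_ops;
 *
 * Memory-to-memory drivers usually point req_queue at
 * v4l2_m2m_request_queue() instead, so that queueing is deferred until the
 * m2m context is ready to run.
 */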

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");