/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

#define PLANE_INDEX_BITS	3
#define PLANE_INDEX_SHIFT	(PAGE_SHIFT + PLANE_INDEX_BITS)
#define PLANE_INDEX_MASK	(BIT_MASK(PLANE_INDEX_BITS) - 1)
#define MAX_BUFFER_INDEX	BIT_MASK(30 - PLANE_INDEX_SHIFT)
#define BUFFER_INDEX_MASK	(MAX_BUFFER_INDEX - 1)

#if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES
#error BIT(PLANE_INDEX_BITS) must be equal to VIDEO_MAX_PLANES
#endif

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
				## arg);				\
	} while (0)
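
/*
 * The 'debug' level can be raised at runtime through the module
 * parameter, e.g. (path assuming this file is built as part of the
 * videobuf2_common module):
 *
 *	echo 2 > /sys/module/videobuf2_common/parameters/debug
 */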

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(op, vb, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL;	\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(op, vb, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif

#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})
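
/*
 * In all of the wrappers above, ops that a driver or allocator leaves
 * NULL are treated as no-ops: call_qop()/call_memop()/call_vb_qop()
 * return 0 and call_ptr_memop() returns NULL. For example, with
 * CONFIG_VIDEO_ADV_DEBUG disabled,
 *
 *	call_qop(q, start_streaming, q, count)
 *
 * expands to ((q)->ops->start_streaming ?
 * (q)->ops->start_streaming(q, count) : 0).
 */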

static void __vb2_queue_cancel(struct vb2_queue *q);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer.
	 * NOTE: mmapped areas should be page aligned.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(alloc,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  size);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (!p->dbuf_duplicated) {
		if (p->dbuf_mapped)
			call_void_memop(vb, unmap_dmabuf, p->mem_priv);

		call_void_memop(vb, detach_dmabuf, p->mem_priv);
	}

	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
	p->bytesused = 0;
	p->length = 0;
	p->m.fd = 0;
	p->data_offset = 0;
	p->dbuf_duplicated = false;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	int plane;

	/*
	 * When multiple planes share the same DMA buffer attachment, the plane
	 * with the lowest index owns the mem_priv.
	 * Put planes in the reversed order so that we don't leave invalid
	 * mem_priv behind.
	 */
	for (plane = vb->num_planes - 1; plane >= 0; --plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	vb->synced = 1;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}

/*
 * __vb2_buf_mem_finish() - call ->finish() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	vb->synced = 0;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long offset = 0;

	/*
	 * The offset "cookie" value has the following constraints:
	 * - a buffer can have up to 8 planes.
	 * - v4l2 mem2mem uses bit 30 to distinguish between
	 *   OUTPUT (aka "source", bit 30 is 0) and
	 *   CAPTURE (aka "destination", bit 30 is 1) buffers.
	 * - must be page aligned
	 * That leads to this bit mapping when PAGE_SHIFT = 12:
	 * |30                |29        15|14       12|11 0|
	 * |DST_QUEUE_OFF_BASE|buffer index|plane index| 0  |
	 * where there are 15 bits to store the buffer index.
	 * Depending on the PAGE_SHIFT value we can have fewer bits
	 * to store the buffer index.
	 */
	offset = vb->index << PLANE_INDEX_SHIFT;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT);

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
				vb->index, plane,
				offset + (plane << PAGE_SHIFT));
	}
}
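
/*
 * The inverse mapping, i.e. how a "cookie" produced by __setup_offsets()
 * decomposes again (a sketch of the layout; the actual lookup happens at
 * mmap() time):
 *
 *	unsigned int buffer = (off >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
 *	unsigned int plane = (off >> PAGE_SHIFT) & PLANE_INDEX_MASK;
 */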

static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
{
	/*
	 * The DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->skip_cache_sync_on_finish = 1;
		vb->skip_cache_sync_on_prepare = 1;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when the queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->skip_cache_sync_on_finish = 1;
}

/**
 * vb2_queue_add_buffer() - add a buffer to a queue
 * @q:	pointer to &struct vb2_queue with videobuf2 queue.
 * @vb:	pointer to &struct vb2_buffer to be added to the queue.
 * @index: index at which the buffer is added to the queue.
 */
static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
{
	WARN_ON(index >= q->max_num_buffers || test_bit(index, q->bufs_bitmap) || vb->vb2_queue);

	q->bufs[index] = vb;
	vb->index = index;
	vb->vb2_queue = q;
	set_bit(index, q->bufs_bitmap);
}

/**
 * vb2_queue_remove_buffer() - remove a buffer from a queue
 * @vb:	pointer to &struct vb2_buffer to be removed from the queue.
 */
static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
{
	clear_bit(vb->index, vb->vb2_queue->bufs_bitmap);
	vb->vb2_queue->bufs[vb->index] = NULL;
	vb->vb2_queue = NULL;
}

/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue, and initialize
 * the queue
 * @first_index: index of the first created buffer, all newly allocated buffers
 *		 have indices in the range [first_index..first_index+count-1]
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned int plane_sizes[VB2_MAX_PLANES],
			     unsigned int *first_index)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long index = q->max_num_buffers;
	int ret;

	/*
	 * Ensure that the number of newly requested buffers plus the number
	 * of buffers already in the queue stays within q->max_num_buffers.
	 */
	num_buffers = min_t(unsigned int, num_buffers,
			    q->max_num_buffers - vb2_get_num_buffers(q));

	while (num_buffers) {
		index = bitmap_find_next_zero_area(q->bufs_bitmap, q->max_num_buffers,
						   0, num_buffers, 0);

		if (index < q->max_num_buffers)
			break;
		/* Try to find free space for fewer buffers */
		num_buffers--;
	}

	/* If there is no space left to allocate buffers, return 0 to indicate the error */
	if (!num_buffers) {
		*first_index = 0;
		return 0;
	}

	*first_index = index;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate vb2 buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->num_planes = num_planes;
		vb->type = q->type;
		vb->memory = memory;
		init_buffer_cache_hints(q, vb);
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}

		vb2_queue_add_buffer(q, vb, index++);
		call_void_bufop(q, init_buffer, vb);

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}
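
/*
 * A driver normally embeds struct vb2_buffer (through struct
 * vb2_v4l2_buffer) at the start of its own per-buffer state and points
 * q->buf_struct_size at it, so the kzalloc(q->buf_struct_size) above
 * allocates everything in one go. A minimal sketch (the 'my_buffer'
 * type and its 'list' member are hypothetical):
 *
 *	struct my_buffer {
 *		struct vb2_v4l2_buffer vb;	// must be the first member
 *		struct list_head list;		// driver-owned bookkeeping
 *	};
 *
 *	q->buf_struct_size = sizeof(struct my_buffer);
 */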

/*
 * __vb2_free_mem() - release video buffer memory for a given range of
 * buffers in a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i;
	struct vb2_buffer *vb;

	for (i = start; i < start + count; i++) {
		vb = vb2_get_buffer(q, i);
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR/DMABUF buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free @count buffers from the @start index of the
 * queue: the video memory and related information. If no buffers are left,
 * return the queue to an uninitialized state. Might be called even if the
 * queue has already been freed.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i;

	lockdep_assert_held(&q->mmap_lock);

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, start, count);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not then dump the counters to the kernel log.
	 */
	if (vb2_get_num_buffers(q)) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p:\n", q);
			if (q->cnt_start_streaming != q->cnt_stop_streaming)
				pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
					q->cnt_queue_setup, q->cnt_start_streaming,
					q->cnt_stop_streaming);
			if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
				pr_info("     prepare_streaming: %u unprepare_streaming: %u\n",
					q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
			if (q->cnt_wait_prepare != q->cnt_wait_finish)
				pr_info("     wait_prepare: %u wait_finish: %u\n",
					q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_prepare_streaming = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
		q->cnt_unprepare_streaming = 0;
	}
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);
		bool unbalanced;

		if (!vb)
			continue;

		unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
			     vb->cnt_mem_prepare != vb->cnt_mem_finish ||
			     vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
			     vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
			     vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
			     vb->cnt_buf_queue != vb->cnt_buf_done ||
			     vb->cnt_buf_prepare != vb->cnt_buf_finish ||
			     vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p, buffer %d:\n",
				q, i);
			if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
				pr_info("     buf_init: %u buf_cleanup: %u\n",
					vb->cnt_buf_init, vb->cnt_buf_cleanup);
			if (vb->cnt_buf_prepare != vb->cnt_buf_finish)
				pr_info("     buf_prepare: %u buf_finish: %u\n",
					vb->cnt_buf_prepare, vb->cnt_buf_finish);
			if (vb->cnt_buf_queue != vb->cnt_buf_done)
				pr_info("     buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
					vb->cnt_buf_out_validate, vb->cnt_buf_queue,
					vb->cnt_buf_done, vb->cnt_buf_request_complete);
			if (vb->cnt_mem_alloc != vb->cnt_mem_put)
				pr_info("     alloc: %u put: %u\n",
					vb->cnt_mem_alloc, vb->cnt_mem_put);
			if (vb->cnt_mem_prepare != vb->cnt_mem_finish)
				pr_info("     prepare: %u finish: %u\n",
					vb->cnt_mem_prepare, vb->cnt_mem_finish);
			if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr)
				pr_info("     get_userptr: %u put_userptr: %u\n",
					vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf)
				pr_info("     attach_dmabuf: %u detach_dmabuf: %u\n",
					vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf);
			if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf)
				pr_info("     map_dmabuf: %u unmap_dmabuf: %u\n",
					vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users);
		}
	}
#endif

	/* Free vb2 buffers */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (!vb)
			continue;

		vb2_queue_remove_buffer(vb);
		kfree(vb);
	}

	if (!vb2_get_num_buffers(q)) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
}

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;

		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < q->max_num_buffers; ++buffer) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (!vb)
			continue;

		if (vb2_buffer_in_use(q, vb))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	call_void_bufop(q, fill_user_buffer, vb, pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for the
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for the
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for the
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

int vb2_verify_memory_type(struct vb2_queue *q,
		enum vb2_memory memory, unsigned int type)
{
	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
	    memory != VB2_MEMORY_DMABUF) {
		dprintk(q, 1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for the given memory type
	 * are available.
	 */
	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(q, 1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(q, 1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(q, 1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(vb2_verify_memory_type);
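
/*
 * vb2_verify_memory_type() is intended for the higher-level (e.g. V4L2)
 * reqbufs/create_bufs handlers, which call it before invoking
 * vb2_core_reqbufs()/vb2_core_create_bufs() below so that a bad memory
 * type or queue type is rejected before any allocation work is done.
 */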

static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
{
	q->non_coherent_mem = 0;

	if (!vb2_queue_allows_cache_hints(q))
		return;
	q->non_coherent_mem = non_coherent_mem;
}

static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
{
	if (non_coherent_mem != q->non_coherent_mem) {
		dprintk(q, 1, "memory coherency model mismatch\n");
		return false;
	}
	return true;
}

static int vb2_core_allocated_buffers_storage(struct vb2_queue *q)
{
	if (!q->bufs)
		q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL);
	if (!q->bufs)
		return -ENOMEM;

	if (!q->bufs_bitmap)
		q->bufs_bitmap = bitmap_zalloc(q->max_num_buffers, GFP_KERNEL);
	if (!q->bufs_bitmap) {
		kfree(q->bufs);
		q->bufs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void vb2_core_free_buffers_storage(struct vb2_queue *q)
{
	kfree(q->bufs);
	q->bufs = NULL;
	bitmap_free(q->bufs_bitmap);
	q->bufs_bitmap = NULL;
}

int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		     unsigned int flags, unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	unsigned int plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int i, first_index;
	int ret = 0;

	if (q->streaming) {
		dprintk(q, 1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q_num_bufs != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
	    !verify_coherency_flags(q, non_coherent_mem)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (debug && q->memory == VB2_MEMORY_MMAP &&
		    __buffers_in_use(q))
			dprintk(q, 1, "memory in use, orphaning buffers\n");

		/*
		 * Call queue_cancel to clean up any buffers in the
		 * QUEUED state, which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		__vb2_queue_free(q, 0, q->max_num_buffers);
		mutex_unlock(&q->mmap_lock);

		q->is_busy = 0;
		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * the driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = max_t(unsigned int, *count, q->min_reqbufs_allocation);
	num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/*
	 * Set this now to ensure that drivers see the correct q->memory value
	 * in the queue_setup op.
	 */
	mutex_lock(&q->mmap_lock);
	ret = vb2_core_allocated_buffers_storage(q);
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;
	set_queue_coherency(q, non_coherent_mem);

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * The driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Check that the driver has set sane values */
	if (WARN_ON(!num_planes)) {
		ret = -EINVAL;
		goto error;
	}

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i])) {
			ret = -EINVAL;
			goto error;
		}

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, &first_index);
	if (allocated_buffers == 0) {
		/* There shouldn't be any buffers allocated, so first_index == 0 */
		WARN_ON(first_index);
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_reqbufs_allocation)
		ret = -ENOMEM;

	/*
	 * Check if the driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(). Since a
		 * non-zero value signals to queue_setup() that it is called
		 * from create_bufs(), zero it here to signal that this
		 * queue_setup() call is for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error.
		 */
	}

	mutex_lock(&q->mmap_lock);

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, first_index, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;
	q->is_busy = 1;

	return 0;

error:
	mutex_lock(&q->mmap_lock);
	q->memory = VB2_MEMORY_UNKNOWN;
	mutex_unlock(&q->mmap_lock);
	vb2_core_free_buffers_storage(q);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
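
/*
 * A minimal sketch of the queue_setup contract used above (the 'my_dev'
 * type and its 'image_size' field are hypothetical, not taken from a
 * real driver):
 *
 *	static int my_queue_setup(struct vb2_queue *q,
 *				  unsigned int *num_buffers,
 *				  unsigned int *num_planes,
 *				  unsigned int sizes[],
 *				  struct device *alloc_devs[])
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		// A non-zero *num_planes means a create_bufs()-style call:
 *		// only validate the sizes the caller passed in.
 *		if (*num_planes)
 *			return sizes[0] < dev->image_size ? -EINVAL : 0;
 *
 *		*num_planes = 1;
 *		sizes[0] = dev->image_size;
 *		return 0;
 *	}
 */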

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
			 unsigned int flags, unsigned int *count,
			 unsigned int requested_planes,
			 const unsigned int requested_sizes[],
			 unsigned int *first_index)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned int plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	bool no_previous_buffers = !q_num_bufs;
	int ret = 0;

	if (q_num_bufs == q->max_num_buffers) {
		dprintk(q, 1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (no_previous_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		/*
		 * Set this now to ensure that drivers see the correct q->memory
		 * value in the queue_setup op.
		 */
		mutex_lock(&q->mmap_lock);
		ret = vb2_core_allocated_buffers_storage(q);
		q->memory = memory;
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;
		q->waiting_for_buffers = !q->is_output;
		set_queue_coherency(q, non_coherent_mem);
	} else {
		if (q->memory != memory) {
			dprintk(q, 1, "memory model mismatch\n");
			return -EINVAL;
		}
		if (!verify_coherency_flags(q, non_coherent_mem))
			return -EINVAL;
	}

	num_buffers = min(*count, q->max_num_buffers - q_num_bufs);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
				num_planes, plane_sizes, first_index);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Check if the driver can handle the number of buffers allocated
	 * so far.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * num_buffers contains the total number of buffers that the
		 * queue driver has set up.
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error.
		 */
	}

	mutex_lock(&q->mmap_lock);

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, *first_index, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->is_busy = 1;

	return 0;

error:
	if (no_previous_buffers) {
		mutex_lock(&q->mmap_lock);
		q->memory = VB2_MEMORY_UNKNOWN;
		mutex_unlock(&q->mmap_lock);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
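
/*
 * What vaddr()/cookie() return depends on the allocator backing the
 * queue. As a rough guide (allocator behaviour, not defined in this
 * file): with videobuf2-vmalloc, vb2_plane_vaddr() yields a kernel
 * mapping suitable for CPU access, while with videobuf2-dma-contig the
 * cookie is the DMA address of the plane, which drivers usually fetch
 * through the vb2_dma_contig_plane_dma_addr() helper rather than calling
 * vb2_plane_cookie() directly.
 */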

void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
		vb->index, vb2_state_name(state));

	if (state != VB2_BUF_STATE_QUEUED)
		__vb2_buf_mem_finish(vb);

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);

	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}

	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
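
/*
 * Typical use from a capture driver's completion path, e.g. an interrupt
 * handler (a sketch; 'dev' and its members are hypothetical):
 *
 *	struct vb2_v4l2_buffer *vbuf = dev->current_buf;
 *
 *	vbuf->vb2_buf.timestamp = ktime_get_ns();
 *	vbuf->sequence = dev->sequence++;
 *	vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
 */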

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr &&
		    vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
						planes[plane].length,
						vb->planes[plane].min_length,
						plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(get_userptr,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
				vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}

/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 */
static int __prepare_dmabuf(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane, i;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		planes[plane].dbuf = dbuf;

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err_put_planes;
		}

		/* Use the DMABUF size if the length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			ret = -EINVAL;
			goto err_put_planes;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "buffer for plane %d changed\n", plane);

		reacquired = true;
	}

	if (reacquired) {
		if (vb->planes[0].mem_priv) {
			vb->copied_timestamp = 0;
			call_void_vb_qop(vb, buf_cleanup, vb);
			__vb2_buf_dmabuf_put(vb);
		}

		for (plane = 0; plane < vb->num_planes; ++plane) {
			/*
			 * This is an optimization to reduce dma_buf attachment/mapping.
			 * When the same dma_buf is used for multiple planes, there is no need
			 * to create duplicated attachments.
			 */
			for (i = 0; i < plane; ++i) {
				if (planes[plane].dbuf == vb->planes[i].dbuf &&
				    q->alloc_devs[plane] == q->alloc_devs[i]) {
					vb->planes[plane].dbuf_duplicated = true;
					vb->planes[plane].dbuf = vb->planes[i].dbuf;
					vb->planes[plane].mem_priv = vb->planes[i].mem_priv;
					break;
				}
			}

			if (vb->planes[plane].dbuf_duplicated)
				continue;

			/* Acquire each plane's memory */
			mem_priv = call_ptr_memop(attach_dmabuf,
						  vb,
						  q->alloc_devs[plane] ? : q->dev,
						  planes[plane].dbuf,
						  planes[plane].length);
			if (IS_ERR(mem_priv)) {
				dprintk(q, 1, "failed to attach dmabuf\n");
				ret = PTR_ERR(mem_priv);
				goto err_put_vb2_buf;
			}

			vb->planes[plane].dbuf = planes[plane].dbuf;
			vb->planes[plane].mem_priv = mem_priv;

			/*
			 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
			 * here instead of just before the DMA, while queueing the buffer(s),
			 * so userspace knows sooner rather than later if the dma-buf map fails.
			 */
			ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
			if (ret) {
				dprintk(q, 1, "failed to map dmabuf for plane %d\n",
					plane);
				goto err_put_vb2_buf;
			}
			vb->planes[plane].dbuf_mapped = 1;
		}

		/*
		 * Now that everything is in order, copy relevant information
		 * provided by userspace.
		 */
		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->planes[plane].bytesused = planes[plane].bytesused;
			vb->planes[plane].length = planes[plane].length;
			vb->planes[plane].m.fd = planes[plane].m.fd;
			vb->planes[plane].data_offset = planes[plane].data_offset;
		}

		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err_put_vb2_buf;
		}
	} else {
		for (plane = 0; plane < vb->num_planes; ++plane)
			dma_buf_put(planes[plane].dbuf);
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err_put_vb2_buf;
	}

	return 0;

err_put_planes:
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (!IS_ERR_OR_NULL(planes[plane].dbuf))
			dma_buf_put(planes[plane].dbuf);
	}
err_put_vb2_buf:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in the driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	trace_vb2_buf_queue(q, vb);

	call_void_vb_qop(vb, buf_queue, vb);
}
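
/*
 * The buf_queue op invoked above typically just hands the buffer over to
 * driver-owned bookkeeping; the actual hardware programming happens
 * later. A common sketch (hypothetical 'my_buffer'/'my_dev' types, as in
 * the buf_struct_size example earlier):
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct my_buffer *buf = container_of(vbuf, struct my_buffer, vb);
 *		struct my_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&dev->qlock, flags);
 *		list_add_tail(&buf->list, &dev->buf_list);
 *		spin_unlock_irqrestore(&dev->qlock, flags);
 *	}
 */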

static int __buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	enum vb2_buffer_state orig_state = vb->state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (vb->prepared)
		return 0;
	WARN_ON(vb->synced);

	if (q->is_output) {
		ret = call_vb_qop(vb, buf_out_validate, vb);
		if (ret) {
			dprintk(q, 1, "buffer validation failed\n");
			return ret;
		}
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
		break;
	}

	if (ret) {
		dprintk(q, 1, "buffer preparation failed: %d\n", ret);
		vb->state = orig_state;
		return ret;
	}

	__vb2_buf_mem_prepare(vb);
	vb->prepared = 1;
	vb->state = orig_state;

	return 0;
}

static int vb2_req_prepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int ret;

	if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
		return -EINVAL;

	mutex_lock(vb->vb2_queue->lock);
	ret = __buf_prepare(vb);
	mutex_unlock(vb->vb2_queue->lock);
	return ret;
}

static void __vb2_dqbuf(struct vb2_buffer *vb);

static void vb2_req_unprepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	mutex_lock(vb->vb2_queue->lock);
	__vb2_dqbuf(vb);
	vb->state = VB2_BUF_STATE_IN_REQUEST;
	mutex_unlock(vb->vb2_queue->lock);
	WARN_ON(!vb->req_obj.req);
}

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int err;

	mutex_lock(vb->vb2_queue->lock);
	/*
	 * There is no method to propagate an error from vb2_core_qbuf(),
	 * so if this returns a non-0 value, then WARN.
	 *
	 * The only exception is -EIO which is returned if q->error is
	 * set. We just ignore that, and expect this will be caught the
	 * next time vb2_req_prepare() is called.
	 */
	err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	int ret;

	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i, ret = 0;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);

	if (count == 0)
		return 0;

	if (count > q_num_bufs)
		return -EINVAL;

	if (start > q->max_num_buffers - count)
		return -EINVAL;

	mutex_lock(&q->mmap_lock);

	/* Check that all buffers in the range exist */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (!vb) {
			ret = -EINVAL;
			goto unlock;
		}
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			ret = -EBUSY;
			goto unlock;
		}
	}
	__vb2_queue_free(q, start, count);
	dprintk(q, 2, "%u buffers removed\n", count);

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_remove_bufs);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_queued_buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to the driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned int i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->max_num_buffers; ++i) {
			vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;

			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
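
/*
 * The contract spelled out above, seen from the driver side (a sketch;
 * the driver types, 'my_hw_start()' and the list handling are
 * hypothetical, as in the earlier examples):
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *		int ret = my_hw_start(dev);
 *
 *		if (ret) {
 *			struct my_buffer *buf, *tmp;
 *
 *			// Return all queued buffers to vb2 in QUEUED state.
 *			list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *				list_del(&buf->list);
 *				vb2_buffer_done(&buf->vb.vb2_buf,
 *						VB2_BUF_STATE_QUEUED);
 *			}
 *		}
 *		return ret;
 *	}
 */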
1828 
1829 int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
1830 		  struct media_request *req)
1831 {
1832 	enum vb2_buffer_state orig_state;
1833 	int ret;
1834 
1835 	if (q->error) {
1836 		dprintk(q, 1, "fatal error occurred on queue\n");
1837 		return -EIO;
1838 	}
1839 
1840 	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
1841 	    q->requires_requests) {
1842 		dprintk(q, 1, "qbuf requires a request\n");
1843 		return -EBADR;
1844 	}
1845 
1846 	if ((req && q->uses_qbuf) ||
1847 	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
1848 	     q->uses_requests)) {
1849 		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
1850 		return -EBUSY;
1851 	}
1852 
1853 	if (req) {
1856 		q->uses_requests = 1;
1857 		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1858 			dprintk(q, 1, "buffer %d not in dequeued state\n",
1859 				vb->index);
1860 			return -EINVAL;
1861 		}
1862 
1863 		if (q->is_output && !vb->prepared) {
1864 			ret = call_vb_qop(vb, buf_out_validate, vb);
1865 			if (ret) {
1866 				dprintk(q, 1, "buffer validation failed\n");
1867 				return ret;
1868 			}
1869 		}
1870 
1871 		media_request_object_init(&vb->req_obj);
1872 
1873 		/* Make sure the request is in a safe state for updating. */
1874 		ret = media_request_lock_for_update(req);
1875 		if (ret)
1876 			return ret;
1877 		ret = media_request_object_bind(req, &vb2_core_req_ops,
1878 						q, true, &vb->req_obj);
1879 		media_request_unlock_for_update(req);
1880 		if (ret)
1881 			return ret;
1882 
1883 		vb->state = VB2_BUF_STATE_IN_REQUEST;
1884 
1885 		/*
1886 		 * Increment the refcount and store the request.
1887 		 * The request refcount is decremented again when the
1888 		 * buffer is dequeued. This is to prevent vb2_buffer_done()
1889 		 * from freeing the request from interrupt context, which can
1890 		 * happen if the application closed the request fd after
1891 		 * queueing the request.
1892 		 */
1893 		media_request_get(req);
1894 		vb->request = req;
1895 
1896 		/* Fill buffer information for the userspace */
1897 		if (pb) {
1898 			call_void_bufop(q, copy_timestamp, vb, pb);
1899 			call_void_bufop(q, fill_user_buffer, vb, pb);
1900 		}
1901 
1902 		dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
1903 		return 0;
1904 	}
1905 
1906 	if (vb->state != VB2_BUF_STATE_IN_REQUEST)
1907 		q->uses_qbuf = 1;
1908 
1909 	switch (vb->state) {
1910 	case VB2_BUF_STATE_DEQUEUED:
1911 	case VB2_BUF_STATE_IN_REQUEST:
1912 		if (!vb->prepared) {
1913 			ret = __buf_prepare(vb);
1914 			if (ret)
1915 				return ret;
1916 		}
1917 		break;
1918 	case VB2_BUF_STATE_PREPARING:
1919 		dprintk(q, 1, "buffer still being prepared\n");
1920 		return -EINVAL;
1921 	default:
1922 		dprintk(q, 1, "invalid buffer state %s\n",
1923 			vb2_state_name(vb->state));
1924 		return -EINVAL;
1925 	}
1926 
1927 	/*
1928 	 * Add to the queued buffers list; a buffer will stay on it until
1929 	 * dequeued in dqbuf.
1930 	 */
1931 	orig_state = vb->state;
1932 	list_add_tail(&vb->queued_entry, &q->queued_list);
1933 	q->queued_count++;
1934 	q->waiting_for_buffers = false;
1935 	vb->state = VB2_BUF_STATE_QUEUED;
1936 
1937 	if (pb)
1938 		call_void_bufop(q, copy_timestamp, vb, pb);
1939 
1940 	trace_vb2_qbuf(q, vb);
1941 
1942 	/*
1943 	 * If already streaming, give the buffer to driver for processing.
1944 	 * If not, the buffer will be given to driver on next streamon.
1945 	 */
1946 	if (q->start_streaming_called)
1947 		__enqueue_in_driver(vb);
1948 
1949 	/* Fill buffer information for the userspace */
1950 	if (pb)
1951 		call_void_bufop(q, fill_user_buffer, vb, pb);
1952 
1953 	/*
1954 	 * If streamon has been called, and we haven't yet called
1955 	 * start_streaming() since not enough buffers were queued, and
1956 	 * we now have reached the minimum number of queued buffers,
1957 	 * then we can finally call start_streaming().
1958 	 */
1959 	if (q->streaming && !q->start_streaming_called &&
1960 	    q->queued_count >= q->min_queued_buffers) {
1961 		ret = vb2_start_streaming(q);
1962 		if (ret) {
1963 			/*
1964 			 * Since vb2_core_qbuf will return with an error,
1965 			 * we should restore the buffer to its original state,
1966 			 * since the error indicates that it wasn't queued.
1967 			 */
1968 			list_del(&vb->queued_entry);
1969 			q->queued_count--;
1970 			vb->state = orig_state;
1971 			return ret;
1972 		}
1973 	}
1974 
1975 	dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
1976 	return 0;
1977 }
1978 EXPORT_SYMBOL_GPL(vb2_core_qbuf);
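
/*
 * Illustrative call sequence only (a sketch, not code from this file):
 * the V4L2 ioctl handlers in videobuf2-v4l2.c drive the core roughly as
 *
 *	vb2_core_streamon(q, q->type);		// may defer start_streaming()
 *	vb2_core_qbuf(q, vb, pb, NULL);		// calls start_streaming() once
 *						// min_queued_buffers are queued
 *	vb2_core_dqbuf(q, &index, pb, false);	// waits for a done buffer
 *
 * so a driver whose start_streaming() needs N buffers will not see it
 * called until the Nth qbuf (or at streamon, if enough were queued before).
 */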
1979 
1980 /*
1981  * __vb2_wait_for_done_vb() - wait for a buffer to become available
1982  * for dequeuing
1983  *
1984  * Will sleep if necessary when nonblocking == false.
1985  */
1986 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1987 {
1988 	/*
1989 	 * All operations on the done_list are performed under done_lock
1990 	 * spinlock protection. However, buffers may be removed from
1991 	 * it and returned to userspace only while holding both the driver's
1992 	 * lock and the done_lock spinlock. Thus we can be sure that as
1993 	 * long as we hold the driver's lock, the list will remain non-empty
1994 	 * once a list_empty() check has found it non-empty.
1995 	 */
1996 
1997 	for (;;) {
1998 		int ret;
1999 
2000 		if (q->waiting_in_dqbuf) {
2001 			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
2002 			return -EBUSY;
2003 		}
2004 
2005 		if (!q->streaming) {
2006 			dprintk(q, 1, "streaming off, will not wait for buffers\n");
2007 			return -EINVAL;
2008 		}
2009 
2010 		if (q->error) {
2011 			dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
2012 			return -EIO;
2013 		}
2014 
2015 		if (q->last_buffer_dequeued) {
2016 			dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
2017 			return -EPIPE;
2018 		}
2019 
2020 		if (!list_empty(&q->done_list)) {
2021 			/*
2022 			 * Found a buffer that we were waiting for.
2023 			 */
2024 			break;
2025 		}
2026 
2027 		if (nonblocking) {
2028 			dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
2029 			return -EAGAIN;
2030 		}
2031 
2032 		q->waiting_in_dqbuf = 1;
2033 		/*
2034 		 * We are streaming and blocking, wait for another buffer to
2035 		 * become ready or for streamoff. Driver's lock is released to
2036 		 * allow streamoff or qbuf to be called while waiting.
2037 		 */
2038 		if (q->ops->wait_prepare)
2039 			call_void_qop(q, wait_prepare, q);
2040 		else if (q->lock)
2041 			mutex_unlock(q->lock);
2042 
2043 		/*
2044 		 * All locks have been released; it is safe to sleep now.
2045 		 */
2046 		dprintk(q, 3, "will sleep waiting for buffers\n");
2047 		ret = wait_event_interruptible(q->done_wq,
2048 				!list_empty(&q->done_list) || !q->streaming ||
2049 				q->error);
2050 
2051 		if (q->ops->wait_finish)
2052 			call_void_qop(q, wait_finish, q);
2053 		else if (q->lock)
2054 			mutex_lock(q->lock);
2055 
2056 		q->waiting_in_dqbuf = 0;
2057 		/*
2058 		 * We need to re-evaluate the wait conditions after reacquiring
2059 		 * the locks, or return an error if one occurred.
2060 		 */
2061 		if (ret) {
2062 			dprintk(q, 1, "sleep was interrupted\n");
2063 			return ret;
2064 		}
2065 	}
2066 	return 0;
2067 }
2068 
2069 /*
2070  * __vb2_get_done_vb() - get a buffer ready for dequeuing
2071  *
2072  * Will sleep if necessary when nonblocking == false.
2073  */
2074 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
2075 			     void *pb, int nonblocking)
2076 {
2077 	unsigned long flags;
2078 	int ret = 0;
2079 
2080 	/*
2081 	 * Wait for at least one buffer to become available on the done_list.
2082 	 */
2083 	ret = __vb2_wait_for_done_vb(q, nonblocking);
2084 	if (ret)
2085 		return ret;
2086 
2087 	/*
2088 	 * Driver's lock has been held since we last verified that done_list
2089 	 * is not empty, so no need for another list_empty(done_list) check.
2090 	 */
2091 	spin_lock_irqsave(&q->done_lock, flags);
2092 	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
2093 	/*
2094 	 * Only remove the buffer from done_list if all planes can be
2095 	 * handled. Some cases such as V4L2 file I/O and DVB have pb
2096 	 * == NULL; skip the check then as there's nothing to verify.
2097 	 */
2098 	if (pb)
2099 		ret = call_bufop(q, verify_planes_array, *vb, pb);
2100 	if (!ret)
2101 		list_del(&(*vb)->done_entry);
2102 	spin_unlock_irqrestore(&q->done_lock, flags);
2103 
2104 	return ret;
2105 }
2106 
2107 int vb2_wait_for_all_buffers(struct vb2_queue *q)
2108 {
2109 	if (!q->streaming) {
2110 		dprintk(q, 1, "streaming off, will not wait for buffers\n");
2111 		return -EINVAL;
2112 	}
2113 
2114 	if (q->start_streaming_called)
2115 		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
2116 	return 0;
2117 }
2118 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
2119 
2120 /*
2121  * __vb2_dqbuf() - return the buffer to the DEQUEUED state
2122  */
2123 static void __vb2_dqbuf(struct vb2_buffer *vb)
2124 {
2125 	struct vb2_queue *q = vb->vb2_queue;
2126 
2127 	/* nothing to do if the buffer is already dequeued */
2128 	if (vb->state == VB2_BUF_STATE_DEQUEUED)
2129 		return;
2130 
2131 	vb->state = VB2_BUF_STATE_DEQUEUED;
2132 
2133 	call_void_bufop(q, init_buffer, vb);
2134 }
2135 
2136 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
2137 		   bool nonblocking)
2138 {
2139 	struct vb2_buffer *vb = NULL;
2140 	int ret;
2141 
2142 	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
2143 	if (ret < 0)
2144 		return ret;
2145 
2146 	switch (vb->state) {
2147 	case VB2_BUF_STATE_DONE:
2148 		dprintk(q, 3, "returning done buffer\n");
2149 		break;
2150 	case VB2_BUF_STATE_ERROR:
2151 		dprintk(q, 3, "returning done buffer with errors\n");
2152 		break;
2153 	default:
2154 		dprintk(q, 1, "invalid buffer state %s\n",
2155 			vb2_state_name(vb->state));
2156 		return -EINVAL;
2157 	}
2158 
2159 	call_void_vb_qop(vb, buf_finish, vb);
2160 	vb->prepared = 0;
2161 
2162 	if (pindex)
2163 		*pindex = vb->index;
2164 
2165 	/* Fill buffer information for the userspace */
2166 	if (pb)
2167 		call_void_bufop(q, fill_user_buffer, vb, pb);
2168 
2169 	/* Remove from vb2 queue */
2170 	list_del(&vb->queued_entry);
2171 	q->queued_count--;
2172 
2173 	trace_vb2_dqbuf(q, vb);
2174 
2175 	/* go back to dequeued state */
2176 	__vb2_dqbuf(vb);
2177 
2178 	if (WARN_ON(vb->req_obj.req)) {
2179 		media_request_object_unbind(&vb->req_obj);
2180 		media_request_object_put(&vb->req_obj);
2181 	}
2182 	if (vb->request)
2183 		media_request_put(vb->request);
2184 	vb->request = NULL;
2185 
2186 	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
2187 		vb->index, vb2_state_name(vb->state));
2188 
2189 	return 0;
2191 }
2192 EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
2193 
2194 /*
2195  * __vb2_queue_cancel() - cancel and stop (pause) streaming
2196  *
2197  * Removes all queued buffers from the driver's queue and all buffers queued
2198  * by userspace from vb2's queue. Returns the queue to the state right after reqbufs.
2199  */
2200 static void __vb2_queue_cancel(struct vb2_queue *q)
2201 {
2202 	unsigned int i;
2203 
2204 	/*
2205 	 * Tell driver to stop all transactions and release all queued
2206 	 * buffers.
2207 	 */
2208 	if (q->start_streaming_called)
2209 		call_void_qop(q, stop_streaming, q);
2210 
2211 	if (q->streaming)
2212 		call_void_qop(q, unprepare_streaming, q);
2213 
2214 	/*
2215 	 * If you see this warning, then the driver isn't cleaning up properly
2216 	 * in stop_streaming(). See the stop_streaming() documentation in
2217 	 * videobuf2-core.h for more information on how buffers should be returned
2218 	 * to vb2 in stop_streaming().
2219 	 */
2220 	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2221 		for (i = 0; i < q->max_num_buffers; i++) {
2222 			struct vb2_buffer *vb = vb2_get_buffer(q, i);
2223 
2224 			if (!vb)
2225 				continue;
2226 
2227 			if (vb->state == VB2_BUF_STATE_ACTIVE) {
2228 				pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n",
2229 					vb->index);
2230 				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
2231 			}
2232 		}
2233 		/* Must be zero now */
2234 		WARN_ON(atomic_read(&q->owned_by_drv_count));
2235 	}
2236 
2237 	q->streaming = 0;
2238 	q->start_streaming_called = 0;
2239 	q->queued_count = 0;
2240 	q->error = 0;
2241 	q->uses_requests = 0;
2242 	q->uses_qbuf = 0;
2243 
2244 	/*
2245 	 * Remove all buffers from vb2's list...
2246 	 */
2247 	INIT_LIST_HEAD(&q->queued_list);
2248 	/*
2249 	 * ...and done list; userspace will not receive any buffers it
2250 	 * has not already dequeued before initiating cancel.
2251 	 */
2252 	INIT_LIST_HEAD(&q->done_list);
2253 	atomic_set(&q->owned_by_drv_count, 0);
2254 	wake_up_all(&q->done_wq);
2255 
2256 	/*
2257 	 * Reinitialize all buffers for next use.
2258 	 * Make sure to call buf_finish for any queued buffers. Normally
2259 	 * that's done in dqbuf, but that's not going to happen when we
2260 	 * cancel the whole queue. Note: this code belongs here, not in
2261 	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
2262 	 * call to __fill_user_buffer() after buf_finish(). That order can't
2263 	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
2264 	 */
2265 	for (i = 0; i < q->max_num_buffers; i++) {
2266 		struct vb2_buffer *vb;
2267 		struct media_request *req;
2268 
2269 		vb = vb2_get_buffer(q, i);
2270 		if (!vb)
2271 			continue;
2272 
2273 		req = vb->req_obj.req;
2274 		/*
2275 		 * If a request is associated with this buffer, then
2276 		 * call buf_request_complete() to allow the driver to
2277 		 * complete() related request objects. Otherwise those
2278 		 * objects would never complete.
2279 		 */
2280 		if (req) {
2281 			enum media_request_state state;
2282 			unsigned long flags;
2283 
2284 			spin_lock_irqsave(&req->lock, flags);
2285 			state = req->state;
2286 			spin_unlock_irqrestore(&req->lock, flags);
2287 
2288 			if (state == MEDIA_REQUEST_STATE_QUEUED)
2289 				call_void_vb_qop(vb, buf_request_complete, vb);
2290 		}
2291 
2292 		__vb2_buf_mem_finish(vb);
2293 
2294 		if (vb->prepared) {
2295 			call_void_vb_qop(vb, buf_finish, vb);
2296 			vb->prepared = 0;
2297 		}
2298 		__vb2_dqbuf(vb);
2299 
2300 		if (vb->req_obj.req) {
2301 			media_request_object_unbind(&vb->req_obj);
2302 			media_request_object_put(&vb->req_obj);
2303 		}
2304 		if (vb->request)
2305 			media_request_put(vb->request);
2306 		vb->request = NULL;
2307 		vb->copied_timestamp = 0;
2308 	}
2309 }
2310 
2311 int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
2312 {
2313 	unsigned int q_num_bufs = vb2_get_num_buffers(q);
2314 	int ret;
2315 
2316 	if (type != q->type) {
2317 		dprintk(q, 1, "invalid stream type\n");
2318 		return -EINVAL;
2319 	}
2320 
2321 	if (q->streaming) {
2322 		dprintk(q, 3, "already streaming\n");
2323 		return 0;
2324 	}
2325 
2326 	if (!q_num_bufs) {
2327 		dprintk(q, 1, "no buffers have been allocated\n");
2328 		return -EINVAL;
2329 	}
2330 
2331 	if (q_num_bufs < q->min_queued_buffers) {
2332 		dprintk(q, 1, "need at least %u queued buffers\n",
2333 			q->min_queued_buffers);
2334 		return -EINVAL;
2335 	}
2336 
2337 	ret = call_qop(q, prepare_streaming, q);
2338 	if (ret)
2339 		return ret;
2340 
2341 	/*
2342 	 * Tell the driver to start streaming, provided sufficient buffers
2343 	 * are available.
2344 	 */
2345 	if (q->queued_count >= q->min_queued_buffers) {
2346 		ret = vb2_start_streaming(q);
2347 		if (ret)
2348 			goto unprepare;
2349 	}
2350 
2351 	q->streaming = 1;
2352 
2353 	dprintk(q, 3, "successful\n");
2354 	return 0;
2355 
2356 unprepare:
2357 	call_void_qop(q, unprepare_streaming, q);
2358 	return ret;
2359 }
2360 EXPORT_SYMBOL_GPL(vb2_core_streamon);
2361 
2362 void vb2_queue_error(struct vb2_queue *q)
2363 {
2364 	q->error = 1;
2365 
2366 	wake_up_all(&q->done_wq);
2367 }
2368 EXPORT_SYMBOL_GPL(vb2_queue_error);
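
/*
 * Illustrative only (my_hw_is_dead() and dev are hypothetical): drivers
 * typically call this from an error interrupt or watchdog path, e.g.
 *
 *	if (my_hw_is_dead(dev))
 *		vb2_queue_error(&dev->queue);
 *
 * after which dqbuf waiters get -EIO and poll reports EPOLLERR, until the
 * queue is cancelled via streamoff (__vb2_queue_cancel() clears q->error).
 */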
2369 
2370 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
2371 {
2372 	if (type != q->type) {
2373 		dprintk(q, 1, "invalid stream type\n");
2374 		return -EINVAL;
2375 	}
2376 
2377 	/*
2378 	 * Cancel will pause streaming and remove all buffers from the driver
2379 	 * and vb2, effectively returning control over them to userspace.
2380 	 *
2381 	 * Note that we do this even if q->streaming == 0: if you prepare or
2382 	 * queue buffers, and then call streamoff without ever having called
2383 	 * streamon, you would still expect those buffers to be returned to
2384 	 * their normal dequeued state.
2385 	 */
2386 	__vb2_queue_cancel(q);
2387 	q->waiting_for_buffers = !q->is_output;
2388 	q->last_buffer_dequeued = false;
2389 
2390 	dprintk(q, 3, "successful\n");
2391 	return 0;
2392 }
2393 EXPORT_SYMBOL_GPL(vb2_core_streamoff);
2394 
2395 /*
2396  * __find_plane_by_offset() - find plane associated with the given offset
2397  */
2398 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset,
2399 			struct vb2_buffer **vb, unsigned int *plane)
2400 {
2401 	unsigned int buffer;
2402 
2403 	/*
2404 	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
2405 	 * used and fileio isn't active.
2406 	 */
2407 	lockdep_assert_held(&q->mmap_lock);
2408 
2409 	if (q->memory != VB2_MEMORY_MMAP) {
2410 		dprintk(q, 1, "queue is not currently set up for mmap\n");
2411 		return -EINVAL;
2412 	}
2413 
2414 	if (vb2_fileio_is_active(q)) {
2415 		dprintk(q, 1, "file io in progress\n");
2416 		return -EBUSY;
2417 	}
2418 
2419 	/* Get buffer and plane from the offset */
2420 	buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
2421 	*plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK;
2422 
2423 	*vb = vb2_get_buffer(q, buffer);
2424 	if (!*vb)
2425 		return -EINVAL;
2426 	if (*plane >= (*vb)->num_planes)
2427 		return -EINVAL;
2428 
2429 	return 0;
2430 }
2431 
2432 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
2433 		    struct vb2_buffer *vb, unsigned int plane, unsigned int flags)
2434 {
2435 	struct vb2_plane *vb_plane;
2436 	int ret;
2437 	struct dma_buf *dbuf;
2438 
2439 	if (q->memory != VB2_MEMORY_MMAP) {
2440 		dprintk(q, 1, "queue is not currently set up for mmap\n");
2441 		return -EINVAL;
2442 	}
2443 
2444 	if (!q->mem_ops->get_dmabuf) {
2445 		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
2446 		return -EINVAL;
2447 	}
2448 
2449 	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
2450 		dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n");
2451 		return -EINVAL;
2452 	}
2453 
2454 	if (type != q->type) {
2455 		dprintk(q, 1, "invalid buffer type\n");
2456 		return -EINVAL;
2457 	}
2458 
2459 	if (plane >= vb->num_planes) {
2460 		dprintk(q, 1, "buffer plane out of range\n");
2461 		return -EINVAL;
2462 	}
2463 
2464 	if (vb2_fileio_is_active(q)) {
2465 		dprintk(q, 1, "expbuf: file io in progress\n");
2466 		return -EBUSY;
2467 	}
2468 
2469 	vb_plane = &vb->planes[plane];
2470 
2471 	dbuf = call_ptr_memop(get_dmabuf,
2472 			      vb,
2473 			      vb_plane->mem_priv,
2474 			      flags & O_ACCMODE);
2475 	if (IS_ERR_OR_NULL(dbuf)) {
2476 		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
2477 			vb->index, plane);
2478 		return -EINVAL;
2479 	}
2480 
2481 	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
2482 	if (ret < 0) {
2483 		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
2484 			vb->index, plane, ret);
2485 		dma_buf_put(dbuf);
2486 		return ret;
2487 	}
2488 
2489 	dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
2490 		vb->index, plane, ret);
2491 	*fd = ret;
2492 
2493 	return 0;
2494 }
2495 EXPORT_SYMBOL_GPL(vb2_core_expbuf);
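
/*
 * Illustrative only: VIDIOC_EXPBUF in videobuf2-v4l2.c ends up here. A
 * hypothetical caller wanting a read/write, close-on-exec dma-buf fd for
 * plane 0 of @vb would do
 *
 *	int fd;
 *	int ret = vb2_core_expbuf(q, &fd, q->type, vb, 0,
 *				  O_CLOEXEC | O_RDWR);
 *
 * and hand fd to userspace, which can then import the buffer into another
 * device through the dma-buf API.
 */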
2496 
2497 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2498 {
2499 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2500 	struct vb2_buffer *vb;
2501 	unsigned int plane = 0;
2502 	int ret;
2503 	unsigned long length;
2504 
2505 	/*
2506 	 * Check memory area access mode.
2507 	 */
2508 	if (!(vma->vm_flags & VM_SHARED)) {
2509 		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
2510 		return -EINVAL;
2511 	}
2512 	if (q->is_output) {
2513 		if (!(vma->vm_flags & VM_WRITE)) {
2514 			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
2515 			return -EINVAL;
2516 		}
2517 	} else {
2518 		if (!(vma->vm_flags & VM_READ)) {
2519 			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
2520 			return -EINVAL;
2521 		}
2522 	}
2523 
2524 	mutex_lock(&q->mmap_lock);
2525 
2526 	/*
2527 	 * Find the plane corresponding to the offset passed by userspace. This
2528 	 * fails if the memory type is not MEMORY_MMAP or file I/O is in progress.
2529 	 */
2530 	ret = __find_plane_by_offset(q, offset, &vb, &plane);
2531 	if (ret)
2532 		goto unlock;
2533 
2534 	/*
2535 	 * MMAP requires page-aligned buffers.
2536 	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
2537 	 * so we need to do the same here.
2538 	 */
2539 	length = PAGE_ALIGN(vb->planes[plane].length);
2540 	if (length < (vma->vm_end - vma->vm_start)) {
2541 		dprintk(q, 1,
2542 			"MMAP invalid, as it would overflow buffer length\n");
2543 		ret = -EINVAL;
2544 		goto unlock;
2545 	}
2546 
2547 	/*
2548 	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
2549 	 * not as an in-buffer offset. We always want to mmap a whole buffer
2550 	 * from its beginning.
2551 	 */
2552 	vma->vm_pgoff = 0;
2553 
2554 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2555 
2556 unlock:
2557 	mutex_unlock(&q->mmap_lock);
2558 	if (ret)
2559 		return ret;
2560 
2561 	dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane);
2562 	return 0;
2563 }
2564 EXPORT_SYMBOL_GPL(vb2_mmap);
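
/*
 * Illustrative only (my_dev and my_mmap are hypothetical): a driver
 * calling into the core directly would wire vb2_mmap() up as
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_mmap(&dev->queue, vma);
 *	}
 *
 * though most V4L2 drivers use the vb2_fop_mmap() helper from
 * videobuf2-v4l2.c instead.
 */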
2565 
2566 #ifndef CONFIG_MMU
2567 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2568 				    unsigned long addr,
2569 				    unsigned long len,
2570 				    unsigned long pgoff,
2571 				    unsigned long flags)
2572 {
2573 	unsigned long offset = pgoff << PAGE_SHIFT;
2574 	struct vb2_buffer *vb;
2575 	unsigned int plane;
2576 	void *vaddr;
2577 	int ret;
2578 
2579 	mutex_lock(&q->mmap_lock);
2580 
2581 	/*
2582 	 * Find the plane corresponding to the offset passed by userspace. This
2583 	 * fails if the memory type is not MEMORY_MMAP or file I/O is in progress.
2584 	 */
2585 	ret = __find_plane_by_offset(q, offset, &vb, &plane);
2586 	if (ret)
2587 		goto unlock;
2588 
2589 	vaddr = vb2_plane_vaddr(vb, plane);
2590 	mutex_unlock(&q->mmap_lock);
2591 	return vaddr ? (unsigned long)vaddr : -EINVAL;
2592 
2593 unlock:
2594 	mutex_unlock(&q->mmap_lock);
2595 	return ret;
2596 }
2597 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2598 #endif
2599 
2600 int vb2_core_queue_init(struct vb2_queue *q)
2601 {
2602 	/*
2603 	 * Sanity checks; q must not be dereferenced before these pass.
2604 	 */
2605 	if (WARN_ON(!q)			  ||
2606 	    WARN_ON(!q->ops)		  ||
2607 	    WARN_ON(!q->mem_ops)	  ||
2608 	    WARN_ON(!q->type)		  ||
2609 	    WARN_ON(!q->io_modes)	  ||
2610 	    WARN_ON(!q->ops->queue_setup) ||
2611 	    WARN_ON(!q->ops->buf_queue))
2612 		return -EINVAL;
2613 
2614 	/*
2615 	 * For drivers that don't set max_num_buffers, ensure
2616 	 * backward compatibility.
2617 	 */
2618 	if (!q->max_num_buffers)
2619 		q->max_num_buffers = VB2_MAX_FRAME;
2620 
2621 	/* The maximum is limited by the offset cookie encoding pattern */
2622 	q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX);
2623 
2624 	if (WARN_ON(q->max_num_buffers < VB2_MAX_FRAME) ||
2625 	    WARN_ON(q->min_queued_buffers > q->max_num_buffers))
2626 		return -EINVAL;
2627 
2628 	if (WARN_ON(q->requires_requests && !q->supports_requests))
2629 		return -EINVAL;
2630 
2631 	/*
2632 	 * This combination is not allowed since a non-zero value of
2633 	 * q->min_queued_buffers can cause vb2_core_qbuf() to fail if
2634 	 * it has to call start_streaming(), and the Request API expects
2635 	 * that queueing a request (and thus queueing a buffer contained
2636 	 * in that request) will always succeed. There is no method of
2637 	 * propagating an error back to userspace.
2638 	 */
2639 	if (WARN_ON(q->supports_requests && q->min_queued_buffers))
2640 		return -EINVAL;
2641 
2642 	/*
2643 	 * If the driver needs 'min_queued_buffers' in the queue before
2644 	 * calling start_streaming() then the minimum requirement is
2645 	 * 'min_queued_buffers + 1' to keep at least one buffer available
2646 	 * for userspace.
2647 	 */
2648 	if (q->min_reqbufs_allocation < q->min_queued_buffers + 1)
2649 		q->min_reqbufs_allocation = q->min_queued_buffers + 1;
2650 
2651 	if (WARN_ON(q->min_reqbufs_allocation > q->max_num_buffers))
2652 		return -EINVAL;
2653 
2654 	/* wait_prepare and wait_finish must either both be set or both be NULL */
2655 	if (WARN_ON(!q->ops->wait_prepare ^ !q->ops->wait_finish))
2656 		return -EINVAL;
2657 
2658 	/* Fail (with a warning) if neither q->lock nor a custom wait_prepare is provided */
2659 	if (WARN_ON(!q->lock && !q->ops->wait_prepare))
2660 		return -EINVAL;
2661 
2662 	INIT_LIST_HEAD(&q->queued_list);
2663 	INIT_LIST_HEAD(&q->done_list);
2664 	spin_lock_init(&q->done_lock);
2665 	mutex_init(&q->mmap_lock);
2666 	init_waitqueue_head(&q->done_wq);
2667 
2668 	q->memory = VB2_MEMORY_UNKNOWN;
2669 
2670 	if (q->buf_struct_size == 0)
2671 		q->buf_struct_size = sizeof(struct vb2_buffer);
2672 
2673 	if (q->bidirectional)
2674 		q->dma_dir = DMA_BIDIRECTIONAL;
2675 	else
2676 		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2677 
2678 	if (q->name[0] == '\0')
2679 		snprintf(q->name, sizeof(q->name), "%s-%p",
2680 			 q->is_output ? "out" : "cap", q);
2681 
2682 	return 0;
2683 }
2684 EXPORT_SYMBOL_GPL(vb2_core_queue_init);
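
/*
 * Illustrative sketch of the fields a caller is expected to fill in before
 * vb2_core_queue_init() (my_qops and my_lock are hypothetical; V4L2 drivers
 * normally go through vb2_queue_init() in videobuf2-v4l2.c):
 *
 *	q->type = ...;				// stream type
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->ops = &my_qops;			// queue_setup/buf_queue required
 *	q->mem_ops = &vb2_vmalloc_memops;	// or dma-contig/dma-sg
 *	q->lock = &my_lock;			// or supply wait_prepare/wait_finish
 *	ret = vb2_core_queue_init(q);
 */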
2685 
2686 static int __vb2_init_fileio(struct vb2_queue *q, int read);
2687 static int __vb2_cleanup_fileio(struct vb2_queue *q);
2688 void vb2_core_queue_release(struct vb2_queue *q)
2689 {
2690 	__vb2_cleanup_fileio(q);
2691 	__vb2_queue_cancel(q);
2692 	mutex_lock(&q->mmap_lock);
2693 	__vb2_queue_free(q, 0, q->max_num_buffers);
2694 	vb2_core_free_buffers_storage(q);
2695 	q->is_busy = 0;
2696 	mutex_unlock(&q->mmap_lock);
2697 }
2698 EXPORT_SYMBOL_GPL(vb2_core_queue_release);
2699 
2700 __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
2701 		poll_table *wait)
2702 {
2703 	__poll_t req_events = poll_requested_events(wait);
2704 	struct vb2_buffer *vb = NULL;
2705 	unsigned long flags;
2706 
2707 	/*
2708 	 * poll_wait() MUST be called on the first invocation on all the
2709 	 * potential queues of interest, even if we are not interested in their
2710 	 * events during this first call. Failure to do so will result in the
2711 	 * queue's events being ignored because the poll_table won't be capable
2712 	 * of adding new wait queues thereafter.
2713 	 */
2714 	poll_wait(file, &q->done_wq, wait);
2715 
2716 	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
2717 		return 0;
2718 	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
2719 		return 0;
2720 
2721 	/*
2722 	 * Start file I/O emulator only if streaming API has not been used yet.
2723 	 */
2724 	if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
2725 		if (!q->is_output && (q->io_modes & VB2_READ) &&
2726 				(req_events & (EPOLLIN | EPOLLRDNORM))) {
2727 			if (__vb2_init_fileio(q, 1))
2728 				return EPOLLERR;
2729 		}
2730 		if (q->is_output && (q->io_modes & VB2_WRITE) &&
2731 				(req_events & (EPOLLOUT | EPOLLWRNORM))) {
2732 			if (__vb2_init_fileio(q, 0))
2733 				return EPOLLERR;
2734 			/*
2735 			 * Write to OUTPUT queue can be done immediately.
2736 			 */
2737 			return EPOLLOUT | EPOLLWRNORM;
2738 		}
2739 	}
2740 
2741 	/*
2742 	 * There is nothing to wait for if the queue isn't streaming, or if the
2743 	 * error flag is set.
2744 	 */
2745 	if (!vb2_is_streaming(q) || q->error)
2746 		return EPOLLERR;
2747 
2748 	/*
2749 	 * If this quirk is set and QBUF hasn't been called yet then
2750 	 * return EPOLLERR as well. This only affects capture queues, output
2751 	 * queues will always initialize waiting_for_buffers to false.
2752 	 * This quirk is set by V4L2 for backwards compatibility reasons.
2753 	 */
2754 	if (q->quirk_poll_must_check_waiting_for_buffers &&
2755 	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
2756 		return EPOLLERR;
2757 
2758 	/*
2759 	 * For output streams you can call write() as long as there are fewer
2760 	 * buffers queued than there are buffers available.
2761 	 */
2762 	if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q))
2763 		return EPOLLOUT | EPOLLWRNORM;
2764 
2765 	if (list_empty(&q->done_list)) {
2766 		/*
2767 		 * If the last buffer was dequeued from a capture queue,
2768 		 * return immediately. DQBUF will return -EPIPE.
2769 		 */
2770 		if (q->last_buffer_dequeued)
2771 			return EPOLLIN | EPOLLRDNORM;
2772 	}
2773 
2774 	/*
2775 	 * Take first buffer available for dequeuing.
2776 	 */
2777 	spin_lock_irqsave(&q->done_lock, flags);
2778 	if (!list_empty(&q->done_list))
2779 		vb = list_first_entry(&q->done_list, struct vb2_buffer,
2780 					done_entry);
2781 	spin_unlock_irqrestore(&q->done_lock, flags);
2782 
2783 	if (vb && (vb->state == VB2_BUF_STATE_DONE
2784 			|| vb->state == VB2_BUF_STATE_ERROR)) {
2785 		return (q->is_output) ?
2786 				EPOLLOUT | EPOLLWRNORM :
2787 				EPOLLIN | EPOLLRDNORM;
2788 	}
2789 	return 0;
2790 }
2791 EXPORT_SYMBOL_GPL(vb2_core_poll);
2792 
2793 /*
2794  * struct vb2_fileio_buf - buffer context used by file io emulator
2795  *
2796  * vb2 provides a compatibility layer and emulator of file io (read and
2797  * write) calls on top of the streaming API. This structure is used for
2798  * tracking context related to the buffers.
2799  */
2800 struct vb2_fileio_buf {
2801 	void *vaddr;
2802 	unsigned int size;
2803 	unsigned int pos;
2804 	unsigned int queued:1;
2805 };
2806 
2807 /*
2808  * struct vb2_fileio_data - queue context used by file io emulator
2809  *
2810  * @cur_index:	the index of the buffer currently being read from or
2811  *		written to. If equal to the number of buffers in the vb2_queue
2812  *		then a new buffer must be dequeued.
2813  * @initial_index: in the read() case all buffers are queued up immediately
2814  *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2815  *		through them. However, in the write() case no buffers are initially
2816  *		queued, instead whenever a buffer is full it is queued up by
2817  *		__vb2_perform_fileio(). Only once all available buffers have
2818  *		been queued up will __vb2_perform_fileio() start to dequeue
2819  *		buffers. This means that initially __vb2_perform_fileio()
2820  *		needs to know what buffer index to use when it is queuing up
2821  *		the buffers for the first time. That initial index is stored
2822  *		in this field. Once it is equal to number of buffers in the
2823  *		vb2_queue all available buffers have been queued and
2824  *		__vb2_perform_fileio() should start the normal dequeue/queue cycle.
2825  *
2826  * vb2 provides a compatibility layer and emulator of file io (read and
2827  * write) calls on top of the streaming API. For proper operation it requires
2828  * this structure to save the driver state between each call of the read
2829  * or write function.
2830  */
2831 struct vb2_fileio_data {
2832 	unsigned int count;
2833 	unsigned int type;
2834 	unsigned int memory;
2835 	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
2836 	unsigned int cur_index;
2837 	unsigned int initial_index;
2838 	unsigned int q_count;
2839 	unsigned int dq_count;
2840 	unsigned read_once:1;
2841 	unsigned write_immediately:1;
2842 };
2843 
2844 /*
2845  * __vb2_init_fileio() - initialize file io emulator
2846  * @q:		videobuf2 queue
2847  * @read:	mode selector (1 means read, 0 means write)
2848  */
2849 static int __vb2_init_fileio(struct vb2_queue *q, int read)
2850 {
2851 	struct vb2_fileio_data *fileio;
2852 	struct vb2_buffer *vb;
2853 	int i, ret;
2854 
2855 	/*
2856 	 * Sanity check
2857 	 */
2858 	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2859 		    (!read && !(q->io_modes & VB2_WRITE))))
2860 		return -EINVAL;
2861 
2862 	/*
2863 	 * Check if device supports mapping buffers to kernel virtual space.
2864 	 */
2865 	if (!q->mem_ops->vaddr)
2866 		return -EBUSY;
2867 
2868 	/*
2869 	 * Check that the streaming API has not already been activated.
2870 	 */
2871 	if (q->streaming || vb2_get_num_buffers(q) > 0)
2872 		return -EBUSY;
2873 
2874 	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
2875 		(read) ? "read" : "write", q->min_reqbufs_allocation, q->fileio_read_once,
2876 		q->fileio_write_immediately);
2877 
2878 	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
2879 	if (fileio == NULL)
2880 		return -ENOMEM;
2881 
2882 	fileio->read_once = q->fileio_read_once;
2883 	fileio->write_immediately = q->fileio_write_immediately;
2884 
2885 	/*
2886 	 * Request buffers and use MMAP type to force the driver
2887 	 * to allocate buffers by itself.
2888 	 */
2889 	fileio->count = q->min_reqbufs_allocation;
2890 	fileio->memory = VB2_MEMORY_MMAP;
2891 	fileio->type = q->type;
2892 	q->fileio = fileio;
2893 	ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
2894 	if (ret)
2895 		goto err_kfree;
2896 	/* vb2_fileio_data supports at most VB2_MAX_FRAME buffers */
2897 	if (fileio->count > VB2_MAX_FRAME) {
2898 		dprintk(q, 1, "fileio: more than VB2_MAX_FRAME buffers requested\n");
2899 		ret = -ENOSPC;
2900 		goto err_reqbufs;
2901 	}
2902 
2903 	/*
2904 	 * Userspace can never add or delete buffers later, so there
2905 	 * will never be holes. It is safe to assume that vb2_get_buffer(q, 0)
2906 	 * will always return a valid vb pointer.
2907 	 */
2908 	vb = vb2_get_buffer(q, 0);
2909 
2910 	/*
2911 	 * Check if plane_count is correct
2912 	 * (multiplane buffers are not supported).
2913 	 */
2914 	if (vb->num_planes != 1) {
2915 		ret = -EBUSY;
2916 		goto err_reqbufs;
2917 	}
2918 
2919 	/*
2920 	 * Get kernel address of each buffer.
2921 	 */
2922 	for (i = 0; i < vb2_get_num_buffers(q); i++) {
2923 		/* vb can never be NULL when using fileio. */
2924 		vb = vb2_get_buffer(q, i);
2925 
2926 		fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0);
2927 		if (fileio->bufs[i].vaddr == NULL) {
2928 			ret = -EINVAL;
2929 			goto err_reqbufs;
2930 		}
2931 		fileio->bufs[i].size = vb2_plane_size(vb, 0);
2932 	}
2933 
2934 	/*
2935 	 * Read mode requires pre-queuing of all buffers.
2936 	 */
2937 	if (read) {
2938 		/*
2939 		 * Queue all buffers.
2940 		 */
2941 		for (i = 0; i < vb2_get_num_buffers(q); i++) {
2942 			struct vb2_buffer *vb2 = vb2_get_buffer(q, i);
2943 
2944 			if (!vb2)
2945 				continue;
2946 
2947 			ret = vb2_core_qbuf(q, vb2, NULL, NULL);
2948 			if (ret)
2949 				goto err_reqbufs;
2950 			fileio->bufs[i].queued = 1;
2951 		}
2952 		/*
2953 		 * All buffers have been queued, so mark that by setting
2954 		 * initial_index to the number of buffers in the vb2_queue
2955 		 */
2956 		fileio->initial_index = vb2_get_num_buffers(q);
2957 		fileio->cur_index = fileio->initial_index;
2958 	}
2959 
2960 	/*
2961 	 * Start streaming.
2962 	 */
2963 	ret = vb2_core_streamon(q, q->type);
2964 	if (ret)
2965 		goto err_reqbufs;
2966 
2967 	return ret;
2968 
2969 err_reqbufs:
2970 	fileio->count = 0;
2971 	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
2972 
2973 err_kfree:
2974 	q->fileio = NULL;
2975 	kfree(fileio);
2976 	return ret;
2977 }
2978 
2979 /*
2980  * __vb2_cleanup_fileio() - free resources used by the file io emulator
2981  * @q:		videobuf2 queue
2982  */
2983 static int __vb2_cleanup_fileio(struct vb2_queue *q)
2984 {
2985 	struct vb2_fileio_data *fileio = q->fileio;
2986 
2987 	if (fileio) {
2988 		vb2_core_streamoff(q, q->type);
2989 		q->fileio = NULL;
2990 		fileio->count = 0;
2991 		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
2992 		kfree(fileio);
2993 		dprintk(q, 3, "file io emulator closed\n");
2994 	}
2995 	return 0;
2996 }
2997 
2998 /*
2999  * __vb2_perform_fileio() - perform a single file io (read or write) operation
3000  * @q:		videobuf2 queue
3001  * @data:	pointer to the target userspace buffer
3002  * @count:	number of bytes to read or write
3003  * @ppos:	file handle position tracking pointer
3004  * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
3005  * @read:	access mode selector (1 means read, 0 means write)
3006  */
3007 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
3008 		loff_t *ppos, int nonblock, int read)
3009 {
3010 	struct vb2_fileio_data *fileio;
3011 	struct vb2_fileio_buf *buf;
3012 	bool is_multiplanar = q->is_multiplanar;
3013 	/*
3014 	 * When using write() to write data to an output video node the vb2 core
3015 	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
3016 	 * else is able to provide this information with the write() operation.
3017 	 */
3018 	bool copy_timestamp = !read && q->copy_timestamp;
3019 	unsigned index;
3020 	int ret;
3021 
3022 	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
3023 		read ? "read" : "write", (long)*ppos, count,
3024 		nonblock ? "non" : "");
3025 
3026 	if (!data)
3027 		return -EINVAL;
3028 
3029 	if (q->waiting_in_dqbuf) {
3030 		dprintk(q, 3, "another dup()ped fd is %s\n",
3031 			read ? "reading" : "writing");
3032 		return -EBUSY;
3033 	}
3034 
3035 	/*
3036 	 * Initialize emulator on first call.
3037 	 */
3038 	if (!vb2_fileio_is_active(q)) {
3039 		ret = __vb2_init_fileio(q, read);
3040 		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
3041 		if (ret)
3042 			return ret;
3043 	}
3044 	fileio = q->fileio;
3045 
3046 	/*
3047 	 * Check if we need to dequeue the buffer.
3048 	 */
3049 	index = fileio->cur_index;
3050 	if (index >= vb2_get_num_buffers(q)) {
3051 		struct vb2_buffer *b;
3052 
3053 		/*
3054 		 * Call vb2_dqbuf to get buffer back.
3055 		 */
3056 		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
3057 		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
3058 		if (ret)
3059 			return ret;
3060 		fileio->dq_count += 1;
3061 
3062 		fileio->cur_index = index;
3063 		buf = &fileio->bufs[index];
3064 
3065 		/* b can never be NULL when using fileio. */
3066 		b = vb2_get_buffer(q, index);
3067 
3068 		/*
3069 		 * Get number of bytes filled by the driver
3070 		 */
3071 		buf->pos = 0;
3072 		buf->queued = 0;
3073 		buf->size = read ? vb2_get_plane_payload(b, 0)
3074 				 : vb2_plane_size(b, 0);
3075 		/* Compensate for data_offset on read in the multiplanar case. */
3076 		if (is_multiplanar && read &&
3077 				b->planes[0].data_offset < buf->size) {
3078 			buf->pos = b->planes[0].data_offset;
3079 			buf->size -= buf->pos;
3080 		}
3081 	} else {
3082 		buf = &fileio->bufs[index];
3083 	}
3084 
3085 	/*
3086 	 * Limit count to the remaining bytes of the buffer.
3087 	 */
3088 	if (buf->pos + count > buf->size) {
3089 		count = buf->size - buf->pos;
3090 		dprintk(q, 5, "reducing read count: %zd\n", count);
3091 	}
3092 
3093 	/*
3094 	 * Transfer data to userspace.
3095 	 */
3096 	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
3097 		count, index, buf->pos);
3098 	if (read)
3099 		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
3100 	else
3101 		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
3102 	if (ret) {
3103 		dprintk(q, 3, "error copying data\n");
3104 		return -EFAULT;
3105 	}
3106 
3107 	/*
3108 	 * Update counters.
3109 	 */
3110 	buf->pos += count;
3111 	*ppos += count;
3112 
3113 	/*
3114 	 * Queue next buffer if required.
3115 	 */
3116 	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
3117 		/* b can never be NULL when using fileio. */
3118 		struct vb2_buffer *b = vb2_get_buffer(q, index);
3119 
3120 		/*
3121 		 * Check if this is the last buffer to read.
3122 		 */
3123 		if (read && fileio->read_once && fileio->dq_count == 1) {
3124 			dprintk(q, 3, "read limit reached\n");
3125 			return __vb2_cleanup_fileio(q);
3126 		}
3127 
3128 		/*
3129 		 * Call vb2_qbuf and give buffer to the driver.
3130 		 */
3131 		b->planes[0].bytesused = buf->pos;
3132 
3133 		if (copy_timestamp)
3134 			b->timestamp = ktime_get_ns();
3135 		ret = vb2_core_qbuf(q, b, NULL, NULL);
3136 		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
3137 		if (ret)
3138 			return ret;
3139 
3140 		/*
3141 		 * Buffer has been queued, update the status
3142 		 */
3143 		buf->pos = 0;
3144 		buf->queued = 1;
3145 		buf->size = vb2_plane_size(b, 0);
3146 		fileio->q_count += 1;
3147 		/*
3148 		 * If we are queuing up buffers for the first time, then
3149 		 * increase initial_index by one.
3150 		 */
3151 		if (fileio->initial_index < vb2_get_num_buffers(q))
3152 			fileio->initial_index++;
3153 		/*
3154 		 * The next buffer to use is either a buffer that's going to be
3155 		 * queued for the first time (while initial_index is still below
3156 		 * the number of buffers in the vb2_queue) or, once initial_index
3157 		 * equals that number, a buffer that must first be dequeued, since
3158 		 * all the 'first time' buffers have been queued by now.
3159 		 */
3160 		fileio->cur_index = fileio->initial_index;
3161 	}
3162 
3163 	/*
3164 	 * Return proper number of bytes processed.
3165 	 */
3166 	if (ret == 0)
3167 		ret = count;
3168 	return ret;
3169 }
3170 
3171 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
3172 		loff_t *ppos, int nonblocking)
3173 {
3174 	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
3175 }
3176 EXPORT_SYMBOL_GPL(vb2_read);
3177 
3178 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
3179 		loff_t *ppos, int nonblocking)
3180 {
3181 	return __vb2_perform_fileio(q, (char __user *) data, count,
3182 							ppos, nonblocking, 0);
3183 }
3184 EXPORT_SYMBOL_GPL(vb2_write);
3185 
3186 struct vb2_threadio_data {
3187 	struct task_struct *thread;
3188 	vb2_thread_fnc fnc;
3189 	void *priv;
3190 	bool stop;
3191 };
3192 
3193 static int vb2_thread(void *data)
3194 {
3195 	struct vb2_queue *q = data;
3196 	struct vb2_threadio_data *threadio = q->threadio;
3197 	bool copy_timestamp = false;
3198 	unsigned prequeue = 0;
3199 	unsigned index = 0;
3200 	int ret = 0;
3201 
3202 	if (q->is_output) {
3203 		prequeue = vb2_get_num_buffers(q);
3204 		copy_timestamp = q->copy_timestamp;
3205 	}
3206 
3207 	set_freezable();
3208 
3209 	for (;;) {
3210 		struct vb2_buffer *vb;
3211 
3212 		/*
3213 		 * Call vb2_dqbuf to get buffer back.
3214 		 */
3215 		if (prequeue) {
3216 			vb = vb2_get_buffer(q, index++);
3217 			if (!vb)
3218 				continue;
3219 			prequeue--;
3220 		} else {
3221 			if (!threadio->stop) {
3222 				if (q->ops->wait_finish)
3223 					call_void_qop(q, wait_finish, q);
3224 				else if (q->lock)
3225 					mutex_lock(q->lock);
3226 				ret = vb2_core_dqbuf(q, &index, NULL, 0);
3227 				if (q->ops->wait_prepare)
3228 					call_void_qop(q, wait_prepare, q);
3229 				else if (q->lock)
3230 					mutex_unlock(q->lock);
3231 			}
3232 			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
3233 			if (!ret)
3234 				vb = vb2_get_buffer(q, index);
3235 		}
3236 		if (ret || threadio->stop)
3237 			break;
3238 		try_to_freeze();
3239 
3240 		if (vb->state != VB2_BUF_STATE_ERROR &&
3241 		    threadio->fnc(vb, threadio->priv))
3242 			break;
3243 		if (copy_timestamp)
3244 			vb->timestamp = ktime_get_ns();
3245 		if (!threadio->stop) {
3246 			if (q->ops->wait_finish)
3247 				call_void_qop(q, wait_finish, q);
3248 			else if (q->lock)
3249 				mutex_lock(q->lock);
3250 			ret = vb2_core_qbuf(q, vb, NULL, NULL);
3251 			if (q->ops->wait_prepare)
3252 				call_void_qop(q, wait_prepare, q);
3253 			else if (q->lock)
3254 				mutex_unlock(q->lock);
3255 		}
3256 		if (ret || threadio->stop)
3257 			break;
3258 	}
3259 
3260 	/* Hmm, linux becomes *very* unhappy without this ... */
3261 	while (!kthread_should_stop()) {
3262 		set_current_state(TASK_INTERRUPTIBLE);
3263 		schedule();
3264 	}
3265 	return 0;
3266 }
3267 
3268 /*
3269  * This function should not be used for anything else but the videobuf2-dvb
3270  * support. If you think you have another good use-case for this, then please
3271  * contact the linux-media mailinglist first.
3272  */
3273 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
3274 		     const char *thread_name)
3275 {
3276 	struct vb2_threadio_data *threadio;
3277 	int ret = 0;
3278 
3279 	if (q->threadio)
3280 		return -EBUSY;
3281 	if (vb2_is_busy(q))
3282 		return -EBUSY;
3283 	if (WARN_ON(q->fileio))
3284 		return -EBUSY;
3285 
3286 	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
3287 	if (threadio == NULL)
3288 		return -ENOMEM;
3289 	threadio->fnc = fnc;
3290 	threadio->priv = priv;
3291 
3292 	ret = __vb2_init_fileio(q, !q->is_output);
3293 	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
3294 	if (ret)
3295 		goto nomem;
3296 	q->threadio = threadio;
3297 	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
3298 	if (IS_ERR(threadio->thread)) {
3299 		ret = PTR_ERR(threadio->thread);
3300 		threadio->thread = NULL;
3301 		goto nothread;
3302 	}
3303 	return 0;
3304 
3305 nothread:
3306 	__vb2_cleanup_fileio(q);
3307 nomem:
3308 	kfree(threadio);
3309 	return ret;
3310 }
3311 EXPORT_SYMBOL_GPL(vb2_thread_start);
3312 
3313 int vb2_thread_stop(struct vb2_queue *q)
3314 {
3315 	struct vb2_threadio_data *threadio = q->threadio;
3316 	int err;
3317 
3318 	if (threadio == NULL)
3319 		return 0;
3320 	threadio->stop = true;
3321 	/* Wake up all pending sleeps in the thread */
3322 	vb2_queue_error(q);
3323 	err = kthread_stop(threadio->thread);
3324 	__vb2_cleanup_fileio(q);
3325 	threadio->thread = NULL;
3326 	kfree(threadio);
3327 	q->threadio = NULL;
3328 	return err;
3329 }
3330 EXPORT_SYMBOL_GPL(vb2_thread_stop);
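
/*
 * Illustrative only (my_feed, dvb and the names are hypothetical):
 * videobuf2-dvb uses this pair roughly as
 *
 *	static int my_feed(struct vb2_buffer *vb, void *priv)
 *	{
 *		// push vb2_plane_vaddr(vb, 0) into the DVB demux
 *		return 0;	// non-zero stops the thread loop
 *	}
 *
 *	vb2_thread_start(q, my_feed, dvb, "name");
 *	...
 *	vb2_thread_stop(q);
 */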
3331 
3332 MODULE_DESCRIPTION("Media buffer core framework");
3333 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
3334 MODULE_LICENSE("GPL");
3335 MODULE_IMPORT_NS(DMA_BUF);
3336