/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

#define PLANE_INDEX_BITS	3
#define PLANE_INDEX_SHIFT	(PAGE_SHIFT + PLANE_INDEX_BITS)
#define PLANE_INDEX_MASK	(BIT_MASK(PLANE_INDEX_BITS) - 1)
#define MAX_BUFFER_INDEX	BIT_MASK(30 - PLANE_INDEX_SHIFT)
#define BUFFER_INDEX_MASK	(MAX_BUFFER_INDEX - 1)

#if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES
#error PLANE_INDEX_BITS order must be equal to VIDEO_MAX_PLANES
#endif
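
/*
 * Worked example of the macros above (illustrative only): with the
 * common PAGE_SHIFT = 12 and PLANE_INDEX_BITS = 3,
 *
 *	PLANE_INDEX_SHIFT = 12 + 3 = 15
 *	PLANE_INDEX_MASK  = (1 << 3) - 1 = 0x7	(planes 0..7)
 *	MAX_BUFFER_INDEX  = 1 << (30 - 15) = 32768
 *	BUFFER_INDEX_MASK = 0x7fff
 *
 * so an mmap offset cookie can encode up to 32768 buffers of up to
 * 8 planes each; see __setup_offsets() below for the full bit layout.
 */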

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("[%s] %s: " fmt, (q)->name, __func__,	\
				## arg);				\
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(op, vb, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL;	\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(q, 2, "call_qop(%s)%s\n", #op,				\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n",		\
		(vb)->index, #op,					\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(op, vb, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
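
/*
 * Illustration of the counters kept by the wrappers above (not part of
 * the upstream flow): a buffer that is allocated, prepared twice and
 * finally freed should end its life with, e.g.,
 *
 *	vb->cnt_mem_alloc == 1   && vb->cnt_mem_put == 1
 *	vb->cnt_mem_prepare == 2 && vb->cnt_mem_finish == 2
 *
 * __vb2_queue_free() below checks exactly these pairings and dumps the
 * counters to the kernel log when they do not match.
 */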

#define call_bufop(q, op, args...)					\
({									\
	int ret = 0;							\
	if (q && q->buf_ops && q->buf_ops->op)				\
		ret = q->buf_ops->op(args);				\
	ret;								\
})

#define call_void_bufop(q, op, args...)					\
({									\
	if (q && q->buf_ops && q->buf_ops->op)				\
		q->buf_ops->op(args);					\
})

static void __vb2_queue_cancel(struct vb2_queue *q);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(alloc,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  size);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	vb->synced = 1;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}

/*
 * __vb2_buf_mem_finish() - call ->finish on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	vb->synced = 0;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long offset = 0;

	/*
	 * The offset "cookie" value has the following constraints:
	 * - a buffer can have up to 8 planes.
	 * - v4l2 mem2mem uses bit 30 to distinguish between
	 *   OUTPUT (aka "source", bit 30 is 0) and
	 *   CAPTURE (aka "destination", bit 30 is 1) buffers.
	 * - must be page aligned
	 * That led to this bit mapping when PAGE_SHIFT = 12:
	 * |30                |29        15|14       12|11 0|
	 * |DST_QUEUE_OFF_BASE|buffer index|plane index| 0  |
	 * where there are 15 bits to store the buffer index.
	 * Depending on PAGE_SHIFT value we can have fewer bits
	 * to store the buffer index.
	 */
	offset = vb->index << PLANE_INDEX_SHIFT;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT);

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
			vb->index, plane, offset);
	}
}
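
/*
 * Concrete instance of the cookie layout above (illustrative, assuming
 * PAGE_SHIFT = 12): buffer index 2, plane 1 yields
 *
 *	offset = (2 << 15) + (1 << 12) = 0x11000
 *
 * Userspace later passes this value back as the mmap() offset, and the
 * core recovers the buffer index as (0x11000 >> 15) & BUFFER_INDEX_MASK
 * = 2 and the plane index as (0x11000 >> 12) & PLANE_INDEX_MASK = 1.
 */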

static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->skip_cache_sync_on_finish = 1;
		vb->skip_cache_sync_on_prepare = 1;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->skip_cache_sync_on_finish = 1;
}
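
/*
 * For illustration: on a capture queue (DMA_FROM_DEVICE) both syncs stay
 * enabled, while an output queue (DMA_TO_DEVICE) can skip ->finish()
 * because the device only reads from the buffer and never writes to it,
 * so there is nothing to invalidate back to the CPU.
 */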

/**
 * vb2_queue_add_buffer() - add a buffer to a queue
 * @q:		pointer to &struct vb2_queue with videobuf2 queue.
 * @vb:		pointer to &struct vb2_buffer to be added to the queue.
 * @index:	index at which the buffer is added to the queue.
 */
static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
{
	WARN_ON(index >= q->max_num_buffers || test_bit(index, q->bufs_bitmap) || vb->vb2_queue);

	q->bufs[index] = vb;
	vb->index = index;
	vb->vb2_queue = q;
	set_bit(index, q->bufs_bitmap);
}

/**
 * vb2_queue_remove_buffer() - remove a buffer from a queue
 * @vb:		pointer to &struct vb2_buffer to be removed from the queue.
 */
static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
{
	clear_bit(vb->index, vb->vb2_queue->bufs_bitmap);
	vb->vb2_queue->bufs[vb->index] = NULL;
	vb->vb2_queue = NULL;
}

/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 * @first_index: index of the first created buffer, all newly allocated buffers
 *		 have indices in the range [first_index..first_index+count-1]
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned int plane_sizes[VB2_MAX_PLANES],
			     unsigned int *first_index)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long index = q->max_num_buffers;
	int ret;

	/*
	 * Ensure that the number of buffers to allocate plus the number of
	 * buffers already in the queue stays below q->max_num_buffers.
	 */
	num_buffers = min_t(unsigned int, num_buffers,
			    q->max_num_buffers - vb2_get_num_buffers(q));

	while (num_buffers) {
		index = bitmap_find_next_zero_area(q->bufs_bitmap, q->max_num_buffers,
						   0, num_buffers, 0);

		if (index < q->max_num_buffers)
			break;
		/* Try to find free space for fewer buffers */
		num_buffers--;
	}

	/* If there is no space left to allocate buffers, return 0 to indicate the error */
	if (!num_buffers) {
		*first_index = 0;
		return 0;
	}

	*first_index = index;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate vb2 buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->num_planes = num_planes;
		vb->type = q->type;
		vb->memory = memory;
		init_buffer_cache_hints(q, vb);
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}

		vb2_queue_add_buffer(q, vb, index++);
		call_void_bufop(q, init_buffer, vb);

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}
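
/*
 * Illustration of the bitmap search above (not upstream code): with
 * max_num_buffers = 8 and buffers 0-2 and 5 already allocated, the
 * bitmap reads 00100111 (bit 0 = buffer 0). A request for 4 buffers
 * finds no 4-bit hole, so num_buffers is decremented until the 2-bit
 * hole at index 3 fits, and 2 buffers are allocated at indices 3-4.
 */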

/*
 * __vb2_free_mem() - release video buffer memory for a given range of
 * buffers in a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i;
	struct vb2_buffer *vb;

	for (i = start; i < start + count; i++) {
		vb = vb2_get_buffer(q, i);
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free @count buffers from @start index of the queue:
 * the video memory and related information. If no buffers are left, return
 * the queue to an uninitialized state. Might be called even if the queue has
 * already been freed.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i;

	lockdep_assert_held(&q->mmap_lock);

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, start, count);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not then dump the counters to the kernel log.
	 */
	if (vb2_get_num_buffers(q)) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p:\n", q);
			if (q->cnt_start_streaming != q->cnt_stop_streaming)
				pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
					q->cnt_queue_setup, q->cnt_start_streaming,
					q->cnt_stop_streaming);
			if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
				pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
					q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
			if (q->cnt_wait_prepare != q->cnt_wait_finish)
				pr_info(" wait_prepare: %u wait_finish: %u\n",
					q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_prepare_streaming = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
		q->cnt_unprepare_streaming = 0;
	}
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);
		bool unbalanced;

		if (!vb)
			continue;

		unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
			     vb->cnt_mem_prepare != vb->cnt_mem_finish ||
			     vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
			     vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
			     vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
			     vb->cnt_buf_queue != vb->cnt_buf_done ||
			     vb->cnt_buf_prepare != vb->cnt_buf_finish ||
			     vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p, buffer %d:\n",
				q, i);
			if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
				pr_info(" buf_init: %u buf_cleanup: %u\n",
					vb->cnt_buf_init, vb->cnt_buf_cleanup);
			if (vb->cnt_buf_prepare != vb->cnt_buf_finish)
				pr_info(" buf_prepare: %u buf_finish: %u\n",
					vb->cnt_buf_prepare, vb->cnt_buf_finish);
			if (vb->cnt_buf_queue != vb->cnt_buf_done)
				pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
					vb->cnt_buf_out_validate, vb->cnt_buf_queue,
					vb->cnt_buf_done, vb->cnt_buf_request_complete);
			if (vb->cnt_mem_alloc != vb->cnt_mem_put)
				pr_info(" alloc: %u put: %u\n",
					vb->cnt_mem_alloc, vb->cnt_mem_put);
			if (vb->cnt_mem_prepare != vb->cnt_mem_finish)
				pr_info(" prepare: %u finish: %u\n",
					vb->cnt_mem_prepare, vb->cnt_mem_finish);
			if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr)
				pr_info(" get_userptr: %u put_userptr: %u\n",
					vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf)
				pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n",
					vb->cnt_mem_attach_dmabuf,
					vb->cnt_mem_detach_dmabuf);
			if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf)
				pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n",
					vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info(" get_dmabuf: %u num_users: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users);
		}
	}
#endif

	/* Free vb2 buffers */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (!vb)
			continue;

		vb2_queue_remove_buffer(vb);
		kfree(vb);
	}

	if (!vb2_get_num_buffers(q)) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
}
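
/*
 * A hypothetical instance of the dump above, for a driver that calls
 * buf_init but forgets buf_cleanup (values made up for illustration):
 *
 *	unbalanced counters for queue 00000000deadbeef, buffer 0:
 *	 buf_init: 1 buf_cleanup: 0
 *
 * Only the pairs that actually diverged are printed.
 */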

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;

		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < q->max_num_buffers; ++buffer) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (!vb)
			continue;

		if (vb2_buffer_in_use(q, vb))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	call_void_bufop(q, fill_user_buffer, vb, pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}
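
/*
 * Summary of the checks above (illustrative): for each I/O mode the
 * queue's vb2_mem_ops must provide at least
 *
 *	MMAP:    .alloc, .put, .mmap
 *	USERPTR: .get_userptr, .put_userptr
 *	DMABUF:  .attach_dmabuf, .detach_dmabuf, .map_dmabuf, .unmap_dmabuf
 *
 * The stock allocators (e.g. vb2_dma_contig_memops, vb2_vmalloc_memops)
 * already satisfy all three sets.
 */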
786 */ 787 if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { 788 dprintk(q, 1, "MMAP for current setup unsupported\n"); 789 return -EINVAL; 790 } 791 792 if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { 793 dprintk(q, 1, "USERPTR for current setup unsupported\n"); 794 return -EINVAL; 795 } 796 797 if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { 798 dprintk(q, 1, "DMABUF for current setup unsupported\n"); 799 return -EINVAL; 800 } 801 802 /* 803 * Place the busy tests at the end: -EBUSY can be ignored when 804 * create_bufs is called with count == 0, but count == 0 should still 805 * do the memory and type validation. 806 */ 807 if (vb2_fileio_is_active(q)) { 808 dprintk(q, 1, "file io in progress\n"); 809 return -EBUSY; 810 } 811 return 0; 812 } 813 EXPORT_SYMBOL(vb2_verify_memory_type); 814 815 static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) 816 { 817 q->non_coherent_mem = 0; 818 819 if (!vb2_queue_allows_cache_hints(q)) 820 return; 821 q->non_coherent_mem = non_coherent_mem; 822 } 823 824 static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) 825 { 826 if (non_coherent_mem != q->non_coherent_mem) { 827 dprintk(q, 1, "memory coherency model mismatch\n"); 828 return false; 829 } 830 return true; 831 } 832 833 static int vb2_core_allocated_buffers_storage(struct vb2_queue *q) 834 { 835 if (!q->bufs) 836 q->bufs = kcalloc(q->max_num_buffers, sizeof(*q->bufs), GFP_KERNEL); 837 if (!q->bufs) 838 return -ENOMEM; 839 840 if (!q->bufs_bitmap) 841 q->bufs_bitmap = bitmap_zalloc(q->max_num_buffers, GFP_KERNEL); 842 if (!q->bufs_bitmap) { 843 kfree(q->bufs); 844 q->bufs = NULL; 845 return -ENOMEM; 846 } 847 848 return 0; 849 } 850 851 static void vb2_core_free_buffers_storage(struct vb2_queue *q) 852 { 853 kfree(q->bufs); 854 q->bufs = NULL; 855 bitmap_free(q->bufs_bitmap); 856 q->bufs_bitmap = NULL; 857 } 858 859 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 860 unsigned int flags, unsigned int *count) 861 { 862 unsigned int num_buffers, allocated_buffers, num_planes = 0; 863 unsigned int q_num_bufs = vb2_get_num_buffers(q); 864 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 865 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 866 unsigned int i, first_index; 867 int ret = 0; 868 869 if (q->streaming) { 870 dprintk(q, 1, "streaming active\n"); 871 return -EBUSY; 872 } 873 874 if (q->waiting_in_dqbuf && *count) { 875 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 876 return -EBUSY; 877 } 878 879 if (*count == 0 || q_num_bufs != 0 || 880 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || 881 !verify_coherency_flags(q, non_coherent_mem)) { 882 /* 883 * We already have buffers allocated, so first check if they 884 * are not in use and can be freed. 885 */ 886 mutex_lock(&q->mmap_lock); 887 if (debug && q->memory == VB2_MEMORY_MMAP && 888 __buffers_in_use(q)) 889 dprintk(q, 1, "memory in use, orphaning buffers\n"); 890 891 /* 892 * Call queue_cancel to clean up any buffers in the 893 * QUEUED state which is possible if buffers were prepared or 894 * queued without ever calling STREAMON. 895 */ 896 __vb2_queue_cancel(q); 897 __vb2_queue_free(q, 0, q->max_num_buffers); 898 mutex_unlock(&q->mmap_lock); 899 900 q->is_busy = 0; 901 /* 902 * In case of REQBUFS(0) return immediately without calling 903 * driver's queue_setup() callback and allocating resources. 

int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		     unsigned int flags, unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	unsigned int plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int i, first_index;
	int ret = 0;

	if (q->streaming) {
		dprintk(q, 1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q_num_bufs != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
	    !verify_coherency_flags(q, non_coherent_mem)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (debug && q->memory == VB2_MEMORY_MMAP &&
		    __buffers_in_use(q))
			dprintk(q, 1, "memory in use, orphaning buffers\n");

		/*
		 * Call queue_cancel to clean up any buffers in the
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		__vb2_queue_free(q, 0, q->max_num_buffers);
		mutex_unlock(&q->mmap_lock);

		q->is_busy = 0;
		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = max_t(unsigned int, *count, q->min_reqbufs_allocation);
	num_buffers = min_t(unsigned int, num_buffers, q->max_num_buffers);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/*
	 * Set this now to ensure that drivers see the correct q->memory value
	 * in the queue_setup op.
	 */
	mutex_lock(&q->mmap_lock);
	ret = vb2_core_allocated_buffers_storage(q);
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;
	set_queue_coherency(q, non_coherent_mem);

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Check that driver has set sane values */
	if (WARN_ON(!num_planes)) {
		ret = -EINVAL;
		goto error;
	}

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i])) {
			ret = -EINVAL;
			goto error;
		}

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes, &first_index);
	if (allocated_buffers == 0) {
		/* There shouldn't be any buffers allocated, so first_index == 0 */
		WARN_ON(first_index);
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_reqbufs_allocation)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since
		 * it signals to queue_setup() whether it is called from
		 * create_bufs() or reqbufs(), zero it here to signal that
		 * queue_setup() is called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, first_index, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;
	q->is_busy = 1;

	return 0;

error:
	mutex_lock(&q->mmap_lock);
	q->memory = VB2_MEMORY_UNKNOWN;
	mutex_unlock(&q->mmap_lock);
	vb2_core_free_buffers_storage(q);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
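
/*
 * For illustration only, not part of this file: a minimal queue_setup()
 * for a hypothetical single-plane capture driver (foo_queue_setup and
 * FOO_IMAGE_SIZE are made-up names). On the reqbufs() path above,
 * *num_planes arrives as 0 and the driver must fill it in; on the
 * create_bufs() path, *num_planes and sizes[] arrive pre-set and must
 * only be validated:
 *
 *	static int foo_queue_setup(struct vb2_queue *q,
 *				   unsigned int *num_buffers,
 *				   unsigned int *num_planes,
 *				   unsigned int sizes[],
 *				   struct device *alloc_devs[])
 *	{
 *		if (*num_planes)
 *			return sizes[0] < FOO_IMAGE_SIZE ? -EINVAL : 0;
 *
 *		*num_planes = 1;
 *		sizes[0] = FOO_IMAGE_SIZE;
 *		return 0;
 *	}
 */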

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
			 unsigned int flags, unsigned int *count,
			 unsigned int requested_planes,
			 const unsigned int requested_sizes[],
			 unsigned int *first_index)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned int plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	bool no_previous_buffers = !q_num_bufs;
	int ret = 0;

	if (q_num_bufs == q->max_num_buffers) {
		dprintk(q, 1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (no_previous_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		/*
		 * Set this now to ensure that drivers see the correct q->memory
		 * value in the queue_setup op.
		 */
		mutex_lock(&q->mmap_lock);
		ret = vb2_core_allocated_buffers_storage(q);
		q->memory = memory;
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;
		q->waiting_for_buffers = !q->is_output;
		set_queue_coherency(q, non_coherent_mem);
	} else {
		if (q->memory != memory) {
			dprintk(q, 1, "memory model mismatch\n");
			return -EINVAL;
		}
		if (!verify_coherency_flags(q, non_coherent_mem))
			return -EINVAL;
	}

	num_buffers = min(*count, q->max_num_buffers - q_num_bufs);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
					      num_planes, plane_sizes, first_index);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * num_buffers contains the total number of buffers that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, *first_index, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->is_busy = 1;

	return 0;

error:
	if (no_previous_buffers) {
		mutex_lock(&q->mmap_lock);
		q->memory = VB2_MEMORY_UNKNOWN;
		mutex_unlock(&q->mmap_lock);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
		vb->index, vb2_state_name(state));

	if (state != VB2_BUF_STATE_QUEUED)
		__vb2_buf_mem_finish(vb);

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);

	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}

	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}
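
/*
 * Typical driver-side use of vb2_buffer_done() (a sketch, not taken from
 * this file): in the device's "frame complete" interrupt handler, once
 * the DMA for the current buffer has finished:
 *
 *	vb->timestamp = ktime_get_ns();
 *	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 *
 * done_lock is taken with spin_lock_irqsave() above precisely so that
 * this call is safe from interrupt context.
 */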

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr &&
		    vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
				planes[plane].length,
				vb->planes[plane].min_length,
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(get_userptr,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}
1334 */ 1335 ret = call_vb_qop(vb, buf_init, vb); 1336 if (ret) { 1337 dprintk(q, 1, "buffer initialization failed\n"); 1338 goto err; 1339 } 1340 } 1341 1342 ret = call_vb_qop(vb, buf_prepare, vb); 1343 if (ret) { 1344 dprintk(q, 1, "buffer preparation failed\n"); 1345 call_void_vb_qop(vb, buf_cleanup, vb); 1346 goto err; 1347 } 1348 1349 return 0; 1350 err: 1351 /* In case of errors, release planes that were already acquired */ 1352 for (plane = 0; plane < vb->num_planes; ++plane) { 1353 if (vb->planes[plane].mem_priv) 1354 call_void_memop(vb, put_userptr, 1355 vb->planes[plane].mem_priv); 1356 vb->planes[plane].mem_priv = NULL; 1357 vb->planes[plane].m.userptr = 0; 1358 vb->planes[plane].length = 0; 1359 } 1360 1361 return ret; 1362 } 1363 1364 /* 1365 * __prepare_dmabuf() - prepare a DMABUF buffer 1366 */ 1367 static int __prepare_dmabuf(struct vb2_buffer *vb) 1368 { 1369 struct vb2_plane planes[VB2_MAX_PLANES]; 1370 struct vb2_queue *q = vb->vb2_queue; 1371 void *mem_priv; 1372 unsigned int plane; 1373 int ret = 0; 1374 bool reacquired = vb->planes[0].mem_priv == NULL; 1375 1376 memset(planes, 0, sizeof(planes[0]) * vb->num_planes); 1377 /* Copy relevant information provided by the userspace */ 1378 ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, 1379 vb, planes); 1380 if (ret) 1381 return ret; 1382 1383 for (plane = 0; plane < vb->num_planes; ++plane) { 1384 struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd); 1385 1386 if (IS_ERR_OR_NULL(dbuf)) { 1387 dprintk(q, 1, "invalid dmabuf fd for plane %d\n", 1388 plane); 1389 ret = -EINVAL; 1390 goto err; 1391 } 1392 1393 /* use DMABUF size if length is not provided */ 1394 if (planes[plane].length == 0) 1395 planes[plane].length = dbuf->size; 1396 1397 if (planes[plane].length < vb->planes[plane].min_length) { 1398 dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n", 1399 planes[plane].length, plane, 1400 vb->planes[plane].min_length); 1401 dma_buf_put(dbuf); 1402 ret = -EINVAL; 1403 goto err; 1404 } 1405 1406 /* Skip the plane if already verified */ 1407 if (dbuf == vb->planes[plane].dbuf && 1408 vb->planes[plane].length == planes[plane].length) { 1409 dma_buf_put(dbuf); 1410 continue; 1411 } 1412 1413 dprintk(q, 3, "buffer for plane %d changed\n", plane); 1414 1415 if (!reacquired) { 1416 reacquired = true; 1417 vb->copied_timestamp = 0; 1418 call_void_vb_qop(vb, buf_cleanup, vb); 1419 } 1420 1421 /* Release previously acquired memory if present */ 1422 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]); 1423 vb->planes[plane].bytesused = 0; 1424 vb->planes[plane].length = 0; 1425 vb->planes[plane].m.fd = 0; 1426 vb->planes[plane].data_offset = 0; 1427 1428 /* Acquire each plane's memory */ 1429 mem_priv = call_ptr_memop(attach_dmabuf, 1430 vb, 1431 q->alloc_devs[plane] ? : q->dev, 1432 dbuf, 1433 planes[plane].length); 1434 if (IS_ERR(mem_priv)) { 1435 dprintk(q, 1, "failed to attach dmabuf\n"); 1436 ret = PTR_ERR(mem_priv); 1437 dma_buf_put(dbuf); 1438 goto err; 1439 } 1440 1441 vb->planes[plane].dbuf = dbuf; 1442 vb->planes[plane].mem_priv = mem_priv; 1443 } 1444 1445 /* 1446 * This pins the buffer(s) with dma_buf_map_attachment()). It's done 1447 * here instead just before the DMA, while queueing the buffer(s) so 1448 * userspace knows sooner rather than later if the dma-buf map fails. 
1449 */ 1450 for (plane = 0; plane < vb->num_planes; ++plane) { 1451 if (vb->planes[plane].dbuf_mapped) 1452 continue; 1453 1454 ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv); 1455 if (ret) { 1456 dprintk(q, 1, "failed to map dmabuf for plane %d\n", 1457 plane); 1458 goto err; 1459 } 1460 vb->planes[plane].dbuf_mapped = 1; 1461 } 1462 1463 /* 1464 * Now that everything is in order, copy relevant information 1465 * provided by userspace. 1466 */ 1467 for (plane = 0; plane < vb->num_planes; ++plane) { 1468 vb->planes[plane].bytesused = planes[plane].bytesused; 1469 vb->planes[plane].length = planes[plane].length; 1470 vb->planes[plane].m.fd = planes[plane].m.fd; 1471 vb->planes[plane].data_offset = planes[plane].data_offset; 1472 } 1473 1474 if (reacquired) { 1475 /* 1476 * Call driver-specific initialization on the newly acquired buffer, 1477 * if provided. 1478 */ 1479 ret = call_vb_qop(vb, buf_init, vb); 1480 if (ret) { 1481 dprintk(q, 1, "buffer initialization failed\n"); 1482 goto err; 1483 } 1484 } 1485 1486 ret = call_vb_qop(vb, buf_prepare, vb); 1487 if (ret) { 1488 dprintk(q, 1, "buffer preparation failed\n"); 1489 call_void_vb_qop(vb, buf_cleanup, vb); 1490 goto err; 1491 } 1492 1493 return 0; 1494 err: 1495 /* In case of errors, release planes that were already acquired */ 1496 __vb2_buf_dmabuf_put(vb); 1497 1498 return ret; 1499 } 1500 1501 /* 1502 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing 1503 */ 1504 static void __enqueue_in_driver(struct vb2_buffer *vb) 1505 { 1506 struct vb2_queue *q = vb->vb2_queue; 1507 1508 vb->state = VB2_BUF_STATE_ACTIVE; 1509 atomic_inc(&q->owned_by_drv_count); 1510 1511 trace_vb2_buf_queue(q, vb); 1512 1513 call_void_vb_qop(vb, buf_queue, vb); 1514 } 1515 1516 static int __buf_prepare(struct vb2_buffer *vb) 1517 { 1518 struct vb2_queue *q = vb->vb2_queue; 1519 enum vb2_buffer_state orig_state = vb->state; 1520 int ret; 1521 1522 if (q->error) { 1523 dprintk(q, 1, "fatal error occurred on queue\n"); 1524 return -EIO; 1525 } 1526 1527 if (vb->prepared) 1528 return 0; 1529 WARN_ON(vb->synced); 1530 1531 if (q->is_output) { 1532 ret = call_vb_qop(vb, buf_out_validate, vb); 1533 if (ret) { 1534 dprintk(q, 1, "buffer validation failed\n"); 1535 return ret; 1536 } 1537 } 1538 1539 vb->state = VB2_BUF_STATE_PREPARING; 1540 1541 switch (q->memory) { 1542 case VB2_MEMORY_MMAP: 1543 ret = __prepare_mmap(vb); 1544 break; 1545 case VB2_MEMORY_USERPTR: 1546 ret = __prepare_userptr(vb); 1547 break; 1548 case VB2_MEMORY_DMABUF: 1549 ret = __prepare_dmabuf(vb); 1550 break; 1551 default: 1552 WARN(1, "Invalid queue type\n"); 1553 ret = -EINVAL; 1554 break; 1555 } 1556 1557 if (ret) { 1558 dprintk(q, 1, "buffer preparation failed: %d\n", ret); 1559 vb->state = orig_state; 1560 return ret; 1561 } 1562 1563 __vb2_buf_mem_prepare(vb); 1564 vb->prepared = 1; 1565 vb->state = orig_state; 1566 1567 return 0; 1568 } 1569 1570 static int vb2_req_prepare(struct media_request_object *obj) 1571 { 1572 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1573 int ret; 1574 1575 if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST)) 1576 return -EINVAL; 1577 1578 mutex_lock(vb->vb2_queue->lock); 1579 ret = __buf_prepare(vb); 1580 mutex_unlock(vb->vb2_queue->lock); 1581 return ret; 1582 } 1583 1584 static void __vb2_dqbuf(struct vb2_buffer *vb); 1585 1586 static void vb2_req_unprepare(struct media_request_object *obj) 1587 { 1588 struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj); 1589 1590 

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int err;

	mutex_lock(vb->vb2_queue->lock);
	/*
	 * There is no method to propagate an error from vb2_core_qbuf(),
	 * so if this returns a non-0 value, then WARN.
	 *
	 * The only exception is -EIO which is returned if q->error is
	 * set. We just ignore that, and expect this will be caught the
	 * next time vb2_req_prepare() is called.
	 */
	err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	int ret;

	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

int vb2_core_remove_bufs(struct vb2_queue *q, unsigned int start, unsigned int count)
{
	unsigned int i, ret = 0;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);

	if (count == 0)
		return 0;

	if (count > q_num_bufs)
		return -EINVAL;

	if (start > q->max_num_buffers - count)
		return -EINVAL;

	mutex_lock(&q->mmap_lock);

	/* Check that all buffers in the range exist */
	for (i = start; i < start + count; i++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		if (!vb) {
			ret = -EINVAL;
			goto unlock;
		}
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			ret = -EBUSY;
			goto unlock;
		}
	}
	__vb2_queue_free(q, start, count);
	dprintk(q, 2, "%u buffers removed\n", count);

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_remove_bufs);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_queued_buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned int i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->max_num_buffers; ++i) {
			vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;

			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
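
/*
 * Illustration of the threshold logic (not upstream code): a DMA engine
 * that needs two buffers in flight sets q->min_queued_buffers = 2. A
 * STREAMON issued with only one buffer queued then merely sets
 * q->streaming; vb2_start_streaming() is deferred until vb2_core_qbuf()
 * below sees q->queued_count reach 2.
 */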
1793 */ 1794 WARN_ON(!list_empty(&q->done_list)); 1795 return ret; 1796 } 1797 1798 int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb, 1799 struct media_request *req) 1800 { 1801 enum vb2_buffer_state orig_state; 1802 int ret; 1803 1804 if (q->error) { 1805 dprintk(q, 1, "fatal error occurred on queue\n"); 1806 return -EIO; 1807 } 1808 1809 if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1810 q->requires_requests) { 1811 dprintk(q, 1, "qbuf requires a request\n"); 1812 return -EBADR; 1813 } 1814 1815 if ((req && q->uses_qbuf) || 1816 (!req && vb->state != VB2_BUF_STATE_IN_REQUEST && 1817 q->uses_requests)) { 1818 dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n"); 1819 return -EBUSY; 1820 } 1821 1822 if (req) { 1823 int ret; 1824 1825 q->uses_requests = 1; 1826 if (vb->state != VB2_BUF_STATE_DEQUEUED) { 1827 dprintk(q, 1, "buffer %d not in dequeued state\n", 1828 vb->index); 1829 return -EINVAL; 1830 } 1831 1832 if (q->is_output && !vb->prepared) { 1833 ret = call_vb_qop(vb, buf_out_validate, vb); 1834 if (ret) { 1835 dprintk(q, 1, "buffer validation failed\n"); 1836 return ret; 1837 } 1838 } 1839 1840 media_request_object_init(&vb->req_obj); 1841 1842 /* Make sure the request is in a safe state for updating. */ 1843 ret = media_request_lock_for_update(req); 1844 if (ret) 1845 return ret; 1846 ret = media_request_object_bind(req, &vb2_core_req_ops, 1847 q, true, &vb->req_obj); 1848 media_request_unlock_for_update(req); 1849 if (ret) 1850 return ret; 1851 1852 vb->state = VB2_BUF_STATE_IN_REQUEST; 1853 1854 /* 1855 * Increment the refcount and store the request. 1856 * The request refcount is decremented again when the 1857 * buffer is dequeued. This is to prevent vb2_buffer_done() 1858 * from freeing the request from interrupt context, which can 1859 * happen if the application closed the request fd after 1860 * queueing the request. 1861 */ 1862 media_request_get(req); 1863 vb->request = req; 1864 1865 /* Fill buffer information for the userspace */ 1866 if (pb) { 1867 call_void_bufop(q, copy_timestamp, vb, pb); 1868 call_void_bufop(q, fill_user_buffer, vb, pb); 1869 } 1870 1871 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1872 return 0; 1873 } 1874 1875 if (vb->state != VB2_BUF_STATE_IN_REQUEST) 1876 q->uses_qbuf = 1; 1877 1878 switch (vb->state) { 1879 case VB2_BUF_STATE_DEQUEUED: 1880 case VB2_BUF_STATE_IN_REQUEST: 1881 if (!vb->prepared) { 1882 ret = __buf_prepare(vb); 1883 if (ret) 1884 return ret; 1885 } 1886 break; 1887 case VB2_BUF_STATE_PREPARING: 1888 dprintk(q, 1, "buffer still being prepared\n"); 1889 return -EINVAL; 1890 default: 1891 dprintk(q, 1, "invalid buffer state %s\n", 1892 vb2_state_name(vb->state)); 1893 return -EINVAL; 1894 } 1895 1896 /* 1897 * Add to the queued buffers list, a buffer will stay on it until 1898 * dequeued in dqbuf. 1899 */ 1900 orig_state = vb->state; 1901 list_add_tail(&vb->queued_entry, &q->queued_list); 1902 q->queued_count++; 1903 q->waiting_for_buffers = false; 1904 vb->state = VB2_BUF_STATE_QUEUED; 1905 1906 if (pb) 1907 call_void_bufop(q, copy_timestamp, vb, pb); 1908 1909 trace_vb2_qbuf(q, vb); 1910 1911 /* 1912 * If already streaming, give the buffer to driver for processing. 1913 * If not, the buffer will be given to driver on next streamon. 
1914 */ 1915 if (q->start_streaming_called) 1916 __enqueue_in_driver(vb); 1917 1918 /* Fill buffer information for the userspace */ 1919 if (pb) 1920 call_void_bufop(q, fill_user_buffer, vb, pb); 1921 1922 /* 1923 * If streamon has been called, and we haven't yet called 1924 * start_streaming() since not enough buffers were queued, and 1925 * we now have reached the minimum number of queued buffers, 1926 * then we can finally call start_streaming(). 1927 */ 1928 if (q->streaming && !q->start_streaming_called && 1929 q->queued_count >= q->min_queued_buffers) { 1930 ret = vb2_start_streaming(q); 1931 if (ret) { 1932 /* 1933 * Since vb2_core_qbuf will return with an error, 1934 * we should return it to state DEQUEUED since 1935 * the error indicates that the buffer wasn't queued. 1936 */ 1937 list_del(&vb->queued_entry); 1938 q->queued_count--; 1939 vb->state = orig_state; 1940 return ret; 1941 } 1942 } 1943 1944 dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index); 1945 return 0; 1946 } 1947 EXPORT_SYMBOL_GPL(vb2_core_qbuf); 1948 1949 /* 1950 * __vb2_wait_for_done_vb() - wait for a buffer to become available 1951 * for dequeuing 1952 * 1953 * Will sleep if required for nonblocking == false. 1954 */ 1955 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking) 1956 { 1957 /* 1958 * All operations on vb_done_list are performed under done_lock 1959 * spinlock protection. However, buffers may be removed from 1960 * it and returned to userspace only while holding both driver's 1961 * lock and the done_lock spinlock. Thus we can be sure that as 1962 * long as we hold the driver's lock, the list will remain not 1963 * empty if list_empty() check succeeds. 1964 */ 1965 1966 for (;;) { 1967 int ret; 1968 1969 if (q->waiting_in_dqbuf) { 1970 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 1971 return -EBUSY; 1972 } 1973 1974 if (!q->streaming) { 1975 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 1976 return -EINVAL; 1977 } 1978 1979 if (q->error) { 1980 dprintk(q, 1, "Queue in error state, will not wait for buffers\n"); 1981 return -EIO; 1982 } 1983 1984 if (q->last_buffer_dequeued) { 1985 dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n"); 1986 return -EPIPE; 1987 } 1988 1989 if (!list_empty(&q->done_list)) { 1990 /* 1991 * Found a buffer that we were waiting for. 1992 */ 1993 break; 1994 } 1995 1996 if (nonblocking) { 1997 dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n"); 1998 return -EAGAIN; 1999 } 2000 2001 q->waiting_in_dqbuf = 1; 2002 /* 2003 * We are streaming and blocking, wait for another buffer to 2004 * become ready or for streamoff. Driver's lock is released to 2005 * allow streamoff or qbuf to be called while waiting. 2006 */ 2007 call_void_qop(q, wait_prepare, q); 2008 2009 /* 2010 * All locks have been released, it is safe to sleep now. 2011 */ 2012 dprintk(q, 3, "will sleep waiting for buffers\n"); 2013 ret = wait_event_interruptible(q->done_wq, 2014 !list_empty(&q->done_list) || !q->streaming || 2015 q->error); 2016 2017 /* 2018 * We need to reevaluate both conditions again after reacquiring 2019 * the locks or return an error if one occurred. 2020 */ 2021 call_void_qop(q, wait_finish, q); 2022 q->waiting_in_dqbuf = 0; 2023 if (ret) { 2024 dprintk(q, 1, "sleep was interrupted\n"); 2025 return ret; 2026 } 2027 } 2028 return 0; 2029 } 2030 2031 /* 2032 * __vb2_get_done_vb() - get a buffer ready for dequeuing 2033 * 2034 * Will sleep if required for nonblocking == false. 
2035 */ 2036 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, 2037 void *pb, int nonblocking) 2038 { 2039 unsigned long flags; 2040 int ret = 0; 2041 2042 /* 2043 * Wait for at least one buffer to become available on the done_list. 2044 */ 2045 ret = __vb2_wait_for_done_vb(q, nonblocking); 2046 if (ret) 2047 return ret; 2048 2049 /* 2050 * Driver's lock has been held since we last verified that done_list 2051 * is not empty, so no need for another list_empty(done_list) check. 2052 */ 2053 spin_lock_irqsave(&q->done_lock, flags); 2054 *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); 2055 /* 2056 * Only remove the buffer from done_list if all planes can be 2057 * handled. Some cases such as V4L2 file I/O and DVB have pb 2058 * == NULL; skip the check then as there's nothing to verify. 2059 */ 2060 if (pb) 2061 ret = call_bufop(q, verify_planes_array, *vb, pb); 2062 if (!ret) 2063 list_del(&(*vb)->done_entry); 2064 spin_unlock_irqrestore(&q->done_lock, flags); 2065 2066 return ret; 2067 } 2068 2069 int vb2_wait_for_all_buffers(struct vb2_queue *q) 2070 { 2071 if (!q->streaming) { 2072 dprintk(q, 1, "streaming off, will not wait for buffers\n"); 2073 return -EINVAL; 2074 } 2075 2076 if (q->start_streaming_called) 2077 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count)); 2078 return 0; 2079 } 2080 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers); 2081 2082 /* 2083 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state 2084 */ 2085 static void __vb2_dqbuf(struct vb2_buffer *vb) 2086 { 2087 struct vb2_queue *q = vb->vb2_queue; 2088 2089 /* nothing to do if the buffer is already dequeued */ 2090 if (vb->state == VB2_BUF_STATE_DEQUEUED) 2091 return; 2092 2093 vb->state = VB2_BUF_STATE_DEQUEUED; 2094 2095 call_void_bufop(q, init_buffer, vb); 2096 } 2097 2098 int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, 2099 bool nonblocking) 2100 { 2101 struct vb2_buffer *vb = NULL; 2102 int ret; 2103 2104 ret = __vb2_get_done_vb(q, &vb, pb, nonblocking); 2105 if (ret < 0) 2106 return ret; 2107 2108 switch (vb->state) { 2109 case VB2_BUF_STATE_DONE: 2110 dprintk(q, 3, "returning done buffer\n"); 2111 break; 2112 case VB2_BUF_STATE_ERROR: 2113 dprintk(q, 3, "returning done buffer with errors\n"); 2114 break; 2115 default: 2116 dprintk(q, 1, "invalid buffer state %s\n", 2117 vb2_state_name(vb->state)); 2118 return -EINVAL; 2119 } 2120 2121 call_void_vb_qop(vb, buf_finish, vb); 2122 vb->prepared = 0; 2123 2124 if (pindex) 2125 *pindex = vb->index; 2126 2127 /* Fill buffer information for the userspace */ 2128 if (pb) 2129 call_void_bufop(q, fill_user_buffer, vb, pb); 2130 2131 /* Remove from vb2 queue */ 2132 list_del(&vb->queued_entry); 2133 q->queued_count--; 2134 2135 trace_vb2_dqbuf(q, vb); 2136 2137 /* go back to dequeued state */ 2138 __vb2_dqbuf(vb); 2139 2140 if (WARN_ON(vb->req_obj.req)) { 2141 media_request_object_unbind(&vb->req_obj); 2142 media_request_object_put(&vb->req_obj); 2143 } 2144 if (vb->request) 2145 media_request_put(vb->request); 2146 vb->request = NULL; 2147 2148 dprintk(q, 2, "dqbuf of buffer %d, state: %s\n", 2149 vb->index, vb2_state_name(vb->state)); 2150 2151 return 0; 2152 2153 } 2154 EXPORT_SYMBOL_GPL(vb2_core_dqbuf); 2155 2156 /* 2157 * __vb2_queue_cancel() - cancel and stop (pause) streaming 2158 * 2159 * Removes all queued buffers from driver's queue and all buffers queued by 2160 * userspace from vb2's queue. Returns to state after reqbufs. 
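 *
 * The driver-facing half of this contract is stop_streaming(): every
 * buffer the driver still owns must be handed back with vb2_buffer_done().
 * A minimal sketch (the foo_* names are hypothetical):
 *
 *	static void foo_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct foo_dev *dev = vb2_get_drv_priv(q);
 *		struct foo_buffer *buf, *tmp;
 *
 *		foo_hw_stop(dev);
 *		list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *			list_del(&buf->list);
 *			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
 *		}
 *	}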
2161  */
2162 static void __vb2_queue_cancel(struct vb2_queue *q)
2163 {
2164 	unsigned int i;
2165 
2166 	/*
2167 	 * Tell driver to stop all transactions and release all queued
2168 	 * buffers.
2169 	 */
2170 	if (q->start_streaming_called)
2171 		call_void_qop(q, stop_streaming, q);
2172 
2173 	if (q->streaming)
2174 		call_void_qop(q, unprepare_streaming, q);
2175 
2176 	/*
2177 	 * If you see this warning, then the driver isn't cleaning up properly
2178 	 * in stop_streaming(). See the stop_streaming() documentation in
2179 	 * videobuf2-core.h for more information on how buffers should be
2180 	 * returned to vb2 in stop_streaming().
2181 	 */
2182 	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2183 		for (i = 0; i < q->max_num_buffers; i++) {
2184 			struct vb2_buffer *vb = vb2_get_buffer(q, i);
2185 
2186 			if (!vb)
2187 				continue;
2188 
2189 			if (vb->state == VB2_BUF_STATE_ACTIVE) {
2190 				pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n",
2191 					vb->index);
2192 				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
2193 			}
2194 		}
2195 		/* Must be zero now */
2196 		WARN_ON(atomic_read(&q->owned_by_drv_count));
2197 	}
2198 
2199 	q->streaming = 0;
2200 	q->start_streaming_called = 0;
2201 	q->queued_count = 0;
2202 	q->error = 0;
2203 	q->uses_requests = 0;
2204 	q->uses_qbuf = 0;
2205 
2206 	/*
2207 	 * Remove all buffers from vb2's list...
2208 	 */
2209 	INIT_LIST_HEAD(&q->queued_list);
2210 	/*
2211 	 * ...and done list; userspace will not receive any buffers it
2212 	 * has not already dequeued before initiating cancel.
2213 	 */
2214 	INIT_LIST_HEAD(&q->done_list);
2215 	atomic_set(&q->owned_by_drv_count, 0);
2216 	wake_up_all(&q->done_wq);
2217 
2218 	/*
2219 	 * Reinitialize all buffers for next use.
2220 	 * Make sure to call buf_finish for any queued buffers. Normally
2221 	 * that's done in dqbuf, but that's not going to happen when we
2222 	 * cancel the whole queue. Note: this code belongs here, not in
2223 	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
2224 	 * call to __fill_user_buffer() after buf_finish(). That order can't
2225 	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
2226 	 */
2227 	for (i = 0; i < q->max_num_buffers; i++) {
2228 		struct vb2_buffer *vb;
2229 		struct media_request *req;
2230 
2231 		vb = vb2_get_buffer(q, i);
2232 		if (!vb)
2233 			continue;
2234 
2235 		req = vb->req_obj.req;
2236 		/*
2237 		 * If a request is associated with this buffer, then
2238 		 * call buf_request_complete() to give the driver a chance
2239 		 * to complete the related request objects. Otherwise those
2240 		 * objects would never complete.
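		 *
		 * A request-aware driver commonly implements that op by
		 * completing the control-handler part of the request, along
		 * these lines (sketch; foo_ctx and its ctrl_hdl are
		 * hypothetical):
		 *
		 *	static void foo_buf_request_complete(struct vb2_buffer *vb)
		 *	{
		 *		struct foo_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
		 *
		 *		v4l2_ctrl_request_complete(vb->req_obj.req,
		 *					   &ctx->ctrl_hdl);
		 *	}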
2241 */ 2242 if (req) { 2243 enum media_request_state state; 2244 unsigned long flags; 2245 2246 spin_lock_irqsave(&req->lock, flags); 2247 state = req->state; 2248 spin_unlock_irqrestore(&req->lock, flags); 2249 2250 if (state == MEDIA_REQUEST_STATE_QUEUED) 2251 call_void_vb_qop(vb, buf_request_complete, vb); 2252 } 2253 2254 __vb2_buf_mem_finish(vb); 2255 2256 if (vb->prepared) { 2257 call_void_vb_qop(vb, buf_finish, vb); 2258 vb->prepared = 0; 2259 } 2260 __vb2_dqbuf(vb); 2261 2262 if (vb->req_obj.req) { 2263 media_request_object_unbind(&vb->req_obj); 2264 media_request_object_put(&vb->req_obj); 2265 } 2266 if (vb->request) 2267 media_request_put(vb->request); 2268 vb->request = NULL; 2269 vb->copied_timestamp = 0; 2270 } 2271 } 2272 2273 int vb2_core_streamon(struct vb2_queue *q, unsigned int type) 2274 { 2275 unsigned int q_num_bufs = vb2_get_num_buffers(q); 2276 int ret; 2277 2278 if (type != q->type) { 2279 dprintk(q, 1, "invalid stream type\n"); 2280 return -EINVAL; 2281 } 2282 2283 if (q->streaming) { 2284 dprintk(q, 3, "already streaming\n"); 2285 return 0; 2286 } 2287 2288 if (!q_num_bufs) { 2289 dprintk(q, 1, "no buffers have been allocated\n"); 2290 return -EINVAL; 2291 } 2292 2293 if (q_num_bufs < q->min_queued_buffers) { 2294 dprintk(q, 1, "need at least %u queued buffers\n", 2295 q->min_queued_buffers); 2296 return -EINVAL; 2297 } 2298 2299 ret = call_qop(q, prepare_streaming, q); 2300 if (ret) 2301 return ret; 2302 2303 /* 2304 * Tell driver to start streaming provided sufficient buffers 2305 * are available. 2306 */ 2307 if (q->queued_count >= q->min_queued_buffers) { 2308 ret = vb2_start_streaming(q); 2309 if (ret) 2310 goto unprepare; 2311 } 2312 2313 q->streaming = 1; 2314 2315 dprintk(q, 3, "successful\n"); 2316 return 0; 2317 2318 unprepare: 2319 call_void_qop(q, unprepare_streaming, q); 2320 return ret; 2321 } 2322 EXPORT_SYMBOL_GPL(vb2_core_streamon); 2323 2324 void vb2_queue_error(struct vb2_queue *q) 2325 { 2326 q->error = 1; 2327 2328 wake_up_all(&q->done_wq); 2329 } 2330 EXPORT_SYMBOL_GPL(vb2_queue_error); 2331 2332 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type) 2333 { 2334 if (type != q->type) { 2335 dprintk(q, 1, "invalid stream type\n"); 2336 return -EINVAL; 2337 } 2338 2339 /* 2340 * Cancel will pause streaming and remove all buffers from the driver 2341 * and vb2, effectively returning control over them to userspace. 2342 * 2343 * Note that we do this even if q->streaming == 0: if you prepare or 2344 * queue buffers, and then call streamoff without ever having called 2345 * streamon, you would still expect those buffers to be returned to 2346 * their normal dequeued state. 2347 */ 2348 __vb2_queue_cancel(q); 2349 q->waiting_for_buffers = !q->is_output; 2350 q->last_buffer_dequeued = false; 2351 2352 dprintk(q, 3, "successful\n"); 2353 return 0; 2354 } 2355 EXPORT_SYMBOL_GPL(vb2_core_streamoff); 2356 2357 /* 2358 * __find_plane_by_offset() - find plane associated with the given offset 2359 */ 2360 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset, 2361 struct vb2_buffer **vb, unsigned int *plane) 2362 { 2363 unsigned int buffer; 2364 2365 /* 2366 * Sanity checks to ensure the lock is held, MEMORY_MMAP is 2367 * used and fileio isn't active. 
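 *
 * Worked example of the cookie layout decoded below, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and PLANE_INDEX_BITS == 3, hence
 * PLANE_INDEX_SHIFT == 15: buffer 2, plane 1 is encoded as
 *
 *	offset = (2 << 15) | (1 << 12) = 0x11000
 *
 * and decoding yields buffer = 0x11000 >> 15 = 2 and
 * plane = (0x11000 >> 12) & 0x7 = 1.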
2368 	 */
2369 	lockdep_assert_held(&q->mmap_lock);
2370 
2371 	if (q->memory != VB2_MEMORY_MMAP) {
2372 		dprintk(q, 1, "queue is not currently set up for mmap\n");
2373 		return -EINVAL;
2374 	}
2375 
2376 	if (vb2_fileio_is_active(q)) {
2377 		dprintk(q, 1, "file io in progress\n");
2378 		return -EBUSY;
2379 	}
2380 
2381 	/* Get buffer and plane from the offset */
2382 	buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
2383 	*plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK;
2384 
2385 	*vb = vb2_get_buffer(q, buffer);
2386 	if (!*vb)
2387 		return -EINVAL;
2388 	if (*plane >= (*vb)->num_planes)
2389 		return -EINVAL;
2390 
2391 	return 0;
2392 }
2393 
2394 int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
2395 		    struct vb2_buffer *vb, unsigned int plane, unsigned int flags)
2396 {
2397 	struct vb2_plane *vb_plane;
2398 	int ret;
2399 	struct dma_buf *dbuf;
2400 
2401 	if (q->memory != VB2_MEMORY_MMAP) {
2402 		dprintk(q, 1, "queue is not currently set up for mmap\n");
2403 		return -EINVAL;
2404 	}
2405 
2406 	if (!q->mem_ops->get_dmabuf) {
2407 		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
2408 		return -EINVAL;
2409 	}
2410 
2411 	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
2412 		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
2413 		return -EINVAL;
2414 	}
2415 
2416 	if (type != q->type) {
2417 		dprintk(q, 1, "invalid buffer type\n");
2418 		return -EINVAL;
2419 	}
2420 
2421 	if (plane >= vb->num_planes) {
2422 		dprintk(q, 1, "buffer plane out of range\n");
2423 		return -EINVAL;
2424 	}
2425 
2426 	if (vb2_fileio_is_active(q)) {
2427 		dprintk(q, 1, "expbuf: file io in progress\n");
2428 		return -EBUSY;
2429 	}
2430 
2431 	vb_plane = &vb->planes[plane];
2432 
2433 	dbuf = call_ptr_memop(get_dmabuf,
2434 			      vb,
2435 			      vb_plane->mem_priv,
2436 			      flags & O_ACCMODE);
2437 	if (IS_ERR_OR_NULL(dbuf)) {
2438 		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
2439 			vb->index, plane);
2440 		return -EINVAL;
2441 	}
2442 
2443 	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
2444 	if (ret < 0) {
2445 		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
2446 			vb->index, plane, ret);
2447 		dma_buf_put(dbuf);
2448 		return ret;
2449 	}
2450 
2451 	dprintk(q, 3, "buffer %d, plane %d exported as descriptor %d\n",
2452 		vb->index, plane, ret);
2453 	*fd = ret;
2454 
2455 	return 0;
2456 }
2457 EXPORT_SYMBOL_GPL(vb2_core_expbuf);
2458 
2459 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2460 {
2461 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2462 	struct vb2_buffer *vb;
2463 	unsigned int plane = 0;
2464 	int ret;
2465 	unsigned long length;
2466 
2467 	/*
2468 	 * Check memory area access mode.
2469 	 */
2470 	if (!(vma->vm_flags & VM_SHARED)) {
2471 		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
2472 		return -EINVAL;
2473 	}
2474 	if (q->is_output) {
2475 		if (!(vma->vm_flags & VM_WRITE)) {
2476 			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
2477 			return -EINVAL;
2478 		}
2479 	} else {
2480 		if (!(vma->vm_flags & VM_READ)) {
2481 			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
2482 			return -EINVAL;
2483 		}
2484 	}
2485 
2486 	mutex_lock(&q->mmap_lock);
2487 
2488 	/*
2489 	 * Find the plane corresponding to the offset passed by userspace.
2490 	 * This fails if the memory type is not MEMORY_MMAP or if file I/O is in progress.
2491 	 */
2492 	ret = __find_plane_by_offset(q, offset, &vb, &plane);
2493 	if (ret)
2494 		goto unlock;
2495 
2496 	/*
2497 	 * MMAP requires page-aligned buffers.
2498 	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
2499 	 * so we need to do the same here.
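	 *
	 * For reference, the matching userspace side is plain mmap() with the
	 * offset cookie reported by the kernel passed through verbatim
	 * (sketch; 'buf' as filled in by VIDIOC_QUERYBUF):
	 *
	 *	void *mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
	 *			 MAP_SHARED, video_fd, buf.m.offset);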
2500 	 */
2501 	length = PAGE_ALIGN(vb->planes[plane].length);
2502 	if (length < (vma->vm_end - vma->vm_start)) {
2503 		dprintk(q, 1,
2504 			"MMAP invalid, as it would overflow buffer length\n");
2505 		ret = -EINVAL;
2506 		goto unlock;
2507 	}
2508 
2509 	/*
2510 	 * vm_pgoff is treated in the V4L2 API as a 'cookie' to select a buffer,
2511 	 * not as an in-buffer offset. We always want to mmap a whole buffer
2512 	 * from its beginning.
2513 	 */
2514 	vma->vm_pgoff = 0;
2515 
2516 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2517 
2518 unlock:
2519 	mutex_unlock(&q->mmap_lock);
2520 	if (ret)
2521 		return ret;
2522 
2523 	dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane);
2524 	return 0;
2525 }
2526 EXPORT_SYMBOL_GPL(vb2_mmap);
2527 
2528 #ifndef CONFIG_MMU
2529 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2530 				    unsigned long addr,
2531 				    unsigned long len,
2532 				    unsigned long pgoff,
2533 				    unsigned long flags)
2534 {
2535 	unsigned long offset = pgoff << PAGE_SHIFT;
2536 	struct vb2_buffer *vb;
2537 	unsigned int plane;
2538 	void *vaddr;
2539 	int ret;
2540 
2541 	mutex_lock(&q->mmap_lock);
2542 
2543 	/*
2544 	 * Find the plane corresponding to the offset passed by userspace.
2545 	 * This fails if the memory type is not MEMORY_MMAP or if file I/O is in progress.
2546 	 */
2547 	ret = __find_plane_by_offset(q, offset, &vb, &plane);
2548 	if (ret)
2549 		goto unlock;
2550 
2551 	vaddr = vb2_plane_vaddr(vb, plane);
2552 	mutex_unlock(&q->mmap_lock);
2553 	return vaddr ? (unsigned long)vaddr : -EINVAL;
2554 
2555 unlock:
2556 	mutex_unlock(&q->mmap_lock);
2557 	return ret;
2558 }
2559 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2560 #endif
2561 
2562 int vb2_core_queue_init(struct vb2_queue *q)
2563 {
2564 	/*
2565 	 * Sanity checks; q must be checked before it is dereferenced.
2566 	 */
2567 	if (WARN_ON(!q) ||
2568 	    WARN_ON(!q->ops) ||
2569 	    WARN_ON(!q->mem_ops) ||
2570 	    WARN_ON(!q->type) ||
2571 	    WARN_ON(!q->io_modes) ||
2572 	    WARN_ON(!q->ops->queue_setup) ||
2573 	    WARN_ON(!q->ops->buf_queue))
2574 		return -EINVAL;
2575 
2576 	/*
2577 	 * For drivers that don't set max_num_buffers, ensure backward
2578 	 * compatibility.
2579 	 */
2580 	if (!q->max_num_buffers)
2581 		q->max_num_buffers = VB2_MAX_FRAME;
2582 
2583 	/* The maximum is limited by the offset cookie encoding pattern. */
2584 	q->max_num_buffers = min_t(unsigned int, q->max_num_buffers, MAX_BUFFER_INDEX);
2585 
2586 	if (WARN_ON(q->max_num_buffers < VB2_MAX_FRAME) ||
2587 	    WARN_ON(q->min_queued_buffers > q->max_num_buffers))
2588 		return -EINVAL;
2589 
2590 	if (WARN_ON(q->requires_requests && !q->supports_requests))
2591 		return -EINVAL;
2592 
2593 	/*
2594 	 * This combination is not allowed since a non-zero value of
2595 	 * q->min_queued_buffers can cause vb2_core_qbuf() to fail if
2596 	 * it has to call start_streaming(), and the Request API expects
2597 	 * that queueing a request (and thus queueing a buffer contained
2598 	 * in that request) will always succeed. There is no method of
2599 	 * propagating an error back to userspace.
2600 	 */
2601 	if (WARN_ON(q->supports_requests && q->min_queued_buffers))
2602 		return -EINVAL;
2603 
2604 	/*
2605 	 * The minimum requirement is 2: one buffer is used
2606 	 * by the hardware while the other is being processed by userspace.
2607 	 */
2608 	if (q->min_reqbufs_allocation < 2)
2609 		q->min_reqbufs_allocation = 2;
2610 
2611 	/*
2612 	 * If the driver needs 'min_queued_buffers' in the queue before
2613 	 * calling start_streaming() then the minimum requirement is
2614 	 * 'min_queued_buffers + 1' to keep at least one buffer available
2615 	 * for userspace.
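	 *
	 * For example, with min_queued_buffers == 2 a REQBUFS allocation is
	 * bumped to at least 3 buffers. A driver typically fills in the queue
	 * before calling this function along these lines (sketch; the foo_*
	 * names are hypothetical):
	 *
	 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
	 *	q->ops = &foo_qops;
	 *	q->mem_ops = &vb2_vmalloc_memops;
	 *	q->buf_struct_size = sizeof(struct foo_buffer);
	 *	q->min_queued_buffers = 2;
	 *	ret = vb2_core_queue_init(q);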
2616 	 */
2617 	if (q->min_reqbufs_allocation < q->min_queued_buffers + 1)
2618 		q->min_reqbufs_allocation = q->min_queued_buffers + 1;
2619 
2620 	if (WARN_ON(q->min_reqbufs_allocation > q->max_num_buffers))
2621 		return -EINVAL;
2622 
2623 	INIT_LIST_HEAD(&q->queued_list);
2624 	INIT_LIST_HEAD(&q->done_list);
2625 	spin_lock_init(&q->done_lock);
2626 	mutex_init(&q->mmap_lock);
2627 	init_waitqueue_head(&q->done_wq);
2628 
2629 	q->memory = VB2_MEMORY_UNKNOWN;
2630 
2631 	if (q->buf_struct_size == 0)
2632 		q->buf_struct_size = sizeof(struct vb2_buffer);
2633 
2634 	if (q->bidirectional)
2635 		q->dma_dir = DMA_BIDIRECTIONAL;
2636 	else
2637 		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2638 
2639 	if (q->name[0] == '\0')
2640 		snprintf(q->name, sizeof(q->name), "%s-%p",
2641 			 q->is_output ? "out" : "cap", q);
2642 
2643 	return 0;
2644 }
2645 EXPORT_SYMBOL_GPL(vb2_core_queue_init);
2646 
2647 static int __vb2_init_fileio(struct vb2_queue *q, int read);
2648 static int __vb2_cleanup_fileio(struct vb2_queue *q);
2649 void vb2_core_queue_release(struct vb2_queue *q)
2650 {
2651 	__vb2_cleanup_fileio(q);
2652 	__vb2_queue_cancel(q);
2653 	mutex_lock(&q->mmap_lock);
2654 	__vb2_queue_free(q, 0, q->max_num_buffers);
2655 	vb2_core_free_buffers_storage(q);
2656 	q->is_busy = 0;
2657 	mutex_unlock(&q->mmap_lock);
2658 }
2659 EXPORT_SYMBOL_GPL(vb2_core_queue_release);
2660 
2661 __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
2662 		       poll_table *wait)
2663 {
2664 	__poll_t req_events = poll_requested_events(wait);
2665 	struct vb2_buffer *vb = NULL;
2666 	unsigned long flags;
2667 
2668 	/*
2669 	 * poll_wait() MUST be called on the first invocation on all the
2670 	 * potential queues of interest, even if we are not interested in their
2671 	 * events during this first call. Failure to do so will result in the
2672 	 * queue's events being ignored because the poll_table won't be capable
2673 	 * of adding new wait queues thereafter.
2674 	 */
2675 	poll_wait(file, &q->done_wq, wait);
2676 
2677 	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
2678 		return 0;
2679 	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
2680 		return 0;
2681 
2682 	/*
2683 	 * Start the file I/O emulator only if the streaming API has not been used yet.
2684 	 */
2685 	if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
2686 		if (!q->is_output && (q->io_modes & VB2_READ) &&
2687 		    (req_events & (EPOLLIN | EPOLLRDNORM))) {
2688 			if (__vb2_init_fileio(q, 1))
2689 				return EPOLLERR;
2690 		}
2691 		if (q->is_output && (q->io_modes & VB2_WRITE) &&
2692 		    (req_events & (EPOLLOUT | EPOLLWRNORM))) {
2693 			if (__vb2_init_fileio(q, 0))
2694 				return EPOLLERR;
2695 			/*
2696 			 * Write to OUTPUT queue can be done immediately.
2697 			 */
2698 			return EPOLLOUT | EPOLLWRNORM;
2699 		}
2700 	}
2701 
2702 	/*
2703 	 * There is nothing to wait for if the queue isn't streaming, or if the
2704 	 * error flag is set.
2705 	 */
2706 	if (!vb2_is_streaming(q) || q->error)
2707 		return EPOLLERR;
2708 
2709 	/*
2710 	 * If this quirk is set and QBUF hasn't been called yet then
2711 	 * return EPOLLERR as well. This only affects capture queues, output
2712 	 * queues will always initialize waiting_for_buffers to false.
2713 	 * This quirk is set by V4L2 for backwards compatibility reasons.
2714 	 */
2715 	if (q->quirk_poll_must_check_waiting_for_buffers &&
2716 	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
2717 		return EPOLLERR;
2718 
2719 	/*
2720 	 * For output streams you can call write() as long as there are fewer
2721 	 * buffers queued than there are buffers available.
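	 *
	 * From userspace this surfaces as ordinary poll() semantics (sketch,
	 * output node; frame and frame_size are the caller's own):
	 *
	 *	struct pollfd pfd = { .fd = video_fd, .events = POLLOUT };
	 *
	 *	// Returns while another buffer can still be written.
	 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT))
	 *		write(video_fd, frame, frame_size);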
2722 	 */
2723 	if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q))
2724 		return EPOLLOUT | EPOLLWRNORM;
2725 
2726 	if (list_empty(&q->done_list)) {
2727 		/*
2728 		 * If the last buffer was dequeued from a capture queue,
2729 		 * return immediately. DQBUF will return -EPIPE.
2730 		 */
2731 		if (q->last_buffer_dequeued)
2732 			return EPOLLIN | EPOLLRDNORM;
2733 	}
2734 
2735 	/*
2736 	 * Take first buffer available for dequeuing.
2737 	 */
2738 	spin_lock_irqsave(&q->done_lock, flags);
2739 	if (!list_empty(&q->done_list))
2740 		vb = list_first_entry(&q->done_list, struct vb2_buffer,
2741 				      done_entry);
2742 	spin_unlock_irqrestore(&q->done_lock, flags);
2743 
2744 	if (vb && (vb->state == VB2_BUF_STATE_DONE ||
2745 		   vb->state == VB2_BUF_STATE_ERROR)) {
2746 		return (q->is_output) ?
2747 			EPOLLOUT | EPOLLWRNORM :
2748 			EPOLLIN | EPOLLRDNORM;
2749 	}
2750 	return 0;
2751 }
2752 EXPORT_SYMBOL_GPL(vb2_core_poll);
2753 
2754 /*
2755  * struct vb2_fileio_buf - buffer context used by file io emulator
2756  *
2757  * vb2 provides a compatibility layer and emulator of file io (read and
2758  * write) calls on top of the streaming API. This structure is used for
2759  * tracking context related to the buffers.
2760  */
2761 struct vb2_fileio_buf {
2762 	void *vaddr;
2763 	unsigned int size;
2764 	unsigned int pos;
2765 	unsigned int queued:1;
2766 };
2767 
2768 /*
2769  * struct vb2_fileio_data - queue context used by file io emulator
2770  *
2771  * @cur_index:	the index of the buffer currently being read from or
2772  *		written to. If equal to the number of buffers in the vb2_queue
2773  *		then a new buffer must be dequeued.
2774  * @initial_index: in the read() case all buffers are queued up immediately
2775  *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2776  *		buffers. However, in the write() case no buffers are initially
2777  *		queued, instead whenever a buffer is full it is queued up by
2778  *		__vb2_perform_fileio(). Only once all available buffers have
2779  *		been queued up will __vb2_perform_fileio() start to dequeue
2780  *		buffers. This means that initially __vb2_perform_fileio()
2781  *		needs to know what buffer index to use when it is queuing up
2782  *		the buffers for the first time. That initial index is stored
2783  *		in this field. Once it is equal to the number of buffers in the
2784  *		vb2_queue all available buffers have been queued and
2785  *		__vb2_perform_fileio() should start the normal dequeue/queue cycle.
2786  *
2787  * vb2 provides a compatibility layer and emulator of file io (read and
2788  * write) calls on top of the streaming API. For proper operation it requires
2789  * this structure to save the driver state between each call of the read
2790  * or write function.
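 *
 * Walk-through, assuming three buffers in the write() case: buffers 0..2
 * are handed out through initial_index (0, then 1, then 2); once
 * initial_index reaches 3, cur_index is left at 3 after every queue
 * operation, so each subsequent call starts by dequeuing a buffer. From
 * userspace all of this is hidden behind plain file I/O (sketch):
 *
 *	// Each read() drains (part of) one capture buffer.
 *	char frame[4096];
 *	ssize_t n = read(video_fd, frame, sizeof(frame));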
2791 */ 2792 struct vb2_fileio_data { 2793 unsigned int count; 2794 unsigned int type; 2795 unsigned int memory; 2796 struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; 2797 unsigned int cur_index; 2798 unsigned int initial_index; 2799 unsigned int q_count; 2800 unsigned int dq_count; 2801 unsigned read_once:1; 2802 unsigned write_immediately:1; 2803 }; 2804 2805 /* 2806 * __vb2_init_fileio() - initialize file io emulator 2807 * @q: videobuf2 queue 2808 * @read: mode selector (1 means read, 0 means write) 2809 */ 2810 static int __vb2_init_fileio(struct vb2_queue *q, int read) 2811 { 2812 struct vb2_fileio_data *fileio; 2813 struct vb2_buffer *vb; 2814 int i, ret; 2815 2816 /* 2817 * Sanity check 2818 */ 2819 if (WARN_ON((read && !(q->io_modes & VB2_READ)) || 2820 (!read && !(q->io_modes & VB2_WRITE)))) 2821 return -EINVAL; 2822 2823 /* 2824 * Check if device supports mapping buffers to kernel virtual space. 2825 */ 2826 if (!q->mem_ops->vaddr) 2827 return -EBUSY; 2828 2829 /* 2830 * Check if streaming api has not been already activated. 2831 */ 2832 if (q->streaming || vb2_get_num_buffers(q) > 0) 2833 return -EBUSY; 2834 2835 dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n", 2836 (read) ? "read" : "write", q->min_reqbufs_allocation, q->fileio_read_once, 2837 q->fileio_write_immediately); 2838 2839 fileio = kzalloc(sizeof(*fileio), GFP_KERNEL); 2840 if (fileio == NULL) 2841 return -ENOMEM; 2842 2843 fileio->read_once = q->fileio_read_once; 2844 fileio->write_immediately = q->fileio_write_immediately; 2845 2846 /* 2847 * Request buffers and use MMAP type to force driver 2848 * to allocate buffers by itself. 2849 */ 2850 fileio->count = q->min_reqbufs_allocation; 2851 fileio->memory = VB2_MEMORY_MMAP; 2852 fileio->type = q->type; 2853 q->fileio = fileio; 2854 ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); 2855 if (ret) 2856 goto err_kfree; 2857 /* vb2_fileio_data supports max VB2_MAX_FRAME buffers */ 2858 if (fileio->count > VB2_MAX_FRAME) { 2859 dprintk(q, 1, "fileio: more than VB2_MAX_FRAME buffers requested\n"); 2860 ret = -ENOSPC; 2861 goto err_reqbufs; 2862 } 2863 2864 /* 2865 * Userspace can never add or delete buffers later, so there 2866 * will never be holes. It is safe to assume that vb2_get_buffer(q, 0) 2867 * will always return a valid vb pointer 2868 */ 2869 vb = vb2_get_buffer(q, 0); 2870 2871 /* 2872 * Check if plane_count is correct 2873 * (multiplane buffers are not supported). 2874 */ 2875 if (vb->num_planes != 1) { 2876 ret = -EBUSY; 2877 goto err_reqbufs; 2878 } 2879 2880 /* 2881 * Get kernel address of each buffer. 2882 */ 2883 for (i = 0; i < vb2_get_num_buffers(q); i++) { 2884 /* vb can never be NULL when using fileio. */ 2885 vb = vb2_get_buffer(q, i); 2886 2887 fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0); 2888 if (fileio->bufs[i].vaddr == NULL) { 2889 ret = -EINVAL; 2890 goto err_reqbufs; 2891 } 2892 fileio->bufs[i].size = vb2_plane_size(vb, 0); 2893 } 2894 2895 /* 2896 * Read mode requires pre queuing of all buffers. 2897 */ 2898 if (read) { 2899 /* 2900 * Queue all buffers. 
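		 *
		 * For context: this emulator is reached through a driver's
		 * file operations. A V4L2 driver typically opts in with the
		 * stock vb2_fop_* helpers from videobuf2-v4l2 (sketch):
		 *
		 *	static const struct v4l2_file_operations foo_fops = {
		 *		.owner	= THIS_MODULE,
		 *		.read	= vb2_fop_read,	// ends up in __vb2_perform_fileio()
		 *		.poll	= vb2_fop_poll,
		 *		.mmap	= vb2_fop_mmap,
		 *		...
		 *	};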
2901 		 */
2902 		for (i = 0; i < vb2_get_num_buffers(q); i++) {
2903 			struct vb2_buffer *vb2 = vb2_get_buffer(q, i);
2904 
2905 			if (!vb2)
2906 				continue;
2907 
2908 			ret = vb2_core_qbuf(q, vb2, NULL, NULL);
2909 			if (ret)
2910 				goto err_reqbufs;
2911 			fileio->bufs[i].queued = 1;
2912 		}
2913 		/*
2914 		 * All buffers have been queued, so mark that by setting
2915 		 * initial_index to the number of buffers in the vb2_queue.
2916 		 */
2917 		fileio->initial_index = vb2_get_num_buffers(q);
2918 		fileio->cur_index = fileio->initial_index;
2919 	}
2920 
2921 	/*
2922 	 * Start streaming.
2923 	 */
2924 	ret = vb2_core_streamon(q, q->type);
2925 	if (ret)
2926 		goto err_reqbufs;
2927 
2928 	return ret;
2929 
2930 err_reqbufs:
2931 	fileio->count = 0;
2932 	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
2933 
2934 err_kfree:
2935 	q->fileio = NULL;
2936 	kfree(fileio);
2937 	return ret;
2938 }
2939 
2940 /*
2941  * __vb2_cleanup_fileio() - free resources used by file io emulator
2942  * @q: videobuf2 queue
2943  */
2944 static int __vb2_cleanup_fileio(struct vb2_queue *q)
2945 {
2946 	struct vb2_fileio_data *fileio = q->fileio;
2947 
2948 	if (fileio) {
2949 		vb2_core_streamoff(q, q->type);
2950 		q->fileio = NULL;
2951 		fileio->count = 0;
2952 		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
2953 		kfree(fileio);
2954 		dprintk(q, 3, "file io emulator closed\n");
2955 	}
2956 	return 0;
2957 }
2958 
2959 /*
2960  * __vb2_perform_fileio() - perform a single file io (read or write) operation
2961  * @q: videobuf2 queue
2962  * @data: pointer to the target userspace buffer
2963  * @count: number of bytes to read or write
2964  * @ppos: file handle position tracking pointer
2965  * @nonblock: mode selector (1 means nonblocking calls, 0 means blocking)
2966  * @read: access mode selector (1 means read, 0 means write)
2967  */
2968 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2969 				   loff_t *ppos, int nonblock, int read)
2970 {
2971 	struct vb2_fileio_data *fileio;
2972 	struct vb2_fileio_buf *buf;
2973 	bool is_multiplanar = q->is_multiplanar;
2974 	/*
2975 	 * When using write() to write data to an output video node the vb2 core
2976 	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
2977 	 * else is able to provide this information with the write() operation.
2978 	 */
2979 	bool copy_timestamp = !read && q->copy_timestamp;
2980 	unsigned index;
2981 	int ret;
2982 
2983 	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
2984 		read ? "read" : "write", (long)*ppos, count,
2985 		nonblock ? "non" : "");
2986 
2987 	if (!data)
2988 		return -EINVAL;
2989 
2990 	if (q->waiting_in_dqbuf) {
2991 		dprintk(q, 3, "another dup()ped fd is %s\n",
2992 			read ? "reading" : "writing");
2993 		return -EBUSY;
2994 	}
2995 
2996 	/*
2997 	 * Initialize emulator on first call.
2998 	 */
2999 	if (!vb2_fileio_is_active(q)) {
3000 		ret = __vb2_init_fileio(q, read);
3001 		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
3002 		if (ret)
3003 			return ret;
3004 	}
3005 	fileio = q->fileio;
3006 
3007 	/*
3008 	 * Check if we need to dequeue the buffer.
3009 	 */
3010 	index = fileio->cur_index;
3011 	if (index >= vb2_get_num_buffers(q)) {
3012 		struct vb2_buffer *b;
3013 
3014 		/*
3015 		 * Call vb2_dqbuf to get buffer back.
3016 		 */
3017 		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
3018 		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
3019 		if (ret)
3020 			return ret;
3021 		fileio->dq_count += 1;
3022 
3023 		fileio->cur_index = index;
3024 		buf = &fileio->bufs[index];
3025 
3026 		/* b can never be NULL when using fileio.
*/ 3027 b = vb2_get_buffer(q, index); 3028 3029 /* 3030 * Get number of bytes filled by the driver 3031 */ 3032 buf->pos = 0; 3033 buf->queued = 0; 3034 buf->size = read ? vb2_get_plane_payload(b, 0) 3035 : vb2_plane_size(b, 0); 3036 /* Compensate for data_offset on read in the multiplanar case. */ 3037 if (is_multiplanar && read && 3038 b->planes[0].data_offset < buf->size) { 3039 buf->pos = b->planes[0].data_offset; 3040 buf->size -= buf->pos; 3041 } 3042 } else { 3043 buf = &fileio->bufs[index]; 3044 } 3045 3046 /* 3047 * Limit count on last few bytes of the buffer. 3048 */ 3049 if (buf->pos + count > buf->size) { 3050 count = buf->size - buf->pos; 3051 dprintk(q, 5, "reducing read count: %zd\n", count); 3052 } 3053 3054 /* 3055 * Transfer data to userspace. 3056 */ 3057 dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n", 3058 count, index, buf->pos); 3059 if (read) 3060 ret = copy_to_user(data, buf->vaddr + buf->pos, count); 3061 else 3062 ret = copy_from_user(buf->vaddr + buf->pos, data, count); 3063 if (ret) { 3064 dprintk(q, 3, "error copying data\n"); 3065 return -EFAULT; 3066 } 3067 3068 /* 3069 * Update counters. 3070 */ 3071 buf->pos += count; 3072 *ppos += count; 3073 3074 /* 3075 * Queue next buffer if required. 3076 */ 3077 if (buf->pos == buf->size || (!read && fileio->write_immediately)) { 3078 /* b can never be NULL when using fileio. */ 3079 struct vb2_buffer *b = vb2_get_buffer(q, index); 3080 3081 /* 3082 * Check if this is the last buffer to read. 3083 */ 3084 if (read && fileio->read_once && fileio->dq_count == 1) { 3085 dprintk(q, 3, "read limit reached\n"); 3086 return __vb2_cleanup_fileio(q); 3087 } 3088 3089 /* 3090 * Call vb2_qbuf and give buffer to the driver. 3091 */ 3092 b->planes[0].bytesused = buf->pos; 3093 3094 if (copy_timestamp) 3095 b->timestamp = ktime_get_ns(); 3096 ret = vb2_core_qbuf(q, b, NULL, NULL); 3097 dprintk(q, 5, "vb2_qbuf result: %d\n", ret); 3098 if (ret) 3099 return ret; 3100 3101 /* 3102 * Buffer has been queued, update the status 3103 */ 3104 buf->pos = 0; 3105 buf->queued = 1; 3106 buf->size = vb2_plane_size(b, 0); 3107 fileio->q_count += 1; 3108 /* 3109 * If we are queuing up buffers for the first time, then 3110 * increase initial_index by one. 3111 */ 3112 if (fileio->initial_index < vb2_get_num_buffers(q)) 3113 fileio->initial_index++; 3114 /* 3115 * The next buffer to use is either a buffer that's going to be 3116 * queued for the first time (initial_index < number of buffers in the vb2_queue) 3117 * or it is equal to the number of buffers in the vb2_queue, 3118 * meaning that the next time we need to dequeue a buffer since 3119 * we've now queued up all the 'first time' buffers. 3120 */ 3121 fileio->cur_index = fileio->initial_index; 3122 } 3123 3124 /* 3125 * Return proper number of bytes processed. 
3126 */ 3127 if (ret == 0) 3128 ret = count; 3129 return ret; 3130 } 3131 3132 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, 3133 loff_t *ppos, int nonblocking) 3134 { 3135 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); 3136 } 3137 EXPORT_SYMBOL_GPL(vb2_read); 3138 3139 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, 3140 loff_t *ppos, int nonblocking) 3141 { 3142 return __vb2_perform_fileio(q, (char __user *) data, count, 3143 ppos, nonblocking, 0); 3144 } 3145 EXPORT_SYMBOL_GPL(vb2_write); 3146 3147 struct vb2_threadio_data { 3148 struct task_struct *thread; 3149 vb2_thread_fnc fnc; 3150 void *priv; 3151 bool stop; 3152 }; 3153 3154 static int vb2_thread(void *data) 3155 { 3156 struct vb2_queue *q = data; 3157 struct vb2_threadio_data *threadio = q->threadio; 3158 bool copy_timestamp = false; 3159 unsigned prequeue = 0; 3160 unsigned index = 0; 3161 int ret = 0; 3162 3163 if (q->is_output) { 3164 prequeue = vb2_get_num_buffers(q); 3165 copy_timestamp = q->copy_timestamp; 3166 } 3167 3168 set_freezable(); 3169 3170 for (;;) { 3171 struct vb2_buffer *vb; 3172 3173 /* 3174 * Call vb2_dqbuf to get buffer back. 3175 */ 3176 if (prequeue) { 3177 vb = vb2_get_buffer(q, index++); 3178 if (!vb) 3179 continue; 3180 prequeue--; 3181 } else { 3182 call_void_qop(q, wait_finish, q); 3183 if (!threadio->stop) 3184 ret = vb2_core_dqbuf(q, &index, NULL, 0); 3185 call_void_qop(q, wait_prepare, q); 3186 dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret); 3187 if (!ret) 3188 vb = vb2_get_buffer(q, index); 3189 } 3190 if (ret || threadio->stop) 3191 break; 3192 try_to_freeze(); 3193 3194 if (vb->state != VB2_BUF_STATE_ERROR) 3195 if (threadio->fnc(vb, threadio->priv)) 3196 break; 3197 call_void_qop(q, wait_finish, q); 3198 if (copy_timestamp) 3199 vb->timestamp = ktime_get_ns(); 3200 if (!threadio->stop) 3201 ret = vb2_core_qbuf(q, vb, NULL, NULL); 3202 call_void_qop(q, wait_prepare, q); 3203 if (ret || threadio->stop) 3204 break; 3205 } 3206 3207 /* Hmm, linux becomes *very* unhappy without this ... */ 3208 while (!kthread_should_stop()) { 3209 set_current_state(TASK_INTERRUPTIBLE); 3210 schedule(); 3211 } 3212 return 0; 3213 } 3214 3215 /* 3216 * This function should not be used for anything else but the videobuf2-dvb 3217 * support. If you think you have another good use-case for this, then please 3218 * contact the linux-media mailinglist first. 
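 *
 * Usage sketch, mirroring what videobuf2-dvb does (the callback and priv
 * pointer are the caller's own; foo_dvb is hypothetical):
 *
 *	static int foo_feed(struct vb2_buffer *vb, void *priv)
 *	{
 *		struct foo_dvb *dvb = priv;
 *
 *		// Push the buffer payload into the DVB software demux.
 *		dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
 *				 vb2_get_plane_payload(vb, 0));
 *		return 0;
 *	}
 *
 *	ret = vb2_thread_start(q, foo_feed, dvb, "dvb");
 *	...
 *	vb2_thread_stop(q);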
3219 */ 3220 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, 3221 const char *thread_name) 3222 { 3223 struct vb2_threadio_data *threadio; 3224 int ret = 0; 3225 3226 if (q->threadio) 3227 return -EBUSY; 3228 if (vb2_is_busy(q)) 3229 return -EBUSY; 3230 if (WARN_ON(q->fileio)) 3231 return -EBUSY; 3232 3233 threadio = kzalloc(sizeof(*threadio), GFP_KERNEL); 3234 if (threadio == NULL) 3235 return -ENOMEM; 3236 threadio->fnc = fnc; 3237 threadio->priv = priv; 3238 3239 ret = __vb2_init_fileio(q, !q->is_output); 3240 dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret); 3241 if (ret) 3242 goto nomem; 3243 q->threadio = threadio; 3244 threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name); 3245 if (IS_ERR(threadio->thread)) { 3246 ret = PTR_ERR(threadio->thread); 3247 threadio->thread = NULL; 3248 goto nothread; 3249 } 3250 return 0; 3251 3252 nothread: 3253 __vb2_cleanup_fileio(q); 3254 nomem: 3255 kfree(threadio); 3256 return ret; 3257 } 3258 EXPORT_SYMBOL_GPL(vb2_thread_start); 3259 3260 int vb2_thread_stop(struct vb2_queue *q) 3261 { 3262 struct vb2_threadio_data *threadio = q->threadio; 3263 int err; 3264 3265 if (threadio == NULL) 3266 return 0; 3267 threadio->stop = true; 3268 /* Wake up all pending sleeps in the thread */ 3269 vb2_queue_error(q); 3270 err = kthread_stop(threadio->thread); 3271 __vb2_cleanup_fileio(q); 3272 threadio->thread = NULL; 3273 kfree(threadio); 3274 q->threadio = NULL; 3275 return err; 3276 } 3277 EXPORT_SYMBOL_GPL(vb2_thread_stop); 3278 3279 MODULE_DESCRIPTION("Media buffer core framework"); 3280 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); 3281 MODULE_LICENSE("GPL"); 3282 MODULE_IMPORT_NS(DMA_BUF); 3283