/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

#define PLANE_INDEX_BITS	3
#define PLANE_INDEX_SHIFT	(PAGE_SHIFT + PLANE_INDEX_BITS)
#define PLANE_INDEX_MASK	(BIT_MASK(PLANE_INDEX_BITS) - 1)
#define MAX_BUFFER_INDEX	BIT_MASK(30 - PLANE_INDEX_SHIFT)
#define BUFFER_INDEX_MASK	(MAX_BUFFER_INDEX - 1)

#if BIT(PLANE_INDEX_BITS) != VIDEO_MAX_PLANES
#error PLANE_INDEX_BITS order must be equal to VIDEO_MAX_PLANES
#endif

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...) \
	do { \
		if (debug >= level) \
			pr_info("[%s] %s: " fmt, (q)->name, __func__, \
				## arg); \
	} while (0)

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op) \
	dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \
		(vb)->index, #op, \
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...) \
({ \
	struct vb2_queue *_q = (vb)->vb2_queue; \
	int err; \
 \
	log_memop(vb, op); \
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
	if (!err) \
		(vb)->cnt_mem_ ## op++; \
	err; \
})

#define call_ptr_memop(op, vb, args...) \
({ \
	struct vb2_queue *_q = (vb)->vb2_queue; \
	void *ptr; \
 \
	log_memop(vb, op); \
	ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \
	if (!IS_ERR_OR_NULL(ptr)) \
		(vb)->cnt_mem_ ## op++; \
	ptr; \
})

#define call_void_memop(vb, op, args...) \
({ \
	struct vb2_queue *_q = (vb)->vb2_queue; \
 \
	log_memop(vb, op); \
	if (_q->mem_ops->op) \
		_q->mem_ops->op(args); \
	(vb)->cnt_mem_ ## op++; \
})

#define log_qop(q, op) \
	dprintk(q, 2, "call_qop(%s)%s\n", #op, \
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...) \
({ \
	int err; \
 \
	log_qop(q, op); \
	err = (q)->ops->op ? (q)->ops->op(args) : 0; \
	if (!err) \
		(q)->cnt_ ## op++; \
	err; \
})

#define call_void_qop(q, op, args...) \
({ \
	log_qop(q, op); \
	if ((q)->ops->op) \
		(q)->ops->op(args); \
	(q)->cnt_ ## op++; \
})

#define log_vb_qop(vb, op, args...) \
	dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \
		(vb)->index, #op, \
		(vb)->vb2_queue->ops->op ? "" : " (nop)")
#define call_vb_qop(vb, op, args...) \
({ \
	int err; \
 \
	log_vb_qop(vb, op); \
	err = (vb)->vb2_queue->ops->op ? \
		(vb)->vb2_queue->ops->op(args) : 0; \
	if (!err) \
		(vb)->cnt_ ## op++; \
	err; \
})

#define call_void_vb_qop(vb, op, args...) \
({ \
	log_vb_qop(vb, op); \
	if ((vb)->vb2_queue->ops->op) \
		(vb)->vb2_queue->ops->op(args); \
	(vb)->cnt_ ## op++; \
})

#else

#define call_memop(vb, op, args...) \
	((vb)->vb2_queue->mem_ops->op ? \
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(op, vb, args...) \
	((vb)->vb2_queue->mem_ops->op ? \
		(vb)->vb2_queue->mem_ops->op(vb, args) : NULL)

#define call_void_memop(vb, op, args...) \
	do { \
		if ((vb)->vb2_queue->mem_ops->op) \
			(vb)->vb2_queue->mem_ops->op(args); \
	} while (0)

#define call_qop(q, op, args...) \
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...) \
	do { \
		if ((q)->ops->op) \
			(q)->ops->op(args); \
	} while (0)

#define call_vb_qop(vb, op, args...) \
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...) \
	do { \
		if ((vb)->vb2_queue->ops->op) \
			(vb)->vb2_queue->ops->op(args); \
	} while (0)

#endif

#define call_bufop(q, op, args...) \
({ \
	int ret = 0; \
	if (q && q->buf_ops && q->buf_ops->op) \
		ret = q->buf_ops->op(args); \
	ret; \
})

#define call_void_bufop(q, op, args...) \
({ \
	if (q && q->buf_ops && q->buf_ops->op) \
		q->buf_ops->op(args); \
})

static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

static const char *vb2_state_name(enum vb2_buffer_state s)
{
	static const char * const state_names[] = {
		[VB2_BUF_STATE_DEQUEUED] = "dequeued",
		[VB2_BUF_STATE_IN_REQUEST] = "in request",
		[VB2_BUF_STATE_PREPARING] = "preparing",
		[VB2_BUF_STATE_QUEUED] = "queued",
		[VB2_BUF_STATE_ACTIVE] = "active",
		[VB2_BUF_STATE_DONE] = "done",
		[VB2_BUF_STATE_ERROR] = "error",
	};

	if ((unsigned int)(s) < ARRAY_SIZE(state_names))
		return state_names[s];
	return "unknown";
}

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer.
	 * NOTE: mmapped areas should be page aligned.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Memops alloc requires size to be page aligned. */
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		/* Did it wrap around? */
		if (size < vb->planes[plane].length)
			goto free;

		mem_priv = call_ptr_memop(alloc,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  size);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
			plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (vb->synced)
		return;

	vb->synced = 1;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}

/*
 * __vb2_buf_mem_finish() - call ->finish() on buffer's private memory
 * to sync caches
 */
static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
{
	unsigned int plane;

	if (!vb->synced)
		return;

	vb->synced = 0;
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}
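
/*
 * Illustrative call-flow sketch (added note; all names below are
 * functions elsewhere in this file): how the prepare/finish pair above
 * brackets driver ownership of a buffer's memory:
 *
 *	vb2_core_qbuf()
 *	    __buf_prepare()
 *	        __vb2_buf_mem_prepare()	- mem_ops->prepare(), sync CPU
 *					  caches for the device
 *	    __enqueue_in_driver()	- driver owns the buffer, DMA may run
 *	...
 *	vb2_buffer_done(vb, VB2_BUF_STATE_DONE)
 *	    __vb2_buf_mem_finish()	- mem_ops->finish(), sync caches
 *					  back for the CPU
 */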

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long offset = 0;

	/*
	 * The offset "cookie" value has the following constraints:
	 * - a buffer can have up to 8 planes.
	 * - v4l2 mem2mem uses bit 30 to distinguish between
	 *   OUTPUT (aka "source", bit 30 is 0) and
	 *   CAPTURE (aka "destination", bit 30 is 1) buffers.
	 * - must be page aligned
	 * That led to this bit mapping when PAGE_SHIFT = 12:
	 *
	 *	|31|30                |29        15|14       12|11     0|
	 *	| 0|DST_QUEUE_OFF_BASE|buffer index|plane index|   0    |
	 *
	 * where there are 15 bits to store the buffer index.
	 * Depending on PAGE_SHIFT value we can have fewer bits
	 * to store the buffer index.
	 */
	offset = vb->index << PLANE_INDEX_SHIFT;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = offset + (plane << PAGE_SHIFT);

		dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
			vb->index, plane, offset);
	}
}
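
/*
 * Worked example of the mapping above (added for clarity, assuming
 * PAGE_SHIFT = 12 and hence PLANE_INDEX_SHIFT = 15): buffer index 5,
 * plane 2 gets
 *
 *	m.offset = (5 << 15) + (2 << 12) = 0x28000 + 0x2000 = 0x2a000
 *
 * which is page aligned and decodes back as
 *
 *	(0x2a000 >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK == 5
 *	(0x2a000 >> PAGE_SHIFT) & PLANE_INDEX_MASK == 2
 */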

static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->skip_cache_sync_on_finish = 1;
		vb->skip_cache_sync_on_prepare = 1;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->skip_cache_sync_on_finish = 1;
}

/**
 * vb2_queue_add_buffer() - add a buffer to a queue
 * @q:		pointer to &struct vb2_queue with videobuf2 queue.
 * @vb:		pointer to &struct vb2_buffer to be added to the queue.
 * @index:	index at which the vb2_buffer is added in the queue.
 */
static void vb2_queue_add_buffer(struct vb2_queue *q, struct vb2_buffer *vb, unsigned int index)
{
	WARN_ON(index >= VB2_MAX_FRAME || q->bufs[index] || vb->vb2_queue);

	q->bufs[index] = vb;
	vb->index = index;
	vb->vb2_queue = q;
}

/**
 * vb2_queue_remove_buffer() - remove a buffer from a queue
 * @vb:		pointer to &struct vb2_buffer to be removed from the queue.
 */
static void vb2_queue_remove_buffer(struct vb2_buffer *vb)
{
	vb->vb2_queue->bufs[vb->index] = NULL;
	vb->vb2_queue = NULL;
}

/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int q_num_buffers = vb2_get_num_buffers(q);
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers + num_buffers is below VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q_num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate vb2 buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(q, 1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->num_planes = num_planes;
		vb->type = q->type;
		vb->memory = memory;
		init_buffer_cache_hints(q, vb);
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}

		vb2_queue_add_buffer(q, vb, q_num_buffers + buffer);
		call_void_bufop(q, init_buffer, vb);

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(q, 1, "failed allocating memory for buffer %d\n",
					buffer);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(q, 1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				vb2_queue_remove_buffer(vb);
				kfree(vb);
				break;
			}
		}
	}

	dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}

/*
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	unsigned int q_num_buffers = vb2_get_num_buffers(q);

	for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
	     ++buffer) {
		vb = vb2_get_buffer(q, buffer);
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information; if no buffers are left, return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	unsigned int q_num_buffers = vb2_get_num_buffers(q);

	lockdep_assert_held(&q->mmap_lock);

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not then dump the counters to the kernel log.
	 */
	if (q_num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_prepare_streaming != q->cnt_unprepare_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p:\n", q);
			if (q->cnt_start_streaming != q->cnt_stop_streaming)
				pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
					q->cnt_queue_setup, q->cnt_start_streaming,
					q->cnt_stop_streaming);
			if (q->cnt_prepare_streaming != q->cnt_unprepare_streaming)
				pr_info(" prepare_streaming: %u unprepare_streaming: %u\n",
					q->cnt_prepare_streaming, q->cnt_unprepare_streaming);
			if (q->cnt_wait_prepare != q->cnt_wait_finish)
				pr_info(" wait_prepare: %u wait_finish: %u\n",
					q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_prepare_streaming = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
		q->cnt_unprepare_streaming = 0;
	}
	for (buffer = 0; buffer < vb2_get_num_buffers(q); buffer++) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);
		bool unbalanced;

		if (!vb)
			continue;

		unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
			     vb->cnt_mem_prepare != vb->cnt_mem_finish ||
			     vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
			     vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
			     vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
			     vb->cnt_buf_queue != vb->cnt_buf_done ||
			     vb->cnt_buf_prepare != vb->cnt_buf_finish ||
			     vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced) {
			pr_info("unbalanced counters for queue %p, buffer %d:\n",
				q, buffer);
			if (vb->cnt_buf_init != vb->cnt_buf_cleanup)
				pr_info(" buf_init: %u buf_cleanup: %u\n",
					vb->cnt_buf_init, vb->cnt_buf_cleanup);
			if (vb->cnt_buf_prepare != vb->cnt_buf_finish)
				pr_info(" buf_prepare: %u buf_finish: %u\n",
					vb->cnt_buf_prepare, vb->cnt_buf_finish);
			if (vb->cnt_buf_queue != vb->cnt_buf_done)
				pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
					vb->cnt_buf_out_validate, vb->cnt_buf_queue,
					vb->cnt_buf_done, vb->cnt_buf_request_complete);
			if (vb->cnt_mem_alloc != vb->cnt_mem_put)
				pr_info(" alloc: %u put: %u\n",
					vb->cnt_mem_alloc, vb->cnt_mem_put);
			if (vb->cnt_mem_prepare != vb->cnt_mem_finish)
				pr_info(" prepare: %u finish: %u\n",
					vb->cnt_mem_prepare, vb->cnt_mem_finish);
			if (vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr)
				pr_info(" get_userptr: %u put_userptr: %u\n",
					vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			if (vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf)
				pr_info(" attach_dmabuf: %u detach_dmabuf: %u\n",
					vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf);
			if (vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf)
				pr_info(" map_dmabuf: %u unmap_dmabuf: %u\n",
					vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info(" get_dmabuf: %u num_users: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users);
		}
	}
#endif

	/* Free vb2 buffers */
	for (buffer = q_num_buffers - buffers; buffer < q_num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (!vb)
			continue;

		vb2_queue_remove_buffer(vb);
		kfree(vb);
	}

	q->num_buffers -= buffers;
	if (!vb2_get_num_buffers(q)) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
}

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;

		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < vb2_get_num_buffers(q); ++buffer) {
		struct vb2_buffer *vb = vb2_get_buffer(q, buffer);

		if (!vb)
			continue;

		if (vb2_buffer_in_use(q, vb))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	call_void_bufop(q, fill_user_buffer, vb, pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

int vb2_verify_memory_type(struct vb2_queue *q,
			   enum vb2_memory memory, unsigned int type)
{
	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
	    memory != VB2_MEMORY_DMABUF) {
		dprintk(q, 1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
incorrect\n"); 760 return -EINVAL; 761 } 762 763 /* 764 * Make sure all the required memory ops for given memory type 765 * are available. 766 */ 767 if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) { 768 dprintk(q, 1, "MMAP for current setup unsupported\n"); 769 return -EINVAL; 770 } 771 772 if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) { 773 dprintk(q, 1, "USERPTR for current setup unsupported\n"); 774 return -EINVAL; 775 } 776 777 if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) { 778 dprintk(q, 1, "DMABUF for current setup unsupported\n"); 779 return -EINVAL; 780 } 781 782 /* 783 * Place the busy tests at the end: -EBUSY can be ignored when 784 * create_bufs is called with count == 0, but count == 0 should still 785 * do the memory and type validation. 786 */ 787 if (vb2_fileio_is_active(q)) { 788 dprintk(q, 1, "file io in progress\n"); 789 return -EBUSY; 790 } 791 return 0; 792 } 793 EXPORT_SYMBOL(vb2_verify_memory_type); 794 795 static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem) 796 { 797 q->non_coherent_mem = 0; 798 799 if (!vb2_queue_allows_cache_hints(q)) 800 return; 801 q->non_coherent_mem = non_coherent_mem; 802 } 803 804 static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem) 805 { 806 if (non_coherent_mem != q->non_coherent_mem) { 807 dprintk(q, 1, "memory coherency model mismatch\n"); 808 return false; 809 } 810 return true; 811 } 812 813 int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, 814 unsigned int flags, unsigned int *count) 815 { 816 unsigned int num_buffers, allocated_buffers, num_planes = 0; 817 unsigned int q_num_bufs = vb2_get_num_buffers(q); 818 unsigned plane_sizes[VB2_MAX_PLANES] = { }; 819 bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT; 820 unsigned int i; 821 int ret; 822 823 if (q->streaming) { 824 dprintk(q, 1, "streaming active\n"); 825 return -EBUSY; 826 } 827 828 if (q->waiting_in_dqbuf && *count) { 829 dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n"); 830 return -EBUSY; 831 } 832 833 if (*count == 0 || q_num_bufs != 0 || 834 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || 835 !verify_coherency_flags(q, non_coherent_mem)) { 836 /* 837 * We already have buffers allocated, so first check if they 838 * are not in use and can be freed. 839 */ 840 mutex_lock(&q->mmap_lock); 841 if (debug && q->memory == VB2_MEMORY_MMAP && 842 __buffers_in_use(q)) 843 dprintk(q, 1, "memory in use, orphaning buffers\n"); 844 845 /* 846 * Call queue_cancel to clean up any buffers in the 847 * QUEUED state which is possible if buffers were prepared or 848 * queued without ever calling STREAMON. 849 */ 850 __vb2_queue_cancel(q); 851 __vb2_queue_free(q, q_num_bufs); 852 mutex_unlock(&q->mmap_lock); 853 854 /* 855 * In case of REQBUFS(0) return immediately without calling 856 * driver's queue_setup() callback and allocating resources. 857 */ 858 if (*count == 0) 859 return 0; 860 } 861 862 /* 863 * Make sure the requested values and current defaults are sane. 864 */ 865 WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME); 866 num_buffers = max_t(unsigned int, *count, q->min_buffers_needed); 867 num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); 868 memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); 869 /* 870 * Set this now to ensure that drivers see the correct q->memory value 871 * in the queue_setup op. 

int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		     unsigned int flags, unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int i;
	int ret;

	if (q->streaming) {
		dprintk(q, 1, "streaming active\n");
		return -EBUSY;
	}

	if (q->waiting_in_dqbuf && *count) {
		dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
		return -EBUSY;
	}

	if (*count == 0 || q_num_bufs != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
	    !verify_coherency_flags(q, non_coherent_mem)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (debug && q->memory == VB2_MEMORY_MMAP &&
		    __buffers_in_use(q))
			dprintk(q, 1, "memory in use, orphaning buffers\n");

		/*
		 * Call queue_cancel to clean up any buffers in the
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		__vb2_queue_free(q, q_num_bufs);
		mutex_unlock(&q->mmap_lock);

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	/*
	 * Set this now to ensure that drivers see the correct q->memory value
	 * in the queue_setup op.
	 */
	mutex_lock(&q->mmap_lock);
	q->memory = memory;
	mutex_unlock(&q->mmap_lock);
	set_queue_coherency(q, non_coherent_mem);

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Check that driver has set sane values */
	if (WARN_ON(!num_planes)) {
		ret = -EINVAL;
		goto error;
	}

	for (i = 0; i < num_planes; i++)
		if (WARN_ON(!plane_sizes[i])) {
			ret = -EINVAL;
			goto error;
		}

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;

	return 0;

error:
	mutex_lock(&q->mmap_lock);
	q->memory = VB2_MEMORY_UNKNOWN;
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
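
/*
 * For reference, a minimal driver-side counterpart to the negotiation in
 * vb2_core_reqbufs()/vb2_core_create_bufs(). This is an illustrative
 * sketch only; "foo" and FOO_IMAGE_SIZE are hypothetical, while the
 * vb2_ops queue_setup signature is the real one:
 *
 *	static int foo_queue_setup(struct vb2_queue *q,
 *				   unsigned int *num_buffers,
 *				   unsigned int *num_planes,
 *				   unsigned int sizes[],
 *				   struct device *alloc_devs[])
 *	{
 *		// *num_planes != 0: called from create_bufs() with sizes
 *		// already filled in by userspace - only validate them.
 *		if (*num_planes)
 *			return sizes[0] < FOO_IMAGE_SIZE ? -EINVAL : 0;
 *
 *		// reqbufs() case: report one plane and its required size;
 *		// the core may call back with a smaller *num_buffers.
 *		*num_planes = 1;
 *		sizes[0] = FOO_IMAGE_SIZE;
 *		return 0;
 *	}
 */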

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
			 unsigned int flags, unsigned int *count,
			 unsigned int requested_planes,
			 const unsigned int requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	bool no_previous_buffers = !q_num_bufs;
	int ret = 0;

	if (q_num_bufs == VB2_MAX_FRAME) {
		dprintk(q, 1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (no_previous_buffers) {
		if (q->waiting_in_dqbuf && *count) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		/*
		 * Set this now to ensure that drivers see the correct q->memory
		 * value in the queue_setup op.
		 */
		mutex_lock(&q->mmap_lock);
		q->memory = memory;
		mutex_unlock(&q->mmap_lock);
		q->waiting_for_buffers = !q->is_output;
		set_queue_coherency(q, non_coherent_mem);
	} else {
		if (q->memory != memory) {
			dprintk(q, 1, "memory model mismatch\n");
			return -EINVAL;
		}
		if (!verify_coherency_flags(q, non_coherent_mem))
			return -EINVAL;
	}

	num_buffers = min(*count, VB2_MAX_FRAME - q_num_bufs);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		goto error;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
					      num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(q, 1, "memory allocation failed\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * num_buffers contains the total number of buffers that the
		 * queue driver has set up.
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from already queued buffers and it will reset q->memory to
		 * VB2_MEMORY_UNKNOWN.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;

error:
	if (no_previous_buffers) {
		mutex_lock(&q->mmap_lock);
		q->memory = VB2_MEMORY_UNKNOWN;
		mutex_unlock(&q->mmap_lock);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);

void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(q, 4, "done processing on buffer %d, state: %s\n",
		vb->index, vb2_state_name(state));

	if (state != VB2_BUF_STATE_QUEUED)
		__vb2_buf_mem_finish(vb);

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);

	if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}

	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
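
/*
 * Typical driver usage of vb2_buffer_done() - an illustrative sketch for
 * a hypothetical driver, called from its interrupt handler once DMA into
 * the buffer has completed:
 *
 *	vb->timestamp = ktime_get_ns();
 *	vb2_set_plane_payload(vb, 0, bytes_received);
 *	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 *
 * Passing VB2_BUF_STATE_ERROR instead still returns the buffer to
 * userspace (flagged as erroneous), while VB2_BUF_STATE_QUEUED hands it
 * back to vb2 as if it had never been given to the driver.
 */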

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb)
{
	int ret = 0;

	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr &&
		    vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
				planes[plane].length,
				vb->planes[plane].min_length,
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				vb->copied_timestamp = 0;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(get_userptr,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  planes[plane].m.userptr,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}

/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 */
static int __prepare_dmabuf(struct vb2_buffer *vb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
			 vb, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(q, 3, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			vb->copied_timestamp = 0;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(attach_dmabuf,
					  vb,
					  q->alloc_devs[plane] ? : q->dev,
					  dbuf,
					  planes[plane].length);
		if (IS_ERR(mem_priv)) {
			dprintk(q, 1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
	 * here instead of just before the DMA, while queueing the buffer(s),
	 * so userspace knows sooner rather than later if the dma-buf map fails.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].dbuf_mapped)
			continue;

		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(q, 1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(q, 1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(q, 1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
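
/*
 * Summary of the per-plane dma-buf lifecycle implemented above (added as
 * a descriptive note; all names are functions or mem_ops in this file):
 *
 *	acquire: dma_buf_get(fd) -> mem_ops->attach_dmabuf()
 *	         -> mem_ops->map_dmabuf()	(__prepare_dmabuf())
 *	release: mem_ops->unmap_dmabuf() -> mem_ops->detach_dmabuf()
 *	         -> dma_buf_put()		(__vb2_plane_dmabuf_put())
 *
 * i.e. teardown runs strictly in the reverse order of acquisition.
 */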

/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	trace_vb2_buf_queue(q, vb);

	call_void_vb_qop(vb, buf_queue, vb);
}

static int __buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	enum vb2_buffer_state orig_state = vb->state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (vb->prepared)
		return 0;
	WARN_ON(vb->synced);

	if (q->is_output) {
		ret = call_vb_qop(vb, buf_out_validate, vb);
		if (ret) {
			dprintk(q, 1, "buffer validation failed\n");
			return ret;
		}
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
		break;
	}

	if (ret) {
		dprintk(q, 1, "buffer preparation failed: %d\n", ret);
		vb->state = orig_state;
		return ret;
	}

	__vb2_buf_mem_prepare(vb);
	vb->prepared = 1;
	vb->state = orig_state;

	return 0;
}

static int vb2_req_prepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int ret;

	if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
		return -EINVAL;

	mutex_lock(vb->vb2_queue->lock);
	ret = __buf_prepare(vb);
	mutex_unlock(vb->vb2_queue->lock);
	return ret;
}

static void __vb2_dqbuf(struct vb2_buffer *vb);

static void vb2_req_unprepare(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	mutex_lock(vb->vb2_queue->lock);
	__vb2_dqbuf(vb);
	vb->state = VB2_BUF_STATE_IN_REQUEST;
	mutex_unlock(vb->vb2_queue->lock);
	WARN_ON(!vb->req_obj.req);
}

static void vb2_req_queue(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
	int err;

	mutex_lock(vb->vb2_queue->lock);
	/*
	 * There is no method to propagate an error from vb2_core_qbuf(),
	 * so if this returns a non-0 value, then WARN.
	 *
	 * The only exception is -EIO which is returned if q->error is
	 * set. We just ignore that, and expect this will be caught the
	 * next time vb2_req_prepare() is called.
	 */
	err = vb2_core_qbuf(vb->vb2_queue, vb, NULL, NULL);
	WARN_ON_ONCE(err && err != -EIO);
	mutex_unlock(vb->vb2_queue->lock);
}

static void vb2_req_unbind(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST)
		call_void_bufop(vb->vb2_queue, init_buffer, vb);
}

static void vb2_req_release(struct media_request_object *obj)
{
	struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);

	if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
		vb->state = VB2_BUF_STATE_DEQUEUED;
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
	}
}

static const struct media_request_object_ops vb2_core_req_ops = {
	.prepare = vb2_req_prepare,
	.unprepare = vb2_req_unprepare,
	.queue = vb2_req_queue,
	.unbind = vb2_req_unbind,
	.release = vb2_req_release,
};
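
/*
 * How the request object ops above map onto buffer states (descriptive
 * note added for clarity; see the individual handlers):
 *
 *	.prepare   -> __buf_prepare() under the queue lock
 *	.unprepare -> __vb2_dqbuf(), then back to IN_REQUEST
 *	.queue     -> vb2_core_qbuf() when the request itself is queued
 *	.unbind    -> reinitialize the buffer if it is still IN_REQUEST
 *	.release   -> drop the request reference, buffer goes to DEQUEUED
 */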

bool vb2_request_object_is_buffer(struct media_request_object *obj)
{
	return obj->ops == &vb2_core_req_ops;
}
EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);

unsigned int vb2_request_buffer_cnt(struct media_request *req)
{
	struct media_request_object *obj;
	unsigned long flags;
	unsigned int buffer_cnt = 0;

	spin_lock_irqsave(&req->lock, flags);
	list_for_each_entry(obj, &req->objects, list)
		if (vb2_request_object_is_buffer(obj))
			buffer_cnt++;
	spin_unlock_irqrestore(&req->lock, flags);

	return buffer_cnt;
}
EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);

int vb2_core_prepare_buf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb)
{
	int ret;

	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}
	if (vb->prepared) {
		dprintk(q, 1, "buffer already prepared\n");
		return -EINVAL;
	}

	ret = __buf_prepare(vb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(q, 1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information on how
	 * buffers should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < vb2_get_num_buffers(q); ++i) {
			vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;

			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}

int vb2_core_qbuf(struct vb2_queue *q, struct vb2_buffer *vb, void *pb,
		  struct media_request *req)
{
	enum vb2_buffer_state orig_state;
	int ret;

	if (q->error) {
		dprintk(q, 1, "fatal error occurred on queue\n");
		return -EIO;
	}

	if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	    q->requires_requests) {
		dprintk(q, 1, "qbuf requires a request\n");
		return -EBADR;
	}

	if ((req && q->uses_qbuf) ||
	    (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
	     q->uses_requests)) {
		dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
		return -EBUSY;
	}

	if (req) {
		int ret;

		q->uses_requests = 1;
		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			dprintk(q, 1, "buffer %d not in dequeued state\n",
				vb->index);
			return -EINVAL;
		}

		if (q->is_output && !vb->prepared) {
			ret = call_vb_qop(vb, buf_out_validate, vb);
			if (ret) {
				dprintk(q, 1, "buffer validation failed\n");
				return ret;
			}
		}

		media_request_object_init(&vb->req_obj);

		/* Make sure the request is in a safe state for updating. */
		ret = media_request_lock_for_update(req);
		if (ret)
			return ret;
		ret = media_request_object_bind(req, &vb2_core_req_ops,
						q, true, &vb->req_obj);
		media_request_unlock_for_update(req);
		if (ret)
			return ret;

		vb->state = VB2_BUF_STATE_IN_REQUEST;

		/*
		 * Increment the refcount and store the request.
		 * The request refcount is decremented again when the
		 * buffer is dequeued.
		 * This is to prevent vb2_buffer_done() from freeing the
		 * request from interrupt context, which can happen if the
		 * application closed the request fd after queueing the
		 * request.
		 */
		media_request_get(req);
		vb->request = req;

		/* Fill buffer information for the userspace */
		if (pb) {
			call_void_bufop(q, copy_timestamp, vb, pb);
			call_void_bufop(q, fill_user_buffer, vb, pb);
		}

		dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
		return 0;
	}

	if (vb->state != VB2_BUF_STATE_IN_REQUEST)
		q->uses_qbuf = 1;

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
	case VB2_BUF_STATE_IN_REQUEST:
		if (!vb->prepared) {
			ret = __buf_prepare(vb);
			if (ret)
				return ret;
		}
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(q, 1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	orig_state = vb->state;
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret) {
			/*
			 * Since vb2_core_qbuf will return with an error,
			 * we should return it to state DEQUEUED since
			 * the error indicates that the buffer wasn't queued.
			 */
			list_del(&vb->queued_entry);
			q->queued_count--;
			vb->state = orig_state;
			return ret;
		}
	}

	dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);
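
/*
 * Illustration of the deferred-start logic above: with STREAMON already
 * issued and q->min_buffers_needed == 3, the first two vb2_core_qbuf()
 * calls only queue the buffers; the third call reaches the threshold and
 * is the one that actually invokes vb2_start_streaming().
 */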

/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (q->waiting_in_dqbuf) {
			dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
			return -EBUSY;
		}

		if (!q->streaming) {
			dprintk(q, 1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
			return -EAGAIN;
		}

		q->waiting_in_dqbuf = 1;
		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(q, 3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		q->waiting_in_dqbuf = 0;
		if (ret) {
			dprintk(q, 1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
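
/*
 * The wait_prepare/wait_finish dance above, spelled out for the common
 * case where the driver uses the stock vb2_ops_wait_prepare() and
 * vb2_ops_wait_finish() helpers (illustrative note):
 *
 *	wait_prepare()			-> mutex_unlock(q->lock)
 *	wait_event_interruptible(...)	   sleep with no locks held
 *	wait_finish()			-> mutex_lock(q->lock)
 *
 * so another thread can call qbuf or streamoff while dqbuf sleeps.
 */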
/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     void *pb, int nonblocking)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if all planes can be
	 * handled. Some cases such as V4L2 file I/O and DVB have pb
	 * == NULL; skip the check then as there's nothing to verify.
	 */
	if (pb)
		ret = call_bufop(q, verify_planes_array, *vb, pb);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}

int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
		dprintk(q, 1, "streaming off, will not wait for buffers\n");
		return -EINVAL;
	}

	if (q->start_streaming_called)
		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);

/*
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	call_void_bufop(q, init_buffer, vb);
}

int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(q, 3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(q, 3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(q, 1, "invalid buffer state %s\n",
			vb2_state_name(vb->state));
		return -EINVAL;
	}

	call_void_vb_qop(vb, buf_finish, vb);
	vb->prepared = 0;

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from vb2 queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	if (WARN_ON(vb->req_obj.req)) {
		media_request_object_unbind(&vb->req_obj);
		media_request_object_put(&vb->req_obj);
	}
	if (vb->request)
		media_request_put(vb->request);
	vb->request = NULL;

	dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
		vb->index, vb2_state_name(vb->state));

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
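/*
 * Taken together, vb2_core_qbuf() and vb2_core_dqbuf() form the in-kernel
 * equivalent of the QBUF/DQBUF cycle. A minimal sketch of a kernel-side
 * consumer (essentially what vb2_thread() at the end of this file does;
 * locking and error handling trimmed, my_process_frame is hypothetical):
 */
#if 0	/* illustrative example, not built */
static int my_consume_one(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned int index;
	int ret;

	ret = vb2_core_dqbuf(q, &index, NULL, false);	/* blocking dequeue */
	if (ret)
		return ret;

	vb = vb2_get_buffer(q, index);
	my_process_frame(vb);				/* hypothetical */

	return vb2_core_qbuf(q, vb, NULL, NULL);	/* hand it back */
}
#endif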
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from the driver's queue and all buffers queued
 * by userspace from vb2's queue. Returns the queue to the state it was in
 * right after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	if (q->streaming)
		call_void_qop(q, unprepare_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information on how buffers should be
	 * returned to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < vb2_get_num_buffers(q); i++) {
			struct vb2_buffer *vb = vb2_get_buffer(q, i);

			if (!vb)
				continue;

			if (vb->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buffer %u in active state\n",
					vb->index);
				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
			}
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;
	q->uses_requests = 0;
	q->uses_qbuf = 0;

	/*
	 * Remove all buffers from vb2's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < vb2_get_num_buffers(q); i++) {
		struct vb2_buffer *vb;
		struct media_request *req;

		vb = vb2_get_buffer(q, i);
		if (!vb)
			continue;

		req = vb->req_obj.req;
		/*
		 * If a request is associated with this buffer, then
		 * call buf_request_complete() to give the driver a chance
		 * to complete() related request objects. Otherwise those
		 * objects would never complete.
		 */
		if (req) {
			enum media_request_state state;
			unsigned long flags;

			spin_lock_irqsave(&req->lock, flags);
			state = req->state;
			spin_unlock_irqrestore(&req->lock, flags);

			if (state == MEDIA_REQUEST_STATE_QUEUED)
				call_void_vb_qop(vb, buf_request_complete, vb);
		}

		__vb2_buf_mem_finish(vb);

		if (vb->prepared) {
			call_void_vb_qop(vb, buf_finish, vb);
			vb->prepared = 0;
		}
		__vb2_dqbuf(vb);

		if (vb->req_obj.req) {
			media_request_object_unbind(&vb->req_obj);
			media_request_object_put(&vb->req_obj);
		}
		if (vb->request)
			media_request_put(vb->request);
		vb->request = NULL;
		vb->copied_timestamp = 0;
	}
}
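/*
 * The WARN_ON above triggers when a driver's stop_streaming() fails to
 * return the buffers it still owns. A hedged sketch of what a well-behaved
 * stop_streaming() looks like, assuming the driver keeps pending buffers
 * on a hypothetical dev->buf_list; all my_* names are hypothetical:
 */
#if 0	/* illustrative example, not built */
static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);
	struct my_buffer *buf, *tmp;

	my_hw_stop(dev);	/* hypothetical: stop DMA first */

	/* Every buffer the driver still owns must go back to vb2. */
	list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	}
}
#endif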
int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	unsigned int q_num_bufs = vb2_get_num_buffers(q);
	int ret;

	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(q, 3, "already streaming\n");
		return 0;
	}

	if (!q_num_bufs) {
		dprintk(q, 1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q_num_bufs < q->min_buffers_needed) {
		dprintk(q, 1, "need at least %u allocated buffers\n",
			q->min_buffers_needed);
		return -EINVAL;
	}

	ret = call_qop(q, prepare_streaming, q);
	if (ret)
		return ret;

	/*
	 * Tell the driver to start streaming, provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			goto unprepare;
	}

	q->streaming = 1;

	dprintk(q, 3, "successful\n");
	return 0;

unprepare:
	call_void_qop(q, unprepare_streaming, q);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);

void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);

int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(q, 1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and vb2, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(q, 3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);

/*
 * __find_plane_by_offset() - find plane associated with the given offset
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long offset,
				  struct vb2_buffer **vb, unsigned int *plane)
{
	unsigned int buffer;

	/*
	 * Sanity checks to ensure the lock is held, MEMORY_MMAP is
	 * used and fileio isn't active.
	 */
	lockdep_assert_held(&q->mmap_lock);

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	/* Get buffer and plane from the offset */
	buffer = (offset >> PLANE_INDEX_SHIFT) & BUFFER_INDEX_MASK;
	*plane = (offset >> PAGE_SHIFT) & PLANE_INDEX_MASK;

	*vb = vb2_get_buffer(q, buffer);
	if (!*vb)
		return -EINVAL;
	if (*plane >= (*vb)->num_planes)
		return -EINVAL;

	return 0;
}
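/*
 * __find_plane_by_offset() above is the decode side of the mmap 'cookie'
 * scheme: each plane's fake offset packs the buffer and plane indices into
 * the bits above PAGE_SHIFT. A sketch of the corresponding encoding, which
 * mirrors how the offsets are assigned when MMAP buffers are set up (the
 * helper name is hypothetical, the macros are the ones defined at the top
 * of this file):
 */
#if 0	/* illustrative example, not built */
static unsigned long my_plane_cookie(unsigned int buffer, unsigned int plane)
{
	/*
	 * Bits [PAGE_SHIFT, PLANE_INDEX_SHIFT) hold the plane index,
	 * the bits above PLANE_INDEX_SHIFT hold the buffer index.
	 */
	return ((unsigned long)buffer << PLANE_INDEX_SHIFT) |
	       ((unsigned long)plane << PAGE_SHIFT);
}
#endif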
int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		    struct vb2_buffer *vb, unsigned int plane, unsigned int flags)
{
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(q, 1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(q, 1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(q, 1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (plane >= vb->num_planes) {
		dprintk(q, 1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[plane];

	dbuf = call_ptr_memop(get_dmabuf,
			      vb,
			      vb_plane->mem_priv,
			      flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(q, 1, "failed to export buffer %d, plane %d\n",
			vb->index, plane);
		return -EINVAL;
	}

	ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
			vb->index, plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(q, 3, "buffer %d, plane %d exported as descriptor %d\n",
		vb->index, plane, ret);
	*fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_expbuf);

int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int plane = 0;
	int ret;
	unsigned long length;

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (q->is_output) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or if file I/O is in
	 * progress.
	 */
	ret = __find_plane_by_offset(q, offset, &vb, &plane);
	if (ret)
		goto unlock;

	/*
	 * MMAP requires page-aligned buffers.
	 * The buffer length was page-aligned in __vb2_buf_mem_alloc(),
	 * so we must do the same here.
	 */
	length = PAGE_ALIGN(vb->planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(q, 1,
			"MMAP invalid, as it would overflow buffer length\n");
		ret = -EINVAL;
		goto unlock;
	}

	/*
	 * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
	 * not as an in-buffer offset. We always want to mmap a whole buffer
	 * from its beginning.
	 */
	vma->vm_pgoff = 0;

	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);

unlock:
	mutex_unlock(&q->mmap_lock);
	if (ret)
		return ret;

	dprintk(q, 3, "buffer %u, plane %d successfully mapped\n", vb->index, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
#ifndef CONFIG_MMU
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long offset = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int plane;
	void *vaddr;
	int ret;

	mutex_lock(&q->mmap_lock);

	/*
	 * Find the plane corresponding to the offset passed by userspace. This
	 * will return an error if not MEMORY_MMAP or if file I/O is in
	 * progress.
	 */
	ret = __find_plane_by_offset(q, offset, &vb, &plane);
	if (ret)
		goto unlock;

	vaddr = vb2_plane_vaddr(vb, plane);
	mutex_unlock(&q->mmap_lock);
	return vaddr ? (unsigned long)vaddr : -EINVAL;

unlock:
	mutex_unlock(&q->mmap_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

int vb2_core_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(!q->ops) ||
	    WARN_ON(!q->mem_ops) ||
	    WARN_ON(!q->type) ||
	    WARN_ON(!q->io_modes) ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue))
		return -EINVAL;

	if (WARN_ON(q->requires_requests && !q->supports_requests))
		return -EINVAL;

	/*
	 * This combination is not allowed since a non-zero value of
	 * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
	 * it has to call start_streaming(), and the Request API expects
	 * that queueing a request (and thus queueing a buffer contained
	 * in that request) will always succeed. There is no method of
	 * propagating an error back to userspace.
	 */
	if (WARN_ON(q->supports_requests && q->min_buffers_needed))
		return -EINVAL;

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	mutex_init(&q->mmap_lock);
	init_waitqueue_head(&q->done_wq);

	q->memory = VB2_MEMORY_UNKNOWN;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	if (q->bidirectional)
		q->dma_dir = DMA_BIDIRECTIONAL;
	else
		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	if (q->name[0] == '\0')
		snprintf(q->name, sizeof(q->name), "%s-%p",
			 q->is_output ? "out" : "cap", q);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_queue_init);

static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);

void vb2_core_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	mutex_lock(&q->mmap_lock);
	__vb2_queue_free(q, vb2_get_num_buffers(q));
	mutex_unlock(&q->mmap_lock);
}
EXPORT_SYMBOL_GPL(vb2_core_queue_release);
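/*
 * A hedged sketch of the fields a driver fills in before queue
 * initialization reaches vb2_core_queue_init(); the sanity checks above
 * show which of them are mandatory. Assumes a V4L2 capture device going
 * through the vb2_queue_init() wrapper from videobuf2-v4l2; my_* names
 * are hypothetical:
 */
#if 0	/* illustrative example, not built */
static int my_init_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_READ;
	q->ops = &my_vb2_ops;			/* must provide queue_setup and buf_queue */
	q->mem_ops = &vb2_vmalloc_memops;	/* one of the stock allocators */
	q->buf_struct_size = sizeof(struct my_buffer);
	q->drv_priv = dev;

	return vb2_queue_init(q);
}
#endif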
__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
		       poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned long flags;

	/*
	 * poll_wait() MUST be called on the first invocation on all the
	 * potential queues of interest, even if we are not interested in
	 * their events during this first call. Failure to do so will result
	 * in the queue's events being ignored, because the poll_table will
	 * no longer be capable of adding new wait queues.
	 */
	poll_wait(file, &q->done_wq, wait);

	if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
		return 0;
	if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
		return 0;

	/*
	 * Start the file I/O emulator only if the streaming API has not been
	 * used yet.
	 */
	if (vb2_get_num_buffers(q) == 0 && !vb2_fileio_is_active(q)) {
		if (!q->is_output && (q->io_modes & VB2_READ) &&
		    (req_events & (EPOLLIN | EPOLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return EPOLLERR;
		}
		if (q->is_output && (q->io_modes & VB2_WRITE) &&
		    (req_events & (EPOLLOUT | EPOLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return EPOLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return EPOLLOUT | EPOLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if the queue isn't streaming, or if the
	 * error flag is set.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return EPOLLERR;

	/*
	 * If this quirk is set and QBUF hasn't been called yet then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 * This quirk is set by V4L2 for backwards compatibility reasons.
	 */
	if (q->quirk_poll_must_check_waiting_for_buffers &&
	    q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
		return EPOLLERR;

	/*
	 * For output streams you can call write() as long as there are fewer
	 * buffers queued than there are buffers available.
	 */
	if (q->is_output && q->fileio && q->queued_count < vb2_get_num_buffers(q))
		return EPOLLOUT | EPOLLWRNORM;

	if (list_empty(&q->done_list)) {
		/*
		 * If the last buffer was dequeued from a capture queue,
		 * return immediately. DQBUF will return -EPIPE.
		 */
		if (q->last_buffer_dequeued)
			return EPOLLIN | EPOLLRDNORM;
	}

	/*
	 * Take the first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
				      done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE ||
		   vb->state == VB2_BUF_STATE_ERROR)) {
		return (q->is_output) ?
				EPOLLOUT | EPOLLWRNORM :
				EPOLLIN | EPOLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_poll);

/*
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of the streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};
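/*
 * vb2_core_poll() implements the poll semantics described above for any
 * higher layer. A hedged sketch of a file-operations wrapper for a
 * non-V4L2 user of the core API (V4L2 drivers normally go through the
 * videobuf2-v4l2 wrapper instead); the my_* names are hypothetical:
 */
#if 0	/* illustrative example, not built */
static __poll_t my_fop_poll(struct file *file, poll_table *wait)
{
	struct my_dev *dev = my_dev_from_file(file);	/* hypothetical */

	/* Reports EPOLLIN/EPOLLOUT when a buffer can be dequeued/queued. */
	return vb2_core_poll(&dev->queue, file, wait);
}
#endif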
/*
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to the number of buffers in the
 *		vb2_queue then a new buffer must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		over the buffers. However, in the write() case no buffers are
 *		initially queued; instead, whenever a buffer is full it is
 *		queued up by __vb2_perform_fileio(). Only once all available
 *		buffers have been queued up will __vb2_perform_fileio() start
 *		to dequeue buffers. This means that initially
 *		__vb2_perform_fileio() needs to know what buffer index to use
 *		when it is queuing up the buffers for the first time. That
 *		initial index is stored in this field. Once it is equal to the
 *		number of buffers in the vb2_queue all available buffers have
 *		been queued and __vb2_perform_fileio() should start the normal
 *		dequeue/queue cycle.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of the streaming API. For proper operation it requires
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	unsigned int count;
	unsigned int type;
	unsigned int memory;
	struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned read_once:1;
	unsigned write_immediately:1;
};

/*
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_buffer *vb;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check that the streaming API has not already been activated.
	 */
	if (q->streaming || vb2_get_num_buffers(q) > 0)
		return -EBUSY;

	/*
	 * Start with count 1; the driver can increase it in queue_setup().
	 */
	count = 1;

	dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
		(read) ? "read" : "write", count, q->fileio_read_once,
		q->fileio_write_immediately);

	fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->read_once = q->fileio_read_once;
	fileio->write_immediately = q->fileio_write_immediately;

	/*
	 * Request buffers and use MMAP type to force the driver
	 * to allocate buffers by itself.
	 */
	fileio->count = count;
	fileio->memory = VB2_MEMORY_MMAP;
	fileio->type = q->type;
	q->fileio = fileio;
	ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
	if (ret)
		goto err_kfree;

	/*
	 * Userspace can never add or delete buffers later, so there
	 * will never be holes. It is safe to assume that vb2_get_buffer(q, 0)
	 * will always return a valid vb pointer.
	 */
	vb = vb2_get_buffer(q, 0);

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (vb->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < vb2_get_num_buffers(q); i++) {
		/* vb can never be NULL when using fileio. */
		vb = vb2_get_buffer(q, i);

		fileio->bufs[i].vaddr = vb2_plane_vaddr(vb, 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(vb, 0);
	}

	/*
	 * Read mode requires pre-queueing all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < vb2_get_num_buffers(q); i++) {
			struct vb2_buffer *vb2 = vb2_get_buffer(q, i);

			if (!vb2)
				continue;

			ret = vb2_core_qbuf(q, vb2, NULL, NULL);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to the number of buffers in the vb2_queue.
		 */
		fileio->initial_index = vb2_get_num_buffers(q);
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_core_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	fileio->count = 0;
	vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}
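/*
 * A short sketch of the two opt-in queue flags consulted by
 * __vb2_init_fileio() above. Both are real vb2_queue fields that a driver
 * sets before queue init; the surrounding context is hypothetical:
 */
#if 0	/* illustrative example, not built */
	/* Snapshot-style device: one read() returns one frame, then fileio ends. */
	q->fileio_read_once = 1;

	/* Queue each written buffer immediately instead of when it is full. */
	q->fileio_write_immediately = 1;
#endif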
/*
 * __vb2_cleanup_fileio() - free resources used by file io emulator
 * @q:		videobuf2 queue
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		vb2_core_streamoff(q, q->type);
		q->fileio = NULL;
		fileio->count = 0;
		vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
		kfree(fileio);
		dprintk(q, 3, "file io emulator closed\n");
	}
	return 0;
}

/*
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to the target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
				   loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool copy_timestamp = !read && q->copy_timestamp;
	unsigned index;
	int ret;

	dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	if (q->waiting_in_dqbuf) {
		dprintk(q, 3, "another dup()ped fd is %s\n",
			read ? "reading" : "writing");
		return -EBUSY;
	}

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= vb2_get_num_buffers(q)) {
		struct vb2_buffer *b;

		/*
		 * Call vb2_dqbuf to get a buffer back.
		 */
		ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
		dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index;
		buf = &fileio->bufs[index];

		/* b can never be NULL when using fileio. */
		b = vb2_get_buffer(q, index);

		/*
		 * Get the number of bytes filled by the driver.
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(b, 0)
				 : vb2_plane_size(b, 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
		    b->planes[0].data_offset < buf->size) {
			buf->pos = b->planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}
	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(q, 5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to/from userspace.
	 */
	dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(q, 3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		/* b can never be NULL when using fileio. */
		struct vb2_buffer *b = vb2_get_buffer(q, index);

		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(q, 3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give the buffer to the driver.
		 */
		b->planes[0].bytesused = buf->pos;

		if (copy_timestamp)
			b->timestamp = ktime_get_ns();
		ret = vb2_core_qbuf(q, b, NULL, NULL);
		dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued; update the status.
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(b, 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < vb2_get_num_buffers(q))
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that is going to
		 * be queued for the first time (if initial_index is still
		 * less than the number of buffers in the vb2_queue) or, once
		 * initial_index has reached the number of buffers, a buffer
		 * that must first be dequeued, since all the 'first time'
		 * buffers have been queued up by then.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return the proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}

size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);

size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		 loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
				    ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);

struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};
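/*
 * A hedged sketch of how the thread API below is driven in practice (only
 * videobuf2-dvb should use it): the callback runs for every dequeued
 * buffer and the thread re-queues the buffer afterwards; returning
 * non-zero from the callback stops the loop. The my_* names are
 * hypothetical.
 */
#if 0	/* illustrative example, not built */
static int my_frame_cb(struct vb2_buffer *vb, void *priv)
{
	struct my_dev *dev = priv;	/* hypothetical */

	my_push_frame(dev, vb2_plane_vaddr(vb, 0),
		      vb2_get_plane_payload(vb, 0));
	return 0;
}

	/* To start feeding frames: */
	vb2_thread_start(q, my_frame_cb, dev, "mydev");
	/* ...and later: */
	vb2_thread_stop(q);
#endif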
static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	bool copy_timestamp = false;
	unsigned prequeue = 0;
	unsigned index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = vb2_get_num_buffers(q);
		copy_timestamp = q->copy_timestamp;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get a buffer back.
		 */
		if (prequeue) {
			vb = vb2_get_buffer(q, index++);
			if (!vb)
				continue;
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_core_dqbuf(q, &index, NULL, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
			if (!ret)
				vb = vb2_get_buffer(q, index);
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		if (vb->state != VB2_BUF_STATE_ERROR)
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (copy_timestamp)
			vb->timestamp = ktime_get_ns();
		if (!threadio->stop)
			ret = vb2_core_qbuf(q, vb, NULL, NULL);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

/*
 * This function should not be used for anything other than the videobuf2-dvb
 * support. If you think you have another good use-case for this, then please
 * contact the linux-media mailing list first.
 */
int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
		     const char *thread_name)
{
	struct vb2_threadio_data *threadio;
	int ret = 0;

	if (q->threadio)
		return -EBUSY;
	if (vb2_is_busy(q))
		return -EBUSY;
	if (WARN_ON(q->fileio))
		return -EBUSY;

	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
	if (threadio == NULL)
		return -ENOMEM;
	threadio->fnc = fnc;
	threadio->priv = priv;

	ret = __vb2_init_fileio(q, !q->is_output);
	dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
	if (ret)
		goto nomem;
	q->threadio = threadio;
	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
	if (IS_ERR(threadio->thread)) {
		ret = PTR_ERR(threadio->thread);
		threadio->thread = NULL;
		goto nothread;
	}
	return 0;

nothread:
	__vb2_cleanup_fileio(q);
nomem:
	kfree(threadio);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_thread_start);

int vb2_thread_stop(struct vb2_queue *q)
{
	struct vb2_threadio_data *threadio = q->threadio;
	int err;

	if (threadio == NULL)
		return 0;
	threadio->stop = true;
	/* Wake up all pending sleeps in the thread */
	vb2_queue_error(q);
	err = kthread_stop(threadio->thread);
	__vb2_cleanup_fileio(q);
	threadio->thread = NULL;
	kfree(threadio);
	q->threadio = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(vb2_thread_stop);

MODULE_DESCRIPTION("Media buffer core framework");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);