/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%s] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)

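/*
 * Note: the "debug" module parameter above is writable at runtime (0644), so
 * verbose logging can usually be enabled without reloading anything, e.g. by
 * writing a level to /sys/module/videobuf2_v4l2/parameters/debug (the exact
 * path depends on how the framework is built). Higher levels are more verbose.
 */
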
/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

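/*
 * Illustrative sketch (not part of this file): what the COPY timestamp
 * handling in __copy_timestamp() above looks like from userspace. On a queue
 * set up with V4L2_BUF_FLAG_TIMESTAMP_COPY, the application supplies the
 * timestamp of each OUTPUT buffer and the framework/driver carries it through
 * to the corresponding CAPTURE buffer (the usual arrangement for mem2mem
 * codecs). "video_fd", "planes", "num_planes", "i" and "pts_us" are
 * placeholders and error handling is omitted:
 *
 *	struct v4l2_buffer buf = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.index	= i,
 *		.m.planes = planes,
 *		.length	= num_planes,
 *	};
 *
 *	buf.timestamp.tv_sec = pts_us / 1000000;
 *	buf.timestamp.tv_usec = pts_us % 1000000;
 *	ioctl(video_fd, VIDIOC_QBUF, &buf);
 *
 * The same timestamp is later reported on the matching capture buffer, which
 * lets the application (or a driver, see vb2_find_timestamp() below)
 * associate the two.
 */
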
static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when the API starts
			 * accepting a variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal v4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so it needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}

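/*
 * Illustrative sketch (not part of this file): how the cache hints consumed
 * by set_buffer_cache_hints() above are requested from userspace. The hints
 * only take effect when the queue reports
 * V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS; otherwise the flags are cleared to
 * signal to the application that they have no effect. "video_fd" and the
 * buffer setup are placeholders, error handling is omitted:
 *
 *	struct v4l2_buffer buf = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.index	= 0,
 *	};
 *
 *	// Skip the CPU cache invalidation normally done in ->finish():
 *	buf.flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
 *	// Skip the CPU cache clean normally done in ->prepare():
 *	buf.flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;
 *	ioctl(video_fd, VIDIOC_QBUF, &buf);
 */
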
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);

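/*
 * Illustrative sketch (not part of this file): a typical use of
 * vb2_find_timestamp() in a stateless codec driver, where a reference frame
 * is identified by the timestamp that userspace copied from the original
 * OUTPUT buffer. "cap_q" is a placeholder for the driver's CAPTURE queue and
 * "timestamp" is assumed to already be in nanoseconds (the same scale used by
 * v4l2_buffer_get_timestamp() above):
 *
 *	static struct vb2_buffer *find_ref_buf(struct vb2_queue *cap_q,
 *					       u64 timestamp)
 *	{
 *		int index = vb2_find_timestamp(cap_q, timestamp, 0);
 *
 *		if (index < 0)
 *			return NULL;
 *		return cap_q->bufs[index];
 *	}
 */
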
/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

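/*
 * Illustrative sketch (not part of this file): probing the capabilities
 * reported by fill_buf_caps() from userspace. A VIDIOC_REQBUFS call with
 * count = 0 only frees buffers, and on success the capabilities field is
 * filled in as well, so it can be used to discover what a queue supports
 * before allocating anything. "video_fd", "have_dmabuf" and "have_requests"
 * are placeholders, error handling is omitted:
 *
 *	struct v4l2_requestbuffers req = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.count	= 0,
 *	};
 *
 *	ioctl(video_fd, VIDIOC_REQBUFS, &req);
 *	have_dmabuf = req.capabilities & V4L2_BUF_CAP_SUPPORTS_DMABUF;
 *	have_requests = req.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS;
 */
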
int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling VIDIOC_DQBUF, the V4L2_BUF_FLAG_DONE flag must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues; output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

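/*
 * Illustrative sketch (not part of this file): how a typical driver plugs the
 * helpers above and below into its own structures. Every "foo_*" identifier
 * is a placeholder; the driver-specific vb2_ops callbacks are only hinted at,
 * and vb2_vmalloc_memops stands in for whichever vb2 memory ops the hardware
 * actually needs.
 *
 *	static const struct vb2_ops foo_qops = {
 *		.queue_setup	 = foo_queue_setup,
 *		.buf_queue	 = foo_buf_queue,
 *		.start_streaming = foo_start_streaming,
 *		.stop_streaming	 = foo_stop_streaming,
 *		.wait_prepare	 = vb2_ops_wait_prepare,
 *		.wait_finish	 = vb2_ops_wait_finish,
 *	};
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		// format negotiation ioctls omitted
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.read		= vb2_fop_read,
 *		.mmap		= vb2_fop_mmap,
 *		.poll		= vb2_fop_poll,
 *	};
 *
 *	// In probe(), before video_register_device():
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = foo;
 *	q->buf_struct_size = sizeof(struct foo_buffer); // embeds vb2_v4l2_buffer
 *	q->ops = &foo_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &foo->lock;
 *	ret = vb2_queue_init(q);
 *
 *	// Drivers that support requests additionally point the media device at
 *	// ops with .req_validate = vb2_request_validate and
 *	// .req_queue = vb2_request_queue (or a driver/mem2mem wrapper).
 */
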
/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/*
	 * If count == 0, then the owner has released all buffers and is
	 * no longer the owner of the queue. Otherwise we have a new owner.
	 */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
					 p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		     size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");