// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_buffer *uvc_vbuf_to_buffer(struct vb2_v4l2_buffer *buf)
{
        return container_of(buf, struct uvc_buffer, buf);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
                                     enum uvc_buffer_state state)
{
        enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
                                        ? VB2_BUF_STATE_ERROR
                                        : VB2_BUF_STATE_QUEUED;

        while (!list_empty(&queue->irqqueue)) {
                struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
                                                          struct uvc_buffer,
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
                vb2_buffer_done(&buf->buf.vb2_buf, vb2_state);
        }
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream;
        unsigned int size;

        switch (vq->type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                size = UVC_METADATA_BUF_SIZE;
                break;

        default:
                stream = uvc_queue_to_stream(queue);
                size = stream->ctrl.dwMaxVideoFrameSize;
                break;
        }

        /*
         * When called with plane sizes, validate them. The driver supports
         * single planar formats only, and requires buffers to be large enough
         * to store a complete frame.
         */
        if (*nplanes)
                return *nplanes != 1 || sizes[0] < size ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = size;
        return 0;
}
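
/*
 * Illustrative sketch (not part of the driver): how the vb2 core is expected
 * to invoke the queue_setup op above. The two-call pattern follows the vb2
 * contract; the surrounding variables are hypothetical.
 *
 *      // VIDIOC_REQBUFS path: *nplanes == 0, the driver fills in defaults.
 *      unsigned int nbuffers = 4, nplanes = 0;
 *      unsigned int sizes[VB2_MAX_PLANES];
 *      uvc_queue_setup(vq, &nbuffers, &nplanes, sizes, NULL);
 *      // On return: nplanes == 1, sizes[0] == dwMaxVideoFrameSize.
 *
 *      // VIDIOC_CREATE_BUFS path: *nplanes is set, the driver only validates.
 *      nplanes = 1;
 *      sizes[0] = 1024;        // Smaller than a full frame: -EINVAL.
 */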

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE,
                        "[E] Bytes used out of bounds\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                kref_init(&buf->ref);
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /*
                 * If the device is disconnected return the buffer to userspace
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        struct uvc_buffer *buf = uvc_vbuf_to_buffer(vbuf);

        if (vb->state == VB2_BUF_STATE_DONE)
                uvc_video_clock_update(stream, vbuf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        int ret;

        lockdep_assert_irqs_enabled();

        queue->buf_used = 0;

        ret = uvc_video_start_streaming(stream);
        if (ret == 0)
                return 0;

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
        spin_unlock_irq(&queue->irqlock);

        return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);

        lockdep_assert_irqs_enabled();

        if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
                uvc_video_stop_streaming(uvc_queue_to_stream(queue));

        spin_lock_irq(&queue->irqlock);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        spin_unlock_irq(&queue->irqlock);
}

static const struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = uvc_start_streaming,
        .stop_streaming = uvc_stop_streaming,
};

static const struct vb2_ops uvc_meta_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .stop_streaming = uvc_stop_streaming,
};
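
/*
 * Illustrative sketch (not part of this file): a typical call to
 * uvc_queue_init() as it might appear on the driver's probe path. The
 * 'stream' variable and the drop-corrupted policy value are hypothetical.
 *
 *      ret = uvc_queue_init(&stream->queue, stream->type, 1);
 *      if (ret)
 *              return ret;     // vb2_queue_init() failed.
 */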

int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                   int drop_corrupted)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
        queue->queue.lock = &queue->mutex;

        switch (type) {
        case V4L2_BUF_TYPE_META_CAPTURE:
                queue->queue.ops = &uvc_meta_queue_qops;
                break;
        default:
                queue->queue.io_modes |= VB2_DMABUF;
                queue->queue.ops = &uvc_queue_qops;
                break;
        }

        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

        return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
        mutex_lock(&queue->mutex);
        vb2_queue_release(&queue->queue);
        mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

int uvc_request_buffers(struct uvc_video_queue *queue,
                        struct v4l2_requestbuffers *rb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_reqbufs(&queue->queue, rb);
        mutex_unlock(&queue->mutex);

        return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_querybuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
                       struct v4l2_create_buffers *cb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_create_bufs(&queue->queue, cb);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue,
                     struct media_device *mdev, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_qbuf(&queue->queue, mdev, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
                      struct v4l2_exportbuffer *exp)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_expbuf(&queue->queue, exp);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                       int nonblocking)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamon(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamoff(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}
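
/*
 * Illustrative sketch (not part of this file): the V4L2 ioctl handlers are
 * thin wrappers around the helpers above. A hypothetical STREAMON handler
 * might look like:
 *
 *      static int uvc_ioctl_streamon(struct file *file, void *fh,
 *                                    enum v4l2_buf_type type)
 *      {
 *              struct uvc_streaming *stream = ...; // from the file handle
 *
 *              return uvc_queue_streamon(&stream->queue, type);
 *      }
 *
 * The queue mutex taken inside the helper serializes the videobuf2 calls;
 * the driver may hold additional stream-level locks on top of it.
 */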

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                                          unsigned long pgoff)
{
        return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                        poll_table *wait)
{
        __poll_t ret;

        mutex_lock(&queue->mutex);
        ret = vb2_poll(&queue->queue, file, wait);
        mutex_unlock(&queue->mutex);

        return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = vb2_is_busy(&queue->queue);
        mutex_unlock(&queue->mutex);

        return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        /*
         * This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_buffer_queue and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * uvc_queue_get_current_buffer: Obtain the current working output buffer
 *
 * Buffers may span multiple packets, and even URBs, therefore the active
 * buffer remains on the queue until the EOF marker.
 */
static struct uvc_buffer *
__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        if (list_empty(&queue->irqqueue))
                return NULL;

        return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
}

struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        return nextbuf;
}
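
/*
 * Illustrative sketch (not part of this file): how the URB decode path is
 * expected to consume the irqqueue. The helpers below, other than the queue
 * accessors, are hypothetical.
 *
 *      buf = uvc_queue_get_current_buffer(queue);
 *      while (bytes_remaining && buf) {
 *              decode_payload_into(buf);       // hypothetical
 *              if (end_of_frame)               // EOF marker seen
 *                      buf = uvc_queue_next_buffer(queue, buf);
 *      }
 *
 * A NULL return means the irqqueue is empty and the payload is dropped.
 */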

/*
 * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
 *
 * Reuse a buffer through our internal queue without the need to 'prepare'.
 * The buffer will be returned to userspace through the uvc_buffer_queue call
 * if the device has been disconnected.
 */
static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
                                     struct uvc_buffer *buf)
{
        buf->error = 0;
        buf->state = UVC_BUF_STATE_QUEUED;
        buf->bytesused = 0;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);

        uvc_buffer_queue(&buf->buf.vb2_buf);
}

static void uvc_queue_buffer_complete(struct kref *ref)
{
        struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
        struct vb2_buffer *vb = &buf->buf.vb2_buf;
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                uvc_queue_buffer_requeue(queue, buf);
                return;
        }

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

/*
 * Release a reference on the buffer. Complete the buffer when the last
 * reference is released.
 */
void uvc_queue_buffer_release(struct uvc_buffer *buf)
{
        kref_put(&buf->ref, uvc_queue_buffer_complete);
}

/*
 * Remove this buffer from the queue. Lifetime will persist while async actions
 * are still running (if any), and uvc_queue_buffer_release will give the
 * buffer back to VB2 when all users have completed.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                                         struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        nextbuf = __uvc_queue_get_current_buffer(queue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        uvc_queue_buffer_release(buf);

        return nextbuf;
}
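
/*
 * Illustrative sketch (not part of this file): the kref-based lifetime
 * protocol implemented above. uvc_buffer_queue() initializes the reference
 * count to 1; any asynchronous user (e.g. a URB still referencing the buffer
 * memory) takes an extra reference, and the buffer is only handed back to
 * vb2 once every reference has been dropped.
 *
 *      kref_get(&buf->ref);                    // async user starts
 *      ...
 *      uvc_queue_buffer_release(buf);          // async user done
 *      ...
 *      uvc_queue_next_buffer(queue, buf);      // drops the initial reference
 */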