/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/videobuf2-vmalloc.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * Video queues are initialized by uvc_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

static inline struct uvc_streaming *
uvc_queue_to_stream(struct uvc_video_queue *queue)
{
        return container_of(queue, struct uvc_streaming, queue);
}

/*
 * Return all queued buffers to videobuf2 in the requested state.
 *
 * This function must be called with the queue spinlock held.
 */
static void uvc_queue_return_buffers(struct uvc_video_queue *queue,
                                     enum uvc_buffer_state state)
{
        enum vb2_buffer_state vb2_state = state == UVC_BUF_STATE_ERROR
                                        ? VB2_BUF_STATE_ERROR
                                        : VB2_BUF_STATE_QUEUED;

        while (!list_empty(&queue->irqqueue)) {
                struct uvc_buffer *buf = list_first_entry(&queue->irqqueue,
                                                          struct uvc_buffer,
                                                          queue);
                list_del(&buf->queue);
                buf->state = state;
                vb2_buffer_done(&buf->buf, vb2_state);
        }
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], void *alloc_ctxs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);

        /* Make sure the image size is large enough. */
        if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize)
                return -EINVAL;

        *nplanes = 1;

        sizes[0] = fmt ? fmt->fmt.pix.sizeimage
                         : stream->ctrl.dwMaxVideoFrameSize;

        return 0;
}

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);

        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->error = 0;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /* If the device is disconnected return the buffer to userspace
                 * directly. The next QBUF call will fail with -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

static void uvc_buffer_finish(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf);

        if (vb->state == VB2_BUF_STATE_DONE)
                uvc_video_clock_update(stream, &vb->v4l2_buf, buf);
}

static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        unsigned long flags;
        int ret;

        queue->buf_used = 0;

        ret = uvc_video_enable(stream, 1);
        if (ret == 0)
                return 0;

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        return ret;
}

static void uvc_stop_streaming(struct vb2_queue *vq)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_streaming *stream = uvc_queue_to_stream(queue);
        unsigned long flags;

        uvc_video_enable(stream, 0);

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

static struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .buf_finish = uvc_buffer_finish,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = uvc_start_streaming,
        .stop_streaming = uvc_stop_streaming,
};

int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                   int drop_corrupted)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.ops = &uvc_queue_qops;
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                | V4L2_BUF_FLAG_TSTAMP_SRC_SOE;
        queue->queue.lock = &queue->mutex;
        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

        return 0;
}

void uvc_queue_release(struct uvc_video_queue *queue)
{
        mutex_lock(&queue->mutex);
        vb2_queue_release(&queue->queue);
        mutex_unlock(&queue->mutex);
}

/* -----------------------------------------------------------------------------
 * V4L2 queue operations
 */

int uvc_request_buffers(struct uvc_video_queue *queue,
                        struct v4l2_requestbuffers *rb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_reqbufs(&queue->queue, rb);
        mutex_unlock(&queue->mutex);

        return ret ? ret : rb->count;
}

int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_querybuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_create_buffers(struct uvc_video_queue *queue,
                       struct v4l2_create_buffers *cb)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_create_bufs(&queue->queue, cb);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_qbuf(&queue->queue, buf);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_export_buffer(struct uvc_video_queue *queue,
                      struct v4l2_exportbuffer *exp)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_expbuf(&queue->queue, exp);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                       int nonblocking)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_dqbuf(&queue->queue, buf, nonblocking);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamon(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_streamoff(&queue->queue, type);
        mutex_unlock(&queue->mutex);

        return ret;
}

int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue,
                                          unsigned long pgoff)
{
        return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                            poll_table *wait)
{
        unsigned int ret;

        mutex_lock(&queue->mutex);
        ret = vb2_poll(&queue->queue, file, wait);
        mutex_unlock(&queue->mutex);

        return ret;
}

/* -----------------------------------------------------------------------------
 *
 */

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = vb2_is_busy(&queue->queue);
        mutex_unlock(&queue->mutex);

        return allocated;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_buffer_queue and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
                buf->error = 0;
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->bytesused = 0;
                vb2_set_plane_payload(&buf->buf, 0, 0);
                return buf;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;
        spin_unlock_irqrestore(&queue->irqlock, flags);

        buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
        vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);

        return nextbuf;
}
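
/*
 * Illustrative sketch (not part of the driver): how the streaming side is
 * expected to use this queue from its completion path, following the locking
 * model described at the top of this file. It roughly mirrors what the
 * decoding code in uvc_video.c does. The function uvc_example_decode() and
 * its data/len/end_of_frame parameters are hypothetical names for this
 * example; only uvc_queue_next_buffer(), the irqqueue/irqlock members and
 * the uvc_buffer fields used below come from this file and uvcvideo.h.
 *
 *	static void uvc_example_decode(struct uvc_video_queue *queue,
 *				       const u8 *data, unsigned int len,
 *				       bool end_of_frame)
 *	{
 *		struct uvc_buffer *buf = NULL;
 *		unsigned long flags;
 *		unsigned int nbytes;
 *
 *		// Pick the buffer currently being filled. The IRQ queue is
 *		// protected by the irqlock spinlock, as this runs in
 *		// interrupt context.
 *		spin_lock_irqsave(&queue->irqlock, flags);
 *		if (!list_empty(&queue->irqqueue))
 *			buf = list_first_entry(&queue->irqqueue,
 *					       struct uvc_buffer, queue);
 *		spin_unlock_irqrestore(&queue->irqlock, flags);
 *		if (buf == NULL)
 *			return;
 *
 *		// Copy the payload into the vmalloc'ed plane memory set up
 *		// by uvc_buffer_prepare().
 *		nbytes = min(len, buf->length - buf->bytesused);
 *		memcpy(buf->mem + buf->bytesused, data, nbytes);
 *		buf->bytesused += nbytes;
 *
 *		// Once the frame is complete, hand the buffer back to
 *		// videobuf2 and switch to the next queued buffer. The real
 *		// code keeps the returned pointer as the new current buffer.
 *		if (end_of_frame) {
 *			buf->state = UVC_BUF_STATE_READY;
 *			buf = uvc_queue_next_buffer(queue, buf);
 *		}
 *	}
 */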