// SPDX-License-Identifier: GPL-2.0+
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *	Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include <media/v4l2-common.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-vmalloc.h>

#include "uvc.h"
#include "uvc_video.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvcg_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);

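	/*
	 * Clamp the number of buffers to the range supported by the driver,
	 * between UVCG_STREAMING_MIN_BUFFERS and UVC_MAX_VIDEO_BUFFERS.
	 */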
	if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
		*nbuffers = UVC_MAX_VIDEO_BUFFERS;
	if (*nbuffers < UVCG_STREAMING_MIN_BUFFERS)
		*nbuffers = UVCG_STREAMING_MIN_BUFFERS;

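	/* Buffers are single-planar and must be able to hold a full image. */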
	*nplanes = 1;

	sizes[0] = video->imagesize;

	return 0;
}

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	if (queue->use_sg) {
		buf->sgt = vb2_dma_sg_plane_desc(vb, 0);
		buf->sg = buf->sgt->sgl;
	} else {
		buf->mem = vb2_plane_vaddr(vb, 0);
	}
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		buf->bytesused = 0;
	} else {
		buf->bytesused = vb2_get_plane_payload(vb, 0);

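		/*
		 * Split the frame payload evenly over the reqs_per_frame USB
		 * requests, accounting for one UVC request header per
		 * request, and cap the result at the configured request size.
		 */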
		if (video->reqs_per_frame != 0) {
			buf->req_payload_size =
				DIV_ROUND_UP(buf->bytesused +
					     (video->reqs_per_frame * UVCG_REQUEST_HEADER_LEN),
					     video->reqs_per_frame);
			if (buf->req_payload_size > video->req_size)
				buf->req_payload_size = video->req_size;
		} else {
			buf->req_payload_size = video->max_req_size;
		}
	}

	return 0;
}

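/*
 * Add the buffer to the IRQ queue for processing by the driver. If the device
 * has been disconnected, complete the buffer immediately in the error state
 * so that it is returned to userspace.
 */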
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);

	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
};

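/*
 * Initialize the video buffers queue.
 *
 * Performs basic initialization of the uvc_video_queue struct and sets up the
 * underlying videobuf2 queue. A caller would typically invoke it once while
 * setting up the V4L2 side of the function. A minimal sketch, with the names
 * of the surrounding objects assumed rather than taken from this file:
 *
 *	ret = uvcg_queue_init(&video->queue, &cdev->gadget->dev,
 *			      V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
 *	if (ret)
 *		return ret;
 */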
int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
		    struct mutex *lock)
{
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
	struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.ops = &uvc_queue_qops;
	queue->queue.lock = lock;
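	/*
	 * Use the DMA scatter-gather allocator when the UDC supports
	 * scatter-gather transfers, and fall back to vmalloc()ed buffers
	 * otherwise.
	 */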
	if (cdev->gadget->sg_supported) {
		queue->queue.mem_ops = &vb2_dma_sg_memops;
		queue->use_sg = 1;
	} else {
		queue->queue.mem_ops = &vb2_vmalloc_memops;
	}

	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY
				     | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	queue->queue.dev = dev;

	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = 0;

	return 0;
}

/*
 * Free the video buffers.
 */
void uvcg_free_buffers(struct uvc_video_queue *queue)
{
	vb2_queue_release(&queue->queue);
}

/*
 * Allocate the video buffers.
 */
int uvcg_alloc_buffers(struct uvc_video_queue *queue,
		       struct v4l2_requestbuffers *rb)
{
	int ret;

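	/*
	 * If allocation fails while scatter-gather buffers are in use, switch
	 * to the vmalloc allocator and retry once.
	 */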
retry:
	ret = vb2_reqbufs(&queue->queue, rb);
	if (ret < 0 && queue->use_sg) {
		uvc_trace(UVC_TRACE_IOCTL,
			  "failed to alloc buffer with sg enabled, try non-sg mode\n");
		queue->use_sg = 0;
		queue->queue.mem_ops = &vb2_vmalloc_memops;
		goto retry;
	}

	return ret ? ret : rb->count;
}

int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	return vb2_querybuf(&queue->queue, buf);
}

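/*
 * Queue a video buffer. Queuing a buffer after the device has been
 * disconnected fails with -ENODEV, as uvc_buffer_prepare() rejects it.
 */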
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	return vb2_qbuf(&queue->queue, NULL, buf);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
			int nonblocking)
{
	return vb2_dqbuf(&queue->queue, buf, nonblocking);
}

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
			 poll_table *wait)
{
	return vb2_poll(&queue->queue, file, wait);
}

int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * NO-MMU archs need this function to make mmap() work correctly.
 */
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
					   unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvcg_queue_buffer()
 * will fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	struct uvc_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	while (!list_empty(&queue->irqqueue)) {
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);
		list_del(&buf->queue);
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	queue->buf_used = 0;

	/*
	 * This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvcg_queue_buffer() and the disconnection event
	 * that could result in an interruptible wait in uvcg_dequeue_buffer().
	 * Do not blindly replace this logic by checking for the
	 * UVC_QUEUE_DISCONNECTED flag outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvcg_queue_cancel() instead.
 */
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
{
	unsigned long flags;
	int ret = 0;

	if (enable) {
		ret = vb2_streamon(&queue->queue, queue->queue.type);
		if (ret < 0)
			return ret;

		queue->sequence = 0;
		queue->buf_used = 0;
		queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
	} else {
		ret = vb2_streamoff(&queue->queue, queue->queue.type);
		if (ret < 0)
			return ret;

		spin_lock_irqsave(&queue->irqlock, flags);
		INIT_LIST_HEAD(&queue->irqqueue);

		/*
		 * FIXME: We need to clear the DISCONNECTED flag to ensure that
		 * applications will be able to queue buffers for the next
		 * streaming run. However, clearing it here doesn't guarantee
		 * that the device will be reconnected in the meantime.
		 */
		queue->flags &= ~UVC_QUEUE_DISCONNECTED;
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	return ret;
}

/* Called with queue->irqlock held. */
void uvcg_complete_buffer(struct uvc_video_queue *queue,
			  struct uvc_buffer *buf)
{
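	/*
	 * If the frame was flagged as incomplete, hand the buffer back to
	 * userspace in the error state with an empty payload rather than
	 * returning partial data.
	 */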
	if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) {
		queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = queue->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();

	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

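/*
 * Return the first buffer on the IRQ queue, or NULL if the queue is empty.
 */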
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
{
	struct uvc_buffer *buf = NULL;

	if (!list_empty(&queue->irqqueue))
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);

	return buf;
}