// SPDX-License-Identifier: GPL-2.0+
/*
 * uvc_video.c -- USB Video Class Gadget driver
 *
 * Copyright (C) 2009-2010
 *		Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <linux/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_trace.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
			u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

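	/*
	 * Build the UVC payload header: data[0] carries the header length
	 * (bHeaderLength) and data[1] the bmHeaderInfo bit field. The
	 * optional PTS and SCR fields are appended below, with 'pos'
	 * tracking the running header length.
	 */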
	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
		data[1] |= UVC_STREAM_ERR;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz: scale microseconds by 48. */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

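	/*
	 * If the controller can report the current frame number, add a
	 * Source Clock Reference: the source time clock sampled now (in
	 * 48 MHz units) paired with the SOF frame counter.
	 */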
	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}

static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min_t(unsigned int, len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min_t(int, video->max_payload_size - video->payload_size, len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

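	/*
	 * Reset payload_size so that the next request starts a new payload
	 * with a fresh header: either this payload is complete, the frame
	 * is being dropped, or the buffer has been fully consumed.
	 */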
	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = buf->req_payload_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
					     buf->req_payload_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ? len + header_len :
		buf->req_payload_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg = ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

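	/*
	 * Complete the buffer when it has been fully consumed, when the
	 * scatterlist ran out, or when the frame is being dropped.
	 */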
	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = buf->req_payload_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = buf->req_payload_size - len;

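	/* Complete the buffer on its last payload or when dropping the frame. */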
	if (buf->bytesused == video->queue.buf_used ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

/*
 * Callers must take care to hold req_lock when this function may be called
 * from multiple threads. For example, when frames are streaming to the host.
 */
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
	sg_free_table(&ureq->sgt);
	if (ureq->req && ep) {
		usb_ep_free_request(ep, ureq->req);
		ureq->req = NULL;
	}

	kfree(ureq->req_buffer);
	ureq->req_buffer = NULL;

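	/* The ureq may already have been unlinked by uvcg_video_disable(). */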
	if (!list_empty(&ureq->list))
		list_del_init(&ureq->list);

	kfree(ureq);
}

static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

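	/*
	 * The queued counter tracks in-flight requests; hw_submit uses it to
	 * cap the number of zero-length requests queued ahead of payloads,
	 * and the trace points report it.
	 */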
	atomic_inc(&video->queued);

	trace_uvcg_video_queue(req, atomic_read(&video->queued));

	return ret;
}

/* This function must be called with video->req_lock held. */
static int uvcg_video_usb_req_queue(struct uvc_video *video,
				    struct usb_request *req, bool queue_to_ep)
{
	bool is_bulk = video->max_payload_size;
	struct list_head *list = NULL;

	if (!video->is_enabled)
		return -ENODEV;

	if (queue_to_ep) {
		struct uvc_request *ureq = req->context;
		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 *
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || ureq->last_buf ||
		    !(video->req_int_count %
		      min(DIV_ROUND_UP(video->uvc_num_requests, 4), UVCG_REQ_MAX_INT_COUNT))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}
		video->req_int_count++;
		return uvcg_video_ep_queue(video, req);
	}
	/*
	 * If we're not queueing to the ep, isoc requests go to the req_ready
	 * list and bulk requests back to req_free.
	 */
	list = is_bulk ? &video->req_free : &video->req_ready;
	list_add_tail(&req->list, list);
	return 0;
}

static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_buffer *last_buf;
	unsigned long flags;

	spin_lock_irqsave(&video->req_lock, flags);
	atomic_dec(&video->queued);
	if (!video->is_enabled) {
		/*
		 * When is_enabled is false, uvcg_video_disable() ensures
		 * that in-flight uvc_buffers are returned, so we can
		 * safely call free_request without worrying about
		 * last_buf.
		 */
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		return;
	}

	last_buf = ureq->last_buf;
	ureq->last_buf = NULL;
	spin_unlock_irqrestore(&video->req_lock, flags);

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		if (req->length != 0)
			queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (last_buf) {
		spin_lock_irqsave(&queue->irqlock, flags);
		uvcg_complete_buffer(queue, last_buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * The video stream might have been disabled while we were
	 * processing the current usb_request, so make sure we're still
	 * streaming before queueing the usb_request back to req_free.
	 */
	if (!video->is_enabled) {
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		uvcg_queue_cancel(queue, 0);

		return;
	}

	list_add_tail(&req->list, &video->req_free);
	/*
	 * Queue work to the wq as well, since it is possible that a
	 * buffer may not have been completely encoded with the set of
	 * in-flight usb requests for which the complete callbacks are
	 * firing.
	 * In that case, if we do not queue work to the worker thread,
	 * the buffer will never be marked as complete - and therefore
	 * not be returned to userspace. As a result, the
	 * dequeue -> queue -> dequeue flow of uvc buffers will not
	 * happen. Since there is a new free request, wake up the pump.
	 */
	queue_work(video->async_wq, &video->pump);

	trace_uvcg_video_complete(req, atomic_read(&video->queued));

	spin_unlock_irqrestore(&video->req_lock, flags);

	kthread_queue_work(video->kworker, &video->hw_submit);
}

static void uvcg_video_hw_submit(struct kthread_work *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, hw_submit);
	bool is_bulk = video->max_payload_size;
	unsigned long flags;
	struct usb_request *req;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;
		spin_lock_irqsave(&video->req_lock, flags);
		/*
		 * Here we check whether any request is available in the ready
		 * list. If it is, queue it to the ep and add the current
		 * usb_request to the req_free list - for video_pump to fill in.
		 * Otherwise, just use the current usb_request to queue a 0
		 * length request to the ep. Since we always add to the req_free
		 * list if we dequeue from the ready list, there will never
		 * be a situation where the req_free list is completely out of
		 * requests and cannot recover.
		 */
		if (!list_empty(&video->req_ready)) {
			req = list_first_entry(&video->req_ready,
					       struct usb_request, list);
		} else {
			if (list_empty(&video->req_free) ||
			    (atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT)) {
				spin_unlock_irqrestore(&video->req_lock, flags);

				return;
			}
			req = list_first_entry(&video->req_free, struct usb_request,
					       list);
			req->length = 0;
		}
		list_del(&req->list);

		/*
		 * Queue to the endpoint. The actual queueing to the ep will
		 * only happen on one thread - the async_wq for bulk endpoints
		 * and this thread for isoc endpoints.
		 */
		ret = uvcg_video_usb_req_queue(video, req, !is_bulk);
		if (ret < 0) {
			/*
			 * Endpoint error, but the stream is still enabled.
			 * Put the request back in req_free for it to be
			 * cleaned up later.
			 */
			list_add_tail(&req->list, &video->req_free);
			/* There is a new free request - wake up the pump. */
			queue_work(video->async_wq, &video->pump);
		}

		spin_unlock_irqrestore(&video->req_lock, flags);
	}
}

static int
uvc_video_free_requests(struct uvc_video *video)
{
	struct uvc_request *ureq, *temp;

	list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
		uvc_video_free_request(ureq, video->ep);

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	return 0;
}

static void
uvc_video_prep_requests(struct uvc_video *video)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	unsigned int interval_duration;
	unsigned int max_req_size, req_size, header_size;
	unsigned int nreq;

	max_req_size = video->max_req_size;

	if (!usb_endpoint_xfer_isoc(video->ep->desc)) {
		video->req_size = max_req_size;
		video->reqs_per_frame = video->uvc_num_requests =
			DIV_ROUND_UP(video->imagesize, max_req_size);

		return;
	}

	interval_duration = 2 << (video->ep->desc->bInterval - 1);
	if (cdev->gadget->speed < USB_SPEED_HIGH)
		interval_duration *= 10000;
	else
		interval_duration *= 1250;

	nreq = DIV_ROUND_UP(video->interval, interval_duration);

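	/*
	 * Worked example: with the default 15 fps frame interval (666666 in
	 * 100 ns units) and a high-speed endpoint with bInterval 4, the
	 * service interval is (2 << 3) * 1250 = 20000, so
	 * nreq = DIV_ROUND_UP(666666, 20000) = 34 requests per frame.
	 */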
	header_size = nreq * UVCG_REQUEST_HEADER_LEN;

	req_size = DIV_ROUND_UP(video->imagesize + header_size, nreq);

	if (req_size > max_req_size) {
		/* The expected buffer size for the prepared interval cannot
		 * be streamed with the currently configured isoc bandwidth.
		 * Fall back to the maximum request size.
		 */
		req_size = max_req_size;
	}
	video->req_size = req_size;

	/* Compensate the number of allocated requests for the maximum
	 * number of zero-length requests, since hw_submit may initially
	 * enqueue some of them, which would otherwise leave us unable
	 * to fully encode one frame.
	 */
	video->uvc_num_requests = nreq + UVCG_REQ_MAX_ZERO_COUNT;
	video->reqs_per_frame = nreq;
}

static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	struct uvc_request *ureq;
	unsigned int i;
	int ret = -ENOMEM;

	/*
	 * Calculated in uvc_video_prep_requests:
	 * - video->uvc_num_requests
	 * - video->req_size
	 */
	uvc_video_prep_requests(video);

	for (i = 0; i < video->uvc_num_requests; i++) {
		ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
		if (ureq == NULL)
			goto error;

		INIT_LIST_HEAD(&ureq->list);

		list_add_tail(&ureq->list, &video->ureqs);

		ureq->req_buffer = kmalloc(video->req_size, GFP_KERNEL);
		if (ureq->req_buffer == NULL)
			goto error;

		ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (ureq->req == NULL)
			goto error;

		ureq->req->buf = ureq->req_buffer;
		ureq->req->length = 0;
		ureq->req->complete = uvc_video_complete;
		ureq->req->context = ureq;
		ureq->video = video;
		ureq->last_buf = NULL;

		list_add_tail(&ureq->req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&ureq->sgt,
			       DIV_ROUND_UP(video->req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	/* video->max_payload_size is only set when using bulk transfer */
	bool is_bulk = video->max_payload_size;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;

		/*
		 * Check is_enabled and retrieve the first available USB
		 * request, protected by the request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (!video->is_enabled || list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
				       list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/*
		 * Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (!buf) {
			/*
			 * Either the queue has been disconnected or no video
			 * buffer is available for bulk transfer. Either way,
			 * stop processing further.
			 */
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		spin_unlock_irqrestore(&queue->irqlock, flags);

		spin_lock_irqsave(&video->req_lock, flags);
		/* For bulk endpoints we queue from this worker thread, since
		 * we would rather not wait for requests to become ready in
		 * the uvcg_video_complete() handler. For isoc endpoints we
		 * add the request to the ready list and only queue it to the
		 * endpoint from the complete handler.
		 */
		ret = uvcg_video_usb_req_queue(video, req, is_bulk);
		spin_unlock_irqrestore(&video->req_lock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}
	}
	spin_lock_irqsave(&video->req_lock, flags);
	if (video->is_enabled)
		list_add_tail(&req->list, &video->req_free);
	else
		uvc_video_free_request(req->context, video->ep);
	spin_unlock_irqrestore(&video->req_lock, flags);
}

/*
 * Disable the video stream.
 */
int
uvcg_video_disable(struct uvc_video *video)
{
	unsigned long flags;
	struct list_head inflight_bufs;
	struct usb_request *req, *temp;
	struct uvc_buffer *buf, *btemp;
	struct uvc_request *ureq, *utemp;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video disable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&inflight_bufs);
	spin_lock_irqsave(&video->req_lock, flags);
	video->is_enabled = false;

	/*
	 * Remove any in-flight buffers from the uvc_requests
	 * because we want to return them before cancelling the
	 * queue. This ensures that we aren't stuck waiting for
	 * all complete callbacks to come through before disabling
	 * the vb2 queue.
	 */
	list_for_each_entry(ureq, &video->ureqs, list) {
		if (ureq->last_buf) {
			list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
			ureq->last_buf = NULL;
		}
	}
	spin_unlock_irqrestore(&video->req_lock, flags);

	cancel_work_sync(&video->pump);
	uvcg_queue_cancel(&video->queue, 0);

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Remove all uvc_requests from ureqs with list_del_init.
	 * This lets uvc_video_free_request correctly identify
	 * whether the uvc_request is attached to a list when
	 * freeing memory.
	 */
	list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
		list_del_init(&ureq->list);

	list_for_each_entry_safe(req, temp, &video->req_free, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	list_for_each_entry_safe(req, temp, &video->req_ready, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_unlock_irqrestore(&video->req_lock, flags);

	/*
	 * Return all the video buffers before disabling the queue.
	 */
	spin_lock_irqsave(&video->queue.irqlock, flags);
	list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
	}
	spin_unlock_irqrestore(&video->queue.irqlock, flags);

	uvcg_queue_enable(&video->queue, 0);
	return 0;
}

/*
 * Enable the video stream.
 */
int uvcg_video_enable(struct uvc_video *video)
{
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	/*
	 * Safe to access request related fields without req_lock because
	 * this is the only thread currently active, and no other
	 * request handling thread will become active until this function
	 * returns.
	 */
	video->is_enabled = true;

	ret = uvcg_queue_enable(&video->queue, 1);
	if (ret < 0)
		return ret;

	ret = uvc_video_alloc_requests(video);
	if (ret < 0)
		return ret;

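	/*
	 * Select the payload encoder: bulk endpoints use the single bulk
	 * encoder, while isoc endpoints use the scatter-gather variant
	 * when the queue supports it.
	 */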
	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else {
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;
	}

	video->req_int_count = 0;

	atomic_set(&video->queued, 0);

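	/* Prime the endpoint with requests and start pumping video data. */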
	kthread_queue_work(video->kworker, &video->hw_submit);
	queue_work(video->async_wq, &video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	video->is_enabled = false;
	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for the asynchronous video pump handler. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	/* Allocate a kthread worker for the asynchronous hw submit handler. */
	video->kworker = kthread_run_worker(0, "UVCG");
	if (IS_ERR(video->kworker)) {
		uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
		return PTR_ERR(video->kworker);
	}

	kthread_init_work(&video->hw_submit, uvcg_video_hw_submit);

	sched_set_fifo(video->kworker->task);

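	/*
	 * Default to a 320x240 YUYV stream at 15 fps; the frame interval is
	 * expressed in 100 ns units (666666 * 100 ns ~= 66.7 ms).
	 */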
	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;
	video->interval = 666666;

	/* Initialize the video buffers queue. */
	return uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			       V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
}