// SPDX-License-Identifier: GPL-2.0+
/*
 *	uvc_video.c  --  USB Video Class Gadget driver
 *
 *	Copyright (C) 2009-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <linux/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_trace.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
		data[1] |= UVC_STREAM_ERR;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}
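
/*
 * Illustration of the header built above, with every optional field present
 * (2 + 4 + 6 = 12 bytes, the maximum):
 *
 *   data[0]      bHeaderLength, 12 here
 *   data[1]      bmHeaderInfo: EOH | FID, plus ERR/PTS/SCR/EOF as set above
 *   data[2..5]   dwPresentationTime, little endian, 48 MHz clock units
 *   data[6..9]   SCR source time clock, little endian, 48 MHz clock units
 *   data[10..11] SOF counter from usb_gadget_frame_number()
 *
 * The PTS is only emitted for the first payload of a frame (buf_used == 0)
 * and the SCR only when the controller implements the get_frame op, so a
 * header may also be 2, 6 or 8 bytes long.
 */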

static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min_t(unsigned int, len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min_t(int, video->max_payload_size - video->payload_size, len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}
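
/*
 * For illustration, assuming max_payload_size = 16384 and req_size = 1024
 * (hypothetical values): the first request of each payload carries a
 * 12-byte header plus 1012 bytes of image data, the following requests
 * carry 1024 bytes each, and a new header is started once payload_size
 * reaches 16384. A 100000-byte frame thus spans
 * DIV_ROUND_UP(100000, 16384 - 12) = 7 bulk payloads.
 */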

static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = buf->req_payload_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
					     buf->req_payload_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ? len + header_len :
		buf->req_payload_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg	= ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}
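
/*
 * Note that entry 0 of the scatterlist always holds the header copied out of
 * ureq->header, while the remaining num_sgs - 1 entries point directly at the
 * vb2 buffer pages. This is a zero-copy path, in contrast to
 * uvc_video_encode_isoc() below, which memcpy()s the data into the request
 * buffer.
 */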

static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = buf->req_payload_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = buf->req_payload_size - len;

	if (buf->bytesused == video->queue.buf_used ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}
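
/*
 * The three encoders above are mutually exclusive. uvcg_video_enable()
 * selects one per stream: uvc_video_encode_bulk for bulk endpoints, and for
 * isoc endpoints either the scatter-gather variant (when the controller
 * supports sg, i.e. queue.use_sg is set) or the plain memcpy variant.
 */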

/* --------------------------------------------------------------------------
 * Request handling
 */

/*
 * Callers must take care to hold req_lock when this function may be called
 * from multiple threads, for example when frames are streaming to the host.
 */
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
	sg_free_table(&ureq->sgt);
	if (ureq->req && ep) {
		usb_ep_free_request(ep, ureq->req);
		ureq->req = NULL;
	}

	kfree(ureq->req_buffer);
	ureq->req_buffer = NULL;

	if (!list_empty(&ureq->list))
		list_del_init(&ureq->list);

	kfree(ureq);
}

static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	atomic_inc(&video->queued);

	trace_uvcg_video_queue(req, atomic_read(&video->queued));

	return ret;
}

/* This function must be called with video->req_lock held. */
static int uvcg_video_usb_req_queue(struct uvc_video *video,
	struct usb_request *req, bool queue_to_ep)
{
	bool is_bulk = video->max_payload_size;
	struct list_head *list = NULL;

	if (!video->is_enabled)
		return -ENODEV;

	if (queue_to_ep) {
		struct uvc_request *ureq = req->context;
		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || ureq->last_buf ||
			!(video->req_int_count %
			min(DIV_ROUND_UP(video->uvc_num_requests, 4), UVCG_REQ_MAX_INT_COUNT))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}
		video->req_int_count++;
		return uvcg_video_ep_queue(video, req);
	}
	/*
	 * If we're not queuing to the ep, add the request to req_ready for
	 * isoc transfers, or back to req_free for bulk.
	 */
	list = is_bulk ? &video->req_free : &video->req_ready;
	list_add_tail(&req->list, list);
	return 0;
}
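
/*
 * For illustration, with uvc_num_requests == 64 (a hypothetical value) the
 * modulus above evaluates to min(DIV_ROUND_UP(64, 4), UVCG_REQ_MAX_INT_COUNT),
 * so no more than one completion interrupt is requested per that many queued
 * requests, unless req_free runs empty or a frame boundary (ureq->last_buf)
 * forces an immediate interrupt.
 */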

static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_buffer *last_buf;
	unsigned long flags;

	spin_lock_irqsave(&video->req_lock, flags);
	atomic_dec(&video->queued);
	if (!video->is_enabled) {
		/*
		 * When is_enabled is false, uvcg_video_disable() ensures
		 * that in-flight uvc_buffers are returned, so we can
		 * safely call free_request without worrying about
		 * last_buf.
		 */
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		return;
	}

	last_buf = ureq->last_buf;
	ureq->last_buf = NULL;
	spin_unlock_irqrestore(&video->req_lock, flags);

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		if (req->length != 0)
			queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (last_buf) {
		spin_lock_irqsave(&queue->irqlock, flags);
		uvcg_complete_buffer(queue, last_buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * The video stream might have been disabled while we were
	 * processing the current usb_request, so make sure we're still
	 * streaming before queueing the usb_request back to req_free.
	 */
	if (!video->is_enabled) {
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		uvcg_queue_cancel(queue, 0);

		return;
	}

	list_add_tail(&req->list, &video->req_free);
	/*
	 * Queue work to the wq as well, since it is possible that a
	 * buffer may not have been completely encoded with the set of
	 * in-flight usb requests for which the complete callbacks are
	 * firing.
	 * In that case, if we do not queue work to the worker thread,
	 * the buffer will never be marked as complete - and therefore
	 * not be returned to userspace. As a result, the
	 * dequeue -> queue -> dequeue flow of uvc buffers will not
	 * happen. Since there is a new free request, wake up the pump.
	 */
	queue_work(video->async_wq, &video->pump);

	trace_uvcg_video_complete(req, atomic_read(&video->queued));

	spin_unlock_irqrestore(&video->req_lock, flags);

	kthread_queue_work(video->kworker, &video->hw_submit);
}

static void uvcg_video_hw_submit(struct kthread_work *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, hw_submit);
	bool is_bulk = video->max_payload_size;
	unsigned long flags;
	struct usb_request *req;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;
		spin_lock_irqsave(&video->req_lock, flags);
		/*
		 * Here we check whether any request is available in the ready
		 * list. If it is, queue it to the ep and add the current
		 * usb_request to the req_free list - for video_pump to fill in.
		 * Otherwise, just use the current usb_request to queue a 0
		 * length request to the ep. Since we always add to the req_free
		 * list if we dequeue from the ready list, there will never
		 * be a situation where the req_free list is completely out of
		 * requests and cannot recover.
		 */
		if (!list_empty(&video->req_ready)) {
			req = list_first_entry(&video->req_ready,
					       struct usb_request, list);
		} else {
			if (list_empty(&video->req_free) ||
			    (atomic_read(&video->queued) > UVCG_REQ_MAX_ZERO_COUNT)) {
				spin_unlock_irqrestore(&video->req_lock, flags);

				return;
			}
			req = list_first_entry(&video->req_free, struct usb_request,
					       list);
			req->length = 0;
		}
		list_del(&req->list);

		/*
		 * Queue to the endpoint. The actual queueing to ep will
		 * only happen on one thread - the async_wq for bulk endpoints
		 * and this thread for isoc endpoints.
		 */
		ret = uvcg_video_usb_req_queue(video, req, !is_bulk);
		if (ret < 0) {
			/*
			 * Endpoint error, but the stream is still enabled.
			 * Put request back in req_free for it to be cleaned
			 * up later.
			 */
			list_add_tail(&req->list, &video->req_free);
			/*
			 * There is a new free request - wake up the pump.
			 */
			queue_work(video->async_wq, &video->pump);
		}

		spin_unlock_irqrestore(&video->req_lock, flags);
	}
}
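
/*
 * The UVCG_REQ_MAX_ZERO_COUNT check above stops zero-length submissions once
 * more than that many requests are already queued to the hardware; hw_submit
 * then waits for completions to drain video->queued, which keeps the isoc
 * endpoint fed without exhausting req_free.
 */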

static int
uvc_video_free_requests(struct uvc_video *video)
{
	struct uvc_request *ureq, *temp;

	list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
		uvc_video_free_request(ureq, video->ep);

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	return 0;
}

static void
uvc_video_prep_requests(struct uvc_video *video)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	unsigned int interval_duration = video->ep->desc->bInterval * 1250;
	unsigned int max_req_size, req_size, header_size;
	unsigned int nreq;

	max_req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
		 * (video->ep->mult);

	if (!usb_endpoint_xfer_isoc(video->ep->desc)) {
		video->req_size = max_req_size;
		video->reqs_per_frame = video->uvc_num_requests =
			DIV_ROUND_UP(video->imagesize, max_req_size);

		return;
	}

	if (cdev->gadget->speed < USB_SPEED_HIGH)
		interval_duration = video->ep->desc->bInterval * 10000;

	nreq = DIV_ROUND_UP(video->interval, interval_duration);

	header_size = nreq * UVCG_REQUEST_HEADER_LEN;

	req_size = DIV_ROUND_UP(video->imagesize + header_size, nreq);

	if (req_size > max_req_size) {
		/*
		 * The prepared interval length and expected buffer size
		 * cannot be streamed with the currently configured isoc
		 * bandwidth. Fall back to the maximum request size.
		 */
		req_size = max_req_size;
	}
	video->req_size = req_size;

	/*
	 * Pad the number of requests to be allocated with the maximum
	 * number of zero-length requests, since hw_submit may initially
	 * enqueue some zero-length requests and we would then be unable
	 * to fully encode one frame.
	 */
	video->uvc_num_requests = nreq + UVCG_REQ_MAX_ZERO_COUNT;
	video->reqs_per_frame = nreq;
}
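
/*
 * Worked example with the 320x240 YUYV defaults from uvcg_video_init()
 * (imagesize = 153600, interval = 666666) on a SuperSpeed isoc endpoint
 * with bInterval = 1:
 *
 *   interval_duration = 1 * 1250                   (125 us in 100 ns units)
 *   nreq     = DIV_ROUND_UP(666666, 1250) = 534    requests per frame
 *   req_size = DIV_ROUND_UP(153600 + 534 * UVCG_REQUEST_HEADER_LEN, 534)
 *            = 300                                 (with a 12-byte header)
 *
 * which is well below a typical max_req_size, so the fallback branch is not
 * taken in this case.
 */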

static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	struct uvc_request *ureq;
	unsigned int i;
	int ret = -ENOMEM;

	/*
	 * calculated in uvc_video_prep_requests:
	 * - video->uvc_num_requests
	 * - video->req_size
	 */
	uvc_video_prep_requests(video);

	for (i = 0; i < video->uvc_num_requests; i++) {
		ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
		if (ureq == NULL)
			goto error;

		INIT_LIST_HEAD(&ureq->list);

		list_add_tail(&ureq->list, &video->ureqs);

		ureq->req_buffer = kmalloc(video->req_size, GFP_KERNEL);
		if (ureq->req_buffer == NULL)
			goto error;

		ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (ureq->req == NULL)
			goto error;

		ureq->req->buf = ureq->req_buffer;
		ureq->req->length = 0;
		ureq->req->complete = uvc_video_complete;
		ureq->req->context = ureq;
		ureq->video = video;
		ureq->last_buf = NULL;

		list_add_tail(&ureq->req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&ureq->sgt,
			       DIV_ROUND_UP(video->req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}
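
/*
 * As an example of the sg table sizing above: with a hypothetical
 * video->req_size of 3072 and 4 KiB pages,
 * DIV_ROUND_UP(3072 - UVCG_REQUEST_HEADER_LEN, PAGE_SIZE) + 2 = 3 entries:
 * one for the header, one for the payload, and one spare for a payload that
 * straddles a page boundary.
 */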

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	/* video->max_payload_size is only set when using bulk transfer */
	bool is_bulk = video->max_payload_size;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;

		/*
		 * Check is_enabled and retrieve the first available USB
		 * request, protected by the request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (!video->is_enabled || list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/*
		 * Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (!buf) {
			/*
			 * Either the queue has been disconnected or no video
			 * buffer is available for bulk transfer. Either way,
			 * stop processing further.
			 */
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		spin_unlock_irqrestore(&queue->irqlock, flags);

		spin_lock_irqsave(&video->req_lock, flags);
		/*
		 * For bulk endpoints we queue from the worker thread, since
		 * we would rather not wait on requests to become ready in the
		 * uvcg_video_complete() handler. For isoc endpoints we add
		 * the request to the ready list and only queue it to the
		 * endpoint from the complete handler.
		 */
		ret = uvcg_video_usb_req_queue(video, req, is_bulk);
		spin_unlock_irqrestore(&video->req_lock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}
	}
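	/*
	 * We only get here on a break: either no buffer was available (req
	 * was taken off req_free but never filled) or queueing failed, so
	 * the request currently sits on no list. Put it back on req_free,
	 * or free it outright if the stream was disabled in the meantime.
	 */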
	spin_lock_irqsave(&video->req_lock, flags);
	if (video->is_enabled)
		list_add_tail(&req->list, &video->req_free);
	else
		uvc_video_free_request(req->context, video->ep);
	spin_unlock_irqrestore(&video->req_lock, flags);
}

/*
 * Disable the video stream
 */
int
uvcg_video_disable(struct uvc_video *video)
{
	unsigned long flags;
	struct list_head inflight_bufs;
	struct usb_request *req, *temp;
	struct uvc_buffer *buf, *btemp;
	struct uvc_request *ureq, *utemp;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video disable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&inflight_bufs);
	spin_lock_irqsave(&video->req_lock, flags);
	video->is_enabled = false;

	/*
	 * Remove any in-flight buffers from the uvc_requests
	 * because we want to return them before cancelling the
	 * queue. This ensures that we aren't stuck waiting for
	 * all complete callbacks to come through before disabling
	 * the vb2 queue.
	 */
	list_for_each_entry(ureq, &video->ureqs, list) {
		if (ureq->last_buf) {
			list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
			ureq->last_buf = NULL;
		}
	}
	spin_unlock_irqrestore(&video->req_lock, flags);

	cancel_work_sync(&video->pump);
	uvcg_queue_cancel(&video->queue, 0);

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Remove all uvc_requests from ureqs with list_del_init().
	 * This lets uvc_video_free_request() correctly identify whether
	 * the uvc_request is attached to a list when freeing memory.
	 */
	list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
		list_del_init(&ureq->list);

	list_for_each_entry_safe(req, temp, &video->req_free, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	list_for_each_entry_safe(req, temp, &video->req_ready, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_unlock_irqrestore(&video->req_lock, flags);

	/*
	 * Return all the video buffers before disabling the queue.
	 */
	spin_lock_irqsave(&video->queue.irqlock, flags);
	list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
	}
	spin_unlock_irqrestore(&video->queue.irqlock, flags);

	uvcg_queue_enable(&video->queue, 0);
	return 0;
}

/*
 * Enable the video stream.
 */
int uvcg_video_enable(struct uvc_video *video)
{
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	/*
	 * Safe to access request related fields without req_lock because
	 * this is the only thread currently active, and no other
	 * request handling thread will become active until this function
	 * returns.
	 */
	video->is_enabled = true;

	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;

	video->req_int_count = 0;

	atomic_set(&video->queued, 0);

	kthread_queue_work(video->kworker, &video->hw_submit);
	queue_work(video->async_wq, &video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	video->is_enabled = false;
	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for asynchronous video pump handler. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	/* Allocate a kthread for asynchronous hw submit handler. */
	video->kworker = kthread_create_worker(0, "UVCG");
	if (IS_ERR(video->kworker)) {
		uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
		return PTR_ERR(video->kworker);
	}

	kthread_init_work(&video->hw_submit, uvcg_video_hw_submit);

	sched_set_fifo(video->kworker->task);

	video->uvc = uvc;
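	/*
	 * Default to 320x240 YUYV at 16 bits per pixel: imagesize is
	 * 320 * 240 * 2 = 153600 bytes, and the interval is expressed in
	 * 100 ns units, so 666666 is ~66.7 ms per frame, i.e. 15 fps.
	 */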
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;
	video->interval = 666666;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}