xref: /linux/drivers/usb/gadget/function/uvc_video.c (revision 8c994eff8fcfe8ecb1f1dbebed25b4d7bb75be12)
// SPDX-License-Identifier: GPL-2.0+
/*
 *	uvc_video.c  --  USB Video Class Gadget driver
 *
 *	Copyright (C) 2009-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <asm/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

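/*
 * uvc_video_encode_header - Fill a UVC payload header
 *
 * Write the payload header at the start of @data: the header length, the EOH
 * bit and the current FID, a PTS field when this is the first payload of a
 * buffer with a valid timestamp, and an SCR field when the gadget can report
 * its frame number. The EOF bit is set when the remaining buffer data fits in
 * this request. Returns the header length in bytes.
 */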
static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}

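/*
 * uvc_video_encode_data - Copy video data into a USB request buffer
 *
 * Copy up to @len bytes from the current position in the video buffer to
 * @data, update the number of bytes consumed from the buffer and return the
 * number of bytes copied.
 */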
static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

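/*
 * uvc_video_encode_bulk - Fill a USB request for a bulk endpoint
 *
 * Start each payload with a UVC header, copy video data up to the maximum
 * payload size, and request a zero-length packet when a payload completes.
 * When the video buffer has been fully consumed, mark it done, toggle the
 * frame ID and hand the buffer to the request for completion.
 */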
static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

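/*
 * uvc_video_encode_isoc_sg - Fill an isochronous USB request using a
 * scatter-gather list
 *
 * The UVC header is stored in a dedicated buffer referenced by the first
 * scatterlist entry, while the following entries point directly at the video
 * buffer pages to avoid copying the data. The buffer is completed when all of
 * its data has been queued, when its scatterlist has been exhausted, or when
 * incomplete payloads are being dropped.
 */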
static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = video->req_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
				      video->req_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ?
		len + header_len : video->req_size;

	/* Init the pending sgs with payload */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the video data with header. */
	req->buf = NULL;
	req->sg	= ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

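/*
 * uvc_video_encode_isoc - Fill an isochronous USB request by copying
 *
 * Write a UVC header at the start of the request buffer and copy as much
 * video data as fits in the request. The buffer is completed when it has been
 * fully transferred or when incomplete payloads are being dropped.
 */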
static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	if (buf->bytesused == video->queue.buf_used ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

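/*
 * uvcg_video_ep_queue - Queue a USB request on the video streaming endpoint
 *
 * On failure, halt the endpoint if it uses bulk transfers; isochronous
 * endpoints can't be halted.
 */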
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	return ret;
}

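/*
 * uvc_video_complete - USB request completion handler
 *
 * Handle transfer errors and disconnection, complete the video buffer
 * associated with the last request that carried its data, return the request
 * to the free list and reschedule the pump work while streaming.
 */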
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_device *uvc = video->uvc;
	unsigned long flags;

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (ureq->last_buf) {
		uvcg_complete_buffer(&video->queue, ureq->last_buf);
		ureq->last_buf = NULL;
	}

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);

	if (uvc->state == UVC_STATE_STREAMING)
		queue_work(video->async_wq, &video->pump);
}

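/*
 * uvc_video_free_requests - Free the USB requests, their buffers and
 * scatter-gather tables, and reset the free request list.
 */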
static int
uvc_video_free_requests(struct uvc_video *video)
{
	unsigned int i;

	if (video->ureq) {
		for (i = 0; i < video->uvc_num_requests; ++i) {
			sg_free_table(&video->ureq[i].sgt);

			if (video->ureq[i].req) {
				usb_ep_free_request(video->ep, video->ureq[i].req);
				video->ureq[i].req = NULL;
			}

			if (video->ureq[i].req_buffer) {
				kfree(video->ureq[i].req_buffer);
				video->ureq[i].req_buffer = NULL;
			}
		}

		kfree(video->ureq);
		video->ureq = NULL;
	}

	INIT_LIST_HEAD(&video->req_free);
	video->req_size = 0;
	return 0;
}

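/*
 * uvc_video_alloc_requests - Allocate the USB requests used for streaming
 *
 * The request size is derived from the endpoint's maxpacket, maxburst and
 * mult values. On error, all resources allocated so far are released.
 */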
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	unsigned int req_size;
	unsigned int i;
	int ret = -ENOMEM;

	BUG_ON(video->req_size);

	req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
		 * (video->ep->mult);

	video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
	if (video->ureq == NULL)
		return -ENOMEM;

	for (i = 0; i < video->uvc_num_requests; ++i) {
		video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
		if (video->ureq[i].req_buffer == NULL)
			goto error;

		video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (video->ureq[i].req == NULL)
			goto error;

		video->ureq[i].req->buf = video->ureq[i].req_buffer;
		video->ureq[i].req->length = 0;
		video->ureq[i].req->complete = uvc_video_complete;
		video->ureq[i].req->context = &video->ureq[i];
		video->ureq[i].video = video;
		video->ureq[i].last_buf = NULL;

		list_add_tail(&video->ureq[i].req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&video->ureq[i].sgt,
			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	video->req_size = req_size;

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

370 
371 /* --------------------------------------------------------------------------
372  * Video streaming
373  */
374 
375 /*
376  * uvcg_video_pump - Pump video data into the USB requests
377  *
378  * This function fills the available USB requests (listed in req_free) with
379  * video data from the queued buffers.
380  */
381 static void uvcg_video_pump(struct work_struct *work)
382 {
383 	struct uvc_video *video = container_of(work, struct uvc_video, pump);
384 	struct uvc_video_queue *queue = &video->queue;
385 	/* video->max_payload_size is only set when using bulk transfer */
386 	bool is_bulk = video->max_payload_size;
387 	struct usb_request *req = NULL;
388 	struct uvc_buffer *buf;
389 	unsigned long flags;
390 	bool buf_done;
391 	int ret;
392 
393 	while (video->ep->enabled) {
394 		/*
395 		 * Retrieve the first available USB request, protected by the
396 		 * request lock.
397 		 */
398 		spin_lock_irqsave(&video->req_lock, flags);
399 		if (list_empty(&video->req_free)) {
400 			spin_unlock_irqrestore(&video->req_lock, flags);
401 			return;
402 		}
403 		req = list_first_entry(&video->req_free, struct usb_request,
404 					list);
405 		list_del(&req->list);
406 		spin_unlock_irqrestore(&video->req_lock, flags);
407 
408 		/*
409 		 * Retrieve the first available video buffer and fill the
410 		 * request, protected by the video queue irqlock.
411 		 */
412 		spin_lock_irqsave(&queue->irqlock, flags);
413 		buf = uvcg_queue_head(queue);
414 
415 		if (buf != NULL) {
416 			video->encode(req, video, buf);
417 			buf_done = buf->state == UVC_BUF_STATE_DONE;
418 		} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
419 			/*
420 			 * No video buffer available; the queue is still connected and
421 			 * we're transferring over ISOC. Queue a 0 length request to
422 			 * prevent missed ISOC transfers.
423 			 */
424 			req->length = 0;
425 			buf_done = false;
426 		} else {
427 			/*
428 			 * Either the queue has been disconnected or no video buffer
429 			 * available for bulk transfer. Either way, stop processing
430 			 * further.
431 			 */
432 			spin_unlock_irqrestore(&queue->irqlock, flags);
433 			break;
434 		}
435 
436 		/*
437 		 * With USB3 handling more requests at a higher speed, we can't
438 		 * afford to generate an interrupt for every request. Decide to
439 		 * interrupt:
440 		 *
441 		 * - When no more requests are available in the free queue, as
442 		 *   this may be our last chance to refill the endpoint's
443 		 *   request queue.
444 		 *
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
		if (list_empty(&video->req_free) || buf_done ||
		    !(video->req_int_count %
		       DIV_ROUND_UP(video->uvc_num_requests, 4))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}

		/* Queue the USB request */
		ret = uvcg_video_ep_queue(video, req);
		spin_unlock_irqrestore(&queue->irqlock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}

		/* Endpoint now owns the request */
		req = NULL;
		video->req_int_count++;
	}

	if (!req)
		return;

	spin_lock_irqsave(&video->req_lock, flags);
	list_add_tail(&req->list, &video->req_free);
	spin_unlock_irqrestore(&video->req_lock, flags);
	return;
}

/*
 * Enable or disable the video stream.
 */
int uvcg_video_enable(struct uvc_video *video, int enable)
{
	unsigned int i;
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	if (!enable) {
		cancel_work_sync(&video->pump);
		uvcg_queue_cancel(&video->queue, 0);

		for (i = 0; i < video->uvc_num_requests; ++i)
			if (video->ureq && video->ureq[i].req)
				usb_ep_dequeue(video->ep, video->ureq[i].req);

		uvc_video_free_requests(video);
		uvcg_queue_enable(&video->queue, 0);
		return 0;
	}

	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;

	video->req_int_count = 0;

	queue_work(video->async_wq, &video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	INIT_LIST_HEAD(&video->req_free);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for asynchronous video pump handler. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	video->uvc = uvc;
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}
559