xref: /linux/drivers/usb/usbip/vudc_transfer.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 *
 * Based on dummy_hcd.c, which is:
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 Alan Stern
 */

#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/usb/ch9.h>

#include "vudc.h"

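/*
 * Shorthands for the bmRequestType of standard control requests
 * (USB_TYPE_STANDARD combined with the recipient), plus the IN-direction
 * variants used for status-style requests.
 */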
#define DEV_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST	(DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST	(INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST	(EP_REQUEST | USB_DIR_IN)

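/*
 * Rough per-frame byte budget for one 1 ms frame at the given speed:
 * low/full speed multiply packets per frame by max packet size, high
 * speed additionally counts 8 microframes, and SuperSpeed is capped a
 * little below its nominal 500000 bytes/ms.  Returns -1 for an unknown
 * or not-yet-negotiated speed.
 */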
static int get_frame_limit(enum usb_device_speed speed)
{
	switch (speed) {
	case USB_SPEED_LOW:
		return 8 /*bytes*/ * 12 /*packets*/;
	case USB_SPEED_FULL:
		return 64 /*bytes*/ * 19 /*packets*/;
	case USB_SPEED_HIGH:
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		return 490000;
	default:
		/* error */
		return -1;
	}
}

/*
 * handle_control_request() - handles all control transfers
 * @udc: pointer to vudc
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control
 *	 request
 * @status: pointer to request handling status
 *
 * Return: 0 - if the request was handled
 *	   1 - if the request wasn't handled
 *	   error code on error
 *
 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct vep	*ep2;
	int		ret_val = 1;
	unsigned int	w_index;
	unsigned int	w_value;

	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
			break;
		udc->address = w_value;
		ret_val = 0;
		*status = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				udc->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;

			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				udc->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint halt */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
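	/*
	 * GET_STATUS returns a two-byte status word; only the low byte
	 * carries data here: the halt flag for an endpoint, the cached
	 * devstatus bits for the device, and zero for an interface.
	 */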
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
				|| setup->bRequestType == INTF_INREQUEST
				|| setup->bRequestType == EP_INREQUEST) {
			char *buf;
			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					ep2 = vudc_find_endpoint(udc, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   DEV_INREQUEST) {
					buf[0] = (u8)udc->devstatus;
				} else
					buf[0] = 0;
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			urb->actual_length = min_t(u32, 2,
				urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}

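/*
 * Move data for one urb between the host-side transfer buffer and the
 * gadget usb_requests queued on @ep, completing whichever side finishes
 * (short packet, full buffer, or explicit zlp).  Returns the number of
 * bytes copied so the caller can charge it against the frame budget.
 */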
/* Adapted from dummy_hcd.c ; caller must hold lock */
static int transfer(struct vudc *udc,
		struct urb *urb, struct vep *ep, int limit)
{
	struct vrequest	*req;
	int sent = 0;
top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned int	host_len, dev_len, len;
		void		*ubuf_pos, *rbuf_pos;
		int		is_short, to_host;
		int		rescan = 0;

		/*
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* send multiple of maxpacket first, then remainder */
			if (len >= ep->ep.maxpacket) {
				is_short = 0;
				if (len % ep->ep.maxpacket > 0)
					rescan = 1;
				len -= len % ep->ep.maxpacket;
			} else {
				is_short = 1;
			}

			ubuf_pos = urb->transfer_buffer + urb->actual_length;
			rbuf_pos = req->req.buf + req->req.actual;

			if (urb->pipe & USB_DIR_IN)
				memcpy(ubuf_pos, rbuf_pos, len);
			else
				memcpy(rbuf_pos, ubuf_pos, len);

			urb->actual_length += len;
			req->req.actual += len;
			sent += len;
		}

		/*
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				urb->status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					urb->status = -EOVERFLOW;
				else
					urb->status = 0;
			} else {
				urb->status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		} else {
			if (req->req.length == req->req.actual) {
				if (req->req.zero && to_host)
					rescan = 1;
				else
					req->req.status = 0;
			}
			if (urb->transfer_buffer_length == urb->actual_length) {
				if (urb->transfer_flags & URB_ZERO_PACKET &&
				    !to_host)
					rescan = 1;
				else
					urb->status = 0;
			}
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {

			list_del_init(&req->req_entry);
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return sent;
}

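/*
 * Transfer timer callback, run roughly once per emulated 1 ms frame.
 * It walks udc->urb_queue, pairs each urb with the request queue of its
 * endpoint, copies data within the current frame's byte budget and hands
 * finished urbs over to the tx thread.
 */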
static void v_timer(struct timer_list *t)
{
	struct vudc *udc = from_timer(udc, t, tr_timer.timer);
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	unsigned long flags;
	struct usb_ep *_ep;
	struct vep *ep;
	int ret = 0;
	int total, limit;

	spin_lock_irqsave(&udc->lock, flags);

	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) {	/* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
		return;
	}
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
	} else {
		total = timer->frame_limit;
	}

	/* We have to clear ep0 flags separately as it's not on the list */
	udc->ep[0].already_seen = 0;
	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep = to_vep(_ep);
		ep->already_seen = 0;
	}

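	/*
	 * Main per-frame loop: each endpoint is visited at most once per
	 * frame (already_seen), bulk traffic stops once the byte budget in
	 * 'total' is used up, and any urb that completes or fails jumps to
	 * return_urb below.
	 */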
restart:
	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;

		ep = urb_p->ep;
		if (urb->unlinked)
			goto return_urb;
		if (timer->state != VUDC_TR_RUNNING)
			continue;

		if (!ep) {
			urb->status = -EPROTO;
			goto return_urb;
		}

		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
			continue;

		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
			ep->setup_stage = 1;
			urb_p->new = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
			goto return_urb;
		}

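		/*
		 * ep0 setup stage: standard requests are answered locally by
		 * handle_control_request(); anything it does not handle
		 * (return value 1) is passed to the gadget driver's setup()
		 * callback with the device lock dropped.
		 */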
		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ep->setup_stage = 0;
			ep->halted = 0;

			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				(&urb->status));
			if (ret > 0) {
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
					urb->setup_packet);
				spin_lock(&udc->lock);
			}
			if (ret >= 0) {
				/* no delays (max 64kb data stage) */
				limit = 64 * 1024;
				goto treat_control_like_bulk;
			} else {
				urb->status = -EPIPE;
				urb->actual_length = 0;
				goto return_urb;
			}
		}

		limit = total;
		switch (ep->type) {
		case USB_ENDPOINT_XFER_ISOC:
			/* TODO: support */
			urb->status = -EXDEV;
			break;

		case USB_ENDPOINT_XFER_INT:
			/*
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			 */
			limit += urb->transfer_buffer_length;
			fallthrough;
		default:
treat_control_like_bulk:
			total -= transfer(udc, urb, ep, limit);
		}
		if (urb->status == -EINPROGRESS)
			continue;

return_urb:
		if (ep)
			ep->already_seen = ep->setup_stage = 0;

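		/*
		 * Hand the finished urb to the tx loop: regular completions
		 * are queued as ret_submit replies, unlinked urbs as
		 * ret_unlink, then the tx thread is woken up.
		 */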
		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
		} else {
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
					     urb->unlinked);
			free_urbp_and_urb(urb_p);
		}
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);

		goto restart;
	}

	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
	else
		mod_timer(&timer->timer,
			  timer->frame_start + msecs_to_jiffies(1));

	spin_unlock_irqrestore(&udc->lock, flags);
}

/* All timer functions are run with udc->lock held */

void v_init_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	timer_setup(&t->timer, v_timer, 0);
	t->state = VUDC_TR_STOPPED;
}

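/*
 * Roughly: VUDC_TR_STOPPED means the emulated bus is not running,
 * VUDC_TR_IDLE means it is running with nothing queued, VUDC_TR_RUNNING
 * means the callback is scheduled.  v_start_timer() brings the timer out
 * of STOPPED and (re)arms it via v_kick_timer().
 */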
void v_start_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer start");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
	}
}

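/*
 * Arm the timer to fire at @time.  A RUNNING timer is already scheduled,
 * an IDLE one is promoted to RUNNING; a STOPPED timer is still armed so
 * that unlinked urbs can be flushed from the queue.
 */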
void v_kick_timer(struct vudc *udc, unsigned long time)
{
	struct transfer_timer *t = &udc->tr_timer;

	dev_dbg(&udc->pdev->dev, "timer kick");
	switch (t->state) {
	case VUDC_TR_RUNNING:
		return;
	case VUDC_TR_IDLE:
		t->state = VUDC_TR_RUNNING;
		fallthrough;
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
	}
}

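/*
 * Only the state is flipped here; the callback sees VUDC_TR_STOPPED and
 * stops moving data, leaving queued urbs where they are.
 */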
void v_stop_timer(struct vudc *udc)
{
	struct transfer_timer *t = &udc->tr_timer;

	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;
}
497