xref: /linux/drivers/usb/host/xen-hcd.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * xen-hcd.c
4  *
5  * Xen USB Virtual Host Controller driver
6  *
7  * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
8  * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/usb.h>
13 #include <linux/list.h>
14 #include <linux/usb/hcd.h>
15 #include <linux/io.h>
16 
17 #include <xen/xen.h>
18 #include <xen/xenbus.h>
19 #include <xen/grant_table.h>
20 #include <xen/events.h>
21 #include <xen/page.h>
22 
23 #include <xen/interface/io/usbif.h>
24 
25 /* Private per-URB data */
26 struct urb_priv {
27 	struct list_head list;
28 	struct urb *urb;
29 	int req_id;		/* RING_REQUEST id for submitting */
30 	int unlink_req_id;	/* RING_REQUEST id for unlinking */
31 	int status;
32 	bool unlinked;		/* dequeued marker */
33 };
34 
35 /* virtual roothub port status */
36 struct rhport_status {
37 	__u32 status;
38 	bool resuming;		/* resume signaling in progress */
39 	bool c_connection;	/* connection changed */
40 	unsigned long timeout;
41 };
42 
43 /* status of attached device */
44 struct vdevice_status {
45 	int devnum;
46 	enum usb_device_state status;
47 	enum usb_device_speed speed;
48 };
49 
50 /* RING request shadow */
51 struct usb_shadow {
52 	struct xenusb_urb_request req;
53 	struct urb *urb;
54 };
55 
56 struct xenhcd_info {
57 	/* Virtual Host Controller has 4 urb queues */
58 	struct list_head pending_submit_list;
59 	struct list_head pending_unlink_list;
60 	struct list_head in_progress_list;
61 	struct list_head giveback_waiting_list;
62 
63 	spinlock_t lock;
64 
65 	/* timer that kicks pending and giveback-waiting URBs */
66 	struct timer_list watchdog;
67 	unsigned long actions;
68 
69 	/* virtual root hub */
70 	int rh_numports;
71 	struct rhport_status ports[XENUSB_MAX_PORTNR];
72 	struct vdevice_status devices[XENUSB_MAX_PORTNR];
73 
74 	/* Xen related stuff */
75 	struct xenbus_device *xbdev;
76 	int urb_ring_ref;
77 	int conn_ring_ref;
78 	struct xenusb_urb_front_ring urb_ring;
79 	struct xenusb_conn_front_ring conn_ring;
80 
81 	unsigned int evtchn;
82 	unsigned int irq;
83 	struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
84 	unsigned int shadow_free;
85 
86 	bool error;
87 };
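
/*
 * URB life cycle, as implemented below: a newly enqueued URB is sent to the
 * backend right away when a ring slot is free, otherwise it is parked on
 * pending_submit_list.  Requests handed to the backend live on
 * in_progress_list until their response arrives.  A dequeued URB that was
 * never sent is moved to giveback_waiting_list; one that was already sent
 * gets an unlink request on the ring (or is parked on pending_unlink_list
 * until there is room).  The shadow[] array keeps a private copy of every
 * request so responses can be matched to URBs and grants released; unused
 * entries are chained into a free list through req.id, headed by shadow_free.
 */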
88 
89 #define GRANT_INVALID_REF 0
90 
91 #define XENHCD_RING_JIFFIES (HZ/200)
92 #define XENHCD_SCAN_JIFFIES 1
93 
94 enum xenhcd_timer_action {
95 	TIMER_RING_WATCHDOG,
96 	TIMER_SCAN_PENDING_URBS,
97 };
98 
99 static struct kmem_cache *xenhcd_urbp_cachep;
100 
101 static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
102 {
103 	return (struct xenhcd_info *)hcd->hcd_priv;
104 }
105 
106 static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
107 {
108 	return container_of((void *)info, struct usb_hcd, hcd_priv);
109 }
110 
111 static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
112 {
113 	info->error = true;
114 
115 	pr_alert("xen-hcd: protocol error: %s!\n", msg);
116 }
117 
118 static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
119 					    enum xenhcd_timer_action action)
120 {
121 	clear_bit(action, &info->actions);
122 }
123 
124 static void xenhcd_timer_action(struct xenhcd_info *info,
125 				enum xenhcd_timer_action action)
126 {
127 	if (timer_pending(&info->watchdog) &&
128 	    test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
129 		return;
130 
131 	if (!test_and_set_bit(action, &info->actions)) {
132 		unsigned long t;
133 
134 		switch (action) {
135 		case TIMER_RING_WATCHDOG:
136 			t = XENHCD_RING_JIFFIES;
137 			break;
138 		default:
139 			t = XENHCD_SCAN_JIFFIES;
140 			break;
141 		}
142 		mod_timer(&info->watchdog, t + jiffies);
143 	}
144 }
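
/*
 * Timer actions are coalesced: if the watchdog timer is already pending and
 * a full scan of the pending lists is scheduled, re-arming is skipped, since
 * that scan covers both actions.  TIMER_RING_WATCHDOG uses the short
 * XENHCD_RING_JIFFIES delay to retry once ring space should be available
 * again, while TIMER_SCAN_PENDING_URBS fires on the next jiffy to flush
 * newly parked URBs.
 */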
145 
146 /*
147  * set virtual port connection status
148  */
149 static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
150 {
151 	int port;
152 
153 	port = portnum - 1;
154 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
155 		switch (info->devices[port].speed) {
156 		case XENUSB_SPEED_NONE:
157 			info->ports[port].status &=
158 				~(USB_PORT_STAT_CONNECTION |
159 				  USB_PORT_STAT_ENABLE |
160 				  USB_PORT_STAT_LOW_SPEED |
161 				  USB_PORT_STAT_HIGH_SPEED |
162 				  USB_PORT_STAT_SUSPEND);
163 			break;
164 		case XENUSB_SPEED_LOW:
165 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
166 			info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
167 			break;
168 		case XENUSB_SPEED_FULL:
169 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
170 			break;
171 		case XENUSB_SPEED_HIGH:
172 			info->ports[port].status |= USB_PORT_STAT_CONNECTION;
173 			info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
174 			break;
175 		default: /* error */
176 			return;
177 		}
178 		info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
179 	}
180 }
181 
182 /*
183  * set virtual device connection status
184  */
185 static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
186 				 __u8 speed)
187 {
188 	int port;
189 
190 	if (portnum < 1 || portnum > info->rh_numports)
191 		return -EINVAL; /* invalid port number */
192 
193 	port = portnum - 1;
194 	if (info->devices[port].speed != speed) {
195 		switch (speed) {
196 		case XENUSB_SPEED_NONE: /* disconnect */
197 			info->devices[port].status = USB_STATE_NOTATTACHED;
198 			break;
199 		case XENUSB_SPEED_LOW:
200 		case XENUSB_SPEED_FULL:
201 		case XENUSB_SPEED_HIGH:
202 			info->devices[port].status = USB_STATE_ATTACHED;
203 			break;
204 		default: /* error */
205 			return -EINVAL;
206 		}
207 		info->devices[port].speed = speed;
208 		info->ports[port].c_connection = true;
209 
210 		xenhcd_set_connect_state(info, portnum);
211 	}
212 
213 	return 0;
214 }
215 
216 /*
217  * SetPortFeature(PORT_SUSPENDED)
218  */
219 static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
220 {
221 	int port;
222 
223 	port = portnum - 1;
224 	info->ports[port].status |= USB_PORT_STAT_SUSPEND;
225 	info->devices[port].status = USB_STATE_SUSPENDED;
226 }
227 
228 /*
229  * ClearPortFeature(PORT_SUSPENDED)
230  */
231 static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
232 {
233 	int port;
234 
235 	port = portnum - 1;
236 	if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
237 		info->ports[port].resuming = true;
238 		info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
239 	}
240 }
241 
242 /*
243  * SetPortFeature(PORT_POWER)
244  */
245 static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
246 {
247 	int port;
248 
249 	port = portnum - 1;
250 	if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
251 		info->ports[port].status |= USB_PORT_STAT_POWER;
252 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
253 			info->devices[port].status = USB_STATE_POWERED;
254 		if (info->ports[port].c_connection)
255 			xenhcd_set_connect_state(info, portnum);
256 	}
257 }
258 
259 /*
260  * ClearPortFeature(PORT_POWER)
261  * SetConfiguration(non-zero)
262  * Power_Source_Off
263  * Over-current
264  */
265 static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
266 {
267 	int port;
268 
269 	port = portnum - 1;
270 	if (info->ports[port].status & USB_PORT_STAT_POWER) {
271 		info->ports[port].status = 0;
272 		if (info->devices[port].status != USB_STATE_NOTATTACHED)
273 			info->devices[port].status = USB_STATE_ATTACHED;
274 	}
275 }
276 
277 /*
278  * ClearPortFeature(PORT_ENABLE)
279  */
280 static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
281 {
282 	int port;
283 
284 	port = portnum - 1;
285 	info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
286 	info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
287 	info->ports[port].resuming = false;
288 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
289 		info->devices[port].status = USB_STATE_POWERED;
290 }
291 
292 /*
293  * SetPortFeature(PORT_RESET)
294  */
295 static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
296 {
297 	int port;
298 
299 	port = portnum - 1;
300 	info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
301 				      USB_PORT_STAT_LOW_SPEED |
302 				      USB_PORT_STAT_HIGH_SPEED);
303 	info->ports[port].status |= USB_PORT_STAT_RESET;
304 
305 	if (info->devices[port].status != USB_STATE_NOTATTACHED)
306 		info->devices[port].status = USB_STATE_ATTACHED;
307 
308 	/* 10msec reset signaling */
309 	info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
310 }
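
/*
 * Note that neither reset nor resume completion is driven by the watchdog
 * timer: the 10 ms / 20 ms timeouts set here and in xenhcd_rhport_resume()
 * are only checked lazily by the GetPortStatus handler in
 * xenhcd_hub_control(), which is where the corresponding change bits are
 * raised.
 */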
311 
312 #ifdef CONFIG_PM
313 static int xenhcd_bus_suspend(struct usb_hcd *hcd)
314 {
315 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
316 	int ret = 0;
317 	int i, ports;
318 
319 	ports = info->rh_numports;
320 
321 	spin_lock_irq(&info->lock);
322 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
323 		ret = -ESHUTDOWN;
324 	} else {
325 		/* suspend any active ports */
326 		for (i = 1; i <= ports; i++)
327 			xenhcd_rhport_suspend(info, i);
328 	}
329 	spin_unlock_irq(&info->lock);
330 
331 	del_timer_sync(&info->watchdog);
332 
333 	return ret;
334 }
335 
336 static int xenhcd_bus_resume(struct usb_hcd *hcd)
337 {
338 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
339 	int ret = 0;
340 	int i, ports;
341 
342 	ports = info->rh_numports;
343 
344 	spin_lock_irq(&info->lock);
345 	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
346 		ret = -ESHUTDOWN;
347 	} else {
348 		/* resume any suspended ports */
349 		for (i = 1; i <= ports; i++)
350 			xenhcd_rhport_resume(info, i);
351 	}
352 	spin_unlock_irq(&info->lock);
353 
354 	return ret;
355 }
356 #endif
357 
358 static void xenhcd_hub_descriptor(struct xenhcd_info *info,
359 				  struct usb_hub_descriptor *desc)
360 {
361 	__u16 temp;
362 	int ports = info->rh_numports;
363 
364 	desc->bDescriptorType = 0x29;
365 	desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
366 	desc->bHubContrCurrent = 0;
367 	desc->bNbrPorts = ports;
368 
369 	/* size of DeviceRemovable and PortPwrCtrlMask fields */
370 	temp = 1 + (ports / 8);
371 	desc->bDescLength = 7 + 2 * temp;
372 
373 	/* bitmaps for DeviceRemovable and PortPwrCtrlMask */
374 	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
375 	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
376 
377 	/* per-port over current reporting and no power switching */
378 	temp = 0x000a;
379 	desc->wHubCharacteristics = cpu_to_le16(temp);
380 }
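
/*
 * Descriptor size example: with 8 root-hub ports, temp = 1 + 8 / 8 = 2, so
 * bDescLength = 7 + 2 * 2 = 11 bytes -- the 7-byte fixed part plus a 2-byte
 * DeviceRemovable bitmap (all zero, i.e. every device removable) followed by
 * 2 bytes of 0xff for PortPwrCtrlMask, as the USB 2.0 hub descriptor layout
 * expects for compatibility.
 */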
381 
382 /* port status change mask for hub_status_data */
383 #define PORT_C_MASK	((USB_PORT_STAT_C_CONNECTION |		\
384 			  USB_PORT_STAT_C_ENABLE |		\
385 			  USB_PORT_STAT_C_SUSPEND |		\
386 			  USB_PORT_STAT_C_OVERCURRENT |		\
387 			  USB_PORT_STAT_C_RESET) << 16)
388 
389 /*
390  * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
391  * If any port status changed, write the change bitmap to buf and
392  * return its length in bytes.
393  * If nothing changed, return 0.
394  */
395 static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
396 {
397 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
398 	int ports;
399 	int i;
400 	unsigned long flags;
401 	int ret;
402 	int changed = 0;
403 
404 	/* initialize the status to no-changes */
405 	ports = info->rh_numports;
406 	ret = 1 + (ports / 8);
407 	memset(buf, 0, ret);
408 
409 	spin_lock_irqsave(&info->lock, flags);
410 
411 	for (i = 0; i < ports; i++) {
412 		/* check status for each port */
413 		if (info->ports[i].status & PORT_C_MASK) {
414 			buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
415 			changed = 1;
416 		}
417 	}
418 
419 	if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
420 		usb_hcd_resume_root_hub(hcd);
421 
422 	spin_unlock_irqrestore(&info->lock, flags);
423 
424 	return changed ? ret : 0;
425 }
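
/*
 * The change bitmap follows the USB convention that bit 0 refers to the hub
 * itself and bit N to port N, hence the (i + 1) indexing above.  For
 * example, a change on port 3 of a hub with up to 7 ports results in a
 * single returned byte of 0x08.
 */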
426 
427 static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
428 			      __u16 wIndex, char *buf, __u16 wLength)
429 {
430 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
431 	int ports = info->rh_numports;
432 	unsigned long flags;
433 	int ret = 0;
434 	int i;
435 	int changed = 0;
436 
437 	spin_lock_irqsave(&info->lock, flags);
438 	switch (typeReq) {
439 	case ClearHubFeature:
440 		/* ignore this request */
441 		break;
442 	case ClearPortFeature:
443 		if (!wIndex || wIndex > ports)
444 			goto error;
445 
446 		switch (wValue) {
447 		case USB_PORT_FEAT_SUSPEND:
448 			xenhcd_rhport_resume(info, wIndex);
449 			break;
450 		case USB_PORT_FEAT_POWER:
451 			xenhcd_rhport_power_off(info, wIndex);
452 			break;
453 		case USB_PORT_FEAT_ENABLE:
454 			xenhcd_rhport_disable(info, wIndex);
455 			break;
456 		case USB_PORT_FEAT_C_CONNECTION:
457 			info->ports[wIndex - 1].c_connection = false;
458 			fallthrough;
459 		default:
460 			info->ports[wIndex - 1].status &= ~(1 << wValue);
461 			break;
462 		}
463 		break;
464 	case GetHubDescriptor:
465 		xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
466 		break;
467 	case GetHubStatus:
468 		/* always report local power supply good and no over-current. */
469 		*(__le32 *)buf = cpu_to_le32(0);
470 		break;
471 	case GetPortStatus:
472 		if (!wIndex || wIndex > ports)
473 			goto error;
474 
475 		wIndex--;
476 
477 		/* resume completion */
478 		if (info->ports[wIndex].resuming &&
479 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
480 			info->ports[wIndex].status |=
481 				USB_PORT_STAT_C_SUSPEND << 16;
482 			info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
483 		}
484 
485 		/* reset completion */
486 		if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
487 		    time_after_eq(jiffies, info->ports[wIndex].timeout)) {
488 			info->ports[wIndex].status |=
489 				USB_PORT_STAT_C_RESET << 16;
490 			info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
491 
492 			if (info->devices[wIndex].status !=
493 			    USB_STATE_NOTATTACHED) {
494 				info->ports[wIndex].status |=
495 					USB_PORT_STAT_ENABLE;
496 				info->devices[wIndex].status =
497 					USB_STATE_DEFAULT;
498 			}
499 
500 			switch (info->devices[wIndex].speed) {
501 			case XENUSB_SPEED_LOW:
502 				info->ports[wIndex].status |=
503 					USB_PORT_STAT_LOW_SPEED;
504 				break;
505 			case XENUSB_SPEED_HIGH:
506 				info->ports[wIndex].status |=
507 					USB_PORT_STAT_HIGH_SPEED;
508 				break;
509 			default:
510 				break;
511 			}
512 		}
513 
514 		*(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
515 		break;
516 	case SetPortFeature:
517 		if (!wIndex || wIndex > ports)
518 			goto error;
519 
520 		switch (wValue) {
521 		case USB_PORT_FEAT_POWER:
522 			xenhcd_rhport_power_on(info, wIndex);
523 			break;
524 		case USB_PORT_FEAT_RESET:
525 			xenhcd_rhport_reset(info, wIndex);
526 			break;
527 		case USB_PORT_FEAT_SUSPEND:
528 			xenhcd_rhport_suspend(info, wIndex);
529 			break;
530 		default:
531 			if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
532 				info->ports[wIndex-1].status |= (1 << wValue);
533 		}
534 		break;
535 
536 	case SetHubFeature:
537 		/* not supported */
538 	default:
539 error:
540 		ret = -EPIPE;
541 	}
542 	spin_unlock_irqrestore(&info->lock, flags);
543 
544 	/* check status for each port */
545 	for (i = 0; i < ports; i++) {
546 		if (info->ports[i].status & PORT_C_MASK)
547 			changed = 1;
548 	}
549 	if (changed)
550 		usb_hcd_poll_rh_status(hcd);
551 
552 	return ret;
553 }
554 
555 static void xenhcd_free_urb_priv(struct urb_priv *urbp)
556 {
557 	urbp->urb->hcpriv = NULL;
558 	kmem_cache_free(xenhcd_urbp_cachep, urbp);
559 }
560 
561 static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
562 {
563 	unsigned int free;
564 
565 	free = info->shadow_free;
566 	info->shadow_free = info->shadow[free].req.id;
567 	info->shadow[free].req.id = 0x0fff; /* debug */
568 	return free;
569 }
570 
571 static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
572 					     unsigned int id)
573 {
574 	info->shadow[id].req.id	= info->shadow_free;
575 	info->shadow[id].urb = NULL;
576 	info->shadow_free = id;
577 }
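
/*
 * The shadow free list is threaded through the req.id fields of unused
 * shadow entries: shadow_free holds the index of the first free entry and
 * each free entry's req.id points to the next one.  The 0x0fff value written
 * in xenhcd_get_id_from_freelist() is only a debug marker for an entry that
 * is currently in use.
 */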
578 
579 static inline int xenhcd_count_pages(void *addr, int length)
580 {
581 	unsigned long vaddr = (unsigned long)addr;
582 
583 	return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
584 }
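
/*
 * Example: with 4 KiB pages, a 256-byte buffer starting 64 bytes before a
 * page boundary spans two pages, so xenhcd_count_pages() returns 2; a
 * page-aligned 4 KiB buffer returns 1.
 */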
585 
586 static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
587 			      grant_ref_t *gref_head,
588 			      struct xenusb_request_segment *seg,
589 			      int nr_pages, int flags)
590 {
591 	grant_ref_t ref;
592 	unsigned long buffer_mfn;
593 	unsigned int offset;
594 	unsigned int len = length;
595 	unsigned int bytes;
596 	int i;
597 
598 	for (i = 0; i < nr_pages; i++) {
599 		buffer_mfn = PFN_DOWN(arbitrary_virt_to_machine(addr).maddr);
600 		offset = offset_in_page(addr);
601 
602 		bytes = PAGE_SIZE - offset;
603 		if (bytes > len)
604 			bytes = len;
605 
606 		ref = gnttab_claim_grant_reference(gref_head);
607 		gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
608 						buffer_mfn, flags);
609 		seg[i].gref = ref;
610 		seg[i].offset = (__u16)offset;
611 		seg[i].length = (__u16)bytes;
612 
613 		addr += bytes;
614 		len -= bytes;
615 	}
616 }
617 
618 static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
619 {
620 	static __u32 pipe;
621 
622 	pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
623 	pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
624 	if (usb_pipein(urb_pipe))
625 		pipe |= XENUSB_PIPE_DIR;
626 	switch (usb_pipetype(urb_pipe)) {
627 	case PIPE_ISOCHRONOUS:
628 		pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
629 		break;
630 	case PIPE_INTERRUPT:
631 		pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
632 		break;
633 	case PIPE_CONTROL:
634 		pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
635 		break;
636 	case PIPE_BULK:
637 		pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
638 		break;
639 	}
640 	pipe = xenusb_setportnum_pipe(pipe, port);
641 
642 	return pipe;
643 }
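
/*
 * The xenusb pipe value is a packed encoding of the USB device address,
 * endpoint number, transfer direction, transfer type and root-hub port
 * number, built with the XENUSB_PIPE_* shifts and helpers from
 * xen/interface/io/usbif.h.  It plays the same role on the ring that
 * urb->pipe plays inside the kernel, just in the layout shared with the
 * backend.
 */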
644 
645 static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
646 				      struct xenusb_urb_request *req)
647 {
648 	grant_ref_t gref_head;
649 	int nr_buff_pages = 0;
650 	int nr_isodesc_pages = 0;
651 	int nr_grants = 0;
652 
653 	if (urb->transfer_buffer_length) {
654 		nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
655 						urb->transfer_buffer_length);
656 
657 		if (usb_pipeisoc(urb->pipe))
658 			nr_isodesc_pages = xenhcd_count_pages(
659 				&urb->iso_frame_desc[0],
660 				sizeof(struct usb_iso_packet_descriptor) *
661 				urb->number_of_packets);
662 
663 		nr_grants = nr_buff_pages + nr_isodesc_pages;
664 		if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
665 			pr_err("xenhcd: error: %d grants\n", nr_grants);
666 			return -E2BIG;
667 		}
668 
669 		if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
670 			pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
671 			return -ENOMEM;
672 		}
673 
674 		xenhcd_gnttab_map(info, urb->transfer_buffer,
675 				  urb->transfer_buffer_length, &gref_head,
676 				  &req->seg[0], nr_buff_pages,
677 				  usb_pipein(urb->pipe) ? 0 : GTF_readonly);
678 	}
679 
680 	req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
681 	req->transfer_flags = 0;
682 	if (urb->transfer_flags & URB_SHORT_NOT_OK)
683 		req->transfer_flags |= XENUSB_SHORT_NOT_OK;
684 	req->buffer_length = urb->transfer_buffer_length;
685 	req->nr_buffer_segs = nr_buff_pages;
686 
687 	switch (usb_pipetype(urb->pipe)) {
688 	case PIPE_ISOCHRONOUS:
689 		req->u.isoc.interval = urb->interval;
690 		req->u.isoc.start_frame = urb->start_frame;
691 		req->u.isoc.number_of_packets = urb->number_of_packets;
692 		req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
693 
694 		xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
695 				  sizeof(struct usb_iso_packet_descriptor) *
696 				  urb->number_of_packets,
697 				  &gref_head, &req->seg[nr_buff_pages],
698 				  nr_isodesc_pages, 0);
699 		break;
700 	case PIPE_INTERRUPT:
701 		req->u.intr.interval = urb->interval;
702 		break;
703 	case PIPE_CONTROL:
704 		if (urb->setup_packet)
705 			memcpy(req->u.ctrl, urb->setup_packet, 8);
706 		break;
707 	case PIPE_BULK:
708 		break;
709 	default:
710 		break;
711 	}
712 
713 	if (nr_grants)
714 		gnttab_free_grant_references(gref_head);
715 
716 	return 0;
717 }
718 
719 static void xenhcd_gnttab_done(struct xenhcd_info *info, unsigned int id)
720 {
721 	struct usb_shadow *shadow = info->shadow + id;
722 	int nr_segs = 0;
723 	int i;
724 
725 	nr_segs = shadow->req.nr_buffer_segs;
726 
727 	if (xenusb_pipeisoc(shadow->req.pipe))
728 		nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
729 
730 	for (i = 0; i < nr_segs; i++) {
731 		if (!gnttab_try_end_foreign_access(shadow->req.seg[i].gref))
732 			xenhcd_set_error(info, "backend didn't release grant");
733 	}
734 
735 	shadow->req.nr_buffer_segs = 0;
736 	shadow->req.u.isoc.nr_frame_desc_segs = 0;
737 }
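
/*
 * If the backend has not released a grant reference by the time the
 * response is processed, the page behind it may still be accessed or mapped
 * by the other end, so neither the reference nor the page may be reused.
 * gnttab_try_end_foreign_access() detects that case and the driver then
 * treats the whole device as broken via xenhcd_set_error().
 */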
738 
739 static int xenhcd_translate_status(int status)
740 {
741 	switch (status) {
742 	case XENUSB_STATUS_OK:
743 		return 0;
744 	case XENUSB_STATUS_NODEV:
745 		return -ENODEV;
746 	case XENUSB_STATUS_INVAL:
747 		return -EINVAL;
748 	case XENUSB_STATUS_STALL:
749 		return -EPIPE;
750 	case XENUSB_STATUS_IOERROR:
751 		return -EPROTO;
752 	case XENUSB_STATUS_BABBLE:
753 		return -EOVERFLOW;
754 	default:
755 		return -ESHUTDOWN;
756 	}
757 }
758 
759 static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
760 				int status)
761 {
762 	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
763 	int priv_status = urbp->status;
764 
765 	list_del_init(&urbp->list);
766 	xenhcd_free_urb_priv(urbp);
767 
768 	if (urb->status == -EINPROGRESS)
769 		urb->status = xenhcd_translate_status(status);
770 
771 	spin_unlock(&info->lock);
772 	usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
773 			     priv_status <= 0 ? priv_status : urb->status);
774 	spin_lock(&info->lock);
775 }
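
/*
 * info->lock is dropped around usb_hcd_giveback_urb() because the URB's
 * completion handler runs inside that call and may legitimately re-enter
 * this driver, e.g. by resubmitting the URB, which would take info->lock
 * again.
 */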
776 
777 static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
778 {
779 	struct xenusb_urb_request *req;
780 	struct urb *urb = urbp->urb;
781 	unsigned int id;
782 	int notify;
783 	int ret;
784 
785 	id = xenhcd_get_id_from_freelist(info);
786 	req = &info->shadow[id].req;
787 	req->id = id;
788 
789 	if (unlikely(urbp->unlinked)) {
790 		req->u.unlink.unlink_id = urbp->req_id;
791 		req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
792 						 urb->pipe, urb->dev->portnum));
793 		urbp->unlink_req_id = id;
794 	} else {
795 		ret = xenhcd_map_urb_for_request(info, urb, req);
796 		if (ret) {
797 			xenhcd_add_id_to_freelist(info, id);
798 			return ret;
799 		}
800 		urbp->req_id = id;
801 	}
802 
803 	req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
804 	*req = info->shadow[id].req;
805 
806 	info->urb_ring.req_prod_pvt++;
807 	info->shadow[id].urb = urb;
808 
809 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
810 	if (notify)
811 		notify_remote_via_irq(info->irq);
812 
813 	return 0;
814 }
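
/*
 * The request is built in the shadow entry first and then copied into the
 * shared ring slot, so the driver keeps an authoritative private copy that
 * the backend cannot modify.  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() only
 * asks for an event channel notification when the newly pushed requests
 * cross the backend's req_event marker, i.e. when the backend is actually
 * waiting for more work.
 */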
815 
816 static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
817 {
818 	struct urb_priv *urbp;
819 
820 	while (!list_empty(&info->pending_submit_list)) {
821 		if (RING_FULL(&info->urb_ring)) {
822 			xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
823 			return;
824 		}
825 
826 		urbp = list_entry(info->pending_submit_list.next,
827 				  struct urb_priv, list);
828 		if (!xenhcd_do_request(info, urbp))
829 			list_move_tail(&urbp->list, &info->in_progress_list);
830 		else
831 			xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
832 	}
833 	xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
834 }
835 
836 /*
837  * caller must hold info->lock
838  */
839 static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
840 {
841 	struct urb_priv *urbp, *tmp;
842 	int req_id;
843 
844 	list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
845 		req_id = urbp->req_id;
846 		if (!urbp->unlinked) {
847 			xenhcd_gnttab_done(info, req_id);
848 			if (info->error)
849 				return;
850 			if (urbp->urb->status == -EINPROGRESS)
851 				/* not dequeued */
852 				xenhcd_giveback_urb(info, urbp->urb,
853 						    -ESHUTDOWN);
854 			else	/* dequeued */
855 				xenhcd_giveback_urb(info, urbp->urb,
856 						    urbp->urb->status);
857 		}
858 		info->shadow[req_id].urb = NULL;
859 	}
860 
861 	list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
862 		xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
863 }
864 
865 /*
866  * caller must hold info->lock
867  */
868 static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
869 {
870 	struct urb_priv *urbp, *tmp;
871 
872 	list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
873 		xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
874 }
875 
876 static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
877 {
878 	int ret;
879 
880 	if (RING_FULL(&info->urb_ring)) {
881 		list_add_tail(&urbp->list, &info->pending_submit_list);
882 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
883 		return 0;
884 	}
885 
886 	if (!list_empty(&info->pending_submit_list)) {
887 		list_add_tail(&urbp->list, &info->pending_submit_list);
888 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
889 		return 0;
890 	}
891 
892 	ret = xenhcd_do_request(info, urbp);
893 	if (ret == 0)
894 		list_add_tail(&urbp->list, &info->in_progress_list);
895 
896 	return ret;
897 }
898 
899 static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
900 {
901 	int ret;
902 
903 	/* already unlinked? */
904 	if (urbp->unlinked)
905 		return -EBUSY;
906 
907 	urbp->unlinked = true;
908 
909 	/* the urb is still in pending_submit queue */
910 	if (urbp->req_id == ~0) {
911 		list_move_tail(&urbp->list, &info->giveback_waiting_list);
912 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
913 		return 0;
914 	}
915 
916 	/* send unlink request to backend */
917 	if (RING_FULL(&info->urb_ring)) {
918 		list_move_tail(&urbp->list, &info->pending_unlink_list);
919 		xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
920 		return 0;
921 	}
922 
923 	if (!list_empty(&info->pending_unlink_list)) {
924 		list_move_tail(&urbp->list, &info->pending_unlink_list);
925 		xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
926 		return 0;
927 	}
928 
929 	ret = xenhcd_do_request(info, urbp);
930 	if (ret == 0)
931 		list_move_tail(&urbp->list, &info->in_progress_list);
932 
933 	return ret;
934 }
935 
936 static int xenhcd_urb_request_done(struct xenhcd_info *info)
937 {
938 	struct xenusb_urb_response res;
939 	struct urb *urb;
940 	RING_IDX i, rp;
941 	__u16 id;
942 	int more_to_do = 0;
943 	unsigned long flags;
944 
945 	spin_lock_irqsave(&info->lock, flags);
946 
947 	rp = info->urb_ring.sring->rsp_prod;
948 	if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
949 		xenhcd_set_error(info, "Illegal index on urb-ring");
950 		goto err;
951 	}
952 	rmb(); /* ensure we see queued responses up to "rp" */
953 
954 	for (i = info->urb_ring.rsp_cons; i != rp; i++) {
955 		RING_COPY_RESPONSE(&info->urb_ring, i, &res);
956 		id = res.id;
957 		if (id >= XENUSB_URB_RING_SIZE) {
958 			xenhcd_set_error(info, "Illegal data on urb-ring");
959 			goto err;
960 		}
961 
962 		if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
963 			xenhcd_gnttab_done(info, id);
964 			if (info->error)
965 				goto err;
966 			urb = info->shadow[id].urb;
967 			if (likely(urb)) {
968 				urb->actual_length = res.actual_length;
969 				urb->error_count = res.error_count;
970 				urb->start_frame = res.start_frame;
971 				xenhcd_giveback_urb(info, urb, res.status);
972 			}
973 		}
974 
975 		xenhcd_add_id_to_freelist(info, id);
976 	}
977 	info->urb_ring.rsp_cons = i;
978 
979 	if (i != info->urb_ring.req_prod_pvt)
980 		RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
981 	else
982 		info->urb_ring.sring->rsp_event = i + 1;
983 
984 	spin_unlock_irqrestore(&info->lock, flags);
985 
986 	return more_to_do;
987 
988  err:
989 	spin_unlock_irqrestore(&info->lock, flags);
990 	return 0;
991 }
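
/*
 * Responses are copied out of the shared ring with RING_COPY_RESPONSE() and
 * the id is range-checked before being used as a shadow index, so a
 * misbehaving backend can neither make the frontend index outside shadow[]
 * nor change a response after it has been validated.
 */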
992 
993 static int xenhcd_conn_notify(struct xenhcd_info *info)
994 {
995 	struct xenusb_conn_response res;
996 	struct xenusb_conn_request *req;
997 	RING_IDX rc, rp;
998 	__u16 id;
999 	__u8 portnum, speed;
1000 	int more_to_do = 0;
1001 	int notify;
1002 	int port_changed = 0;
1003 	unsigned long flags;
1004 
1005 	spin_lock_irqsave(&info->lock, flags);
1006 
1007 	rc = info->conn_ring.rsp_cons;
1008 	rp = info->conn_ring.sring->rsp_prod;
1009 	if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
1010 		xenhcd_set_error(info, "Illegal index on conn-ring");
1011 		spin_unlock_irqrestore(&info->lock, flags);
1012 		return 0;
1013 	}
1014 	rmb(); /* ensure we see queued responses up to "rp" */
1015 
1016 	while (rc != rp) {
1017 		RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
1018 		id = res.id;
1019 		portnum = res.portnum;
1020 		speed = res.speed;
1021 		info->conn_ring.rsp_cons = ++rc;
1022 
1023 		if (xenhcd_rhport_connect(info, portnum, speed)) {
1024 			xenhcd_set_error(info, "Illegal data on conn-ring");
1025 			spin_unlock_irqrestore(&info->lock, flags);
1026 			return 0;
1027 		}
1028 
1029 		if (info->ports[portnum - 1].c_connection)
1030 			port_changed = 1;
1031 
1032 		barrier();
1033 
1034 		req = RING_GET_REQUEST(&info->conn_ring,
1035 				       info->conn_ring.req_prod_pvt);
1036 		req->id = id;
1037 		info->conn_ring.req_prod_pvt++;
1038 	}
1039 
1040 	if (rc != info->conn_ring.req_prod_pvt)
1041 		RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
1042 	else
1043 		info->conn_ring.sring->rsp_event = rc + 1;
1044 
1045 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1046 	if (notify)
1047 		notify_remote_via_irq(info->irq);
1048 
1049 	spin_unlock_irqrestore(&info->lock, flags);
1050 
1051 	if (port_changed)
1052 		usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
1053 
1054 	return more_to_do;
1055 }
1056 
1057 static irqreturn_t xenhcd_int(int irq, void *dev_id)
1058 {
1059 	struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
1060 
1061 	if (unlikely(info->error))
1062 		return IRQ_HANDLED;
1063 
1064 	while (xenhcd_urb_request_done(info) | xenhcd_conn_notify(info))
1065 		/* Yield point for this unbounded loop. */
1066 		cond_resched();
1067 
1068 	return IRQ_HANDLED;
1069 }
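
/*
 * The interrupt is requested with IRQF_ONESHOT and no primary handler, so
 * xenhcd_int() runs in thread context and may loop, calling cond_resched(),
 * until neither the urb ring nor the conn ring reports further responses.
 */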
1070 
1071 static void xenhcd_destroy_rings(struct xenhcd_info *info)
1072 {
1073 	if (info->irq)
1074 		unbind_from_irqhandler(info->irq, info);
1075 	info->irq = 0;
1076 
1077 	if (info->urb_ring_ref != GRANT_INVALID_REF) {
1078 		gnttab_end_foreign_access(info->urb_ring_ref, 0,
1079 					  (unsigned long)info->urb_ring.sring);
1080 		info->urb_ring_ref = GRANT_INVALID_REF;
1081 	}
1082 	info->urb_ring.sring = NULL;
1083 
1084 	if (info->conn_ring_ref != GRANT_INVALID_REF) {
1085 		gnttab_end_foreign_access(info->conn_ring_ref, 0,
1086 					  (unsigned long)info->conn_ring.sring);
1087 		info->conn_ring_ref = GRANT_INVALID_REF;
1088 	}
1089 	info->conn_ring.sring = NULL;
1090 }
1091 
1092 static int xenhcd_setup_rings(struct xenbus_device *dev,
1093 			      struct xenhcd_info *info)
1094 {
1095 	struct xenusb_urb_sring *urb_sring;
1096 	struct xenusb_conn_sring *conn_sring;
1097 	grant_ref_t gref;
1098 	int err;
1099 
1100 	info->urb_ring_ref = GRANT_INVALID_REF;
1101 	info->conn_ring_ref = GRANT_INVALID_REF;
1102 
1103 	urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
1104 							GFP_NOIO | __GFP_HIGH);
1105 	if (!urb_sring) {
1106 		xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
1107 		return -ENOMEM;
1108 	}
1109 	SHARED_RING_INIT(urb_sring);
1110 	FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
1111 
1112 	err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
1113 	if (err < 0) {
1114 		free_page((unsigned long)urb_sring);
1115 		info->urb_ring.sring = NULL;
1116 		goto fail;
1117 	}
1118 	info->urb_ring_ref = gref;
1119 
1120 	conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
1121 							GFP_NOIO | __GFP_HIGH);
1122 	if (!conn_sring) {
1123 		xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
1124 		err = -ENOMEM;
1125 		goto fail;
1126 	}
1127 	SHARED_RING_INIT(conn_sring);
1128 	FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
1129 
1130 	err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
1131 	if (err < 0) {
1132 		free_page((unsigned long)conn_sring);
1133 		info->conn_ring.sring = NULL;
1134 		goto fail;
1135 	}
1136 	info->conn_ring_ref = gref;
1137 
1138 	err = xenbus_alloc_evtchn(dev, &info->evtchn);
1139 	if (err) {
1140 		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
1141 		goto fail;
1142 	}
1143 
1144 	err = bind_evtchn_to_irq(info->evtchn);
1145 	if (err <= 0) {
1146 		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
1147 		goto fail;
1148 	}
1149 
1150 	info->irq = err;
1151 
1152 	err = request_threaded_irq(info->irq, NULL, xenhcd_int,
1153 				   IRQF_ONESHOT, "xenhcd", info);
1154 	if (err) {
1155 		xenbus_dev_fatal(dev, err, "request_threaded_irq");
1156 		goto free_irq;
1157 	}
1158 
1159 	return 0;
1160 
1161 free_irq:
1162 	unbind_from_irqhandler(info->irq, info);
1163 fail:
1164 	xenhcd_destroy_rings(info);
1165 	return err;
1166 }
1167 
1168 static int xenhcd_talk_to_backend(struct xenbus_device *dev,
1169 				  struct xenhcd_info *info)
1170 {
1171 	const char *message;
1172 	struct xenbus_transaction xbt;
1173 	int err;
1174 
1175 	err = xenhcd_setup_rings(dev, info);
1176 	if (err)
1177 		return err;
1178 
1179 again:
1180 	err = xenbus_transaction_start(&xbt);
1181 	if (err) {
1182 		xenbus_dev_fatal(dev, err, "starting transaction");
1183 		goto destroy_ring;
1184 	}
1185 
1186 	err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
1187 			    info->urb_ring_ref);
1188 	if (err) {
1189 		message = "writing urb-ring-ref";
1190 		goto abort_transaction;
1191 	}
1192 
1193 	err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
1194 			    info->conn_ring_ref);
1195 	if (err) {
1196 		message = "writing conn-ring-ref";
1197 		goto abort_transaction;
1198 	}
1199 
1200 	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
1201 			    info->evtchn);
1202 	if (err) {
1203 		message = "writing event-channel";
1204 		goto abort_transaction;
1205 	}
1206 
1207 	err = xenbus_transaction_end(xbt, 0);
1208 	if (err) {
1209 		if (err == -EAGAIN)
1210 			goto again;
1211 		xenbus_dev_fatal(dev, err, "completing transaction");
1212 		goto destroy_ring;
1213 	}
1214 
1215 	return 0;
1216 
1217 abort_transaction:
1218 	xenbus_transaction_end(xbt, 1);
1219 	xenbus_dev_fatal(dev, err, "%s", message);
1220 
1221 destroy_ring:
1222 	xenhcd_destroy_rings(info);
1223 
1224 	return err;
1225 }
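
/*
 * The xenbus writes above are grouped in one transaction so the backend sees
 * urb-ring-ref, conn-ring-ref and event-channel appear atomically; -EAGAIN
 * from xenbus_transaction_end() means the transaction conflicted with a
 * concurrent xenstore update and the whole block is simply retried.
 */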
1226 
1227 static int xenhcd_connect(struct xenbus_device *dev)
1228 {
1229 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1230 	struct xenusb_conn_request *req;
1231 	int idx, err;
1232 	int notify;
1233 	char name[TASK_COMM_LEN];
1234 	struct usb_hcd *hcd;
1235 
1236 	hcd = xenhcd_info_to_hcd(info);
1237 	snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
1238 
1239 	err = xenhcd_talk_to_backend(dev, info);
1240 	if (err)
1241 		return err;
1242 
1243 	/* prepare ring for hotplug notification */
1244 	for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
1245 		req = RING_GET_REQUEST(&info->conn_ring, idx);
1246 		req->id = idx;
1247 	}
1248 	info->conn_ring.req_prod_pvt = idx;
1249 
1250 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
1251 	if (notify)
1252 		notify_remote_via_irq(info->irq);
1253 
1254 	return 0;
1255 }
1256 
1257 static void xenhcd_disconnect(struct xenbus_device *dev)
1258 {
1259 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1260 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1261 
1262 	usb_remove_hcd(hcd);
1263 	xenbus_frontend_closed(dev);
1264 }
1265 
1266 static void xenhcd_watchdog(struct timer_list *timer)
1267 {
1268 	struct xenhcd_info *info = from_timer(info, timer, watchdog);
1269 	unsigned long flags;
1270 
1271 	spin_lock_irqsave(&info->lock, flags);
1272 	if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
1273 		xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
1274 		xenhcd_giveback_unlinked_urbs(info);
1275 		xenhcd_kick_pending_urbs(info);
1276 	}
1277 	spin_unlock_irqrestore(&info->lock, flags);
1278 }
1279 
1280 /*
1281  * one-time HC init
1282  */
1283 static int xenhcd_setup(struct usb_hcd *hcd)
1284 {
1285 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1286 
1287 	spin_lock_init(&info->lock);
1288 	INIT_LIST_HEAD(&info->pending_submit_list);
1289 	INIT_LIST_HEAD(&info->pending_unlink_list);
1290 	INIT_LIST_HEAD(&info->in_progress_list);
1291 	INIT_LIST_HEAD(&info->giveback_waiting_list);
1292 	timer_setup(&info->watchdog, xenhcd_watchdog, 0);
1293 
1294 	hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
1295 
1296 	return 0;
1297 }
1298 
1299 /*
1300  * start HC running
1301  */
1302 static int xenhcd_run(struct usb_hcd *hcd)
1303 {
1304 	hcd->uses_new_polling = 1;
1305 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1306 	hcd->state = HC_STATE_RUNNING;
1307 	return 0;
1308 }
1309 
1310 /*
1311  * stop running HC
1312  */
1313 static void xenhcd_stop(struct usb_hcd *hcd)
1314 {
1315 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1316 
1317 	del_timer_sync(&info->watchdog);
1318 	spin_lock_irq(&info->lock);
1319 	/* cancel all urbs */
1320 	hcd->state = HC_STATE_HALT;
1321 	xenhcd_cancel_all_enqueued_urbs(info);
1322 	xenhcd_giveback_unlinked_urbs(info);
1323 	spin_unlock_irq(&info->lock);
1324 }
1325 
1326 /*
1327  * called as .urb_enqueue()
1328  * a non-error return is a promise to give back the URB later
1329  */
1330 static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1331 			      gfp_t mem_flags)
1332 {
1333 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1334 	struct urb_priv *urbp;
1335 	unsigned long flags;
1336 	int ret;
1337 
1338 	if (unlikely(info->error))
1339 		return -ESHUTDOWN;
1340 
1341 	urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
1342 	if (!urbp)
1343 		return -ENOMEM;
1344 
1345 	spin_lock_irqsave(&info->lock, flags);
1346 
1347 	urbp->urb = urb;
1348 	urb->hcpriv = urbp;
1349 	urbp->req_id = ~0;
1350 	urbp->unlink_req_id = ~0;
1351 	INIT_LIST_HEAD(&urbp->list);
1352 	urbp->status = 1;
1353 	urb->unlinked = false;
1354 
1355 	ret = xenhcd_submit_urb(info, urbp);
1356 
1357 	if (ret)
1358 		xenhcd_free_urb_priv(urbp);
1359 
1360 	spin_unlock_irqrestore(&info->lock, flags);
1361 
1362 	return ret;
1363 }
1364 
1365 /*
1366  * called as .urb_dequeue()
1367  */
1368 static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1369 {
1370 	struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
1371 	struct urb_priv *urbp;
1372 	unsigned long flags;
1373 	int ret = 0;
1374 
1375 	spin_lock_irqsave(&info->lock, flags);
1376 
1377 	urbp = urb->hcpriv;
1378 	if (urbp) {
1379 		urbp->status = status;
1380 		ret = xenhcd_unlink_urb(info, urbp);
1381 	}
1382 
1383 	spin_unlock_irqrestore(&info->lock, flags);
1384 
1385 	return ret;
1386 }
1387 
1388 /*
1389  * called from usb_get_current_frame_number(),
1390  * but almost no drivers use this function.
1391  */
1392 static int xenhcd_get_frame(struct usb_hcd *hcd)
1393 {
1394 	/* returning 0 signals an error, but in practice that is no problem :-) */
1395 	return 0;
1396 }
1397 
1398 static struct hc_driver xenhcd_usb20_hc_driver = {
1399 	.description = "xen-hcd",
1400 	.product_desc = "Xen USB2.0 Virtual Host Controller",
1401 	.hcd_priv_size = sizeof(struct xenhcd_info),
1402 	.flags = HCD_USB2,
1403 
1404 	/* basic HC lifecycle operations */
1405 	.reset = xenhcd_setup,
1406 	.start = xenhcd_run,
1407 	.stop = xenhcd_stop,
1408 
1409 	/* managing urb I/O */
1410 	.urb_enqueue = xenhcd_urb_enqueue,
1411 	.urb_dequeue = xenhcd_urb_dequeue,
1412 	.get_frame_number = xenhcd_get_frame,
1413 
1414 	/* root hub operations */
1415 	.hub_status_data = xenhcd_hub_status_data,
1416 	.hub_control = xenhcd_hub_control,
1417 #ifdef CONFIG_PM
1418 	.bus_suspend = xenhcd_bus_suspend,
1419 	.bus_resume = xenhcd_bus_resume,
1420 #endif
1421 };
1422 
1423 static struct hc_driver xenhcd_usb11_hc_driver = {
1424 	.description = "xen-hcd",
1425 	.product_desc = "Xen USB1.1 Virtual Host Controller",
1426 	.hcd_priv_size = sizeof(struct xenhcd_info),
1427 	.flags = HCD_USB11,
1428 
1429 	/* basic HC lifecycle operations */
1430 	.reset = xenhcd_setup,
1431 	.start = xenhcd_run,
1432 	.stop = xenhcd_stop,
1433 
1434 	/* managing urb I/O */
1435 	.urb_enqueue = xenhcd_urb_enqueue,
1436 	.urb_dequeue = xenhcd_urb_dequeue,
1437 	.get_frame_number = xenhcd_get_frame,
1438 
1439 	/* root hub operations */
1440 	.hub_status_data = xenhcd_hub_status_data,
1441 	.hub_control = xenhcd_hub_control,
1442 #ifdef CONFIG_PM
1443 	.bus_suspend = xenhcd_bus_suspend,
1444 	.bus_resume = xenhcd_bus_resume,
1445 #endif
1446 };
1447 
1448 static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
1449 {
1450 	int i;
1451 	int err = 0;
1452 	int num_ports;
1453 	int usb_ver;
1454 	struct usb_hcd *hcd = NULL;
1455 	struct xenhcd_info *info;
1456 
1457 	err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
1458 			   &num_ports);
1459 	if (err != 1) {
1460 		xenbus_dev_fatal(dev, err, "reading num-ports");
1461 		return ERR_PTR(-EINVAL);
1462 	}
1463 	if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
1464 		xenbus_dev_fatal(dev, err, "invalid num-ports");
1465 		return ERR_PTR(-EINVAL);
1466 	}
1467 
1468 	err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
1469 	if (err != 1) {
1470 		xenbus_dev_fatal(dev, err, "reading usb-ver");
1471 		return ERR_PTR(-EINVAL);
1472 	}
1473 	switch (usb_ver) {
1474 	case XENUSB_VER_USB11:
1475 		hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
1476 				     dev_name(&dev->dev));
1477 		break;
1478 	case XENUSB_VER_USB20:
1479 		hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
1480 				     dev_name(&dev->dev));
1481 		break;
1482 	default:
1483 		xenbus_dev_fatal(dev, err, "invalid usb-ver");
1484 		return ERR_PTR(-EINVAL);
1485 	}
1486 	if (!hcd) {
1487 		xenbus_dev_fatal(dev, err,
1488 				 "fail to allocate USB host controller");
1489 		return ERR_PTR(-ENOMEM);
1490 	}
1491 
1492 	info = xenhcd_hcd_to_info(hcd);
1493 	info->xbdev = dev;
1494 	info->rh_numports = num_ports;
1495 
1496 	for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
1497 		info->shadow[i].req.id = i + 1;
1498 		info->shadow[i].urb = NULL;
1499 	}
1500 	info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
1501 
1502 	return hcd;
1503 }
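
/*
 * The loop above chains shadow entry i to entry i + 1 through req.id and
 * terminates the list with the 0x0fff marker, establishing the free list
 * used by xenhcd_get_id_from_freelist()/xenhcd_add_id_to_freelist().  Since
 * the hcd private data is zero-initialized, shadow_free starts at 0 and all
 * XENUSB_URB_RING_SIZE entries are initially free.
 */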
1504 
1505 static void xenhcd_backend_changed(struct xenbus_device *dev,
1506 				   enum xenbus_state backend_state)
1507 {
1508 	switch (backend_state) {
1509 	case XenbusStateInitialising:
1510 	case XenbusStateReconfiguring:
1511 	case XenbusStateReconfigured:
1512 	case XenbusStateUnknown:
1513 		break;
1514 
1515 	case XenbusStateInitWait:
1516 	case XenbusStateInitialised:
1517 	case XenbusStateConnected:
1518 		if (dev->state != XenbusStateInitialising)
1519 			break;
1520 		if (!xenhcd_connect(dev))
1521 			xenbus_switch_state(dev, XenbusStateConnected);
1522 		break;
1523 
1524 	case XenbusStateClosed:
1525 		if (dev->state == XenbusStateClosed)
1526 			break;
1527 		fallthrough;	/* Missed the backend's Closing state. */
1528 	case XenbusStateClosing:
1529 		xenhcd_disconnect(dev);
1530 		break;
1531 
1532 	default:
1533 		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
1534 				 backend_state);
1535 		break;
1536 	}
1537 }
1538 
1539 static int xenhcd_remove(struct xenbus_device *dev)
1540 {
1541 	struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
1542 	struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
1543 
1544 	xenhcd_destroy_rings(info);
1545 	usb_put_hcd(hcd);
1546 
1547 	return 0;
1548 }
1549 
1550 static int xenhcd_probe(struct xenbus_device *dev,
1551 			const struct xenbus_device_id *id)
1552 {
1553 	int err;
1554 	struct usb_hcd *hcd;
1555 	struct xenhcd_info *info;
1556 
1557 	if (usb_disabled())
1558 		return -ENODEV;
1559 
1560 	hcd = xenhcd_create_hcd(dev);
1561 	if (IS_ERR(hcd)) {
1562 		err = PTR_ERR(hcd);
1563 		xenbus_dev_fatal(dev, err,
1564 				 "fail to create usb host controller");
1565 		return err;
1566 	}
1567 
1568 	info = xenhcd_hcd_to_info(hcd);
1569 	dev_set_drvdata(&dev->dev, info);
1570 
1571 	err = usb_add_hcd(hcd, 0, 0);
1572 	if (err) {
1573 		xenbus_dev_fatal(dev, err, "fail to add USB host controller");
1574 		usb_put_hcd(hcd);
1575 		dev_set_drvdata(&dev->dev, NULL);
1576 	}
1577 
1578 	return err;
1579 }
1580 
1581 static const struct xenbus_device_id xenhcd_ids[] = {
1582 	{ "vusb" },
1583 	{ "" },
1584 };
1585 
1586 static struct xenbus_driver xenhcd_driver = {
1587 	.ids			= xenhcd_ids,
1588 	.probe			= xenhcd_probe,
1589 	.otherend_changed	= xenhcd_backend_changed,
1590 	.remove			= xenhcd_remove,
1591 };
1592 
1593 static int __init xenhcd_init(void)
1594 {
1595 	if (!xen_domain())
1596 		return -ENODEV;
1597 
1598 	xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
1599 					sizeof(struct urb_priv), 0, 0, NULL);
1600 	if (!xenhcd_urbp_cachep) {
1601 		pr_err("xenhcd failed to create kmem cache\n");
1602 		return -ENOMEM;
1603 	}
1604 
1605 	return xenbus_register_frontend(&xenhcd_driver);
1606 }
1607 module_init(xenhcd_init);
1608 
1609 static void __exit xenhcd_exit(void)
1610 {
1611 	kmem_cache_destroy(xenhcd_urbp_cachep);
1612 	xenbus_unregister_driver(&xenhcd_driver);
1613 }
1614 module_exit(xenhcd_exit);
1615 
1616 MODULE_ALIAS("xen:vusb");
1617 MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
1618 MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
1619 MODULE_LICENSE("Dual BSD/GPL");
1620