xref: /linux/drivers/usb/host/xhci.c (revision f9c41a62bba3f3f7ef3541b2a025e3371bcbba97)
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #include <linux/pci.h>
24 #include <linux/irq.h>
25 #include <linux/log2.h>
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/slab.h>
29 #include <linux/dmi.h>
30 
31 #include "xhci.h"
32 
33 #define DRIVER_AUTHOR "Sarah Sharp"
34 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
35 
36 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
37 static int link_quirk;
38 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
39 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
40 
41 /* TODO: copied from ehci-hcd.c - can this be refactored? */
42 /*
43  * xhci_handshake - spin reading hc until handshake completes or fails
44  * @ptr: address of hc register to be read
45  * @mask: bits to look at in result of read
46  * @done: value of those bits when handshake succeeds
47  * @usec: timeout in microseconds
48  *
49  * Returns negative errno, or zero on success
50  *
51  * Success happens when the "mask" bits have the specified value (hardware
 52  * handshake done).  There are two failure modes: the "usec" timeout has passed
53  * hardware flakeout), or the register reads as all-ones (hardware removed).
54  */
55 int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
56 		      u32 mask, u32 done, int usec)
57 {
58 	u32	result;
59 
60 	do {
61 		result = xhci_readl(xhci, ptr);
62 		if (result == ~(u32)0)		/* card removed */
63 			return -ENODEV;
64 		result &= mask;
65 		if (result == done)
66 			return 0;
67 		udelay(1);
68 		usec--;
69 	} while (usec > 0);
70 	return -ETIMEDOUT;
71 }
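/*
 * Hedged usage sketch (not part of the driver): a typical caller passes an
 * operational register, a bit mask, and the expected masked value, as
 * xhci_halt() below does for STS_HALT. The helper name is hypothetical and
 * included only to illustrate the calling convention.
 */
static int __maybe_unused xhci_example_wait_for_halt(struct xhci_hcd *xhci)
{
	/* Spin until HCHalted is set, or give up after XHCI_MAX_HALT_USEC. */
	return xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}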
72 
73 /*
74  * Disable interrupts and begin the xHCI halting process.
75  */
76 void xhci_quiesce(struct xhci_hcd *xhci)
77 {
78 	u32 halted;
79 	u32 cmd;
80 	u32 mask;
81 
82 	mask = ~(XHCI_IRQS);
83 	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
84 	if (!halted)
85 		mask &= ~CMD_RUN;
86 
87 	cmd = xhci_readl(xhci, &xhci->op_regs->command);
88 	cmd &= mask;
89 	xhci_writel(xhci, cmd, &xhci->op_regs->command);
90 }
91 
92 /*
93  * Force HC into halt state.
94  *
95  * Disable any IRQs and clear the run/stop bit.
96  * HC will complete any current and actively pipelined transactions, and
97  * should halt within 16 ms of the run/stop bit being cleared.
98  * Read HC Halted bit in the status register to see when the HC is finished.
99  */
100 int xhci_halt(struct xhci_hcd *xhci)
101 {
102 	int ret;
103 	xhci_dbg(xhci, "// Halt the HC\n");
104 	xhci_quiesce(xhci);
105 
106 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
107 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
108 	if (!ret) {
109 		xhci->xhc_state |= XHCI_STATE_HALTED;
110 		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
111 	} else
112 		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
113 				XHCI_MAX_HALT_USEC);
114 	return ret;
115 }
116 
117 /*
118  * Set the run bit and wait for the host to be running.
119  */
120 static int xhci_start(struct xhci_hcd *xhci)
121 {
122 	u32 temp;
123 	int ret;
124 
125 	temp = xhci_readl(xhci, &xhci->op_regs->command);
126 	temp |= (CMD_RUN);
127 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
128 			temp);
129 	xhci_writel(xhci, temp, &xhci->op_regs->command);
130 
131 	/*
132 	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
133 	 * running.
134 	 */
135 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
136 			STS_HALT, 0, XHCI_MAX_HALT_USEC);
137 	if (ret == -ETIMEDOUT)
138 		xhci_err(xhci, "Host took too long to start, "
139 				"waited %u microseconds.\n",
140 				XHCI_MAX_HALT_USEC);
141 	if (!ret)
142 		xhci->xhc_state &= ~XHCI_STATE_HALTED;
143 	return ret;
144 }
145 
146 /*
147  * Reset a halted HC.
148  *
149  * This resets pipelines, timers, counters, state machines, etc.
150  * Transactions will be terminated immediately, and operational registers
151  * will be set to their defaults.
152  */
153 int xhci_reset(struct xhci_hcd *xhci)
154 {
155 	u32 command;
156 	u32 state;
157 	int ret, i;
158 
159 	state = xhci_readl(xhci, &xhci->op_regs->status);
160 	if ((state & STS_HALT) == 0) {
161 		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
162 		return 0;
163 	}
164 
165 	xhci_dbg(xhci, "// Reset the HC\n");
166 	command = xhci_readl(xhci, &xhci->op_regs->command);
167 	command |= CMD_RESET;
168 	xhci_writel(xhci, command, &xhci->op_regs->command);
169 
170 	ret = xhci_handshake(xhci, &xhci->op_regs->command,
171 			CMD_RESET, 0, 10 * 1000 * 1000);
172 	if (ret)
173 		return ret;
174 
175 	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
176 	/*
177 	 * xHCI cannot write to any doorbells or operational registers other
178 	 * than status until the "Controller Not Ready" flag is cleared.
179 	 */
180 	ret = xhci_handshake(xhci, &xhci->op_regs->status,
181 			STS_CNR, 0, 10 * 1000 * 1000);
182 
183 	for (i = 0; i < 2; ++i) {
184 		xhci->bus_state[i].port_c_suspend = 0;
185 		xhci->bus_state[i].suspended_ports = 0;
186 		xhci->bus_state[i].resuming_ports = 0;
187 	}
188 
189 	return ret;
190 }
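/*
 * Hedged ordering sketch (illustrative only): xhci_reset() bails out unless
 * the controller is already halted, so the teardown paths in this file
 * (xhci_stop(), xhci_resume()) halt first while holding the lock. The helper
 * name below is hypothetical.
 */
static void __maybe_unused xhci_example_halt_then_reset(struct xhci_hcd *xhci)
{
	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);	/* clear Run/Stop, wait for STS_HALT */
	xhci_reset(xhci);	/* CMD_RESET is only valid on a halted HC */
	spin_unlock_irq(&xhci->lock);
}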
191 
192 #ifdef CONFIG_PCI
193 static int xhci_free_msi(struct xhci_hcd *xhci)
194 {
195 	int i;
196 
197 	if (!xhci->msix_entries)
198 		return -EINVAL;
199 
200 	for (i = 0; i < xhci->msix_count; i++)
201 		if (xhci->msix_entries[i].vector)
202 			free_irq(xhci->msix_entries[i].vector,
203 					xhci_to_hcd(xhci));
204 	return 0;
205 }
206 
207 /*
208  * Set up MSI
209  */
210 static int xhci_setup_msi(struct xhci_hcd *xhci)
211 {
212 	int ret;
213 	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
214 
215 	ret = pci_enable_msi(pdev);
216 	if (ret) {
217 		xhci_dbg(xhci, "failed to allocate MSI entry\n");
218 		return ret;
219 	}
220 
221 	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
222 				0, "xhci_hcd", xhci_to_hcd(xhci));
223 	if (ret) {
224 		xhci_dbg(xhci, "disable MSI interrupt\n");
225 		pci_disable_msi(pdev);
226 	}
227 
228 	return ret;
229 }
230 
231 /*
232  * Free IRQs
 233  * Free all requested IRQs.
234  */
235 static void xhci_free_irq(struct xhci_hcd *xhci)
236 {
237 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
238 	int ret;
239 
240 	/* return if using legacy interrupt */
241 	if (xhci_to_hcd(xhci)->irq > 0)
242 		return;
243 
244 	ret = xhci_free_msi(xhci);
245 	if (!ret)
246 		return;
247 	if (pdev->irq > 0)
248 		free_irq(pdev->irq, xhci_to_hcd(xhci));
249 
250 	return;
251 }
252 
253 /*
254  * Set up MSI-X
255  */
256 static int xhci_setup_msix(struct xhci_hcd *xhci)
257 {
258 	int i, ret = 0;
259 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
260 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
261 
262 	/*
 263 	 * Calculate the number of MSI-X vectors supported:
 264 	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
 265 	 *   handle, based on the number of interrupters in HCSPARAMS1.
 266 	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
 267 	 *   additional vector so an interrupt is always available.
268 	 */
269 	xhci->msix_count = min(num_online_cpus() + 1,
270 				HCS_MAX_INTRS(xhci->hcs_params1));
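	/*
	 * Worked example (hypothetical numbers): on a 4-CPU system whose
	 * HCSPARAMS1 reports 8 interrupters, msix_count = min(4 + 1, 8) = 5.
	 */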
271 
272 	xhci->msix_entries =
273 		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
274 				GFP_KERNEL);
275 	if (!xhci->msix_entries) {
276 		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
277 		return -ENOMEM;
278 	}
279 
280 	for (i = 0; i < xhci->msix_count; i++) {
281 		xhci->msix_entries[i].entry = i;
282 		xhci->msix_entries[i].vector = 0;
283 	}
284 
285 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
286 	if (ret) {
287 		xhci_dbg(xhci, "Failed to enable MSI-X\n");
288 		goto free_entries;
289 	}
290 
291 	for (i = 0; i < xhci->msix_count; i++) {
292 		ret = request_irq(xhci->msix_entries[i].vector,
293 				(irq_handler_t)xhci_msi_irq,
294 				0, "xhci_hcd", xhci_to_hcd(xhci));
295 		if (ret)
296 			goto disable_msix;
297 	}
298 
299 	hcd->msix_enabled = 1;
300 	return ret;
301 
302 disable_msix:
303 	xhci_dbg(xhci, "disable MSI-X interrupt\n");
304 	xhci_free_irq(xhci);
305 	pci_disable_msix(pdev);
306 free_entries:
307 	kfree(xhci->msix_entries);
308 	xhci->msix_entries = NULL;
309 	return ret;
310 }
311 
312 /* Free any IRQs and disable MSI-X */
313 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
314 {
315 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
316 	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
317 
318 	xhci_free_irq(xhci);
319 
320 	if (xhci->msix_entries) {
321 		pci_disable_msix(pdev);
322 		kfree(xhci->msix_entries);
323 		xhci->msix_entries = NULL;
324 	} else {
325 		pci_disable_msi(pdev);
326 	}
327 
328 	hcd->msix_enabled = 0;
329 	return;
330 }
331 
332 static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
333 {
334 	int i;
335 
336 	if (xhci->msix_entries) {
337 		for (i = 0; i < xhci->msix_count; i++)
338 			synchronize_irq(xhci->msix_entries[i].vector);
339 	}
340 }
341 
342 static int xhci_try_enable_msi(struct usb_hcd *hcd)
343 {
344 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
345 	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
346 	int ret;
347 
348 	/*
349 	 * Some Fresco Logic host controllers advertise MSI, but fail to
350 	 * generate interrupts.  Don't even try to enable MSI.
351 	 */
352 	if (xhci->quirks & XHCI_BROKEN_MSI)
353 		goto legacy_irq;
354 
355 	/* unregister the legacy interrupt */
356 	if (hcd->irq)
357 		free_irq(hcd->irq, hcd);
358 	hcd->irq = 0;
359 
360 	ret = xhci_setup_msix(xhci);
361 	if (ret)
 362 		/* fall back to MSI */
363 		ret = xhci_setup_msi(xhci);
364 
365 	if (!ret)
366 		/* hcd->irq is 0, we have MSI */
367 		return 0;
368 
369 	if (!pdev->irq) {
370 		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
371 		return -EINVAL;
372 	}
373 
374  legacy_irq:
 375 	/* fall back to legacy interrupt */
376 	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
377 			hcd->irq_descr, hcd);
378 	if (ret) {
379 		xhci_err(xhci, "request interrupt %d failed\n",
380 				pdev->irq);
381 		return ret;
382 	}
383 	hcd->irq = pdev->irq;
384 	return 0;
385 }
386 
387 #else
388 
389 static int xhci_try_enable_msi(struct usb_hcd *hcd)
390 {
391 	return 0;
392 }
393 
394 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
395 {
396 }
397 
398 static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
399 {
400 }
401 
402 #endif
403 
404 static void compliance_mode_recovery(unsigned long arg)
405 {
406 	struct xhci_hcd *xhci;
407 	struct usb_hcd *hcd;
408 	u32 temp;
409 	int i;
410 
411 	xhci = (struct xhci_hcd *)arg;
412 
413 	for (i = 0; i < xhci->num_usb3_ports; i++) {
414 		temp = xhci_readl(xhci, xhci->usb3_ports[i]);
415 		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
416 			/*
417 			 * Compliance Mode Detected. Letting USB Core
418 			 * handle the Warm Reset
419 			 */
420 			xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n",
421 					i + 1);
422 			xhci_dbg(xhci, "Attempting Recovery routine!\n");
423 			hcd = xhci->shared_hcd;
424 
425 			if (hcd->state == HC_STATE_SUSPENDED)
426 				usb_hcd_resume_root_hub(hcd);
427 
428 			usb_hcd_poll_rh_status(hcd);
429 		}
430 	}
431 
432 	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
433 		mod_timer(&xhci->comp_mode_recovery_timer,
434 			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
435 }
436 
437 /*
 438  * Quirk to work around an issue caused by the SN65LVPE502CP USB3.0 re-driver,
 439  * which sometimes makes ports behind that hardware enter compliance mode.
 440  * The quirk creates a timer that polls the link state of each host
 441  * controller port every 2 seconds and recovers the port by issuing a Warm
 442  * Reset if Compliance Mode is detected; otherwise the port becomes "dead"
 443  * (no device connections or disconnections will be detected anymore).
 444  * Because no status event is generated when entering compliance mode (per
 445  * the xHCI spec), this quirk is needed on systems with the failing hardware.
446  */
447 static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
448 {
449 	xhci->port_status_u0 = 0;
450 	init_timer(&xhci->comp_mode_recovery_timer);
451 
452 	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
453 	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
454 	xhci->comp_mode_recovery_timer.expires = jiffies +
455 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
456 
457 	set_timer_slack(&xhci->comp_mode_recovery_timer,
458 			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
459 	add_timer(&xhci->comp_mode_recovery_timer);
460 	xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n");
461 }
462 
463 /*
464  * This function identifies the systems that have installed the SN65LVPE502CP
465  * USB3.0 re-driver and that need the Compliance Mode Quirk.
466  * Systems:
 467  * Vendor: Hewlett-Packard -> System Models: Z420, Z620, Z820 and Z1 Workstation
468  */
469 static bool compliance_mode_recovery_timer_quirk_check(void)
470 {
471 	const char *dmi_product_name, *dmi_sys_vendor;
472 
473 	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
474 	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
475 	if (!dmi_product_name || !dmi_sys_vendor)
476 		return false;
477 
478 	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
479 		return false;
480 
481 	if (strstr(dmi_product_name, "Z420") ||
482 			strstr(dmi_product_name, "Z620") ||
483 			strstr(dmi_product_name, "Z820") ||
484 			strstr(dmi_product_name, "Z1 Workstation"))
485 		return true;
486 
487 	return false;
488 }
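/*
 * Hedged alternative sketch: the same check could be expressed with the
 * kernel's DMI match-table API (struct dmi_system_id and dmi_check_system())
 * instead of open-coded strstr() calls. The table below is an illustrative
 * assumption, not what this driver uses; a full table would need entries
 * for Z620, Z820 and Z1 Workstation as well, and DMI matching semantics
 * differ slightly from the substring checks above.
 */
static const struct dmi_system_id __maybe_unused xhci_example_comp_mode_dmi[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Z420"),
		},
	},
	{ }	/* terminating entry */
};
/* Usage sketch: if (dmi_check_system(xhci_example_comp_mode_dmi)) ... */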
489 
490 static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
491 {
492 	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
493 }
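/*
 * Worked example (hypothetical values): with num_usb3_ports == 2 the
 * "all seen" mask is (1 << 2) - 1 = 0x3, so the compliance mode recovery
 * timer stops rearming once port_status_u0 == 0x3, i.e. both USB3 ports
 * have been observed in U0.
 */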
494 
495 
496 /*
497  * Initialize memory for HCD and xHC (one-time init).
498  *
499  * Program the PAGESIZE register, initialize the device context array, create
500  * device contexts (?), set up a command ring segment (or two?), create event
501  * ring (one for now).
502  */
503 int xhci_init(struct usb_hcd *hcd)
504 {
505 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
506 	int retval = 0;
507 
508 	xhci_dbg(xhci, "xhci_init\n");
509 	spin_lock_init(&xhci->lock);
510 	if (xhci->hci_version == 0x95 && link_quirk) {
511 		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
512 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
513 	} else {
514 		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
515 	}
516 	retval = xhci_mem_init(xhci, GFP_KERNEL);
517 	xhci_dbg(xhci, "Finished xhci_init\n");
518 
 519 	/* Initialize compliance mode recovery data if needed */
520 	if (compliance_mode_recovery_timer_quirk_check()) {
521 		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
522 		compliance_mode_recovery_timer_init(xhci);
523 	}
524 
525 	return retval;
526 }
527 
528 /*-------------------------------------------------------------------------*/
529 
530 
531 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
532 static void xhci_event_ring_work(unsigned long arg)
533 {
534 	unsigned long flags;
535 	int temp;
536 	u64 temp_64;
537 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
538 	int i, j;
539 
540 	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
541 
542 	spin_lock_irqsave(&xhci->lock, flags);
543 	temp = xhci_readl(xhci, &xhci->op_regs->status);
544 	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
545 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
546 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
547 		xhci_dbg(xhci, "HW died, polling stopped.\n");
548 		spin_unlock_irqrestore(&xhci->lock, flags);
549 		return;
550 	}
551 
552 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
553 	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
554 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
555 	xhci->error_bitmask = 0;
556 	xhci_dbg(xhci, "Event ring:\n");
557 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
558 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
559 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
560 	temp_64 &= ~ERST_PTR_MASK;
561 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
562 	xhci_dbg(xhci, "Command ring:\n");
563 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
564 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
565 	xhci_dbg_cmd_ptrs(xhci);
566 	for (i = 0; i < MAX_HC_SLOTS; ++i) {
567 		if (!xhci->devs[i])
568 			continue;
569 		for (j = 0; j < 31; ++j) {
570 			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
571 		}
572 	}
573 	spin_unlock_irqrestore(&xhci->lock, flags);
574 
575 	if (!xhci->zombie)
576 		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
577 	else
578 		xhci_dbg(xhci, "Quit polling the event ring.\n");
579 }
580 #endif
581 
582 static int xhci_run_finished(struct xhci_hcd *xhci)
583 {
584 	if (xhci_start(xhci)) {
585 		xhci_halt(xhci);
586 		return -ENODEV;
587 	}
588 	xhci->shared_hcd->state = HC_STATE_RUNNING;
589 	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
590 
591 	if (xhci->quirks & XHCI_NEC_HOST)
592 		xhci_ring_cmd_db(xhci);
593 
594 	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
595 	return 0;
596 }
597 
598 /*
599  * Start the HC after it was halted.
600  *
601  * This function is called by the USB core when the HC driver is added.
602  * Its opposite is xhci_stop().
603  *
604  * xhci_init() must be called once before this function can be called.
605  * Reset the HC, enable device slot contexts, program DCBAAP, and
606  * set command ring pointer and event ring pointer.
607  *
608  * Setup MSI-X vectors and enable interrupts.
609  */
610 int xhci_run(struct usb_hcd *hcd)
611 {
612 	u32 temp;
613 	u64 temp_64;
614 	int ret;
615 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
616 
617 	/* Start the xHCI host controller running only after the USB 2.0 roothub
 618 	 * is set up.
619 	 */
620 
621 	hcd->uses_new_polling = 1;
622 	if (!usb_hcd_is_primary_hcd(hcd))
623 		return xhci_run_finished(xhci);
624 
625 	xhci_dbg(xhci, "xhci_run\n");
626 
627 	ret = xhci_try_enable_msi(hcd);
628 	if (ret)
629 		return ret;
630 
631 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
632 	init_timer(&xhci->event_ring_timer);
633 	xhci->event_ring_timer.data = (unsigned long) xhci;
634 	xhci->event_ring_timer.function = xhci_event_ring_work;
635 	/* Poll the event ring */
636 	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
637 	xhci->zombie = 0;
638 	xhci_dbg(xhci, "Setting event ring polling timer\n");
639 	add_timer(&xhci->event_ring_timer);
640 #endif
641 
642 	xhci_dbg(xhci, "Command ring memory map follows:\n");
643 	xhci_debug_ring(xhci, xhci->cmd_ring);
644 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
645 	xhci_dbg_cmd_ptrs(xhci);
646 
647 	xhci_dbg(xhci, "ERST memory map follows:\n");
648 	xhci_dbg_erst(xhci, &xhci->erst);
649 	xhci_dbg(xhci, "Event ring:\n");
650 	xhci_debug_ring(xhci, xhci->event_ring);
651 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
652 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
653 	temp_64 &= ~ERST_PTR_MASK;
654 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
655 
656 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
657 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
658 	temp &= ~ER_IRQ_INTERVAL_MASK;
659 	temp |= (u32) 160;
660 	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
661 
662 	/* Set the HCD state before we enable the irqs */
663 	temp = xhci_readl(xhci, &xhci->op_regs->command);
664 	temp |= (CMD_EIE);
665 	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
666 			temp);
667 	xhci_writel(xhci, temp, &xhci->op_regs->command);
668 
669 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
670 	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
671 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
672 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
673 			&xhci->ir_set->irq_pending);
674 	xhci_print_ir_set(xhci, 0);
675 
676 	if (xhci->quirks & XHCI_NEC_HOST)
677 		xhci_queue_vendor_command(xhci, 0, 0, 0,
678 				TRB_TYPE(TRB_NEC_GET_FW));
679 
680 	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
681 	return 0;
682 }
683 
684 static void xhci_only_stop_hcd(struct usb_hcd *hcd)
685 {
686 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
687 
688 	spin_lock_irq(&xhci->lock);
689 	xhci_halt(xhci);
690 
691 	/* The shared_hcd is going to be deallocated shortly (the USB core only
692 	 * calls this function when allocation fails in usb_add_hcd(), or
693 	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
694 	 */
695 	xhci->shared_hcd = NULL;
696 	spin_unlock_irq(&xhci->lock);
697 }
698 
699 /*
700  * Stop xHCI driver.
701  *
702  * This function is called by the USB core when the HC driver is removed.
703  * Its opposite is xhci_run().
704  *
705  * Disable device contexts, disable IRQs, and quiesce the HC.
706  * Reset the HC, finish any completed transactions, and cleanup memory.
707  */
708 void xhci_stop(struct usb_hcd *hcd)
709 {
710 	u32 temp;
711 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
712 
713 	if (!usb_hcd_is_primary_hcd(hcd)) {
714 		xhci_only_stop_hcd(xhci->shared_hcd);
715 		return;
716 	}
717 
718 	spin_lock_irq(&xhci->lock);
719 	/* Make sure the xHC is halted for a USB3 roothub
720 	 * (xhci_stop() could be called as part of failed init).
721 	 */
722 	xhci_halt(xhci);
723 	xhci_reset(xhci);
724 	spin_unlock_irq(&xhci->lock);
725 
726 	xhci_cleanup_msix(xhci);
727 
728 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
729 	/* Tell the event ring poll function not to reschedule */
730 	xhci->zombie = 1;
731 	del_timer_sync(&xhci->event_ring_timer);
732 #endif
733 
734 	/* Deleting Compliance Mode Recovery Timer */
735 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
736 			(!(xhci_all_ports_seen_u0(xhci))))
737 		del_timer_sync(&xhci->comp_mode_recovery_timer);
738 
739 	if (xhci->quirks & XHCI_AMD_PLL_FIX)
740 		usb_amd_dev_put();
741 
742 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
743 	temp = xhci_readl(xhci, &xhci->op_regs->status);
744 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
745 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
746 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
747 			&xhci->ir_set->irq_pending);
748 	xhci_print_ir_set(xhci, 0);
749 
750 	xhci_dbg(xhci, "cleaning up memory\n");
751 	xhci_mem_cleanup(xhci);
752 	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
753 		    xhci_readl(xhci, &xhci->op_regs->status));
754 }
755 
756 /*
757  * Shutdown HC (not bus-specific)
758  *
759  * This is called when the machine is rebooting or halting.  We assume that the
760  * machine will be powered off, and the HC's internal state will be reset.
761  * Don't bother to free memory.
762  *
763  * This will only ever be called with the main usb_hcd (the USB3 roothub).
764  */
765 void xhci_shutdown(struct usb_hcd *hcd)
766 {
767 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
768 
769 	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
770 		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
771 
772 	spin_lock_irq(&xhci->lock);
773 	xhci_halt(xhci);
774 	spin_unlock_irq(&xhci->lock);
775 
776 	xhci_cleanup_msix(xhci);
777 
778 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
779 		    xhci_readl(xhci, &xhci->op_regs->status));
780 }
781 
782 #ifdef CONFIG_PM
783 static void xhci_save_registers(struct xhci_hcd *xhci)
784 {
785 	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
786 	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
787 	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
788 	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
789 	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
790 	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
791 	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
792 	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
793 	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
794 }
795 
796 static void xhci_restore_registers(struct xhci_hcd *xhci)
797 {
798 	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
799 	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
800 	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
801 	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
802 	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
803 	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
804 	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
805 	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
806 	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
807 }
808 
809 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
810 {
811 	u64	val_64;
812 
813 	/* step 2: initialize command ring buffer */
814 	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
815 	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
816 		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
817 				      xhci->cmd_ring->dequeue) &
818 		 (u64) ~CMD_RING_RSVD_BITS) |
819 		xhci->cmd_ring->cycle_state;
820 	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
 821 			(unsigned long long) val_64);
822 	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
823 }
824 
825 /*
826  * The whole command ring must be cleared to zero when we suspend the host.
827  *
828  * The host doesn't save the command ring pointer in the suspend well, so we
829  * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
830  * aligned, because of the reserved bits in the command ring dequeue pointer
831  * register.  Therefore, we can't just set the dequeue pointer back in the
832  * middle of the ring (TRBs are 16-byte aligned).
833  */
834 static void xhci_clear_command_ring(struct xhci_hcd *xhci)
835 {
836 	struct xhci_ring *ring;
837 	struct xhci_segment *seg;
838 
839 	ring = xhci->cmd_ring;
840 	seg = ring->deq_seg;
841 	do {
842 		memset(seg->trbs, 0,
843 			sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
844 		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
845 			cpu_to_le32(~TRB_CYCLE);
846 		seg = seg->next;
847 	} while (seg != ring->deq_seg);
848 
849 	/* Reset the software enqueue and dequeue pointers */
850 	ring->deq_seg = ring->first_seg;
851 	ring->dequeue = ring->first_seg->trbs;
852 	ring->enq_seg = ring->deq_seg;
853 	ring->enqueue = ring->dequeue;
854 
855 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
856 	/*
857 	 * Ring is now zeroed, so the HW should look for change of ownership
858 	 * when the cycle bit is set to 1.
859 	 */
860 	ring->cycle_state = 1;
861 
862 	/*
863 	 * Reset the hardware dequeue pointer.
864 	 * Yes, this will need to be re-written after resume, but we're paranoid
865 	 * and want to make sure the hardware doesn't access bogus memory
866 	 * because, say, the BIOS or an SMI started the host without changing
867 	 * the command ring pointers.
868 	 */
869 	xhci_set_cmd_ring_deq(xhci);
870 }
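/*
 * Worked example (assuming TRBS_PER_SEGMENT == 64): after clearing, a
 * single-segment command ring reports 1 * (64 - 1) - 1 = 62 free TRBs;
 * each segment's last entry is reserved for the link TRB, and one more
 * slot is kept free so a full ring is distinguishable from an empty one.
 */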
871 
872 /*
873  * Stop HC (not bus-specific)
874  *
 875  * This is called when the machine transitions into S3/S4 mode.
876  *
877  */
878 int xhci_suspend(struct xhci_hcd *xhci)
879 {
880 	int			rc = 0;
881 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
882 	u32			command;
883 
884 	if (hcd->state != HC_STATE_SUSPENDED ||
885 			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
886 		return -EINVAL;
887 
888 	/* Don't poll the roothubs on bus suspend. */
889 	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
890 	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
891 	del_timer_sync(&hcd->rh_timer);
892 
893 	spin_lock_irq(&xhci->lock);
894 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
895 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
896 	/* step 1: stop endpoint */
 897 	/* skipped, assuming that port suspend has already been done */
898 
899 	/* step 2: clear Run/Stop bit */
900 	command = xhci_readl(xhci, &xhci->op_regs->command);
901 	command &= ~CMD_RUN;
902 	xhci_writel(xhci, command, &xhci->op_regs->command);
903 	if (xhci_handshake(xhci, &xhci->op_regs->status,
904 		      STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
905 		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
906 		spin_unlock_irq(&xhci->lock);
907 		return -ETIMEDOUT;
908 	}
909 	xhci_clear_command_ring(xhci);
910 
911 	/* step 3: save registers */
912 	xhci_save_registers(xhci);
913 
914 	/* step 4: set CSS flag */
915 	command = xhci_readl(xhci, &xhci->op_regs->command);
916 	command |= CMD_CSS;
917 	xhci_writel(xhci, command, &xhci->op_regs->command);
918 	if (xhci_handshake(xhci, &xhci->op_regs->status,
919 				STS_SAVE, 0, 10 * 1000)) {
920 		xhci_warn(xhci, "WARN: xHC save state timeout\n");
921 		spin_unlock_irq(&xhci->lock);
922 		return -ETIMEDOUT;
923 	}
924 	spin_unlock_irq(&xhci->lock);
925 
926 	/*
927 	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
928 	 * is about to be suspended.
929 	 */
930 	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
931 			(!(xhci_all_ports_seen_u0(xhci)))) {
932 		del_timer_sync(&xhci->comp_mode_recovery_timer);
933 		xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n");
934 	}
935 
936 	/* step 5: remove core well power */
937 	/* synchronize irq when using MSI-X */
938 	xhci_msix_sync_irqs(xhci);
939 
940 	return rc;
941 }
942 
943 /*
944  * start xHC (not bus-specific)
945  *
 946  * This is called when the machine transitions from S3/S4 mode.
947  *
948  */
949 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
950 {
951 	u32			command, temp = 0;
952 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
953 	struct usb_hcd		*secondary_hcd;
954 	int			retval = 0;
955 
 956 	/* Wait a bit if either of the roothubs needs to settle from the
957 	 * transition into bus suspend.
958 	 */
959 	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
960 			time_before(jiffies,
961 				xhci->bus_state[1].next_statechange))
962 		msleep(100);
963 
964 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
965 	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
966 
967 	spin_lock_irq(&xhci->lock);
968 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
969 		hibernated = true;
970 
971 	if (!hibernated) {
972 		/* step 1: restore register */
973 		xhci_restore_registers(xhci);
974 		/* step 2: initialize command ring buffer */
975 		xhci_set_cmd_ring_deq(xhci);
976 		/* step 3: restore state and start state*/
977 		/* step 3: set CRS flag */
978 		command = xhci_readl(xhci, &xhci->op_regs->command);
979 		command |= CMD_CRS;
980 		xhci_writel(xhci, command, &xhci->op_regs->command);
981 		if (xhci_handshake(xhci, &xhci->op_regs->status,
982 			      STS_RESTORE, 0, 10 * 1000)) {
983 			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
984 			spin_unlock_irq(&xhci->lock);
985 			return -ETIMEDOUT;
986 		}
987 		temp = xhci_readl(xhci, &xhci->op_regs->status);
988 	}
989 
990 	/* If restore operation fails, re-initialize the HC during resume */
991 	if ((temp & STS_SRE) || hibernated) {
992 		/* Let the USB core know _both_ roothubs lost power. */
993 		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
994 		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);
995 
996 		xhci_dbg(xhci, "Stop HCD\n");
997 		xhci_halt(xhci);
998 		xhci_reset(xhci);
999 		spin_unlock_irq(&xhci->lock);
1000 		xhci_cleanup_msix(xhci);
1001 
1002 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
1003 		/* Tell the event ring poll function not to reschedule */
1004 		xhci->zombie = 1;
1005 		del_timer_sync(&xhci->event_ring_timer);
1006 #endif
1007 
1008 		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
1009 		temp = xhci_readl(xhci, &xhci->op_regs->status);
1010 		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
1011 		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
1012 		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
1013 				&xhci->ir_set->irq_pending);
1014 		xhci_print_ir_set(xhci, 0);
1015 
1016 		xhci_dbg(xhci, "cleaning up memory\n");
1017 		xhci_mem_cleanup(xhci);
1018 		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
1019 			    xhci_readl(xhci, &xhci->op_regs->status));
1020 
1021 		/* USB core calls the PCI reinit and start functions twice:
1022 		 * first with the primary HCD, and then with the secondary HCD.
1023 		 * If we don't do the same, the host will never be started.
1024 		 */
1025 		if (!usb_hcd_is_primary_hcd(hcd))
1026 			secondary_hcd = hcd;
1027 		else
1028 			secondary_hcd = xhci->shared_hcd;
1029 
1030 		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
1031 		retval = xhci_init(hcd->primary_hcd);
1032 		if (retval)
1033 			return retval;
1034 		xhci_dbg(xhci, "Start the primary HCD\n");
1035 		retval = xhci_run(hcd->primary_hcd);
1036 		if (!retval) {
1037 			xhci_dbg(xhci, "Start the secondary HCD\n");
1038 			retval = xhci_run(secondary_hcd);
1039 		}
1040 		hcd->state = HC_STATE_SUSPENDED;
1041 		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
1042 		goto done;
1043 	}
1044 
1045 	/* step 4: set Run/Stop bit */
1046 	command = xhci_readl(xhci, &xhci->op_regs->command);
1047 	command |= CMD_RUN;
1048 	xhci_writel(xhci, command, &xhci->op_regs->command);
1049 	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
1050 		  0, 250 * 1000);
1051 
1052 	/* step 5: walk topology and initialize portsc,
1053 	 * portpmsc and portli
1054 	 */
1055 	/* this is done in bus_resume */
1056 
1057 	/* step 6: restart each of the previously
1058 	 * Running endpoints by ringing their doorbells
1059 	 */
1060 
1061 	spin_unlock_irq(&xhci->lock);
1062 
1063  done:
1064 	if (retval == 0) {
1065 		usb_hcd_resume_root_hub(hcd);
1066 		usb_hcd_resume_root_hub(xhci->shared_hcd);
1067 	}
1068 
1069 	/*
 1070 	 * If the system is subject to the quirk, the Compliance Mode Timer
 1071 	 * must always be re-initialized after a system resume, since the
 1072 	 * ports may suffer the Compliance Mode issue again. It doesn't matter
 1073 	 * whether the ports entered U0 before the system was suspended.
1074 	 */
1075 	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
1076 		compliance_mode_recovery_timer_init(xhci);
1077 
1078 	/* Re-enable port polling. */
1079 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
1080 	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1081 	usb_hcd_poll_rh_status(hcd);
1082 
1083 	return retval;
1084 }
1085 #endif	/* CONFIG_PM */
1086 
1087 /*-------------------------------------------------------------------------*/
1088 
1089 /**
1090  * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
1091  * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 1092  * value to left shift 1 for the bitmask.
1093  *
1094  * Index  = (epnum * 2) + direction - 1,
1095  * where direction = 0 for OUT, 1 for IN.
1096  * For control endpoints, the IN index is used (OUT index is unused), so
1097  * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
1098  */
1099 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
1100 {
1101 	unsigned int index;
1102 	if (usb_endpoint_xfer_control(desc))
1103 		index = (unsigned int) (usb_endpoint_num(desc)*2);
1104 	else
1105 		index = (unsigned int) (usb_endpoint_num(desc)*2) +
1106 			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
1107 	return index;
1108 }
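/*
 * Worked example (illustrative, hypothetical endpoint): a bulk IN endpoint
 * with bEndpointAddress 0x81 has epnum 1 and direction IN, so
 * index = (1 * 2) + 1 - 1 = 2, and the control-context flag from
 * xhci_get_endpoint_flag() below is 1 << (2 + 1) = 0x8, since bit 0 is the
 * slot context and bit 1 is endpoint 0.
 */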
1109 
1110 /* Find the flag for this endpoint (for use in the control context).  Use the
1111  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
1112  * bit 1, etc.
1113  */
1114 unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
1115 {
1116 	return 1 << (xhci_get_endpoint_index(desc) + 1);
1117 }
1118 
1119 /* Find the flag for this endpoint (for use in the control context).  Use the
1120  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
1121  * bit 1, etc.
1122  */
1123 unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
1124 {
1125 	return 1 << (ep_index + 1);
1126 }
1127 
1128 /* Compute the last valid endpoint context index.  Basically, this is the
 1129  * endpoint index plus one.  For slot contexts with more than one valid endpoint,
1130  * we find the most significant bit set in the added contexts flags.
1131  * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
1132  * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
1133  */
1134 unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
1135 {
1136 	return fls(added_ctxs) - 1;
1137 }
1138 
1139 /* Returns 1 if the arguments are OK;
 1140  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
1141  */
1142 static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
1143 		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
1144 		const char *func) {
1145 	struct xhci_hcd	*xhci;
1146 	struct xhci_virt_device	*virt_dev;
1147 
1148 	if (!hcd || (check_ep && !ep) || !udev) {
1149 		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
1150 				func);
1151 		return -EINVAL;
1152 	}
1153 	if (!udev->parent) {
1154 		printk(KERN_DEBUG "xHCI %s called for root hub\n",
1155 				func);
1156 		return 0;
1157 	}
1158 
1159 	xhci = hcd_to_xhci(hcd);
1160 	if (xhci->xhc_state & XHCI_STATE_HALTED)
1161 		return -ENODEV;
1162 
1163 	if (check_virt_dev) {
1164 		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
1165 			printk(KERN_DEBUG "xHCI %s called with unaddressed "
1166 						"device\n", func);
1167 			return -EINVAL;
1168 		}
1169 
1170 		virt_dev = xhci->devs[udev->slot_id];
1171 		if (virt_dev->udev != udev) {
1172 			printk(KERN_DEBUG "xHCI %s called with udev and "
 1173 					  "virt_dev that do not match\n", func);
1174 			return -EINVAL;
1175 		}
1176 	}
1177 
1178 	return 1;
1179 }
1180 
1181 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1182 		struct usb_device *udev, struct xhci_command *command,
1183 		bool ctx_change, bool must_succeed);
1184 
1185 /*
1186  * Full speed devices may have a max packet size greater than 8 bytes, but the
1187  * USB core doesn't know that until it reads the first 8 bytes of the
1188  * descriptor.  If the usb_device's max packet size changes after that point,
1189  * we need to issue an evaluate context command and wait on it.
1190  */
1191 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
1192 		unsigned int ep_index, struct urb *urb)
1193 {
1194 	struct xhci_container_ctx *in_ctx;
1195 	struct xhci_container_ctx *out_ctx;
1196 	struct xhci_input_control_ctx *ctrl_ctx;
1197 	struct xhci_ep_ctx *ep_ctx;
1198 	int max_packet_size;
1199 	int hw_max_packet_size;
1200 	int ret = 0;
1201 
1202 	out_ctx = xhci->devs[slot_id]->out_ctx;
1203 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1204 	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
1205 	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
1206 	if (hw_max_packet_size != max_packet_size) {
1207 		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
1208 		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
1209 				max_packet_size);
1210 		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
1211 				hw_max_packet_size);
1212 		xhci_dbg(xhci, "Issuing evaluate context command.\n");
1213 
1214 		/* Set up the modified control endpoint 0 */
1215 		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1216 				xhci->devs[slot_id]->out_ctx, ep_index);
1217 		in_ctx = xhci->devs[slot_id]->in_ctx;
1218 		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1219 		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
1220 		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
1221 
1222 		/* Set up the input context flags for the command */
1223 		/* FIXME: This won't work if a non-default control endpoint
1224 		 * changes max packet sizes.
1225 		 */
1226 		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1227 		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
1228 		ctrl_ctx->drop_flags = 0;
1229 
1230 		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
1231 		xhci_dbg_ctx(xhci, in_ctx, ep_index);
1232 		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
1233 		xhci_dbg_ctx(xhci, out_ctx, ep_index);
1234 
1235 		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
1236 				true, false);
1237 
1238 		/* Clean up the input context for later use by bandwidth
1239 		 * functions.
1240 		 */
1241 		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
1242 	}
1243 	return ret;
1244 }
1245 
1246 /*
1247  * non-error returns are a promise to giveback() the urb later
1248  * we drop ownership so next owner (or urb unlink) can get it
1249  */
1250 int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
1251 {
1252 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1253 	struct xhci_td *buffer;
1254 	unsigned long flags;
1255 	int ret = 0;
1256 	unsigned int slot_id, ep_index;
1257 	struct urb_priv	*urb_priv;
1258 	int size, i;
1259 
1260 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
1261 					true, true, __func__) <= 0)
1262 		return -EINVAL;
1263 
1264 	slot_id = urb->dev->slot_id;
1265 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1266 
1267 	if (!HCD_HW_ACCESSIBLE(hcd)) {
1268 		if (!in_interrupt())
1269 			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
1270 		ret = -ESHUTDOWN;
1271 		goto exit;
1272 	}
1273 
1274 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
1275 		size = urb->number_of_packets;
1276 	else
1277 		size = 1;
1278 
1279 	urb_priv = kzalloc(sizeof(struct urb_priv) +
1280 				  size * sizeof(struct xhci_td *), mem_flags);
1281 	if (!urb_priv)
1282 		return -ENOMEM;
1283 
1284 	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
1285 	if (!buffer) {
1286 		kfree(urb_priv);
1287 		return -ENOMEM;
1288 	}
1289 
1290 	for (i = 0; i < size; i++) {
1291 		urb_priv->td[i] = buffer;
1292 		buffer++;
1293 	}
1294 
1295 	urb_priv->length = size;
1296 	urb_priv->td_cnt = 0;
1297 	urb->hcpriv = urb_priv;
1298 
1299 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
1300 		/* Check to see if the max packet size for the default control
1301 		 * endpoint changed during FS device enumeration
1302 		 */
1303 		if (urb->dev->speed == USB_SPEED_FULL) {
1304 			ret = xhci_check_maxpacket(xhci, slot_id,
1305 					ep_index, urb);
1306 			if (ret < 0) {
1307 				xhci_urb_free_priv(xhci, urb_priv);
1308 				urb->hcpriv = NULL;
1309 				return ret;
1310 			}
1311 		}
1312 
1313 		/* We have a spinlock and interrupts disabled, so we must pass
1314 		 * atomic context to this function, which may allocate memory.
1315 		 */
1316 		spin_lock_irqsave(&xhci->lock, flags);
1317 		if (xhci->xhc_state & XHCI_STATE_DYING)
1318 			goto dying;
1319 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
1320 				slot_id, ep_index);
1321 		if (ret)
1322 			goto free_priv;
1323 		spin_unlock_irqrestore(&xhci->lock, flags);
1324 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
1325 		spin_lock_irqsave(&xhci->lock, flags);
1326 		if (xhci->xhc_state & XHCI_STATE_DYING)
1327 			goto dying;
1328 		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1329 				EP_GETTING_STREAMS) {
1330 			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1331 					"is transitioning to using streams.\n");
1332 			ret = -EINVAL;
1333 		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
1334 				EP_GETTING_NO_STREAMS) {
1335 			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
1336 					"is transitioning to "
1337 					"not having streams.\n");
1338 			ret = -EINVAL;
1339 		} else {
1340 			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
1341 					slot_id, ep_index);
1342 		}
1343 		if (ret)
1344 			goto free_priv;
1345 		spin_unlock_irqrestore(&xhci->lock, flags);
1346 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
1347 		spin_lock_irqsave(&xhci->lock, flags);
1348 		if (xhci->xhc_state & XHCI_STATE_DYING)
1349 			goto dying;
1350 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
1351 				slot_id, ep_index);
1352 		if (ret)
1353 			goto free_priv;
1354 		spin_unlock_irqrestore(&xhci->lock, flags);
1355 	} else {
1356 		spin_lock_irqsave(&xhci->lock, flags);
1357 		if (xhci->xhc_state & XHCI_STATE_DYING)
1358 			goto dying;
1359 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
1360 				slot_id, ep_index);
1361 		if (ret)
1362 			goto free_priv;
1363 		spin_unlock_irqrestore(&xhci->lock, flags);
1364 	}
1365 exit:
1366 	return ret;
1367 dying:
1368 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
1369 			"non-responsive xHCI host.\n",
1370 			urb->ep->desc.bEndpointAddress, urb);
1371 	ret = -ESHUTDOWN;
1372 free_priv:
1373 	xhci_urb_free_priv(xhci, urb_priv);
1374 	urb->hcpriv = NULL;
1375 	spin_unlock_irqrestore(&xhci->lock, flags);
1376 	return ret;
1377 }
1378 
1379 /* Get the right ring for the given URB.
1380  * If the endpoint supports streams, boundary check the URB's stream ID.
1381  * If the endpoint doesn't support streams, return the singular endpoint ring.
1382  */
1383 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
1384 		struct urb *urb)
1385 {
1386 	unsigned int slot_id;
1387 	unsigned int ep_index;
1388 	unsigned int stream_id;
1389 	struct xhci_virt_ep *ep;
1390 
1391 	slot_id = urb->dev->slot_id;
1392 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1393 	stream_id = urb->stream_id;
1394 	ep = &xhci->devs[slot_id]->eps[ep_index];
1395 	/* Common case: no streams */
1396 	if (!(ep->ep_state & EP_HAS_STREAMS))
1397 		return ep->ring;
1398 
1399 	if (stream_id == 0) {
1400 		xhci_warn(xhci,
1401 				"WARN: Slot ID %u, ep index %u has streams, "
1402 				"but URB has no stream ID.\n",
1403 				slot_id, ep_index);
1404 		return NULL;
1405 	}
1406 
1407 	if (stream_id < ep->stream_info->num_streams)
1408 		return ep->stream_info->stream_rings[stream_id];
1409 
1410 	xhci_warn(xhci,
1411 			"WARN: Slot ID %u, ep index %u has "
1412 			"stream IDs 1 to %u allocated, "
1413 			"but stream ID %u is requested.\n",
1414 			slot_id, ep_index,
1415 			ep->stream_info->num_streams - 1,
1416 			stream_id);
1417 	return NULL;
1418 }
1419 
1420 /*
1421  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
1422  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
1423  * should pick up where it left off in the TD, unless a Set Transfer Ring
1424  * Dequeue Pointer is issued.
1425  *
1426  * The TRBs that make up the buffers for the canceled URB will be "removed" from
1427  * the ring.  Since the ring is a contiguous structure, they can't be physically
 1428  * removed.  Instead, there are two options, plus a third case to handle:
1429  *
1430  *  1) If the HC is in the middle of processing the URB to be canceled, we
1431  *     simply move the ring's dequeue pointer past those TRBs using the Set
1432  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
1433  *     when drivers timeout on the last submitted URB and attempt to cancel.
1434  *
1435  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
1436  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 1437  *     HC will need to invalidate any TRBs it has cached after the stop
1438  *     endpoint command, as noted in the xHCI 0.95 errata.
1439  *
1440  *  3) The TD may have completed by the time the Stop Endpoint Command
1441  *     completes, so software needs to handle that case too.
1442  *
1443  * This function should protect against the TD enqueueing code ringing the
1444  * doorbell while this code is waiting for a Stop Endpoint command to complete.
 1445  * It also needs to account for multiple cancellations happening at the same
1446  * time for the same endpoint.
1447  *
1448  * Note that this function can be called in any context, or so says
1449  * usb_hcd_unlink_urb()
1450  */
1451 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1452 {
1453 	unsigned long flags;
1454 	int ret, i;
1455 	u32 temp;
1456 	struct xhci_hcd *xhci;
1457 	struct urb_priv	*urb_priv;
1458 	struct xhci_td *td;
1459 	unsigned int ep_index;
1460 	struct xhci_ring *ep_ring;
1461 	struct xhci_virt_ep *ep;
1462 
1463 	xhci = hcd_to_xhci(hcd);
1464 	spin_lock_irqsave(&xhci->lock, flags);
1465 	/* Make sure the URB hasn't completed or been unlinked already */
1466 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1467 	if (ret || !urb->hcpriv)
1468 		goto done;
1469 	temp = xhci_readl(xhci, &xhci->op_regs->status);
1470 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
1471 		xhci_dbg(xhci, "HW died, freeing TD.\n");
1472 		urb_priv = urb->hcpriv;
1473 		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
1474 			td = urb_priv->td[i];
1475 			if (!list_empty(&td->td_list))
1476 				list_del_init(&td->td_list);
1477 			if (!list_empty(&td->cancelled_td_list))
1478 				list_del_init(&td->cancelled_td_list);
1479 		}
1480 
1481 		usb_hcd_unlink_urb_from_ep(hcd, urb);
1482 		spin_unlock_irqrestore(&xhci->lock, flags);
1483 		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1484 		xhci_urb_free_priv(xhci, urb_priv);
1485 		return ret;
1486 	}
1487 	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
1488 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
1489 		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
1490 				"non-responsive xHCI host.\n",
1491 				urb->ep->desc.bEndpointAddress, urb);
1492 		/* Let the stop endpoint command watchdog timer (which set this
1493 		 * state) finish cleaning up the endpoint TD lists.  We must
1494 		 * have caught it in the middle of dropping a lock and giving
1495 		 * back an URB.
1496 		 */
1497 		goto done;
1498 	}
1499 
1500 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
1501 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
1502 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
1503 	if (!ep_ring) {
1504 		ret = -EINVAL;
1505 		goto done;
1506 	}
1507 
1508 	urb_priv = urb->hcpriv;
1509 	i = urb_priv->td_cnt;
1510 	if (i < urb_priv->length)
1511 		xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
1512 				"starting at offset 0x%llx\n",
1513 				urb, urb->dev->devpath,
1514 				urb->ep->desc.bEndpointAddress,
1515 				(unsigned long long) xhci_trb_virt_to_dma(
1516 					urb_priv->td[i]->start_seg,
1517 					urb_priv->td[i]->first_trb));
1518 
1519 	for (; i < urb_priv->length; i++) {
1520 		td = urb_priv->td[i];
1521 		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
1522 	}
1523 
1524 	/* Queue a stop endpoint command, but only if this is
1525 	 * the first cancellation to be handled.
1526 	 */
1527 	if (!(ep->ep_state & EP_HALT_PENDING)) {
1528 		ep->ep_state |= EP_HALT_PENDING;
1529 		ep->stop_cmds_pending++;
1530 		ep->stop_cmd_timer.expires = jiffies +
1531 			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1532 		add_timer(&ep->stop_cmd_timer);
1533 		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
1534 		xhci_ring_cmd_db(xhci);
1535 	}
1536 done:
1537 	spin_unlock_irqrestore(&xhci->lock, flags);
1538 	return ret;
1539 }
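/*
 * Hedged usage sketch: cancellations arrive here via the USB core, e.g.
 * when a class driver calls usb_unlink_urb() (asynchronous) or
 * usb_kill_urb() (synchronous) on a previously submitted URB. The helper
 * below is hypothetical.
 */
static void __maybe_unused xhci_example_cancel_urb(struct urb *urb)
{
	/* Asynchronous unlink; the URB is given back later with a status
	 * of -ECONNRESET (see usb_hcd_unlink_urb()).
	 */
	usb_unlink_urb(urb);
}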
1540 
1541 /* Drop an endpoint from a new bandwidth configuration for this device.
1542  * Only one call to this function is allowed per endpoint before
1543  * check_bandwidth() or reset_bandwidth() must be called.
1544  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1545  * add the endpoint to the schedule with possibly new parameters denoted by a
1546  * different endpoint descriptor in usb_host_endpoint.
1547  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1548  * not allowed.
1549  *
1550  * The USB core will not allow URBs to be queued to an endpoint that is being
1551  * disabled, so there's no need for mutual exclusion to protect
1552  * the xhci->devs[slot_id] structure.
1553  */
1554 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1555 		struct usb_host_endpoint *ep)
1556 {
1557 	struct xhci_hcd *xhci;
1558 	struct xhci_container_ctx *in_ctx, *out_ctx;
1559 	struct xhci_input_control_ctx *ctrl_ctx;
1560 	struct xhci_slot_ctx *slot_ctx;
1561 	unsigned int last_ctx;
1562 	unsigned int ep_index;
1563 	struct xhci_ep_ctx *ep_ctx;
1564 	u32 drop_flag;
1565 	u32 new_add_flags, new_drop_flags, new_slot_info;
1566 	int ret;
1567 
1568 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1569 	if (ret <= 0)
1570 		return ret;
1571 	xhci = hcd_to_xhci(hcd);
1572 	if (xhci->xhc_state & XHCI_STATE_DYING)
1573 		return -ENODEV;
1574 
1575 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1576 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
1577 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1578 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1579 				__func__, drop_flag);
1580 		return 0;
1581 	}
1582 
1583 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1584 	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1585 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1586 	ep_index = xhci_get_endpoint_index(&ep->desc);
1587 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1588 	/* If the HC already knows the endpoint is disabled,
1589 	 * or the HCD has noted it is disabled, ignore this request
1590 	 */
1591 	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
1592 	     cpu_to_le32(EP_STATE_DISABLED)) ||
1593 	    le32_to_cpu(ctrl_ctx->drop_flags) &
1594 	    xhci_get_endpoint_flag(&ep->desc)) {
1595 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1596 				__func__, ep);
1597 		return 0;
1598 	}
1599 
1600 	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1601 	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1602 
1603 	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1604 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1605 
1606 	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
1607 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1608 	/* Update the last valid endpoint context, if we deleted the last one */
1609 	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
1610 	    LAST_CTX(last_ctx)) {
1611 		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1612 		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1613 	}
1614 	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1615 
1616 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1617 
1618 	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1619 			(unsigned int) ep->desc.bEndpointAddress,
1620 			udev->slot_id,
1621 			(unsigned int) new_drop_flags,
1622 			(unsigned int) new_add_flags,
1623 			(unsigned int) new_slot_info);
1624 	return 0;
1625 }
1626 
1627 /* Add an endpoint to a new possible bandwidth configuration for this device.
1628  * Only one call to this function is allowed per endpoint before
1629  * check_bandwidth() or reset_bandwidth() must be called.
1630  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1631  * add the endpoint to the schedule with possibly new parameters denoted by a
1632  * different endpoint descriptor in usb_host_endpoint.
1633  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1634  * not allowed.
1635  *
1636  * The USB core will not allow URBs to be queued to an endpoint until the
1637  * configuration or alt setting is installed in the device, so there's no need
1638  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1639  */
1640 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1641 		struct usb_host_endpoint *ep)
1642 {
1643 	struct xhci_hcd *xhci;
1644 	struct xhci_container_ctx *in_ctx, *out_ctx;
1645 	unsigned int ep_index;
1646 	struct xhci_slot_ctx *slot_ctx;
1647 	struct xhci_input_control_ctx *ctrl_ctx;
1648 	u32 added_ctxs;
1649 	unsigned int last_ctx;
1650 	u32 new_add_flags, new_drop_flags, new_slot_info;
1651 	struct xhci_virt_device *virt_dev;
1652 	int ret = 0;
1653 
1654 	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1655 	if (ret <= 0) {
1656 		/* So we won't queue a reset ep command for a root hub */
1657 		ep->hcpriv = NULL;
1658 		return ret;
1659 	}
1660 	xhci = hcd_to_xhci(hcd);
1661 	if (xhci->xhc_state & XHCI_STATE_DYING)
1662 		return -ENODEV;
1663 
1664 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1665 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
1666 	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1667 		/* FIXME when we have to issue an evaluate endpoint command to
1668 		 * deal with ep0 max packet size changing once we get the
1669 		 * descriptors
1670 		 */
1671 		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1672 				__func__, added_ctxs);
1673 		return 0;
1674 	}
1675 
1676 	virt_dev = xhci->devs[udev->slot_id];
1677 	in_ctx = virt_dev->in_ctx;
1678 	out_ctx = virt_dev->out_ctx;
1679 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1680 	ep_index = xhci_get_endpoint_index(&ep->desc);
1681 
1682 	/* If this endpoint is already in use, and the upper layers are trying
1683 	 * to add it again without dropping it, reject the addition.
1684 	 */
1685 	if (virt_dev->eps[ep_index].ring &&
1686 			!(le32_to_cpu(ctrl_ctx->drop_flags) &
1687 				xhci_get_endpoint_flag(&ep->desc))) {
1688 		xhci_warn(xhci, "Trying to add endpoint 0x%x "
1689 				"without dropping it.\n",
1690 				(unsigned int) ep->desc.bEndpointAddress);
1691 		return -EINVAL;
1692 	}
1693 
1694 	/* If the HCD has already noted the endpoint is enabled,
1695 	 * ignore this request.
1696 	 */
1697 	if (le32_to_cpu(ctrl_ctx->add_flags) &
1698 	    xhci_get_endpoint_flag(&ep->desc)) {
1699 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1700 				__func__, ep);
1701 		return 0;
1702 	}
1703 
1704 	/*
1705 	 * Configuration and alternate setting changes must be done in
1706 	 * process context, not interrupt context (or so the documentation
1707 	 * for usb_set_interface() and usb_set_configuration() claims).
1708 	 */
1709 	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
1710 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1711 				__func__, ep->desc.bEndpointAddress);
1712 		return -ENOMEM;
1713 	}
1714 
1715 	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
1716 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1717 
1718 	/* If xhci_endpoint_disable() was called for this endpoint, but the
1719 	 * xHC hasn't been notified yet through the check_bandwidth() call,
1720 	 * this re-adds a new state for the endpoint from the new endpoint
1721 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
1722 	 * drop flags alone.
1723 	 */
1724 	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1725 
1726 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1727 	/* Update the last valid endpoint context, if we just added one past it */
1728 	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
1729 	    LAST_CTX(last_ctx)) {
1730 		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1731 		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
1732 	}
1733 	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
1734 
1735 	/* Store the usb_device pointer for later use */
1736 	ep->hcpriv = udev;
1737 
1738 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1739 			(unsigned int) ep->desc.bEndpointAddress,
1740 			udev->slot_id,
1741 			(unsigned int) new_drop_flags,
1742 			(unsigned int) new_add_flags,
1743 			(unsigned int) new_slot_info);
1744 	return 0;
1745 }
1746 
1747 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1748 {
1749 	struct xhci_input_control_ctx *ctrl_ctx;
1750 	struct xhci_ep_ctx *ep_ctx;
1751 	struct xhci_slot_ctx *slot_ctx;
1752 	int i;
1753 
1754 	/* When a device's add flag and drop flag are zero, any subsequent
1755 	 * configure endpoint command will leave that endpoint's state
1756 	 * untouched.  Make sure we don't leave any old state in the input
1757 	 * endpoint contexts.
1758 	 */
1759 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1760 	ctrl_ctx->drop_flags = 0;
1761 	ctrl_ctx->add_flags = 0;
1762 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1763 	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
1764 	/* Endpoint 0 is always valid */
1765 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
1766 	for (i = 1; i < 31; ++i) {
1767 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1768 		ep_ctx->ep_info = 0;
1769 		ep_ctx->ep_info2 = 0;
1770 		ep_ctx->deq = 0;
1771 		ep_ctx->tx_info = 0;
1772 	}
1773 }
1774 
1775 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1776 		struct usb_device *udev, u32 *cmd_status)
1777 {
1778 	int ret;
1779 
1780 	switch (*cmd_status) {
1781 	case COMP_ENOMEM:
1782 		dev_warn(&udev->dev, "Not enough host controller resources "
1783 				"for new device state.\n");
1784 		ret = -ENOMEM;
1785 		/* FIXME: can we allocate more resources for the HC? */
1786 		break;
1787 	case COMP_BW_ERR:
1788 	case COMP_2ND_BW_ERR:
1789 		dev_warn(&udev->dev, "Not enough bandwidth "
1790 				"for new device state.\n");
1791 		ret = -ENOSPC;
1792 		/* FIXME: can we go back to the old state? */
1793 		break;
1794 	case COMP_TRB_ERR:
1795 		/* the HCD set up something wrong */
1796 		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1797 				"add flag = 1, "
1798 				"and endpoint is not disabled.\n");
1799 		ret = -EINVAL;
1800 		break;
1801 	case COMP_DEV_ERR:
1802 		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
1803 				"configure command.\n");
1804 		ret = -ENODEV;
1805 		break;
1806 	case COMP_SUCCESS:
1807 		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1808 		ret = 0;
1809 		break;
1810 	default:
1811 		xhci_err(xhci, "ERROR: unexpected command completion "
1812 				"code 0x%x.\n", *cmd_status);
1813 		ret = -EINVAL;
1814 		break;
1815 	}
1816 	return ret;
1817 }
1818 
1819 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1820 		struct usb_device *udev, u32 *cmd_status)
1821 {
1822 	int ret;
1823 	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1824 
1825 	switch (*cmd_status) {
1826 	case COMP_EINVAL:
1827 		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1828 				"context command.\n");
1829 		ret = -EINVAL;
1830 		break;
1831 	case COMP_EBADSLT:
1832 		dev_warn(&udev->dev, "WARN: slot not enabled for "
1833 				"evaluate context command.\n");
1834 		ret = -EINVAL;
1835 		break;
1836 	case COMP_CTX_STATE:
1837 		dev_warn(&udev->dev, "WARN: invalid context state for "
1838 				"evaluate context command.\n");
1839 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1840 		ret = -EINVAL;
1841 		break;
1842 	case COMP_DEV_ERR:
1843 		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
1844 				"context command.\n");
1845 		ret = -ENODEV;
1846 		break;
1847 	case COMP_MEL_ERR:
1848 		/* Max Exit Latency too large error */
1849 		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
1850 		ret = -EINVAL;
1851 		break;
1852 	case COMP_SUCCESS:
1853 		dev_dbg(&udev->dev, "Successful evaluate context command\n");
1854 		ret = 0;
1855 		break;
1856 	default:
1857 		xhci_err(xhci, "ERROR: unexpected command completion "
1858 				"code 0x%x.\n", *cmd_status);
1859 		ret = -EINVAL;
1860 		break;
1861 	}
1862 	return ret;
1863 }
1864 
1865 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
1866 		struct xhci_container_ctx *in_ctx)
1867 {
1868 	struct xhci_input_control_ctx *ctrl_ctx;
1869 	u32 valid_add_flags;
1870 	u32 valid_drop_flags;
1871 
1872 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1873 	/* Ignore the slot flag (bit 0), and the default control endpoint flag
1874 	 * (bit 1).  The default control endpoint is added during the Address
1875 	 * Device command and is never removed until the slot is disabled.
1876 	 */
1877 	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1878 	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1879 
1880 	/* Use hweight32 to count the number of ones in the add flags, or
1881 	 * number of endpoints added.  Don't count endpoints that are changed
1882 	 * (both added and dropped).
1883 	 */
1884 	return hweight32(valid_add_flags) -
1885 		hweight32(valid_add_flags & valid_drop_flags);
1886 }
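/*
 * Worked example (illustrative flag values): add_flags = 0x19 (slot flag
 * plus endpoint contexts 3 and 4) and drop_flags = 0x08.  Shifting out the
 * slot and ep0 bits gives valid_add_flags = 0x06 and valid_drop_flags =
 * 0x02, so hweight32(0x06) - hweight32(0x06 & 0x02) = 2 - 1 = 1 endpoint
 * is truly new; context 3 is only changed (both added and dropped).
 */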
1887 
1888 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
1889 		struct xhci_container_ctx *in_ctx)
1890 {
1891 	struct xhci_input_control_ctx *ctrl_ctx;
1892 	u32 valid_add_flags;
1893 	u32 valid_drop_flags;
1894 
1895 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1896 	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
1897 	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
1898 
1899 	return hweight32(valid_drop_flags) -
1900 		hweight32(valid_add_flags & valid_drop_flags);
1901 }
1902 
1903 /*
1904  * We need to reserve the new number of endpoints before the configure endpoint
1905  * command completes.  We can't subtract the dropped endpoints from the number
1906  * of active endpoints until the command completes because we can oversubscribe
1907  * the host in this case:
1908  *
1909  *  - the first configure endpoint command drops more endpoints than it adds
1910  *  - a second configure endpoint command that adds more endpoints is queued
1911  *  - the first configure endpoint command fails, so the config is unchanged
1912  *  - the second command may succeed, even though there aren't enough resources
1913  *
1914  * Must be called with xhci->lock held.
1915  */
1916 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
1917 		struct xhci_container_ctx *in_ctx)
1918 {
1919 	u32 added_eps;
1920 
1921 	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1922 	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
1923 		xhci_dbg(xhci, "Not enough ep ctxs: "
1924 				"%u active, need to add %u, limit is %u.\n",
1925 				xhci->num_active_eps, added_eps,
1926 				xhci->limit_active_eps);
1927 		return -ENOMEM;
1928 	}
1929 	xhci->num_active_eps += added_eps;
1930 	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
1931 			xhci->num_active_eps);
1932 	return 0;
1933 }
1934 
1935 /*
1936  * The xHC failed the configure endpoint command for some other reason, so we
1937  * need to revert the resources that the failed configuration would have used.
1938  *
1939  * Must be called with xhci->lock held.
1940  */
1941 static void xhci_free_host_resources(struct xhci_hcd *xhci,
1942 		struct xhci_container_ctx *in_ctx)
1943 {
1944 	u32 num_failed_eps;
1945 
1946 	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
1947 	xhci->num_active_eps -= num_failed_eps;
1948 	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
1949 			num_failed_eps,
1950 			xhci->num_active_eps);
1951 }
1952 
1953 /*
1954  * Now that the command has completed, clean up the active endpoint count by
1955  * subtracting out the endpoints that were dropped (but not changed).
1956  *
1957  * Must be called with xhci->lock held.
1958  */
1959 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
1960 		struct xhci_container_ctx *in_ctx)
1961 {
1962 	u32 num_dropped_eps;
1963 
1964 	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
1965 	xhci->num_active_eps -= num_dropped_eps;
1966 	if (num_dropped_eps)
1967 		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
1968 				num_dropped_eps,
1969 				xhci->num_active_eps);
1970 }
1971 
1972 static unsigned int xhci_get_block_size(struct usb_device *udev)
1973 {
1974 	switch (udev->speed) {
1975 	case USB_SPEED_LOW:
1976 	case USB_SPEED_FULL:
1977 		return FS_BLOCK;
1978 	case USB_SPEED_HIGH:
1979 		return HS_BLOCK;
1980 	case USB_SPEED_SUPER:
1981 		return SS_BLOCK;
1982 	case USB_SPEED_UNKNOWN:
1983 	case USB_SPEED_WIRELESS:
1984 	default:
1985 		/* Should never happen */
1986 		return 1;
1987 	}
1988 }
1989 
1990 static unsigned int
1991 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
1992 {
1993 	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
1994 		return LS_OVERHEAD;
1995 	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
1996 		return FS_OVERHEAD;
1997 	return HS_OVERHEAD;
1998 }
1999 
2000 /* If we are changing a LS/FS device under a HS hub,
2001  * make sure (if we are activating a new TT) that the HS bus has enough
2002  * bandwidth for this new TT.
2003  */
2004 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2005 		struct xhci_virt_device *virt_dev,
2006 		int old_active_eps)
2007 {
2008 	struct xhci_interval_bw_table *bw_table;
2009 	struct xhci_tt_bw_info *tt_info;
2010 
2011 	/* Find the bandwidth table for the root port this TT is attached to. */
2012 	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2013 	tt_info = virt_dev->tt_info;
2014 	/* If this TT already had active endpoints, the bandwidth for this TT
2015 	 * has already been added.  Removing all periodic endpoints (and thus
2016 	 * making the TT inactive) will only decrease the bandwidth used.
2017 	 */
2018 	if (old_active_eps)
2019 		return 0;
2020 	if (old_active_eps == 0 && tt_info->active_eps != 0) {
2021 		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2022 			return -ENOMEM;
2023 		return 0;
2024 	}
2025 	/* Not sure why we would have no new active endpoints...
2026 	 *
2027 	 * Maybe because of an Evaluate Context change for a hub update or a
2028 	 * control endpoint 0 max packet size change?
2029 	 * FIXME: skip the bandwidth calculation in that case.
2030 	 */
2031 	return 0;
2032 }
2033 
2034 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2035 		struct xhci_virt_device *virt_dev)
2036 {
2037 	unsigned int bw_reserved;
2038 
2039 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2040 	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2041 		return -ENOMEM;
2042 
2043 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2044 	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2045 		return -ENOMEM;
2046 
2047 	return 0;
2048 }
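/*
 * Worked illustration of the check above (the exact reserve is set by
 * SS_BW_RESERVED, assumed here to be a percentage since it is scaled by
 * 100): with a 10 percent reserve, bw_reserved = DIV_ROUND_UP(10 * limit,
 * 100), and the check fails only when the already-consumed ss_bw_in or
 * ss_bw_out would eat into that held-back tenth of the budget.
 */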
2049 
2050 /*
2051  * This algorithm is a very conservative estimate of the worst-case scheduling
2052  * scenario for any one interval.  The hardware dynamically schedules the
2053  * packets, so we can't tell which microframe could be the limiting factor in
2054  * the bandwidth scheduling.  This only takes into account periodic endpoints.
2055  *
2056  * Obviously, we can't solve an NP-complete problem to find the minimum worst
2057  * case scenario.  Instead, we come up with an estimate that is no less than
2058  * the worst case bandwidth used for any one microframe, but may be an
2059  * over-estimate.
2060  *
2061  * We walk the requirements for each endpoint by interval, starting with the
2062  * smallest interval, and place packets in the schedule where there is only one
2063  * possible way to schedule packets for that interval.  In order to simplify
2064  * this algorithm, we record the largest max packet size for each interval, and
2065  * assume all packets will be that size.
2066  *
2067  * For interval 0, we obviously must schedule all packets for each interval.
2068  * The bandwidth for interval 0 is just the amount of data to be transmitted
2069  * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2070  * the number of packets).
2071  *
2072  * For interval 1, we have two possible microframes to schedule those packets
2073  * in.  For this algorithm, if we can schedule the same number of packets for
2074  * each possible scheduling opportunity (each microframe), we will do so.  The
2075  * remaining number of packets will be saved to be transmitted in the gaps in
2076  * the next interval's scheduling sequence.
2077  *
2078  * As we move those remaining packets to be scheduled with interval 2 packets,
2079  * we have to double the number of remaining packets to transmit.  This is
2080  * because the intervals are actually powers of 2, and we would be transmitting
2081  * the previous interval's packets twice in this interval.  We also have to be
2082  * sure that when we look at the largest max packet size for this interval, we
2083  * also look at the largest max packet size for the remaining packets and take
2084  * the greater of the two.
2085  *
2086  * The algorithm continues to evenly distribute packets in each scheduling
2087  * opportunity, and push the remaining packets out, until we get to the last
2088  * interval.  Then those packets and their associated overhead are just added
2089  * to the bandwidth used.
2090  */
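/*
 * Worked example (illustrative packet counts): if interval_bw[1] holds 5
 * packets and all later intervals are empty, then at i = 1 there are
 * 1 << 2 = 4 scheduling opportunities, so 5 >> 2 = 1 packet is charged
 * against the worst-case microframe (bw_added = overhead + packet size)
 * and 5 % 4 = 1 packet carries over.  At i = 2 the carry-over doubles to
 * 2 packets against 8 opportunities, none schedule evenly, and the
 * remainder keeps doubling until the loop ends, when it is charged once
 * as an every-microframe over-schedule.
 */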
2091 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2092 		struct xhci_virt_device *virt_dev,
2093 		int old_active_eps)
2094 {
2095 	unsigned int bw_reserved;
2096 	unsigned int max_bandwidth;
2097 	unsigned int bw_used;
2098 	unsigned int block_size;
2099 	struct xhci_interval_bw_table *bw_table;
2100 	unsigned int packet_size = 0;
2101 	unsigned int overhead = 0;
2102 	unsigned int packets_transmitted = 0;
2103 	unsigned int packets_remaining = 0;
2104 	unsigned int i;
2105 
2106 	if (virt_dev->udev->speed == USB_SPEED_SUPER)
2107 		return xhci_check_ss_bw(xhci, virt_dev);
2108 
2109 	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2110 		max_bandwidth = HS_BW_LIMIT;
2111 		/* Convert percent of bus BW reserved to blocks reserved */
2112 		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2113 	} else {
2114 		max_bandwidth = FS_BW_LIMIT;
2115 		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2116 	}
2117 
2118 	bw_table = virt_dev->bw_table;
2119 	/* We need to translate the max packet size and max ESIT payloads into
2120 	 * the units the hardware uses.
2121 	 */
2122 	block_size = xhci_get_block_size(virt_dev->udev);
2123 
2124 	/* If we are manipulating a LS/FS device under a HS hub, double check
2125 	 * that the HS bus has enough bandwidth if we are activating a new TT.
2126 	 */
2127 	if (virt_dev->tt_info) {
2128 		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2129 				virt_dev->real_port);
2130 		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2131 			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2132 					"newly activated TT.\n");
2133 			return -ENOMEM;
2134 		}
2135 		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
2136 				virt_dev->tt_info->slot_id,
2137 				virt_dev->tt_info->ttport);
2138 	} else {
2139 		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
2140 				virt_dev->real_port);
2141 	}
2142 
2143 	/* Add in how much bandwidth will be used for interval zero, or the
2144 	 * rounded max ESIT payload + number of packets * largest overhead.
2145 	 */
2146 	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2147 		bw_table->interval_bw[0].num_packets *
2148 		xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2149 
2150 	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2151 		unsigned int bw_added;
2152 		unsigned int largest_mps;
2153 		unsigned int interval_overhead;
2154 
2155 		/*
2156 		 * How many packets could we transmit in this interval?
2157 		 * If packets didn't fit in the previous interval, we will need
2158 		 * to transmit that many packets twice within this interval.
2159 		 */
2160 		packets_remaining = 2 * packets_remaining +
2161 			bw_table->interval_bw[i].num_packets;
2162 
2163 		/* Find the largest max packet size of this or the previous
2164 		 * interval.
2165 		 */
2166 		if (list_empty(&bw_table->interval_bw[i].endpoints))
2167 			largest_mps = 0;
2168 		else {
2169 			struct xhci_virt_ep *virt_ep;
2170 			struct list_head *ep_entry;
2171 
2172 			ep_entry = bw_table->interval_bw[i].endpoints.next;
2173 			virt_ep = list_entry(ep_entry,
2174 					struct xhci_virt_ep, bw_endpoint_list);
2175 			/* Convert to blocks, rounding up */
2176 			largest_mps = DIV_ROUND_UP(
2177 					virt_ep->bw_info.max_packet_size,
2178 					block_size);
2179 		}
2180 		if (largest_mps > packet_size)
2181 			packet_size = largest_mps;
2182 
2183 		/* Use the larger overhead of this or the previous interval. */
2184 		interval_overhead = xhci_get_largest_overhead(
2185 				&bw_table->interval_bw[i]);
2186 		if (interval_overhead > overhead)
2187 			overhead = interval_overhead;
2188 
2189 		/* How many packets can we evenly distribute across
2190 		 * (1 << (i + 1)) possible scheduling opportunities?
2191 		 */
2192 		packets_transmitted = packets_remaining >> (i + 1);
2193 
2194 		/* Add in the bandwidth used for those scheduled packets */
2195 		bw_added = packets_transmitted * (overhead + packet_size);
2196 
2197 		/* How many packets do we have remaining to transmit? */
2198 		packets_remaining = packets_remaining % (1 << (i + 1));
2199 
2200 		/* What largest max packet size should those packets have? */
2201 		/* If we've transmitted all packets, don't carry over the
2202 		 * largest packet size.
2203 		 */
2204 		if (packets_remaining == 0) {
2205 			packet_size = 0;
2206 			overhead = 0;
2207 		} else if (packets_transmitted > 0) {
2208 			/* Otherwise if we do have remaining packets, and we've
2209 			 * scheduled some packets in this interval, take the
2210 			 * largest max packet size from endpoints with this
2211 			 * interval.
2212 			 */
2213 			packet_size = largest_mps;
2214 			overhead = interval_overhead;
2215 		}
2216 		/* Otherwise carry over packet_size and overhead from the last
2217 		 * time we had a remainder.
2218 		 */
2219 		bw_used += bw_added;
2220 		if (bw_used > max_bandwidth) {
2221 			xhci_warn(xhci, "Not enough bandwidth. "
2222 					"Proposed: %u, Max: %u\n",
2223 				bw_used, max_bandwidth);
2224 			return -ENOMEM;
2225 		}
2226 	}
2227 	/*
2228 	 * Ok, we know we have some packets left over after even-handedly
2229 	 * scheduling interval 15.  We don't know which microframes they will
2230 	 * fit into, so we over-schedule and say they will be scheduled every
2231 	 * microframe.
2232 	 */
2233 	if (packets_remaining > 0)
2234 		bw_used += overhead + packet_size;
2235 
2236 	if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2237 		unsigned int port_index = virt_dev->real_port - 1;
2238 
2239 		/* OK, we're manipulating a HS device attached to a
2240 		 * root port bandwidth domain.  Include the number of active TTs
2241 		 * in the bandwidth used.
2242 		 */
2243 		bw_used += TT_HS_OVERHEAD *
2244 			xhci->rh_bw[port_index].num_active_tts;
2245 	}
2246 
2247 	xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2248 		"Available: %u " "percent\n",
2249 		bw_used, max_bandwidth, bw_reserved,
2250 		(max_bandwidth - bw_used - bw_reserved) * 100 /
2251 		max_bandwidth);
2252 
2253 	bw_used += bw_reserved;
2254 	if (bw_used > max_bandwidth) {
2255 		xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2256 				bw_used, max_bandwidth);
2257 		return -ENOMEM;
2258 	}
2259 
2260 	bw_table->bw_used = bw_used;
2261 	return 0;
2262 }
2263 
2264 static bool xhci_is_async_ep(unsigned int ep_type)
2265 {
2266 	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2267 					ep_type != ISOC_IN_EP &&
2268 					ep_type != INT_IN_EP);
2269 }
2270 
2271 static bool xhci_is_sync_in_ep(unsigned int ep_type)
2272 {
2273 	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2274 }
2275 
2276 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2277 {
2278 	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2279 
2280 	if (ep_bw->ep_interval == 0)
2281 		return SS_OVERHEAD_BURST +
2282 			(ep_bw->mult * ep_bw->num_packets *
2283 					(SS_OVERHEAD + mps));
2284 	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2285 				(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2286 				1 << ep_bw->ep_interval);
2287 
2288 }
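/*
 * Illustrative reading of the math above: for ep_interval = 2 the endpoint
 * moves one ESIT's worth of packets every 1 << 2 = 4 microframes, so its
 * per-ESIT cost, mult * num_packets * (SS_OVERHEAD + mps +
 * SS_OVERHEAD_BURST), is averaged over those 4 microframes, rounded up.
 */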
2289 
2290 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2291 		struct xhci_bw_info *ep_bw,
2292 		struct xhci_interval_bw_table *bw_table,
2293 		struct usb_device *udev,
2294 		struct xhci_virt_ep *virt_ep,
2295 		struct xhci_tt_bw_info *tt_info)
2296 {
2297 	struct xhci_interval_bw	*interval_bw;
2298 	int normalized_interval;
2299 
2300 	if (xhci_is_async_ep(ep_bw->type))
2301 		return;
2302 
2303 	if (udev->speed == USB_SPEED_SUPER) {
2304 		if (xhci_is_sync_in_ep(ep_bw->type))
2305 			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2306 				xhci_get_ss_bw_consumed(ep_bw);
2307 		else
2308 			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2309 				xhci_get_ss_bw_consumed(ep_bw);
2310 		return;
2311 	}
2312 
2313 	/* SuperSpeed endpoints never get added to intervals in the table, so
2314 	 * this check is only valid for HS/FS/LS devices.
2315 	 */
2316 	if (list_empty(&virt_ep->bw_endpoint_list))
2317 		return;
2318 	/* For LS/FS devices, we need to translate the interval expressed in
2319 	 * microframes to frames.
2320 	 */
2321 	if (udev->speed == USB_SPEED_HIGH)
2322 		normalized_interval = ep_bw->ep_interval;
2323 	else
2324 		normalized_interval = ep_bw->ep_interval - 3;
2325 
2326 	if (normalized_interval == 0)
2327 		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2328 	interval_bw = &bw_table->interval_bw[normalized_interval];
2329 	interval_bw->num_packets -= ep_bw->num_packets;
2330 	switch (udev->speed) {
2331 	case USB_SPEED_LOW:
2332 		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2333 		break;
2334 	case USB_SPEED_FULL:
2335 		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2336 		break;
2337 	case USB_SPEED_HIGH:
2338 		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2339 		break;
2340 	case USB_SPEED_SUPER:
2341 	case USB_SPEED_UNKNOWN:
2342 	case USB_SPEED_WIRELESS:
2343 		/* Should never happen because only LS/FS/HS endpoints will get
2344 		 * added to the endpoint list.
2345 		 */
2346 		return;
2347 	}
2348 	if (tt_info)
2349 		tt_info->active_eps -= 1;
2350 	list_del_init(&virt_ep->bw_endpoint_list);
2351 }
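/*
 * Interval normalization example: a full-speed interrupt endpoint with
 * ep_interval = 5 has an ESIT of 1 << 5 = 32 microframes, i.e. 4 frames,
 * so it lands in interval_bw[5 - 3] = interval_bw[2] of the frame-based
 * FS/LS table; high-speed endpoints use their microframe interval as-is.
 */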
2352 
2353 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2354 		struct xhci_bw_info *ep_bw,
2355 		struct xhci_interval_bw_table *bw_table,
2356 		struct usb_device *udev,
2357 		struct xhci_virt_ep *virt_ep,
2358 		struct xhci_tt_bw_info *tt_info)
2359 {
2360 	struct xhci_interval_bw	*interval_bw;
2361 	struct xhci_virt_ep *smaller_ep;
2362 	int normalized_interval;
2363 
2364 	if (xhci_is_async_ep(ep_bw->type))
2365 		return;
2366 
2367 	if (udev->speed == USB_SPEED_SUPER) {
2368 		if (xhci_is_sync_in_ep(ep_bw->type))
2369 			xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2370 				xhci_get_ss_bw_consumed(ep_bw);
2371 		else
2372 			xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2373 				xhci_get_ss_bw_consumed(ep_bw);
2374 		return;
2375 	}
2376 
2377 	/* For LS/FS devices, we need to translate the interval expressed in
2378 	 * microframes to frames.
2379 	 */
2380 	if (udev->speed == USB_SPEED_HIGH)
2381 		normalized_interval = ep_bw->ep_interval;
2382 	else
2383 		normalized_interval = ep_bw->ep_interval - 3;
2384 
2385 	if (normalized_interval == 0)
2386 		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2387 	interval_bw = &bw_table->interval_bw[normalized_interval];
2388 	interval_bw->num_packets += ep_bw->num_packets;
2389 	switch (udev->speed) {
2390 	case USB_SPEED_LOW:
2391 		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2392 		break;
2393 	case USB_SPEED_FULL:
2394 		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2395 		break;
2396 	case USB_SPEED_HIGH:
2397 		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2398 		break;
2399 	case USB_SPEED_SUPER:
2400 	case USB_SPEED_UNKNOWN:
2401 	case USB_SPEED_WIRELESS:
2402 		/* Should never happen because only LS/FS/HS endpoints will get
2403 		 * added to the endpoint list.
2404 		 */
2405 		return;
2406 	}
2407 
2408 	if (tt_info)
2409 		tt_info->active_eps += 1;
2410 	/* Insert the endpoint into the list, largest max packet size first. */
2411 	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2412 			bw_endpoint_list) {
2413 		if (ep_bw->max_packet_size >=
2414 				smaller_ep->bw_info.max_packet_size) {
2415 			/* Add the new ep before the smaller endpoint */
2416 			list_add_tail(&virt_ep->bw_endpoint_list,
2417 					&smaller_ep->bw_endpoint_list);
2418 			return;
2419 		}
2420 	}
2421 	/* Add the new endpoint at the end of the list. */
2422 	list_add_tail(&virt_ep->bw_endpoint_list,
2423 			&interval_bw->endpoints);
2424 }
2425 
2426 void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2427 		struct xhci_virt_device *virt_dev,
2428 		int old_active_eps)
2429 {
2430 	struct xhci_root_port_bw_info *rh_bw_info;
2431 	if (!virt_dev->tt_info)
2432 		return;
2433 
2434 	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2435 	if (old_active_eps == 0 &&
2436 				virt_dev->tt_info->active_eps != 0) {
2437 		rh_bw_info->num_active_tts += 1;
2438 		rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2439 	} else if (old_active_eps != 0 &&
2440 				virt_dev->tt_info->active_eps == 0) {
2441 		rh_bw_info->num_active_tts -= 1;
2442 		rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2443 	}
2444 }
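/*
 * Example of the two transitions above: dropping the last periodic
 * endpoint behind a TT (old_active_eps != 0, active_eps now 0) returns
 * TT_HS_OVERHEAD to the root port's budget, while adding the first
 * periodic endpoint behind an idle TT charges it.
 */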
2445 
2446 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2447 		struct xhci_virt_device *virt_dev,
2448 		struct xhci_container_ctx *in_ctx)
2449 {
2450 	struct xhci_bw_info ep_bw_info[31];
2451 	int i;
2452 	struct xhci_input_control_ctx *ctrl_ctx;
2453 	int old_active_eps = 0;
2454 
2455 	if (virt_dev->tt_info)
2456 		old_active_eps = virt_dev->tt_info->active_eps;
2457 
2458 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2459 
2460 	for (i = 0; i < 31; i++) {
2461 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2462 			continue;
2463 
2464 		/* Make a copy of the BW info in case we need to revert this */
2465 		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2466 				sizeof(ep_bw_info[i]));
2467 		/* Drop the endpoint from the interval table if the endpoint is
2468 		 * being dropped or changed.
2469 		 */
2470 		if (EP_IS_DROPPED(ctrl_ctx, i))
2471 			xhci_drop_ep_from_interval_table(xhci,
2472 					&virt_dev->eps[i].bw_info,
2473 					virt_dev->bw_table,
2474 					virt_dev->udev,
2475 					&virt_dev->eps[i],
2476 					virt_dev->tt_info);
2477 	}
2478 	/* Overwrite the information stored in the endpoints' bw_info */
2479 	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2480 	for (i = 0; i < 31; i++) {
2481 		/* Add any changed or added endpoints to the interval table */
2482 		if (EP_IS_ADDED(ctrl_ctx, i))
2483 			xhci_add_ep_to_interval_table(xhci,
2484 					&virt_dev->eps[i].bw_info,
2485 					virt_dev->bw_table,
2486 					virt_dev->udev,
2487 					&virt_dev->eps[i],
2488 					virt_dev->tt_info);
2489 	}
2490 
2491 	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2492 		/* Ok, this fits in the bandwidth we have.
2493 		 * Update the number of active TTs.
2494 		 */
2495 		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2496 		return 0;
2497 	}
2498 
2499 	/* We don't have enough bandwidth for this, revert the stored info. */
2500 	for (i = 0; i < 31; i++) {
2501 		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2502 			continue;
2503 
2504 		/* Drop the new copies of any added or changed endpoints from
2505 		 * the interval table.
2506 		 */
2507 		if (EP_IS_ADDED(ctrl_ctx, i)) {
2508 			xhci_drop_ep_from_interval_table(xhci,
2509 					&virt_dev->eps[i].bw_info,
2510 					virt_dev->bw_table,
2511 					virt_dev->udev,
2512 					&virt_dev->eps[i],
2513 					virt_dev->tt_info);
2514 		}
2515 		/* Revert the endpoint back to its old information */
2516 		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2517 				sizeof(ep_bw_info[i]));
2518 		/* Add any changed or dropped endpoints back into the table */
2519 		if (EP_IS_DROPPED(ctrl_ctx, i))
2520 			xhci_add_ep_to_interval_table(xhci,
2521 					&virt_dev->eps[i].bw_info,
2522 					virt_dev->bw_table,
2523 					virt_dev->udev,
2524 					&virt_dev->eps[i],
2525 					virt_dev->tt_info);
2526 	}
2527 	return -ENOMEM;
2528 }
2529 
2530 
2531 /* Issue a configure endpoint command or evaluate context command
2532  * and wait for it to finish.
2533  */
2534 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2535 		struct usb_device *udev,
2536 		struct xhci_command *command,
2537 		bool ctx_change, bool must_succeed)
2538 {
2539 	int ret;
2540 	int timeleft;
2541 	unsigned long flags;
2542 	struct xhci_container_ctx *in_ctx;
2543 	struct completion *cmd_completion;
2544 	u32 *cmd_status;
2545 	struct xhci_virt_device *virt_dev;
2546 	union xhci_trb *cmd_trb;
2547 
2548 	spin_lock_irqsave(&xhci->lock, flags);
2549 	virt_dev = xhci->devs[udev->slot_id];
2550 
2551 	if (command)
2552 		in_ctx = command->in_ctx;
2553 	else
2554 		in_ctx = virt_dev->in_ctx;
2555 
2556 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2557 			xhci_reserve_host_resources(xhci, in_ctx)) {
2558 		spin_unlock_irqrestore(&xhci->lock, flags);
2559 		xhci_warn(xhci, "Not enough host resources, "
2560 				"active endpoint contexts = %u\n",
2561 				xhci->num_active_eps);
2562 		return -ENOMEM;
2563 	}
2564 	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2565 			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
2566 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2567 			xhci_free_host_resources(xhci, in_ctx);
2568 		spin_unlock_irqrestore(&xhci->lock, flags);
2569 		xhci_warn(xhci, "Not enough bandwidth\n");
2570 		return -ENOMEM;
2571 	}
2572 
2573 	if (command) {
2574 		cmd_completion = command->completion;
2575 		cmd_status = &command->status;
2576 		command->command_trb = xhci->cmd_ring->enqueue;
2577 
2578 		/* Enqueue pointer can be left pointing to the link TRB,
2579 		 * we must handle that
2580 		 */
2581 		if (TRB_TYPE_LINK_LE32(command->command_trb->link.control))
2582 			command->command_trb =
2583 				xhci->cmd_ring->enq_seg->next->trbs;
2584 
2585 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
2586 	} else {
2587 		cmd_completion = &virt_dev->cmd_completion;
2588 		cmd_status = &virt_dev->cmd_status;
2589 	}
2590 	init_completion(cmd_completion);
2591 
2592 	cmd_trb = xhci->cmd_ring->dequeue;
2593 	if (!ctx_change)
2594 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
2595 				udev->slot_id, must_succeed);
2596 	else
2597 		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
2598 				udev->slot_id, must_succeed);
2599 	if (ret < 0) {
2600 		if (command)
2601 			list_del(&command->cmd_list);
2602 		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2603 			xhci_free_host_resources(xhci, in_ctx);
2604 		spin_unlock_irqrestore(&xhci->lock, flags);
2605 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
2606 		return -ENOMEM;
2607 	}
2608 	xhci_ring_cmd_db(xhci);
2609 	spin_unlock_irqrestore(&xhci->lock, flags);
2610 
2611 	/* Wait for the configure endpoint command to complete */
2612 	timeleft = wait_for_completion_interruptible_timeout(
2613 			cmd_completion,
2614 			XHCI_CMD_DEFAULT_TIMEOUT);
2615 	if (timeleft <= 0) {
2616 		xhci_warn(xhci, "%s while waiting for %s command\n",
2617 				timeleft == 0 ? "Timeout" : "Signal",
2618 				ctx_change == 0 ?
2619 					"configure endpoint" :
2620 					"evaluate context");
2621 		/* cancel the configure endpoint command */
2622 		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
2623 		if (ret < 0)
2624 			return ret;
2625 		return -ETIME;
2626 	}
2627 
2628 	if (!ctx_change)
2629 		ret = xhci_configure_endpoint_result(xhci, udev, cmd_status);
2630 	else
2631 		ret = xhci_evaluate_context_result(xhci, udev, cmd_status);
2632 
2633 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2634 		spin_lock_irqsave(&xhci->lock, flags);
2635 		/* If the command failed, remove the reserved resources.
2636 		 * Otherwise, clean up the estimate to include dropped eps.
2637 		 */
2638 		if (ret)
2639 			xhci_free_host_resources(xhci, in_ctx);
2640 		else
2641 			xhci_finish_resource_reservation(xhci, in_ctx);
2642 		spin_unlock_irqrestore(&xhci->lock, flags);
2643 	}
2644 	return ret;
2645 }
2646 
2647 /* Called after one or more calls to xhci_add_endpoint() or
2648  * xhci_drop_endpoint().  If this call fails, the USB core is expected
2649  * to call xhci_reset_bandwidth().
2650  *
2651  * Since we are in the middle of changing either configuration or
2652  * installing a new alt setting, the USB core won't allow URBs to be
2653  * enqueued for any endpoint on the old config or interface.  Nothing
2654  * else should be touching the xhci->devs[slot_id] structure, so we
2655  * don't need to take the xhci->lock for manipulating that.
2656  */
2657 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2658 {
2659 	int i;
2660 	int ret = 0;
2661 	struct xhci_hcd *xhci;
2662 	struct xhci_virt_device	*virt_dev;
2663 	struct xhci_input_control_ctx *ctrl_ctx;
2664 	struct xhci_slot_ctx *slot_ctx;
2665 
2666 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2667 	if (ret <= 0)
2668 		return ret;
2669 	xhci = hcd_to_xhci(hcd);
2670 	if (xhci->xhc_state & XHCI_STATE_DYING)
2671 		return -ENODEV;
2672 
2673 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2674 	virt_dev = xhci->devs[udev->slot_id];
2675 
2676 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
2677 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2678 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2679 	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
2680 	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
2681 
2682 	/* Don't issue the command if there are no endpoints to update. */
2683 	if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
2684 			ctrl_ctx->drop_flags == 0)
2685 		return 0;
2686 
2687 	xhci_dbg(xhci, "New Input Control Context:\n");
2688 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2689 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
2690 		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2691 
2692 	ret = xhci_configure_endpoint(xhci, udev, NULL,
2693 			false, false);
2694 	if (ret) {
2695 		/* Caller should call reset_bandwidth() */
2696 		return ret;
2697 	}
2698 
2699 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
2700 	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
2701 		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
2702 
2703 	/* Free any rings that were dropped, but not changed. */
2704 	for (i = 1; i < 31; ++i) {
2705 		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
2706 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
2707 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2708 	}
2709 	xhci_zero_in_ctx(xhci, virt_dev);
2710 	/*
2711 	 * Install any rings for completely new endpoints or changed endpoints,
2712 	 * and free or cache any old rings from changed endpoints.
2713 	 */
2714 	for (i = 1; i < 31; ++i) {
2715 		if (!virt_dev->eps[i].new_ring)
2716 			continue;
2717 		/* Only cache or free the old ring if it exists.
2718 		 * It may not if this is the first add of an endpoint.
2719 		 */
2720 		if (virt_dev->eps[i].ring) {
2721 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2722 		}
2723 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
2724 		virt_dev->eps[i].new_ring = NULL;
2725 	}
2726 
2727 	return ret;
2728 }
2729 
2730 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2731 {
2732 	struct xhci_hcd *xhci;
2733 	struct xhci_virt_device	*virt_dev;
2734 	int i, ret;
2735 
2736 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
2737 	if (ret <= 0)
2738 		return;
2739 	xhci = hcd_to_xhci(hcd);
2740 
2741 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
2742 	virt_dev = xhci->devs[udev->slot_id];
2743 	/* Free any rings allocated for added endpoints */
2744 	for (i = 0; i < 31; ++i) {
2745 		if (virt_dev->eps[i].new_ring) {
2746 			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
2747 			virt_dev->eps[i].new_ring = NULL;
2748 		}
2749 	}
2750 	xhci_zero_in_ctx(xhci, virt_dev);
2751 }
2752 
2753 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
2754 		struct xhci_container_ctx *in_ctx,
2755 		struct xhci_container_ctx *out_ctx,
2756 		u32 add_flags, u32 drop_flags)
2757 {
2758 	struct xhci_input_control_ctx *ctrl_ctx;
2759 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
2760 	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
2761 	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
2762 	xhci_slot_copy(xhci, in_ctx, out_ctx);
2763 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
2764 
2765 	xhci_dbg(xhci, "Input Context:\n");
2766 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
2767 }
2768 
2769 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
2770 		unsigned int slot_id, unsigned int ep_index,
2771 		struct xhci_dequeue_state *deq_state)
2772 {
2773 	struct xhci_container_ctx *in_ctx;
2774 	struct xhci_ep_ctx *ep_ctx;
2775 	u32 added_ctxs;
2776 	dma_addr_t addr;
2777 
2778 	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
2779 			xhci->devs[slot_id]->out_ctx, ep_index);
2780 	in_ctx = xhci->devs[slot_id]->in_ctx;
2781 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
2782 	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
2783 			deq_state->new_deq_ptr);
2784 	if (addr == 0) {
2785 		xhci_warn(xhci, "WARN Cannot submit config ep after "
2786 				"reset ep command\n");
2787 		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
2788 				deq_state->new_deq_seg,
2789 				deq_state->new_deq_ptr);
2790 		return;
2791 	}
2792 	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
2793 
2794 	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
2795 	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
2796 			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
2797 }
2798 
2799 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2800 		struct usb_device *udev, unsigned int ep_index)
2801 {
2802 	struct xhci_dequeue_state deq_state;
2803 	struct xhci_virt_ep *ep;
2804 
2805 	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
2806 	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2807 	/* We need to move the HW's dequeue pointer past this TD,
2808 	 * or it will attempt to resend it on the next doorbell ring.
2809 	 */
2810 	xhci_find_new_dequeue_state(xhci, udev->slot_id,
2811 			ep_index, ep->stopped_stream, ep->stopped_td,
2812 			&deq_state);
2813 
2814 	/* HW with the reset endpoint quirk will use the saved dequeue state to
2815 	 * issue a configure endpoint command later.
2816 	 */
2817 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
2818 		xhci_dbg(xhci, "Queueing new dequeue state\n");
2819 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
2820 				ep_index, ep->stopped_stream, &deq_state);
2821 	} else {
2822 		/* Better hope no one uses the input context between now and the
2823 		 * reset endpoint completion!
2824 		 * XXX: No idea how this hardware will react when stream rings
2825 		 * are enabled.
2826 		 */
2827 		xhci_dbg(xhci, "Setting up input context for "
2828 				"configure endpoint command\n");
2829 		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
2830 				ep_index, &deq_state);
2831 	}
2832 }
2833 
2834 /* Deal with stalled endpoints.  The core should have sent the control message
2835  * to clear the halt condition.  However, we need to make the xHCI hardware
2836  * reset its sequence number, since a device will expect a sequence number of
2837  * zero after the halt condition is cleared.
2838  * Context: in_interrupt
2839  */
2840 void xhci_endpoint_reset(struct usb_hcd *hcd,
2841 		struct usb_host_endpoint *ep)
2842 {
2843 	struct xhci_hcd *xhci;
2844 	struct usb_device *udev;
2845 	unsigned int ep_index;
2846 	unsigned long flags;
2847 	int ret;
2848 	struct xhci_virt_ep *virt_ep;
2849 
2850 	xhci = hcd_to_xhci(hcd);
2851 	udev = (struct usb_device *) ep->hcpriv;
2852 	/* Called with a root hub endpoint (or an endpoint that wasn't added
2853 	 * with xhci_add_endpoint()).
2854 	 */
2855 	if (!ep->hcpriv)
2856 		return;
2857 	ep_index = xhci_get_endpoint_index(&ep->desc);
2858 	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2859 	if (!virt_ep->stopped_td) {
2860 		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
2861 				ep->desc.bEndpointAddress);
2862 		return;
2863 	}
2864 	if (usb_endpoint_xfer_control(&ep->desc)) {
2865 		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
2866 		return;
2867 	}
2868 
2869 	xhci_dbg(xhci, "Queueing reset endpoint command\n");
2870 	spin_lock_irqsave(&xhci->lock, flags);
2871 	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
2872 	/*
2873 	 * Can't change the ring dequeue pointer until it's transitioned to the
2874 	 * stopped state, which is only upon a successful reset endpoint
2875 	 * command.  Better hope that last command worked!
2876 	 */
2877 	if (!ret) {
2878 		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2879 		kfree(virt_ep->stopped_td);
2880 		xhci_ring_cmd_db(xhci);
2881 	}
2882 	virt_ep->stopped_td = NULL;
2883 	virt_ep->stopped_trb = NULL;
2884 	virt_ep->stopped_stream = 0;
2885 	spin_unlock_irqrestore(&xhci->lock, flags);
2886 
2887 	if (ret)
2888 		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
2889 }
2890 
2891 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
2892 		struct usb_device *udev, struct usb_host_endpoint *ep,
2893 		unsigned int slot_id)
2894 {
2895 	int ret;
2896 	unsigned int ep_index;
2897 	unsigned int ep_state;
2898 
2899 	if (!ep)
2900 		return -EINVAL;
2901 	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
2902 	if (ret <= 0)
2903 		return -EINVAL;
2904 	if (ep->ss_ep_comp.bmAttributes == 0) {
2905 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
2906 				" descriptor for ep 0x%x does not support streams\n",
2907 				ep->desc.bEndpointAddress);
2908 		return -EINVAL;
2909 	}
2910 
2911 	ep_index = xhci_get_endpoint_index(&ep->desc);
2912 	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
2913 	if (ep_state & EP_HAS_STREAMS ||
2914 			ep_state & EP_GETTING_STREAMS) {
2915 		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
2916 				"already has streams set up.\n",
2917 				ep->desc.bEndpointAddress);
2918 		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
2919 				"dynamic stream context array reallocation.\n");
2920 		return -EINVAL;
2921 	}
2922 	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
2923 		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
2924 				"endpoint 0x%x; URBs are pending.\n",
2925 				ep->desc.bEndpointAddress);
2926 		return -EINVAL;
2927 	}
2928 	return 0;
2929 }
2930 
2931 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
2932 		unsigned int *num_streams, unsigned int *num_stream_ctxs)
2933 {
2934 	unsigned int max_streams;
2935 
2936 	/* The stream context array size must be a power of two */
2937 	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
2938 	/*
2939 	 * Find out how many primary stream array entries the host controller
2940 	 * supports.  Later we may use secondary stream arrays (similar to 2nd
2941 	 * level page entries), but that's an optional feature for xHCI host
2942 	 * controllers. xHCs must support at least 4 stream IDs.
2943 	 */
2944 	max_streams = HCC_MAX_PSA(xhci->hcc_params);
2945 	if (*num_stream_ctxs > max_streams) {
2946 		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
2947 				max_streams);
2948 		*num_stream_ctxs = max_streams;
2949 		*num_streams = max_streams;
2950 	}
2951 }
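/*
 * Worked example: a request for 5 stream IDs (including stream 0) rounds
 * up to an 8-entry primary stream context array; if HCC_MAX_PSA reports
 * only 4 supported entries, both num_stream_ctxs and num_streams are
 * clamped to 4.
 */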
2952 
2953 /* Returns an error code if one of the endpoints already has streams.
2954  * This does not change any data structures; it only checks and gathers
2955  * information.
2956  */
2957 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
2958 		struct usb_device *udev,
2959 		struct usb_host_endpoint **eps, unsigned int num_eps,
2960 		unsigned int *num_streams, u32 *changed_ep_bitmask)
2961 {
2962 	unsigned int max_streams;
2963 	unsigned int endpoint_flag;
2964 	int i;
2965 	int ret;
2966 
2967 	for (i = 0; i < num_eps; i++) {
2968 		ret = xhci_check_streams_endpoint(xhci, udev,
2969 				eps[i], udev->slot_id);
2970 		if (ret < 0)
2971 			return ret;
2972 
2973 		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
2974 		if (max_streams < (*num_streams - 1)) {
2975 			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
2976 					eps[i]->desc.bEndpointAddress,
2977 					max_streams);
2978 			*num_streams = max_streams+1;
2979 		}
2980 
2981 		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
2982 		if (*changed_ep_bitmask & endpoint_flag)
2983 			return -EINVAL;
2984 		*changed_ep_bitmask |= endpoint_flag;
2985 	}
2986 	return 0;
2987 }
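/*
 * Clamping example: if an endpoint companion descriptor reports
 * usb_ss_max_streams() = 4 while the caller asked for *num_streams = 9
 * (8 driver-visible streams plus stream 0), then 4 < 9 - 1, so the
 * request is reduced to 4 + 1 = 5 stream IDs.
 */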
2988 
2989 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
2990 		struct usb_device *udev,
2991 		struct usb_host_endpoint **eps, unsigned int num_eps)
2992 {
2993 	u32 changed_ep_bitmask = 0;
2994 	unsigned int slot_id;
2995 	unsigned int ep_index;
2996 	unsigned int ep_state;
2997 	int i;
2998 
2999 	slot_id = udev->slot_id;
3000 	if (!xhci->devs[slot_id])
3001 		return 0;
3002 
3003 	for (i = 0; i < num_eps; i++) {
3004 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3005 		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3006 		/* Are streams already being freed for the endpoint? */
3007 		if (ep_state & EP_GETTING_NO_STREAMS) {
3008 			xhci_warn(xhci, "WARN Can't disable streams for "
3009 					"endpoint 0x%x\n, "
3010 					"streams are being disabled already.",
3011 					eps[i]->desc.bEndpointAddress);
3012 			return 0;
3013 		}
3014 		/* Are there actually any streams to free? */
3015 		if (!(ep_state & EP_HAS_STREAMS) &&
3016 				!(ep_state & EP_GETTING_STREAMS)) {
3017 			xhci_warn(xhci, "WARN Can't disable streams for "
3018 					"endpoint 0x%x\n, "
3019 					"streams are already disabled!",
3020 					eps[i]->desc.bEndpointAddress);
3021 			xhci_warn(xhci, "WARN xhci_free_streams() called "
3022 					"with non-streams endpoint\n");
3023 			return 0;
3024 		}
3025 		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3026 	}
3027 	return changed_ep_bitmask;
3028 }
3029 
3030 /*
3031  * The USB device drivers use this function (through the HCD interface in USB
3032  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3033  * coordinate mass storage command queueing across multiple endpoints (basically
3034  * a stream ID == a task ID).
3035  *
3036  * Setting up streams involves allocating the same size stream context array
3037  * for each endpoint and issuing a configure endpoint command for all endpoints.
3038  *
3039  * Don't allow the call to succeed if one endpoint only supports one stream
3040  * (which means it doesn't support streams at all).
3041  *
3042  * Drivers may get less stream IDs than they asked for, if the host controller
3043  * hardware or endpoints claim they can't support the number of requested
3044  * stream IDs.
3045  */
3046 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3047 		struct usb_host_endpoint **eps, unsigned int num_eps,
3048 		unsigned int num_streams, gfp_t mem_flags)
3049 {
3050 	int i, ret;
3051 	struct xhci_hcd *xhci;
3052 	struct xhci_virt_device *vdev;
3053 	struct xhci_command *config_cmd;
3054 	unsigned int ep_index;
3055 	unsigned int num_stream_ctxs;
3056 	unsigned long flags;
3057 	u32 changed_ep_bitmask = 0;
3058 
3059 	if (!eps)
3060 		return -EINVAL;
3061 
3062 	/* Add one to the number of streams requested to account for
3063 	 * stream 0 that is reserved for xHCI usage.
3064 	 */
3065 	num_streams += 1;
3066 	xhci = hcd_to_xhci(hcd);
3067 	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3068 			num_streams);
3069 
3070 	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
3071 	if (!config_cmd) {
3072 		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
3073 		return -ENOMEM;
3074 	}
3075 
3076 	/* Check to make sure all endpoints are not already configured for
3077 	 * streams.  While we're at it, find the maximum number of streams that
3078 	 * all the endpoints will support and check for duplicate endpoints.
3079 	 */
3080 	spin_lock_irqsave(&xhci->lock, flags);
3081 	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3082 			num_eps, &num_streams, &changed_ep_bitmask);
3083 	if (ret < 0) {
3084 		xhci_free_command(xhci, config_cmd);
3085 		spin_unlock_irqrestore(&xhci->lock, flags);
3086 		return ret;
3087 	}
3088 	if (num_streams <= 1) {
3089 		xhci_warn(xhci, "WARN: endpoints can't handle "
3090 				"more than one stream.\n");
3091 		xhci_free_command(xhci, config_cmd);
3092 		spin_unlock_irqrestore(&xhci->lock, flags);
3093 		return -EINVAL;
3094 	}
3095 	vdev = xhci->devs[udev->slot_id];
3096 	/* Mark each endpoint as being in transition, so
3097 	 * xhci_urb_enqueue() will reject all URBs.
3098 	 */
3099 	for (i = 0; i < num_eps; i++) {
3100 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3101 		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3102 	}
3103 	spin_unlock_irqrestore(&xhci->lock, flags);
3104 
3105 	/* Setup internal data structures and allocate HW data structures for
3106 	 * streams (but don't install the HW structures in the input context
3107 	 * until we're sure all memory allocation succeeded).
3108 	 */
3109 	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3110 	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3111 			num_stream_ctxs, num_streams);
3112 
3113 	for (i = 0; i < num_eps; i++) {
3114 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3115 		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3116 				num_stream_ctxs,
3117 				num_streams, mem_flags);
3118 		if (!vdev->eps[ep_index].stream_info)
3119 			goto cleanup;
3120 		/* Set maxPstreams in endpoint context and update deq ptr to
3121 		 * point to stream context array. FIXME
3122 		 */
3123 	}
3124 
3125 	/* Set up the input context for a configure endpoint command. */
3126 	for (i = 0; i < num_eps; i++) {
3127 		struct xhci_ep_ctx *ep_ctx;
3128 
3129 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3130 		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3131 
3132 		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3133 				vdev->out_ctx, ep_index);
3134 		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3135 				vdev->eps[ep_index].stream_info);
3136 	}
3137 	/* Tell the HW to drop its old copy of the endpoint context info
3138 	 * and add the updated copy from the input context.
3139 	 */
3140 	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3141 			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3142 
3143 	/* Issue and wait for the configure endpoint command */
3144 	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3145 			false, false);
3146 
3147 	/* xHC rejected the configure endpoint command for some reason, so we
3148 	 * leave the old ring intact and free our internal streams data
3149 	 * structure.
3150 	 */
3151 	if (ret < 0)
3152 		goto cleanup;
3153 
3154 	spin_lock_irqsave(&xhci->lock, flags);
3155 	for (i = 0; i < num_eps; i++) {
3156 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3157 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3158 		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3159 			 udev->slot_id, ep_index);
3160 		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3161 	}
3162 	xhci_free_command(xhci, config_cmd);
3163 	spin_unlock_irqrestore(&xhci->lock, flags);
3164 
3165 	/* Subtract 1 for stream 0, which drivers can't use */
3166 	return num_streams - 1;
3167 
3168 cleanup:
3169 	/* If it didn't work, free the streams! */
3170 	for (i = 0; i < num_eps; i++) {
3171 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3172 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3173 		vdev->eps[ep_index].stream_info = NULL;
3174 		/* FIXME Unset maxPstreams in endpoint context and
3175 		 * update deq ptr to point to normal string ring.
3176 		 */
3177 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3178 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3179 		xhci_endpoint_zero(xhci, vdev, eps[i]);
3180 	}
3181 	xhci_free_command(xhci, config_cmd);
3182 	return -ENOMEM;
3183 }
3184 
3185 /* Transition the endpoint from using streams to being a "normal" endpoint
3186  * without streams.
3187  *
3188  * Modify the endpoint context state, submit a configure endpoint command,
3189  * and free all endpoint rings for streams if that completes successfully.
3190  */
3191 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3192 		struct usb_host_endpoint **eps, unsigned int num_eps,
3193 		gfp_t mem_flags)
3194 {
3195 	int i, ret;
3196 	struct xhci_hcd *xhci;
3197 	struct xhci_virt_device *vdev;
3198 	struct xhci_command *command;
3199 	unsigned int ep_index;
3200 	unsigned long flags;
3201 	u32 changed_ep_bitmask;
3202 
3203 	xhci = hcd_to_xhci(hcd);
3204 	vdev = xhci->devs[udev->slot_id];
3205 
3206 	/* Set up a configure endpoint command to remove the streams rings */
3207 	spin_lock_irqsave(&xhci->lock, flags);
3208 	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3209 			udev, eps, num_eps);
3210 	if (changed_ep_bitmask == 0) {
3211 		spin_unlock_irqrestore(&xhci->lock, flags);
3212 		return -EINVAL;
3213 	}
3214 
3215 	/* Use the xhci_command structure from the first endpoint.  We may have
3216 	 * allocated too many, but the driver may call xhci_free_streams() for
3217 	 * each endpoint it grouped into one call to xhci_alloc_streams().
3218 	 */
3219 	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3220 	command = vdev->eps[ep_index].stream_info->free_streams_command;
3221 	for (i = 0; i < num_eps; i++) {
3222 		struct xhci_ep_ctx *ep_ctx;
3223 
3224 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3225 		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3226 		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3227 			EP_GETTING_NO_STREAMS;
3228 
3229 		xhci_endpoint_copy(xhci, command->in_ctx,
3230 				vdev->out_ctx, ep_index);
3231 		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
3232 				&vdev->eps[ep_index]);
3233 	}
3234 	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3235 			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
3236 	spin_unlock_irqrestore(&xhci->lock, flags);
3237 
3238 	/* Issue and wait for the configure endpoint command,
3239 	 * which must succeed.
3240 	 */
3241 	ret = xhci_configure_endpoint(xhci, udev, command,
3242 			false, true);
3243 
3244 	/* xHC rejected the configure endpoint command for some reason, so we
3245 	 * leave the streams rings intact.
3246 	 */
3247 	if (ret < 0)
3248 		return ret;
3249 
3250 	spin_lock_irqsave(&xhci->lock, flags);
3251 	for (i = 0; i < num_eps; i++) {
3252 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3253 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3254 		vdev->eps[ep_index].stream_info = NULL;
3255 		/* FIXME Unset maxPstreams in endpoint context and
3256 		 * update deq ptr to point to the normal endpoint ring.
3257 		 */
3258 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3259 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3260 	}
3261 	spin_unlock_irqrestore(&xhci->lock, flags);
3262 
3263 	return 0;
3264 }
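
/*
 * Illustrative sketch: the reverse path is normally entered through the
 * USB core's usb_free_streams() wrapper, with the same endpoint array
 * that was passed to usb_alloc_streams() (example names as above):
 *
 *	usb_free_streams(intf, eps, ARRAY_SIZE(eps), GFP_NOIO);
 */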
3265 
3266 /*
3267  * Deletes endpoint resources for endpoints that were active before a Reset
3268  * Device command, or a Disable Slot command.  The Reset Device command leaves
3269  * the control endpoint intact, whereas the Disable Slot command deletes it.
3270  *
3271  * Must be called with xhci->lock held.
3272  */
3273 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3274 	struct xhci_virt_device *virt_dev, bool drop_control_ep)
3275 {
3276 	int i;
3277 	unsigned int num_dropped_eps = 0;
3278 	unsigned int drop_flags = 0;
3279 
3280 	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3281 		if (virt_dev->eps[i].ring) {
3282 			drop_flags |= 1 << i;
3283 			num_dropped_eps++;
3284 		}
3285 	}
3286 	xhci->num_active_eps -= num_dropped_eps;
3287 	if (num_dropped_eps)
3288 		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
3289 				"%u now active.\n",
3290 				num_dropped_eps, drop_flags,
3291 				xhci->num_active_eps);
3292 }
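
/*
 * Worked example for the bitmask above (illustrative values): if only
 * endpoint contexts 2 and 5 still have rings and drop_control_ep is
 * false, the loop computes drop_flags = (1 << 2) | (1 << 5) = 0x24 and
 * num_dropped_eps = 2, so xhci->num_active_eps shrinks by 2.
 */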
3293 
3294 /*
3295  * This submits a Reset Device Command, which will set the device state to 0,
3296  * set the device address to 0, and disable all the endpoints except the default
3297  * control endpoint.  The USB core should come back and call
3298  * xhci_address_device(), and then re-set up the configuration.  If this is
3299  * called because of a usb_reset_and_verify_device(), then the old alternate
3300  * settings will be re-installed through the normal bandwidth allocation
3301  * functions.
3302  *
3303  * Wait for the Reset Device command to finish.  Remove all structures
3304  * associated with the endpoints that were disabled.  Clear the input device
3305  * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
3306  *
3307  * If the virt_dev to be reset does not exist or does not match the udev,
3308  * it means the device is lost, possibly due to the xHC restore error and
3309  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3310  * re-allocate the device.
3311  */
3312 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3313 {
3314 	int ret, i;
3315 	unsigned long flags;
3316 	struct xhci_hcd *xhci;
3317 	unsigned int slot_id;
3318 	struct xhci_virt_device *virt_dev;
3319 	struct xhci_command *reset_device_cmd;
3320 	int timeleft;
3321 	int last_freed_endpoint;
3322 	struct xhci_slot_ctx *slot_ctx;
3323 	int old_active_eps = 0;
3324 
3325 	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3326 	if (ret <= 0)
3327 		return ret;
3328 	xhci = hcd_to_xhci(hcd);
3329 	slot_id = udev->slot_id;
3330 	virt_dev = xhci->devs[slot_id];
3331 	if (!virt_dev) {
3332 		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3333 				"not exist. Re-allocate the device\n", slot_id);
3334 		ret = xhci_alloc_dev(hcd, udev);
3335 		if (ret == 1)
3336 			return 0;
3337 		else
3338 			return -EINVAL;
3339 	}
3340 
3341 	if (virt_dev->udev != udev) {
3342 		/* If the virt_dev and the udev do not match, this virt_dev
3343 		 * may belong to another udev.
3344 		 * Re-allocate the device.
3345 		 */
3346 		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3347 				"not match the udev. Re-allocate the device\n",
3348 				slot_id);
3349 		ret = xhci_alloc_dev(hcd, udev);
3350 		if (ret == 1)
3351 			return 0;
3352 		else
3353 			return -EINVAL;
3354 	}
3355 
3356 	/* If device is not setup, there is no point in resetting it */
3357 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3358 	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3359 						SLOT_STATE_DISABLED)
3360 		return 0;
3361 
3362 	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3363 	/* Allocate the command structure that holds the struct completion.
3364 	 * Assume we're in process context, since the normal device reset
3365 	 * process has to wait for the device anyway.  Storage devices are
3366 	 * reset as part of error handling, so use GFP_NOIO instead of
3367 	 * GFP_KERNEL.
3368 	 */
3369 	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
3370 	if (!reset_device_cmd) {
3371 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3372 		return -ENOMEM;
3373 	}
3374 
3375 	/* Attempt to submit the Reset Device command to the command ring */
3376 	spin_lock_irqsave(&xhci->lock, flags);
3377 	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
3378 
3379 	/* Enqueue pointer can be left pointing to the link TRB,
3380 	 * so we must handle that case.
3381 	 */
3382 	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
3383 		reset_device_cmd->command_trb =
3384 			xhci->cmd_ring->enq_seg->next->trbs;
3385 
3386 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
3387 	ret = xhci_queue_reset_device(xhci, slot_id);
3388 	if (ret) {
3389 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3390 		list_del(&reset_device_cmd->cmd_list);
3391 		spin_unlock_irqrestore(&xhci->lock, flags);
3392 		goto command_cleanup;
3393 	}
3394 	xhci_ring_cmd_db(xhci);
3395 	spin_unlock_irqrestore(&xhci->lock, flags);
3396 
3397 	/* Wait for the Reset Device command to finish */
3398 	timeleft = wait_for_completion_interruptible_timeout(
3399 			reset_device_cmd->completion,
3400 			USB_CTRL_SET_TIMEOUT);
3401 	if (timeleft <= 0) {
3402 		xhci_warn(xhci, "%s while waiting for reset device command\n",
3403 				timeleft == 0 ? "Timeout" : "Signal");
3404 		spin_lock_irqsave(&xhci->lock, flags);
3405 		/* The timeout might have raced with the event ring handler, so
3406 		 * only delete from the list if the item isn't poisoned.
3407 		 */
3408 		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
3409 			list_del(&reset_device_cmd->cmd_list);
3410 		spin_unlock_irqrestore(&xhci->lock, flags);
3411 		ret = -ETIME;
3412 		goto command_cleanup;
3413 	}
3414 
3415 	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3416 	 * unless we tried to reset a slot ID that wasn't enabled,
3417 	 * or the device wasn't in the addressed or configured state.
3418 	 */
3419 	ret = reset_device_cmd->status;
3420 	switch (ret) {
3421 	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
3422 	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
3423 		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
3424 				slot_id,
3425 				xhci_get_slot_state(xhci, virt_dev->out_ctx));
3426 		xhci_info(xhci, "Not freeing device rings.\n");
3427 		/* Don't treat this as an error.  May change my mind later. */
3428 		ret = 0;
3429 		goto command_cleanup;
3430 	case COMP_SUCCESS:
3431 		xhci_dbg(xhci, "Successful reset device command.\n");
3432 		break;
3433 	default:
3434 		if (xhci_is_vendor_info_code(xhci, ret))
3435 			break;
3436 		xhci_warn(xhci, "Unknown completion code %u for "
3437 				"reset device command.\n", ret);
3438 		ret = -EINVAL;
3439 		goto command_cleanup;
3440 	}
3441 
3442 	/* Free up host controller endpoint resources */
3443 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3444 		spin_lock_irqsave(&xhci->lock, flags);
3445 		/* Don't delete the default control endpoint resources */
3446 		xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3447 		spin_unlock_irqrestore(&xhci->lock, flags);
3448 	}
3449 
3450 	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
3451 	last_freed_endpoint = 1;
3452 	for (i = 1; i < 31; ++i) {
3453 		struct xhci_virt_ep *ep = &virt_dev->eps[i];
3454 
3455 		if (ep->ep_state & EP_HAS_STREAMS) {
3456 			xhci_free_stream_info(xhci, ep->stream_info);
3457 			ep->stream_info = NULL;
3458 			ep->ep_state &= ~EP_HAS_STREAMS;
3459 		}
3460 
3461 		if (ep->ring) {
3462 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
3463 			last_freed_endpoint = i;
3464 		}
3465 		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3466 			xhci_drop_ep_from_interval_table(xhci,
3467 					&virt_dev->eps[i].bw_info,
3468 					virt_dev->bw_table,
3469 					udev,
3470 					&virt_dev->eps[i],
3471 					virt_dev->tt_info);
3472 		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3473 	}
3474 	/* If necessary, update the number of active TTs on this root port */
3475 	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3476 
3477 	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
3478 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
3479 	ret = 0;
3480 
3481 command_cleanup:
3482 	xhci_free_command(xhci, reset_device_cmd);
3483 	return ret;
3484 }
3485 
3486 /*
3487  * At this point, the struct usb_device is about to go away, the device has
3488  * disconnected, and all traffic has been stopped and the endpoints have been
3489  * disabled.  Free any HC data structures associated with that device.
3490  */
3491 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3492 {
3493 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3494 	struct xhci_virt_device *virt_dev;
3495 	unsigned long flags;
3496 	u32 state;
3497 	int i, ret;
3498 
3499 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3500 	/* If the host is halted due to driver unload, we still need to free the
3501 	 * device.
3502 	 */
3503 	if (ret <= 0 && ret != -ENODEV)
3504 		return;
3505 
3506 	virt_dev = xhci->devs[udev->slot_id];
3507 
3508 	/* Stop any wayward timer functions (which may grab the lock) */
3509 	for (i = 0; i < 31; ++i) {
3510 		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
3511 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3512 	}
3513 
3514 	if (udev->usb2_hw_lpm_enabled) {
3515 		xhci_set_usb2_hardware_lpm(hcd, udev, 0);
3516 		udev->usb2_hw_lpm_enabled = 0;
3517 	}
3518 
3519 	spin_lock_irqsave(&xhci->lock, flags);
3520 	/* Don't disable the slot if the host controller is dead. */
3521 	state = xhci_readl(xhci, &xhci->op_regs->status);
3522 	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3523 			(xhci->xhc_state & XHCI_STATE_HALTED)) {
3524 		xhci_free_virt_device(xhci, udev->slot_id);
3525 		spin_unlock_irqrestore(&xhci->lock, flags);
3526 		return;
3527 	}
3528 
3529 	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
3530 		spin_unlock_irqrestore(&xhci->lock, flags);
3531 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3532 		return;
3533 	}
3534 	xhci_ring_cmd_db(xhci);
3535 	spin_unlock_irqrestore(&xhci->lock, flags);
3536 	/*
3537 	 * Event command completion handler will free any data structures
3538 	 * associated with the slot.  XXX Can free sleep?
3539 	 */
3540 }
3541 
3542 /*
3543  * Checks if we have enough host controller resources for the default control
3544  * endpoint.
3545  *
3546  * Must be called with xhci->lock held.
3547  */
3548 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3549 {
3550 	if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
3551 		xhci_dbg(xhci, "Not enough ep ctxs: "
3552 				"%u active, need to add 1, limit is %u.\n",
3553 				xhci->num_active_eps, xhci->limit_active_eps);
3554 		return -ENOMEM;
3555 	}
3556 	xhci->num_active_eps += 1;
3557 	xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
3558 			xhci->num_active_eps);
3559 	return 0;
3560 }
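
/*
 * Worked example (illustrative values): with limit_active_eps = 64 and
 * num_active_eps = 63, the check "63 + 1 > 64" is false, so the
 * reservation succeeds and num_active_eps becomes 64.  A second caller
 * then sees "64 + 1 > 64" and gets -ENOMEM.
 */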
3561 
3562 
3563 /*
3564  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
3565  * timed out, or allocating memory failed.  Returns 1 on success.
3566  */
3567 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3568 {
3569 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3570 	unsigned long flags;
3571 	int timeleft;
3572 	int ret;
3573 	union xhci_trb *cmd_trb;
3574 
3575 	spin_lock_irqsave(&xhci->lock, flags);
3576 	cmd_trb = xhci->cmd_ring->dequeue;
3577 	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
3578 	if (ret) {
3579 		spin_unlock_irqrestore(&xhci->lock, flags);
3580 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3581 		return 0;
3582 	}
3583 	xhci_ring_cmd_db(xhci);
3584 	spin_unlock_irqrestore(&xhci->lock, flags);
3585 
3586 	/* XXX: how much time for xHC slot assignment? */
3587 	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3588 			XHCI_CMD_DEFAULT_TIMEOUT);
3589 	if (timeleft <= 0) {
3590 		xhci_warn(xhci, "%s while waiting for a slot\n",
3591 				timeleft == 0 ? "Timeout" : "Signal");
3592 		/* cancel the enable slot request */
3593 		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
3594 	}
3595 
3596 	if (!xhci->slot_id) {
3597 		xhci_err(xhci, "Error while assigning device slot ID\n");
3598 		return 0;
3599 	}
3600 
3601 	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3602 		spin_lock_irqsave(&xhci->lock, flags);
3603 		ret = xhci_reserve_host_control_ep_resources(xhci);
3604 		if (ret) {
3605 			spin_unlock_irqrestore(&xhci->lock, flags);
3606 			xhci_warn(xhci, "Not enough host resources, "
3607 					"active endpoint contexts = %u\n",
3608 					xhci->num_active_eps);
3609 			goto disable_slot;
3610 		}
3611 		spin_unlock_irqrestore(&xhci->lock, flags);
3612 	}
3613 	/* Use GFP_NOIO, since this function can be called from
3614 	 * xhci_discover_or_reset_device(), which may be called as part of
3615 	 * mass storage driver error handling.
3616 	 */
3617 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
3618 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
3619 		goto disable_slot;
3620 	}
3621 	udev->slot_id = xhci->slot_id;
3622 	/* Is this an LS or FS device under an HS hub? */
3623 	/* Hub or peripheral? */
3624 	return 1;
3625 
3626 disable_slot:
3627 	/* Disable slot, if we can do it without mem alloc */
3628 	spin_lock_irqsave(&xhci->lock, flags);
3629 	if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
3630 		xhci_ring_cmd_db(xhci);
3631 	spin_unlock_irqrestore(&xhci->lock, flags);
3632 	return 0;
3633 }
3634 
3635 /*
3636  * Issue an Address Device command (which will issue a SetAddress request to
3637  * the device).
3638  * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
3639  * we should only issue and wait on one address command at a time.
3640  *
3641  * We add one to the device address issued by the hardware because the USB core
3642  * uses address 1 for the root hubs (even though they're not really devices).
3643  */
3644 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
3645 {
3646 	unsigned long flags;
3647 	int timeleft;
3648 	struct xhci_virt_device *virt_dev;
3649 	int ret = 0;
3650 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3651 	struct xhci_slot_ctx *slot_ctx;
3652 	struct xhci_input_control_ctx *ctrl_ctx;
3653 	u64 temp_64;
3654 	union xhci_trb *cmd_trb;
3655 
3656 	if (!udev->slot_id) {
3657 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
3658 		return -EINVAL;
3659 	}
3660 
3661 	virt_dev = xhci->devs[udev->slot_id];
3662 
3663 	if (WARN_ON(!virt_dev)) {
3664 		/*
3665 		 * In plug/unplug torture tests with a NEC controller,
3666 		 * a NULL pointer dereference was once observed because virt_dev was NULL.
3667 		 * Print useful debug rather than crash if it is observed again!
3668 		 */
3669 		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
3670 			udev->slot_id);
3671 		return -EINVAL;
3672 	}
3673 
3674 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3675 	/*
3676 	 * If this is the first Set Address since device plug-in or
3677 	 * virt_device reallocation after a resume with an xHCI power loss,
3678 	 * then set up the slot context.
3679 	 */
3680 	if (!slot_ctx->dev_info)
3681 		xhci_setup_addressable_virt_dev(xhci, udev);
3682 	/* Otherwise, update the control endpoint ring enqueue pointer. */
3683 	else
3684 		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
3685 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
3686 	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
3687 	ctrl_ctx->drop_flags = 0;
3688 
3689 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3690 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3691 
3692 	spin_lock_irqsave(&xhci->lock, flags);
3693 	cmd_trb = xhci->cmd_ring->dequeue;
3694 	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
3695 					udev->slot_id);
3696 	if (ret) {
3697 		spin_unlock_irqrestore(&xhci->lock, flags);
3698 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3699 		return ret;
3700 	}
3701 	xhci_ring_cmd_db(xhci);
3702 	spin_unlock_irqrestore(&xhci->lock, flags);
3703 
3704 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
3705 	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
3706 			XHCI_CMD_DEFAULT_TIMEOUT);
3707 	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
3708 	 * the SetAddress() "recovery interval" required by USB and aborting the
3709 	 * command on a timeout."
3710 	 */
3711 	if (timeleft <= 0) {
3712 		xhci_warn(xhci, "%s while waiting for address device command\n",
3713 				timeleft == 0 ? "Timeout" : "Signal");
3714 		/* cancel the address device command */
3715 		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
3716 		if (ret < 0)
3717 			return ret;
3718 		return -ETIME;
3719 	}
3720 
3721 	switch (virt_dev->cmd_status) {
3722 	case COMP_CTX_STATE:
3723 	case COMP_EBADSLT:
3724 		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
3725 				udev->slot_id);
3726 		ret = -EINVAL;
3727 		break;
3728 	case COMP_TX_ERR:
3729 		dev_warn(&udev->dev, "Device not responding to set address.\n");
3730 		ret = -EPROTO;
3731 		break;
3732 	case COMP_DEV_ERR:
3733 		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
3734 				"device command.\n");
3735 		ret = -ENODEV;
3736 		break;
3737 	case COMP_SUCCESS:
3738 		xhci_dbg(xhci, "Successful Address Device command\n");
3739 		break;
3740 	default:
3741 		xhci_err(xhci, "ERROR: unexpected command completion "
3742 				"code 0x%x.\n", virt_dev->cmd_status);
3743 		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3744 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3745 		ret = -EINVAL;
3746 		break;
3747 	}
3748 	if (ret) {
3749 		return ret;
3750 	}
3751 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
3752 	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
3753 	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
3754 		 udev->slot_id,
3755 		 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
3756 		 (unsigned long long)
3757 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
3758 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
3759 			(unsigned long long)virt_dev->out_ctx->dma);
3760 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
3761 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
3762 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
3763 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
3764 	/*
3765 	 * USB core uses address 1 for the roothubs, so we add one to the
3766 	 * address given back to us by the HC.
3767 	 */
3768 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3769 	/* Use kernel assigned address for devices; store xHC assigned
3770 	 * address locally. */
3771 	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
3772 		+ 1;
3773 	/* Zero the input context control for later use */
3774 	ctrl_ctx->add_flags = 0;
3775 	ctrl_ctx->drop_flags = 0;
3776 
3777 	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
3778 
3779 	return 0;
3780 }
3781 
3782 /*
3783  * Translate the port index into the real index in the HW port status
3784  * registers. Calculate the offset between the port's PORTSC register
3785  * and the port status base, then divide by the number of per-port
3786  * registers to get the real index. The raw port number is 1-based.
3787  */
3788 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
3789 {
3790 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3791 	__le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
3792 	__le32 __iomem *addr;
3793 	int raw_port;
3794 
3795 	if (hcd->speed != HCD_USB3)
3796 		addr = xhci->usb2_ports[port1 - 1];
3797 	else
3798 		addr = xhci->usb3_ports[port1 - 1];
3799 
3800 	raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
3801 	return raw_port;
3802 }
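
/*
 * Worked example (illustrative): each port owns NUM_PORT_REGS (four)
 * 32-bit registers starting at PORTSC.  If addr points 8 register
 * dwords past port_status_base, then raw_port = 8 / 4 + 1 = 3, i.e.
 * the third port in the HW numbering.
 */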
3803 
3804 #ifdef CONFIG_USB_SUSPEND
3805 
3806 /* BESL to HIRD Encoding array for USB2 LPM */
3807 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
3808 	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
3809 
3810 /* Calculate HIRD/BESL for USB2 PORTPMSC */
3811 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
3812 					struct usb_device *udev)
3813 {
3814 	int u2del, besl, besl_host;
3815 	int besl_device = 0;
3816 	u32 field;
3817 
3818 	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
3819 	field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
3820 
3821 	if (field & USB_BESL_SUPPORT) {
3822 		for (besl_host = 0; besl_host < 16; besl_host++) {
3823 			if (xhci_besl_encoding[besl_host] >= u2del)
3824 				break;
3825 		}
3826 		/* Use baseline BESL value as default */
3827 		if (field & USB_BESL_BASELINE_VALID)
3828 			besl_device = USB_GET_BESL_BASELINE(field);
3829 		else if (field & USB_BESL_DEEP_VALID)
3830 			besl_device = USB_GET_BESL_DEEP(field);
3831 	} else {
3832 		if (u2del <= 50)
3833 			besl_host = 0;
3834 		else
3835 			besl_host = (u2del - 51) / 75 + 1;
3836 	}
3837 
3838 	besl = besl_host + besl_device;
3839 	if (besl > 15)
3840 		besl = 15;
3841 
3842 	return besl;
3843 }
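
/*
 * Worked examples (illustrative values): with u2del = 400 and a
 * BESL-capable device reporting a baseline BESL of 2, the scan stops at
 * xhci_besl_encoding[4] = 400, so besl = 4 + 2 = 6.  For a device
 * without BESL support and the same u2del, besl_host =
 * (400 - 51) / 75 + 1 = 5 and besl = 5.  Either way the result is
 * clamped to the 4-bit maximum of 15.
 */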
3844 
3845 static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
3846 					struct usb_device *udev)
3847 {
3848 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
3849 	struct dev_info	*dev_info;
3850 	__le32 __iomem	**port_array;
3851 	__le32 __iomem	*addr, *pm_addr;
3852 	u32		temp, dev_id;
3853 	unsigned int	port_num;
3854 	unsigned long	flags;
3855 	int		hird;
3856 	int		ret;
3857 
3858 	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
3859 			!udev->lpm_capable)
3860 		return -EINVAL;
3861 
3862 	/* So far, we only support LPM for non-hub devices connected to the root hub */
3863 	if (!udev->parent || udev->parent->parent ||
3864 			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3865 		return -EINVAL;
3866 
3867 	spin_lock_irqsave(&xhci->lock, flags);
3868 
3869 	/* Look for devices in lpm_failed_devs list */
3870 	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
3871 			le16_to_cpu(udev->descriptor.idProduct);
3872 	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
3873 		if (dev_info->dev_id == dev_id) {
3874 			ret = -EINVAL;
3875 			goto finish;
3876 		}
3877 	}
3878 
3879 	port_array = xhci->usb2_ports;
3880 	port_num = udev->portnum - 1;
3881 
3882 	if (port_num > HCS_MAX_PORTS(xhci->hcs_params1)) {
3883 		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
3884 		ret = -EINVAL;
3885 		goto finish;
3886 	}
3887 
3888 	/*
3889 	 * Test USB 2.0 software LPM.
3890 	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
3891 	 * hardware-controlled USB 2.0 LPM. See section 5.4.11 and 4.23.5.1.1.1
3892 	 * in the June 2011 errata release.
3893 	 */
3894 	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
3895 	/*
3896 	 * Set L1 Device Slot and HIRD/BESL.
3897 	 * Check device's USB 2.0 extension descriptor to determine whether
3898 	 * HIRD or BESL should be used. See the USB 2.0 LPM errata.
3899 	 */
3900 	pm_addr = port_array[port_num] + 1;
3901 	hird = xhci_calculate_hird_besl(xhci, udev);
3902 	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
3903 	xhci_writel(xhci, temp, pm_addr);
3904 
3905 	/* Set port link state to U2(L1) */
3906 	addr = port_array[port_num];
3907 	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);
3908 
3909 	/* wait for ACK */
3910 	spin_unlock_irqrestore(&xhci->lock, flags);
3911 	msleep(10);
3912 	spin_lock_irqsave(&xhci->lock, flags);
3913 
3914 	/* Check L1 Status */
3915 	ret = xhci_handshake(xhci, pm_addr,
3916 			PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
3917 	if (ret != -ETIMEDOUT) {
3918 		/* enter L1 successfully */
3919 		temp = xhci_readl(xhci, addr);
3920 		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
3921 				port_num, temp);
3922 		ret = 0;
3923 	} else {
3924 		temp = xhci_readl(xhci, pm_addr);
3925 		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
3926 				port_num, temp & PORT_L1S_MASK);
3927 		ret = -EINVAL;
3928 	}
3929 
3930 	/* Resume the port */
3931 	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);
3932 
3933 	spin_unlock_irqrestore(&xhci->lock, flags);
3934 	msleep(10);
3935 	spin_lock_irqsave(&xhci->lock, flags);
3936 
3937 	/* Clear PLC */
3938 	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);
3939 
3940 	/* Check PORTSC to make sure the device is in the right state */
3941 	if (!ret) {
3942 		temp = xhci_readl(xhci, addr);
3943 		xhci_dbg(xhci, "resumed port %d status 0x%x\n",	port_num, temp);
3944 		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
3945 				(temp & PORT_PLS_MASK) != XDEV_U0) {
3946 			xhci_dbg(xhci, "port L1 resume fail\n");
3947 			ret = -EINVAL;
3948 		}
3949 	}
3950 
3951 	if (ret) {
3952 		/* Insert dev to lpm_failed_devs list */
3953 		xhci_warn(xhci, "device LPM test failed, may disconnect and "
3954 				"re-enumerate\n");
3955 		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
3956 		if (!dev_info) {
3957 			ret = -ENOMEM;
3958 			goto finish;
3959 		}
3960 		dev_info->dev_id = dev_id;
3961 		INIT_LIST_HEAD(&dev_info->list);
3962 		list_add(&dev_info->list, &xhci->lpm_failed_devs);
3963 	} else {
3964 		xhci_ring_device(xhci, udev->slot_id);
3965 	}
3966 
3967 finish:
3968 	spin_unlock_irqrestore(&xhci->lock, flags);
3969 	return ret;
3970 }
3971 
3972 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
3973 			struct usb_device *udev, int enable)
3974 {
3975 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
3976 	__le32 __iomem	**port_array;
3977 	__le32 __iomem	*pm_addr;
3978 	u32		temp;
3979 	unsigned int	port_num;
3980 	unsigned long	flags;
3981 	int		hird;
3982 
3983 	if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support ||
3984 			!udev->lpm_capable)
3985 		return -EPERM;
3986 
3987 	if (!udev->parent || udev->parent->parent ||
3988 			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
3989 		return -EPERM;
3990 
3991 	if (udev->usb2_hw_lpm_capable != 1)
3992 		return -EPERM;
3993 
3994 	spin_lock_irqsave(&xhci->lock, flags);
3995 
3996 	port_array = xhci->usb2_ports;
3997 	port_num = udev->portnum - 1;
3998 	pm_addr = port_array[port_num] + 1;
3999 	temp = xhci_readl(xhci, pm_addr);
4000 
4001 	xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4002 			enable ? "enable" : "disable", port_num);
4003 
4004 	hird = xhci_calculate_hird_besl(xhci, udev);
4005 
4006 	if (enable) {
4007 		temp &= ~PORT_HIRD_MASK;
4008 		temp |= PORT_HIRD(hird) | PORT_RWE;
4009 		xhci_writel(xhci, temp, pm_addr);
4010 		temp = xhci_readl(xhci, pm_addr);
4011 		temp |= PORT_HLE;
4012 		xhci_writel(xhci, temp, pm_addr);
4013 	} else {
4014 		temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK);
4015 		xhci_writel(xhci, temp, pm_addr);
4016 	}
4017 
4018 	spin_unlock_irqrestore(&xhci->lock, flags);
4019 	return 0;
4020 }
4021 
4022 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4023 {
4024 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
4025 	int		ret;
4026 
4027 	ret = xhci_usb2_software_lpm_test(hcd, udev);
4028 	if (!ret) {
4029 		xhci_dbg(xhci, "software LPM test succeeded\n");
4030 		if (xhci->hw_lpm_support == 1) {
4031 			udev->usb2_hw_lpm_capable = 1;
4032 			ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1);
4033 			if (!ret)
4034 				udev->usb2_hw_lpm_enabled = 1;
4035 		}
4036 	}
4037 
4038 	return 0;
4039 }
4040 
4041 #else
4042 
4043 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4044 				struct usb_device *udev, int enable)
4045 {
4046 	return 0;
4047 }
4048 
4049 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4050 {
4051 	return 0;
4052 }
4053 
4054 #endif /* CONFIG_USB_SUSPEND */
4055 
4056 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4057 
4058 #ifdef CONFIG_PM
4059 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125 us * 1000 ns/us */
4060 static unsigned long long xhci_service_interval_to_ns(
4061 		struct usb_endpoint_descriptor *desc)
4062 {
4063 	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4064 }
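
/*
 * Worked example: bInterval = 4 gives (1 << 3) * 125 * 1000 =
 * 1,000,000 ns, i.e. a 1 ms service interval.
 */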
4065 
4066 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4067 		enum usb3_link_state state)
4068 {
4069 	unsigned long long sel;
4070 	unsigned long long pel;
4071 	unsigned int max_sel_pel;
4072 	char *state_name;
4073 
4074 	switch (state) {
4075 	case USB3_LPM_U1:
4076 		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4077 		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4078 		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4079 		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4080 		state_name = "U1";
4081 		break;
4082 	case USB3_LPM_U2:
4083 		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4084 		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4085 		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4086 		state_name = "U2";
4087 		break;
4088 	default:
4089 		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4090 				__func__);
4091 		return USB3_LPM_DISABLED;
4092 	}
4093 
4094 	if (sel <= max_sel_pel && pel <= max_sel_pel)
4095 		return USB3_LPM_DEVICE_INITIATED;
4096 
4097 	if (sel > max_sel_pel)
4098 		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4099 				"due to long SEL %llu us\n",
4100 				state_name, sel);
4101 	else
4102 		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4103 				"due to long PEL %llu us\n",
4104 				state_name, pel);
4105 	return USB3_LPM_DISABLED;
4106 }
4107 
4108 /* Returns the hub-encoded U1 timeout value.
4109  * The U1 timeout should be the maximum of the following values:
4110  *  - For control endpoints, U1 system exit latency (SEL) * 3
4111  *  - For bulk endpoints, U1 SEL * 5
4112  *  - For interrupt endpoints:
4113  *    - Notification EPs, U1 SEL * 3
4114  *    - Periodic EPs, max(105% of the service interval, U1 SEL * 2)
4115  *  - For isochronous endpoints, max(105% of the service interval, U1 SEL * 2)
4116  */
4117 static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev,
4118 		struct usb_endpoint_descriptor *desc)
4119 {
4120 	unsigned long long timeout_ns;
4121 	int ep_type;
4122 	int intr_type;
4123 
4124 	ep_type = usb_endpoint_type(desc);
4125 	switch (ep_type) {
4126 	case USB_ENDPOINT_XFER_CONTROL:
4127 		timeout_ns = udev->u1_params.sel * 3;
4128 		break;
4129 	case USB_ENDPOINT_XFER_BULK:
4130 		timeout_ns = udev->u1_params.sel * 5;
4131 		break;
4132 	case USB_ENDPOINT_XFER_INT:
4133 		intr_type = usb_endpoint_interrupt_type(desc);
4134 		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4135 			timeout_ns = udev->u1_params.sel * 3;
4136 			break;
4137 		}
4138 		/* Otherwise the calculation is the same as isoc eps */
4139 	case USB_ENDPOINT_XFER_ISOC:
4140 		timeout_ns = xhci_service_interval_to_ns(desc);
4141 		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4142 		if (timeout_ns < udev->u1_params.sel * 2)
4143 			timeout_ns = udev->u1_params.sel * 2;
4144 		break;
4145 	default:
4146 		return 0;
4147 	}
4148 
4149 	/* The U1 timeout is encoded in 1us intervals. */
4150 	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4151 	/* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */
4152 	if (timeout_ns == USB3_LPM_DISABLED)
4153 		timeout_ns++;
4154 
4155 	/* If the necessary timeout value is bigger than what we can set in the
4156 	 * USB 3.0 hub, we have to disable hub-initiated U1.
4157 	 */
4158 	if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4159 		return timeout_ns;
4160 	dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4161 			"due to long timeout %llu us\n", timeout_ns);
4162 	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4163 }
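
/*
 * Worked example (illustrative values): a bulk endpoint on a device
 * with u1_params.sel = 3000 ns gets timeout_ns = 15000 ns, which
 * encodes to DIV_ROUND_UP(15000, 1000) = 15 (in 1 us units).  That is
 * within USB3_LPM_U1_MAX_TIMEOUT, so hub-initiated U1 stays enabled.
 */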
4164 
4165 /* Returns the hub-encoded U2 timeout value.
4166  * The U2 timeout should be the maximum of:
4167  *  - 10 ms (to avoid the bandwidth impact on the scheduler)
4168  *  - the largest service interval of any active periodic endpoint (to avoid going
4169  *    into lower power link states between intervals).
4170  *  - the U2 Exit Latency of the device
4171  */
4172 static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev,
4173 		struct usb_endpoint_descriptor *desc)
4174 {
4175 	unsigned long long timeout_ns;
4176 	unsigned long long u2_del_ns;
4177 
4178 	timeout_ns = 10 * 1000 * 1000;
4179 
4180 	if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4181 			(xhci_service_interval_to_ns(desc) > timeout_ns))
4182 		timeout_ns = xhci_service_interval_to_ns(desc);
4183 
4184 	u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4185 	if (u2_del_ns > timeout_ns)
4186 		timeout_ns = u2_del_ns;
4187 
4188 	/* The U2 timeout is encoded in 256us intervals */
4189 	timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4190 	/* If the necessary timeout value is bigger than what we can set in the
4191 	 * USB 3.0 hub, we have to disable hub-initiated U2.
4192 	 */
4193 	if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4194 		return timeout_ns;
4195 	dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4196 			"due to long timeout %llu * 256 us\n", timeout_ns);
4197 	return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4198 }
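
/*
 * Worked example (illustrative values): with no periodic endpoints and
 * bU2DevExitLat = 2000 us (2,000,000 ns, below the 10 ms floor), the
 * timeout stays at 10,000,000 ns and encodes to
 * DIV_ROUND_UP(10000000, 256000) = 40, i.e. 40 * 256 us ~= 10.24 ms.
 */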
4199 
4200 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4201 		struct usb_device *udev,
4202 		struct usb_endpoint_descriptor *desc,
4203 		enum usb3_link_state state,
4204 		u16 *timeout)
4205 {
4206 	if (state == USB3_LPM_U1) {
4207 		if (xhci->quirks & XHCI_INTEL_HOST)
4208 			return xhci_calculate_intel_u1_timeout(udev, desc);
4209 	} else {
4210 		if (xhci->quirks & XHCI_INTEL_HOST)
4211 			return xhci_calculate_intel_u2_timeout(udev, desc);
4212 	}
4213 
4214 	return USB3_LPM_DISABLED;
4215 }
4216 
4217 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4218 		struct usb_device *udev,
4219 		struct usb_endpoint_descriptor *desc,
4220 		enum usb3_link_state state,
4221 		u16 *timeout)
4222 {
4223 	u16 alt_timeout;
4224 
4225 	alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4226 		desc, state, timeout);
4227 
4228 	/* If we found we can't enable hub-initiated LPM, or
4229 	 * the U1 or U2 exit latency was too high to allow
4230 	 * device-initiated LPM as well, just stop searching.
4231 	 */
4232 	if (alt_timeout == USB3_LPM_DISABLED ||
4233 			alt_timeout == USB3_LPM_DEVICE_INITIATED) {
4234 		*timeout = alt_timeout;
4235 		return -E2BIG;
4236 	}
4237 	if (alt_timeout > *timeout)
4238 		*timeout = alt_timeout;
4239 	return 0;
4240 }
4241 
4242 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4243 		struct usb_device *udev,
4244 		struct usb_host_interface *alt,
4245 		enum usb3_link_state state,
4246 		u16 *timeout)
4247 {
4248 	int j;
4249 
4250 	for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4251 		if (xhci_update_timeout_for_endpoint(xhci, udev,
4252 					&alt->endpoint[j].desc, state, timeout))
4253 			return -E2BIG;
4254 		continue;
4255 	}
4256 	return 0;
4257 }
4258 
4259 static int xhci_check_intel_tier_policy(struct usb_device *udev,
4260 		enum usb3_link_state state)
4261 {
4262 	struct usb_device *parent;
4263 	unsigned int num_hubs;
4264 
4265 	if (state == USB3_LPM_U2)
4266 		return 0;
4267 
4268 	/* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4269 	for (parent = udev->parent, num_hubs = 0; parent->parent;
4270 			parent = parent->parent)
4271 		num_hubs++;
4272 
4273 	if (num_hubs < 2)
4274 		return 0;
4275 
4276 	dev_dbg(&udev->dev, "Disabling U1 link state for device"
4277 			" below second-tier hub.\n");
4278 	dev_dbg(&udev->dev, "Plug device into first-tier hub "
4279 			"to decrease power consumption.\n");
4280 	return -E2BIG;
4281 }
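
/*
 * Worked example of the tier count above: for a device plugged directly
 * into the root hub, the loop never runs and num_hubs = 0; behind one
 * external hub, num_hubs = 1 and U1 is still allowed; behind two
 * chained hubs, num_hubs = 2 and hub-initiated U1 is refused.
 */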
4282 
4283 static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4284 		struct usb_device *udev,
4285 		enum usb3_link_state state)
4286 {
4287 	if (xhci->quirks & XHCI_INTEL_HOST)
4288 		return xhci_check_intel_tier_policy(udev, state);
4289 	return -EINVAL;
4290 }
4291 
4292 /* Returns the U1 or U2 timeout that should be enabled.
4293  * If the tier check or timeout setting functions return with a non-zero exit
4294  * code, that means the timeout value has been finalized and we shouldn't look
4295  * at any more endpoints.
4296  */
4297 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4298 			struct usb_device *udev, enum usb3_link_state state)
4299 {
4300 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4301 	struct usb_host_config *config;
4302 	char *state_name;
4303 	int i;
4304 	u16 timeout = USB3_LPM_DISABLED;
4305 
4306 	if (state == USB3_LPM_U1)
4307 		state_name = "U1";
4308 	else if (state == USB3_LPM_U2)
4309 		state_name = "U2";
4310 	else {
4311 		dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4312 				state);
4313 		return timeout;
4314 	}
4315 
4316 	if (xhci_check_tier_policy(xhci, udev, state) < 0)
4317 		return timeout;
4318 
4319 	/* Gather some information about the currently installed configuration
4320 	 * and alternate interface settings.
4321 	 */
4322 	if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4323 			state, &timeout))
4324 		return timeout;
4325 
4326 	config = udev->actconfig;
4327 	if (!config)
4328 		return timeout;
4329 
4330 	for (i = 0; i < USB_MAXINTERFACES; i++) {
4331 		struct usb_driver *driver;
4332 		struct usb_interface *intf = config->interface[i];
4333 
4334 		if (!intf)
4335 			continue;
4336 
4337 		/* Check if any currently bound drivers want hub-initiated LPM
4338 		 * disabled.
4339 		 */
4340 		if (intf->dev.driver) {
4341 			driver = to_usb_driver(intf->dev.driver);
4342 			if (driver && driver->disable_hub_initiated_lpm) {
4343 				dev_dbg(&udev->dev, "Hub-initiated %s disabled "
4344 						"at request of driver %s\n",
4345 						state_name, driver->name);
4346 				return xhci_get_timeout_no_hub_lpm(udev, state);
4347 			}
4348 		}
4349 
4350 		/* Not sure how this could happen... */
4351 		if (!intf->cur_altsetting)
4352 			continue;
4353 
4354 		if (xhci_update_timeout_for_interface(xhci, udev,
4355 					intf->cur_altsetting,
4356 					state, &timeout))
4357 			return timeout;
4358 	}
4359 	return timeout;
4360 }
4361 
4362 /*
4363  * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4364  * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
4365  */
4366 static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4367 			struct usb_device *udev, u16 max_exit_latency)
4368 {
4369 	struct xhci_virt_device *virt_dev;
4370 	struct xhci_command *command;
4371 	struct xhci_input_control_ctx *ctrl_ctx;
4372 	struct xhci_slot_ctx *slot_ctx;
4373 	unsigned long flags;
4374 	int ret;
4375 
4376 	spin_lock_irqsave(&xhci->lock, flags);
4377 	if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) {
4378 		spin_unlock_irqrestore(&xhci->lock, flags);
4379 		return 0;
4380 	}
4381 
4382 	/* Attempt to issue an Evaluate Context command to change the MEL. */
4383 	virt_dev = xhci->devs[udev->slot_id];
4384 	command = xhci->lpm_command;
4385 	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4386 	spin_unlock_irqrestore(&xhci->lock, flags);
4387 
4388 	ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
4389 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4390 	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4391 	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4392 	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4393 
4394 	xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
4395 	xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
4396 	xhci_dbg_ctx(xhci, command->in_ctx, 0);
4397 
4398 	/* Issue and wait for the evaluate context command. */
4399 	ret = xhci_configure_endpoint(xhci, udev, command,
4400 			true, true);
4401 	xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id);
4402 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0);
4403 
4404 	if (!ret) {
4405 		spin_lock_irqsave(&xhci->lock, flags);
4406 		virt_dev->current_mel = max_exit_latency;
4407 		spin_unlock_irqrestore(&xhci->lock, flags);
4408 	}
4409 	return ret;
4410 }
4411 
4412 static int calculate_max_exit_latency(struct usb_device *udev,
4413 		enum usb3_link_state state_changed,
4414 		u16 hub_encoded_timeout)
4415 {
4416 	unsigned long long u1_mel_us = 0;
4417 	unsigned long long u2_mel_us = 0;
4418 	unsigned long long mel_us = 0;
4419 	bool disabling_u1;
4420 	bool disabling_u2;
4421 	bool enabling_u1;
4422 	bool enabling_u2;
4423 
4424 	disabling_u1 = (state_changed == USB3_LPM_U1 &&
4425 			hub_encoded_timeout == USB3_LPM_DISABLED);
4426 	disabling_u2 = (state_changed == USB3_LPM_U2 &&
4427 			hub_encoded_timeout == USB3_LPM_DISABLED);
4428 
4429 	enabling_u1 = (state_changed == USB3_LPM_U1 &&
4430 			hub_encoded_timeout != USB3_LPM_DISABLED);
4431 	enabling_u2 = (state_changed == USB3_LPM_U2 &&
4432 			hub_encoded_timeout != USB3_LPM_DISABLED);
4433 
4434 	/* If U1 was already enabled and we're not disabling it,
4435 	 * or we're going to enable U1, account for the U1 max exit latency.
4436 	 */
4437 	if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4438 			enabling_u1)
4439 		u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4440 	if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4441 			enabling_u2)
4442 		u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4443 
4444 	if (u1_mel_us > u2_mel_us)
4445 		mel_us = u1_mel_us;
4446 	else
4447 		mel_us = u2_mel_us;
4448 	/* xHCI host controller max exit latency field is only 16 bits wide. */
4449 	if (mel_us > MAX_EXIT) {
4450 		dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
4451 				"is too big.\n", mel_us);
4452 		return -E2BIG;
4453 	}
4454 	return mel_us;
4455 }
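
/*
 * Worked example (illustrative values): enabling U1 with
 * u1_params.mel = 1500 ns while U2 is already enabled with
 * u2_params.mel = 2300 ns gives u1_mel_us = 2 and u2_mel_us = 3, so
 * mel_us = 3, which easily fits in the 16-bit MAX_EXIT field.
 */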
4456 
4457 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
4458 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4459 			struct usb_device *udev, enum usb3_link_state state)
4460 {
4461 	struct xhci_hcd	*xhci;
4462 	u16 hub_encoded_timeout;
4463 	int mel;
4464 	int ret;
4465 
4466 	xhci = hcd_to_xhci(hcd);
4467 	/* The LPM timeout values are pretty host-controller specific, so don't
4468 	 * enable hub-initiated timeouts unless the vendor has provided
4469 	 * information about their timeout algorithm.
4470 	 */
4471 	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4472 			!xhci->devs[udev->slot_id])
4473 		return USB3_LPM_DISABLED;
4474 
4475 	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
4476 	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
4477 	if (mel < 0) {
4478 		/* Max Exit Latency is too big, disable LPM. */
4479 		hub_encoded_timeout = USB3_LPM_DISABLED;
4480 		mel = 0;
4481 	}
4482 
4483 	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4484 	if (ret)
4485 		return ret;
4486 	return hub_encoded_timeout;
4487 }
4488 
4489 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4490 			struct usb_device *udev, enum usb3_link_state state)
4491 {
4492 	struct xhci_hcd	*xhci;
4493 	u16 mel;
4494 	int ret;
4495 
4496 	xhci = hcd_to_xhci(hcd);
4497 	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
4498 			!xhci->devs[udev->slot_id])
4499 		return 0;
4500 
4501 	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
4502 	ret = xhci_change_max_exit_latency(xhci, udev, mel);
4503 	if (ret)
4504 		return ret;
4505 	return 0;
4506 }
4507 #else /* CONFIG_PM */
4508 
4509 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
4510 			struct usb_device *udev, enum usb3_link_state state)
4511 {
4512 	return USB3_LPM_DISABLED;
4513 }
4514 
4515 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
4516 			struct usb_device *udev, enum usb3_link_state state)
4517 {
4518 	return 0;
4519 }
4520 #endif	/* CONFIG_PM */
4521 
4522 /*-------------------------------------------------------------------------*/
4523 
4524 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
4525  * internal data structures for the device.
4526  */
4527 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
4528 			struct usb_tt *tt, gfp_t mem_flags)
4529 {
4530 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4531 	struct xhci_virt_device *vdev;
4532 	struct xhci_command *config_cmd;
4533 	struct xhci_input_control_ctx *ctrl_ctx;
4534 	struct xhci_slot_ctx *slot_ctx;
4535 	unsigned long flags;
4536 	unsigned think_time;
4537 	int ret;
4538 
4539 	/* Ignore root hubs */
4540 	if (!hdev->parent)
4541 		return 0;
4542 
4543 	vdev = xhci->devs[hdev->slot_id];
4544 	if (!vdev) {
4545 		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
4546 		return -EINVAL;
4547 	}
4548 	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
4549 	if (!config_cmd) {
4550 		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
4551 		return -ENOMEM;
4552 	}
4553 
4554 	spin_lock_irqsave(&xhci->lock, flags);
4555 	if (hdev->speed == USB_SPEED_HIGH &&
4556 			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
4557 		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
4558 		xhci_free_command(xhci, config_cmd);
4559 		spin_unlock_irqrestore(&xhci->lock, flags);
4560 		return -ENOMEM;
4561 	}
4562 
4563 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
4564 	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
4565 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4566 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
4567 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
4568 	if (tt->multi)
4569 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
4570 	if (xhci->hci_version > 0x95) {
4571 		xhci_dbg(xhci, "xHCI version %x needs hub "
4572 				"TT think time and number of ports\n",
4573 				(unsigned int) xhci->hci_version);
4574 		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
4575 		/* Set TT think time - convert from ns to FS bit times.
4576 		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
4577 		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
4578 		 *
4579 		 * xHCI 1.0: this field shall be 0 if the device is not a
4580 		 * High-speed hub.
4581 		 */
4582 		think_time = tt->think_time;
4583 		if (think_time != 0)
4584 			think_time = (think_time / 666) - 1;
4585 		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
4586 			slot_ctx->tt_info |=
4587 				cpu_to_le32(TT_THINK_TIME(think_time));
4588 	} else {
4589 		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
4590 				"TT think time or number of ports\n",
4591 				(unsigned int) xhci->hci_version);
4592 	}
4593 	slot_ctx->dev_state = 0;
4594 	spin_unlock_irqrestore(&xhci->lock, flags);
4595 
4596 	xhci_dbg(xhci, "Set up %s for hub device.\n",
4597 			(xhci->hci_version > 0x95) ?
4598 			"configure endpoint" : "evaluate context");
4599 	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
4600 	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
4601 
4602 	/* Issue and wait for the configure endpoint or
4603 	 * evaluate context command.
4604 	 */
4605 	if (xhci->hci_version > 0x95)
4606 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4607 				false, false);
4608 	else
4609 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
4610 				true, false);
4611 
4612 	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
4613 	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
4614 
4615 	xhci_free_command(xhci, config_cmd);
4616 	return ret;
4617 }
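
/*
 * Worked example for the TT think time conversion above, assuming the
 * USB core's usual encoding of 666 ns per 8 FS bit times: a hub
 * reporting 16 FS bit times has tt->think_time = 1332, and
 * (1332 / 666) - 1 = 1 is what ends up in TT_THINK_TIME().
 */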
4618 
4619 int xhci_get_frame(struct usb_hcd *hcd)
4620 {
4621 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4622 	/* EHCI mods by the periodic size.  Why? */
4623 	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
4624 }
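
/*
 * The MFINDEX register counts 125 us microframes; shifting right by 3
 * divides by 8 to yield the 1 ms frame number.  For example, a
 * microframe index of 80 corresponds to frame 10.
 */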
4625 
4626 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
4627 {
4628 	struct xhci_hcd		*xhci;
4629 	struct device		*dev = hcd->self.controller;
4630 	int			retval;
4631 	u32			temp;
4632 
4633 	/* Accept arbitrarily long scatter-gather lists */
4634 	hcd->self.sg_tablesize = ~0;
4635 	/* XHCI controllers don't stop the ep queue on short packets :| */
4636 	hcd->self.no_stop_on_short = 1;
4637 
4638 	if (usb_hcd_is_primary_hcd(hcd)) {
4639 		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
4640 		if (!xhci)
4641 			return -ENOMEM;
4642 		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
4643 		xhci->main_hcd = hcd;
4644 		/* Mark the first roothub as being USB 2.0.
4645 		 * The xHCI driver will register the USB 3.0 roothub.
4646 		 */
4647 		hcd->speed = HCD_USB2;
4648 		hcd->self.root_hub->speed = USB_SPEED_HIGH;
4649 		/*
4650 		 * USB 2.0 roothub under xHCI has an integrated TT,
4651 		 * (rate matching hub) as opposed to having an OHCI/UHCI
4652 		 * companion controller.
4653 		 */
4654 		hcd->has_tt = 1;
4655 	} else {
4656 		/* xHCI private pointer was set in xhci_pci_probe for the second
4657 		 * registered roothub.
4658 		 */
4659 		xhci = hcd_to_xhci(hcd);
4660 		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4661 		if (HCC_64BIT_ADDR(temp)) {
4662 			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4663 			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4664 		} else {
4665 			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4666 		}
4667 		return 0;
4668 	}
4669 
4670 	xhci->cap_regs = hcd->regs;
4671 	xhci->op_regs = hcd->regs +
4672 		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
4673 	xhci->run_regs = hcd->regs +
4674 		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
4675 	/* Cache read-only capability registers */
4676 	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
4677 	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
4678 	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
4679 	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
4680 	xhci->hci_version = HC_VERSION(xhci->hcc_params);
4681 	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4682 	xhci_print_registers(xhci);
4683 
4684 	get_quirks(dev, xhci);
4685 
4686 	/* Make sure the HC is halted. */
4687 	retval = xhci_halt(xhci);
4688 	if (retval)
4689 		goto error;
4690 
4691 	xhci_dbg(xhci, "Resetting HCD\n");
4692 	/* Reset the internal HC memory state and registers. */
4693 	retval = xhci_reset(xhci);
4694 	if (retval)
4695 		goto error;
4696 	xhci_dbg(xhci, "Reset complete\n");
4697 
4698 	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
4699 	if (HCC_64BIT_ADDR(temp)) {
4700 		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
4701 		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
4702 	} else {
4703 		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
4704 	}
4705 
4706 	xhci_dbg(xhci, "Calling HCD init\n");
4707 	/* Initialize HCD and host controller data structures. */
4708 	retval = xhci_init(hcd);
4709 	if (retval)
4710 		goto error;
4711 	xhci_dbg(xhci, "Called HCD init\n");
4712 	return 0;
4713 error:
4714 	kfree(xhci);
4715 	return retval;
4716 }
4717 
4718 MODULE_DESCRIPTION(DRIVER_DESC);
4719 MODULE_AUTHOR(DRIVER_AUTHOR);
4720 MODULE_LICENSE("GPL");
4721 
4722 static int __init xhci_hcd_init(void)
4723 {
4724 	int retval;
4725 
4726 	retval = xhci_register_pci();
4727 	if (retval < 0) {
4728 		printk(KERN_DEBUG "Problem registering PCI driver.\n");
4729 		return retval;
4730 	}
4731 	retval = xhci_register_plat();
4732 	if (retval < 0) {
4733 		printk(KERN_DEBUG "Problem registering platform driver.\n");
4734 		goto unreg_pci;
4735 	}
4736 	/*
4737 	 * Check the compiler generated sizes of structures that must be laid
4738 	 * out in specific ways for hardware access.
4739 	 */
4740 	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
4741 	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
4742 	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
4743 	/* xhci_device_control has eight fields, and also
4744 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
4745 	 */
4746 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
4747 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
4748 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
4749 	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
4750 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
4751 	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
4752 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
4753 	return 0;
4754 unreg_pci:
4755 	xhci_unregister_pci();
4756 	return retval;
4757 }
4758 module_init(xhci_hcd_init);
4759 
4760 static void __exit xhci_hcd_cleanup(void)
4761 {
4762 	xhci_unregister_pci();
4763 	xhci_unregister_plat();
4764 }
4765 module_exit(xhci_hcd_cleanup);
4766