xref: /linux/drivers/usb/host/xhci.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * xHCI host controller driver
3  *
4  * Copyright (C) 2008 Intel Corp.
5  *
6  * Author: Sarah Sharp
7  * Some code borrowed from the Linux EHCI driver.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
15  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16  * for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software Foundation,
20  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22 
23 #include <linux/pci.h>
24 #include <linux/irq.h>
25 #include <linux/log2.h>
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/slab.h>
29 
30 #include "xhci.h"
31 
32 #define DRIVER_AUTHOR "Sarah Sharp"
33 #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
34 
35 /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
36 static int link_quirk;
37 module_param(link_quirk, int, S_IRUGO | S_IWUSR);
38 MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
39 
40 /* TODO: copied from ehci-hcd.c - can this be refactored? */
41 /*
42  * handshake - spin reading hc until handshake completes or fails
43  * @ptr: address of hc register to be read
44  * @mask: bits to look at in result of read
45  * @done: value of those bits when handshake succeeds
46  * @usec: timeout in microseconds
47  *
48  * Returns negative errno, or zero on success
49  *
50  * Success happens when the "mask" bits have the specified value (hardware
51  * handshake done).  There are two failure modes: "usec" microseconds have passed
52  * (major hardware flakeout), or the register reads as all-ones (hardware removed).
53  */
54 static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
55 		      u32 mask, u32 done, int usec)
56 {
57 	u32	result;
58 
59 	do {
60 		result = xhci_readl(xhci, ptr);
61 		if (result == ~(u32)0)		/* card removed */
62 			return -ENODEV;
63 		result &= mask;
64 		if (result == done)
65 			return 0;
66 		udelay(1);
67 		usec--;
68 	} while (usec > 0);
69 	return -ETIMEDOUT;
70 }
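
/*
 * For example, xhci_halt() below waits for the controller to report the halt
 * bit: handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT,
 * XHCI_MAX_HALT_USEC).  Because the loop busy-waits in 1 us steps, "usec" is
 * both the timeout and an upper bound on the number of register reads.
 */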
71 
72 /*
73  * Disable interrupts and begin the xHCI halting process.
74  */
75 void xhci_quiesce(struct xhci_hcd *xhci)
76 {
77 	u32 halted;
78 	u32 cmd;
79 	u32 mask;
80 
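	/*
	 * Always mask off the interrupt enable bits; if the controller has not
	 * halted yet, also clear the run/stop bit so it begins to halt.
	 */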
81 	mask = ~(XHCI_IRQS);
82 	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
83 	if (!halted)
84 		mask &= ~CMD_RUN;
85 
86 	cmd = xhci_readl(xhci, &xhci->op_regs->command);
87 	cmd &= mask;
88 	xhci_writel(xhci, cmd, &xhci->op_regs->command);
89 }
90 
91 /*
92  * Force HC into halt state.
93  *
94  * Disable any IRQs and clear the run/stop bit.
95  * HC will complete any current and actively pipelined transactions, and
96  * should halt within 16 microframes of the run/stop bit being cleared.
97  * Read HC Halted bit in the status register to see when the HC is finished.
98  * XXX: shouldn't we set HC_STATE_HALT here somewhere?
99  */
100 int xhci_halt(struct xhci_hcd *xhci)
101 {
102 	xhci_dbg(xhci, "// Halt the HC\n");
103 	xhci_quiesce(xhci);
104 
105 	return handshake(xhci, &xhci->op_regs->status,
106 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
107 }
108 
109 /*
110  * Set the run bit and wait for the host to be running.
111  */
112 int xhci_start(struct xhci_hcd *xhci)
113 {
114 	u32 temp;
115 	int ret;
116 
117 	temp = xhci_readl(xhci, &xhci->op_regs->command);
118 	temp |= (CMD_RUN);
119 	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
120 			temp);
121 	xhci_writel(xhci, temp, &xhci->op_regs->command);
122 
123 	/*
124 	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
125 	 * running.
126 	 */
127 	ret = handshake(xhci, &xhci->op_regs->status,
128 			STS_HALT, 0, XHCI_MAX_HALT_USEC);
129 	if (ret == -ETIMEDOUT)
130 		xhci_err(xhci, "Host took too long to start, "
131 				"waited %u microseconds.\n",
132 				XHCI_MAX_HALT_USEC);
133 	return ret;
134 }
135 
136 /*
137  * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
138  *
139  * This resets pipelines, timers, counters, state machines, etc.
140  * Transactions will be terminated immediately, and operational registers
141  * will be set to their defaults.
142  */
143 int xhci_reset(struct xhci_hcd *xhci)
144 {
145 	u32 command;
146 	u32 state;
147 	int ret;
148 
149 	state = xhci_readl(xhci, &xhci->op_regs->status);
150 	if ((state & STS_HALT) == 0) {
151 		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
152 		return 0;
153 	}
154 
155 	xhci_dbg(xhci, "// Reset the HC\n");
156 	command = xhci_readl(xhci, &xhci->op_regs->command);
157 	command |= CMD_RESET;
158 	xhci_writel(xhci, command, &xhci->op_regs->command);
159 	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
160 	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
161 
162 	ret = handshake(xhci, &xhci->op_regs->command,
163 			CMD_RESET, 0, 250 * 1000);
164 	if (ret)
165 		return ret;
166 
167 	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
168 	/*
169 	 * xHCI cannot write to any doorbells or operational registers other
170 	 * than status until the "Controller Not Ready" flag is cleared.
171 	 */
172 	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
173 }
174 
175 /*
176  * Free IRQs
177  * Free all IRQs that were requested.
178  */
179 static void xhci_free_irq(struct xhci_hcd *xhci)
180 {
181 	int i;
182 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
183 
184 	/* return if using legacy interrupt */
185 	if (xhci_to_hcd(xhci)->irq >= 0)
186 		return;
187 
188 	if (xhci->msix_entries) {
189 		for (i = 0; i < xhci->msix_count; i++)
190 			if (xhci->msix_entries[i].vector)
191 				free_irq(xhci->msix_entries[i].vector,
192 						xhci_to_hcd(xhci));
193 	} else if (pdev->irq >= 0)
194 		free_irq(pdev->irq, xhci_to_hcd(xhci));
195 
196 	return;
197 }
198 
199 /*
200  * Set up MSI
201  */
202 static int xhci_setup_msi(struct xhci_hcd *xhci)
203 {
204 	int ret;
205 	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
206 
207 	ret = pci_enable_msi(pdev);
208 	if (ret) {
209 		xhci_err(xhci, "failed to allocate MSI entry\n");
210 		return ret;
211 	}
212 
213 	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
214 				0, "xhci_hcd", xhci_to_hcd(xhci));
215 	if (ret) {
216 		xhci_err(xhci, "disable MSI interrupt\n");
217 		pci_disable_msi(pdev);
218 	}
219 
220 	return ret;
221 }
222 
223 /*
224  * Set up MSI-X
225  */
226 static int xhci_setup_msix(struct xhci_hcd *xhci)
227 {
228 	int i, ret = 0;
229 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
230 
231 	/*
232 	 * Calculate the number of MSI-X vectors supported.
233 	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
234 	 *   controller supports, as reported in the xHCI HCSPARAMS1 register.
235 	 * - num_online_cpus: one MSI-X vector per online CPU, plus one
236 	 *   additional vector so an interrupt is always available.
237 	 */
238 	xhci->msix_count = min(num_online_cpus() + 1,
239 				HCS_MAX_INTRS(xhci->hcs_params1));
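	/*
	 * For example, with 4 online CPUs and an xHC whose HCSPARAMS1 reports
	 * 8 interrupters, this works out to min(4 + 1, 8) = 5 MSI-X vectors.
	 */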
240 
241 	xhci->msix_entries =
242 		kmalloc(sizeof(struct msix_entry) * xhci->msix_count,
243 				GFP_KERNEL);
244 	if (!xhci->msix_entries) {
245 		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
246 		return -ENOMEM;
247 	}
248 
249 	for (i = 0; i < xhci->msix_count; i++) {
250 		xhci->msix_entries[i].entry = i;
251 		xhci->msix_entries[i].vector = 0;
252 	}
253 
254 	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
255 	if (ret) {
256 		xhci_err(xhci, "Failed to enable MSI-X\n");
257 		goto free_entries;
258 	}
259 
260 	for (i = 0; i < xhci->msix_count; i++) {
261 		ret = request_irq(xhci->msix_entries[i].vector,
262 				(irq_handler_t)xhci_msi_irq,
263 				0, "xhci_hcd", xhci_to_hcd(xhci));
264 		if (ret)
265 			goto disable_msix;
266 	}
267 
268 	return ret;
269 
270 disable_msix:
271 	xhci_err(xhci, "disable MSI-X interrupt\n");
272 	xhci_free_irq(xhci);
273 	pci_disable_msix(pdev);
274 free_entries:
275 	kfree(xhci->msix_entries);
276 	xhci->msix_entries = NULL;
277 	return ret;
278 }
279 
280 /* Free any IRQs and disable MSI-X */
281 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
282 {
283 	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
284 
285 	xhci_free_irq(xhci);
286 
287 	if (xhci->msix_entries) {
288 		pci_disable_msix(pdev);
289 		kfree(xhci->msix_entries);
290 		xhci->msix_entries = NULL;
291 	} else {
292 		pci_disable_msi(pdev);
293 	}
294 
295 	return;
296 }
297 
298 /*
299  * Initialize memory for HCD and xHC (one-time init).
300  *
301  * Program the PAGESIZE register, initialize the device context array, create
302  * device contexts (?), set up a command ring segment (or two?), create event
303  * ring (one for now).
304  */
305 int xhci_init(struct usb_hcd *hcd)
306 {
307 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
308 	int retval = 0;
309 
310 	xhci_dbg(xhci, "xhci_init\n");
311 	spin_lock_init(&xhci->lock);
312 	if (link_quirk) {
313 		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
314 		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
315 	} else {
316 		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
317 	}
318 	retval = xhci_mem_init(xhci, GFP_KERNEL);
319 	xhci_dbg(xhci, "Finished xhci_init\n");
320 
321 	return retval;
322 }
323 
324 /*-------------------------------------------------------------------------*/
325 
326 
327 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
328 void xhci_event_ring_work(unsigned long arg)
329 {
330 	unsigned long flags;
331 	int temp;
332 	u64 temp_64;
333 	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
334 	int i, j;
335 
336 	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
337 
338 	spin_lock_irqsave(&xhci->lock, flags);
339 	temp = xhci_readl(xhci, &xhci->op_regs->status);
340 	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
341 	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
342 		xhci_dbg(xhci, "HW died, polling stopped.\n");
343 		spin_unlock_irqrestore(&xhci->lock, flags);
344 		return;
345 	}
346 
347 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
348 	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
349 	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
350 	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
351 	xhci->error_bitmask = 0;
352 	xhci_dbg(xhci, "Event ring:\n");
353 	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
354 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
355 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
356 	temp_64 &= ~ERST_PTR_MASK;
357 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
358 	xhci_dbg(xhci, "Command ring:\n");
359 	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
360 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
361 	xhci_dbg_cmd_ptrs(xhci);
362 	for (i = 0; i < MAX_HC_SLOTS; ++i) {
363 		if (!xhci->devs[i])
364 			continue;
365 		for (j = 0; j < 31; ++j) {
366 			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
367 		}
368 	}
369 
370 	if (xhci->noops_submitted != NUM_TEST_NOOPS)
371 		if (xhci_setup_one_noop(xhci))
372 			xhci_ring_cmd_db(xhci);
373 	spin_unlock_irqrestore(&xhci->lock, flags);
374 
375 	if (!xhci->zombie)
376 		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
377 	else
378 		xhci_dbg(xhci, "Quit polling the event ring.\n");
379 }
380 #endif
381 
382 /*
383  * Start the HC after it was halted.
384  *
385  * This function is called by the USB core when the HC driver is added.
386  * Its opposite is xhci_stop().
387  *
388  * xhci_init() must be called once before this function can be called.
389  * Reset the HC, enable device slot contexts, program DCBAAP, and
390  * set command ring pointer and event ring pointer.
391  *
392  * Setup MSI-X vectors and enable interrupts.
393  */
394 int xhci_run(struct usb_hcd *hcd)
395 {
396 	u32 temp;
397 	u64 temp_64;
398 	int ret;
399 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
400 	struct pci_dev  *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
401 	void (*doorbell)(struct xhci_hcd *) = NULL;
402 
403 	hcd->uses_new_polling = 1;
404 
405 	xhci_dbg(xhci, "xhci_run\n");
406 	/* unregister the legacy interrupt */
407 	if (hcd->irq)
408 		free_irq(hcd->irq, hcd);
409 	hcd->irq = -1;
410 
411 	ret = xhci_setup_msix(xhci);
412 	if (ret)
413 		/* fall back to MSI */
414 		ret = xhci_setup_msi(xhci);
415 
416 	if (ret) {
417 		/* fall back to legacy interrupt */
418 		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
419 					hcd->irq_descr, hcd);
420 		if (ret) {
421 			xhci_err(xhci, "request interrupt %d failed\n",
422 					pdev->irq);
423 			return ret;
424 		}
425 		hcd->irq = pdev->irq;
426 	}
427 
428 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
429 	init_timer(&xhci->event_ring_timer);
430 	xhci->event_ring_timer.data = (unsigned long) xhci;
431 	xhci->event_ring_timer.function = xhci_event_ring_work;
432 	/* Poll the event ring */
433 	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
434 	xhci->zombie = 0;
435 	xhci_dbg(xhci, "Setting event ring polling timer\n");
436 	add_timer(&xhci->event_ring_timer);
437 #endif
438 
439 	xhci_dbg(xhci, "Command ring memory map follows:\n");
440 	xhci_debug_ring(xhci, xhci->cmd_ring);
441 	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
442 	xhci_dbg_cmd_ptrs(xhci);
443 
444 	xhci_dbg(xhci, "ERST memory map follows:\n");
445 	xhci_dbg_erst(xhci, &xhci->erst);
446 	xhci_dbg(xhci, "Event ring:\n");
447 	xhci_debug_ring(xhci, xhci->event_ring);
448 	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
449 	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
450 	temp_64 &= ~ERST_PTR_MASK;
451 	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
452 
453 	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
454 	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
455 	temp &= ~ER_IRQ_INTERVAL_MASK;
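	/*
	 * Per the xHCI spec, this interval field counts in 250 ns units, so a
	 * value of 160 works out to roughly 40 us between interrupts.
	 */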
456 	temp |= (u32) 160;
457 	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
458 
459 	/* Set the HCD state before we enable the irqs */
460 	hcd->state = HC_STATE_RUNNING;
461 	temp = xhci_readl(xhci, &xhci->op_regs->command);
462 	temp |= (CMD_EIE);
463 	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
464 			temp);
465 	xhci_writel(xhci, temp, &xhci->op_regs->command);
466 
467 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
468 	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
469 			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
470 	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
471 			&xhci->ir_set->irq_pending);
472 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
473 
474 	if (NUM_TEST_NOOPS > 0)
475 		doorbell = xhci_setup_one_noop(xhci);
476 	if (xhci->quirks & XHCI_NEC_HOST)
477 		xhci_queue_vendor_command(xhci, 0, 0, 0,
478 				TRB_TYPE(TRB_NEC_GET_FW));
479 
480 	if (xhci_start(xhci)) {
481 		xhci_halt(xhci);
482 		return -ENODEV;
483 	}
484 
485 	if (doorbell)
486 		(*doorbell)(xhci);
487 	if (xhci->quirks & XHCI_NEC_HOST)
488 		xhci_ring_cmd_db(xhci);
489 
490 	xhci_dbg(xhci, "Finished xhci_run\n");
491 	return 0;
492 }
493 
494 /*
495  * Stop xHCI driver.
496  *
497  * This function is called by the USB core when the HC driver is removed.
498  * Its opposite is xhci_run().
499  *
500  * Disable device contexts, disable IRQs, and quiesce the HC.
501  * Reset the HC, finish any completed transactions, and cleanup memory.
502  */
503 void xhci_stop(struct usb_hcd *hcd)
504 {
505 	u32 temp;
506 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
507 
508 	spin_lock_irq(&xhci->lock);
509 	xhci_halt(xhci);
510 	xhci_reset(xhci);
511 	xhci_cleanup_msix(xhci);
512 	spin_unlock_irq(&xhci->lock);
513 
514 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
515 	/* Tell the event ring poll function not to reschedule */
516 	xhci->zombie = 1;
517 	del_timer_sync(&xhci->event_ring_timer);
518 #endif
519 
520 	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
521 	temp = xhci_readl(xhci, &xhci->op_regs->status);
522 	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
523 	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
524 	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
525 			&xhci->ir_set->irq_pending);
526 	xhci_print_ir_set(xhci, xhci->ir_set, 0);
527 
528 	xhci_dbg(xhci, "cleaning up memory\n");
529 	xhci_mem_cleanup(xhci);
530 	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
531 		    xhci_readl(xhci, &xhci->op_regs->status));
532 }
533 
534 /*
535  * Shutdown HC (not bus-specific)
536  *
537  * This is called when the machine is rebooting or halting.  We assume that the
538  * machine will be powered off, and the HC's internal state will be reset.
539  * Don't bother to free memory.
540  */
541 void xhci_shutdown(struct usb_hcd *hcd)
542 {
543 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
544 
545 	spin_lock_irq(&xhci->lock);
546 	xhci_halt(xhci);
547 	xhci_cleanup_msix(xhci);
548 	spin_unlock_irq(&xhci->lock);
549 
550 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
551 		    xhci_readl(xhci, &xhci->op_regs->status));
552 }
553 
554 /*-------------------------------------------------------------------------*/
555 
556 /**
557  * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
558  * HCDs.  Find the index for an endpoint given its descriptor.  The return
559  * value (plus one) is the amount to left-shift 1 by to build the endpoint bitmask.
560  *
561  * Index  = (epnum * 2) + direction - 1,
562  * where direction = 0 for OUT, 1 for IN.
563  * For control endpoints, the IN index is used (OUT index is unused), so
564  * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
565  */
566 unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
567 {
568 	unsigned int index;
569 	if (usb_endpoint_xfer_control(desc))
570 		index = (unsigned int) (usb_endpoint_num(desc)*2);
571 	else
572 		index = (unsigned int) (usb_endpoint_num(desc)*2) +
573 			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
574 	return index;
575 }
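
/*
 * Worked examples: bulk IN endpoint 0x81 (epnum 1, direction IN) gives
 * index = (1 * 2) + 1 - 1 = 2; bulk OUT endpoint 0x02 gives
 * index = (2 * 2) + 0 - 1 = 3; the default control endpoint gives index 0.
 */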
576 
577 /* Find the flag for this endpoint (for use in the control context).  Use the
578  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
579  * bit 1, etc.
580  */
581 unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
582 {
583 	return 1 << (xhci_get_endpoint_index(desc) + 1);
584 }
585 
586 /* Find the flag for this endpoint (for use in the control context).  Use the
587  * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
588  * bit 1, etc.
589  */
590 unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
591 {
592 	return 1 << (ep_index + 1);
593 }
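
/*
 * Continuing the example above, endpoint index 2 (bulk IN 0x81) maps to
 * flag 1 << (2 + 1) = 0x8 in the input control context's add/drop bitmasks.
 */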
594 
595 /* Compute the last valid endpoint context index.  Basically, this is the
596  * endpoint index plus one.  For slot contexts with more than one valid endpoint,
597  * we find the most significant bit set in the added contexts flags.
598  * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
599  * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
600  */
601 unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
602 {
603 	return fls(added_ctxs) - 1;
604 }
605 
606 /* Returns 1 if the arguments are OK;
607  * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
608  */
609 int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
610 		struct usb_host_endpoint *ep, int check_ep, const char *func) {
611 	if (!hcd || (check_ep && !ep) || !udev) {
612 		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
613 				func);
614 		return -EINVAL;
615 	}
616 	if (!udev->parent) {
617 		printk(KERN_DEBUG "xHCI %s called for root hub\n",
618 				func);
619 		return 0;
620 	}
621 	if (!udev->slot_id) {
622 		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
623 				func);
624 		return -EINVAL;
625 	}
626 	return 1;
627 }
628 
629 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
630 		struct usb_device *udev, struct xhci_command *command,
631 		bool ctx_change, bool must_succeed);
632 
633 /*
634  * Full speed devices may have a max packet size greater than 8 bytes, but the
635  * USB core doesn't know that until it reads the first 8 bytes of the
636  * descriptor.  If the usb_device's max packet size changes after that point,
637  * we need to issue an evaluate context command and wait on it.
638  */
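/*
 * For example, a full-speed device may turn out to have an ep0 max packet
 * size of 64 once the first part of its device descriptor is read; if the
 * xHC's endpoint 0 context still says 8, the mismatch is patched up here
 * with an evaluate context command before the control transfer is queued.
 */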
639 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
640 		unsigned int ep_index, struct urb *urb)
641 {
642 	struct xhci_container_ctx *in_ctx;
643 	struct xhci_container_ctx *out_ctx;
644 	struct xhci_input_control_ctx *ctrl_ctx;
645 	struct xhci_ep_ctx *ep_ctx;
646 	int max_packet_size;
647 	int hw_max_packet_size;
648 	int ret = 0;
649 
650 	out_ctx = xhci->devs[slot_id]->out_ctx;
651 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
652 	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
653 	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
654 	if (hw_max_packet_size != max_packet_size) {
655 		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
656 		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
657 				max_packet_size);
658 		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
659 				hw_max_packet_size);
660 		xhci_dbg(xhci, "Issuing evaluate context command.\n");
661 
662 		/* Set up the modified control endpoint 0 */
663 		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
664 				xhci->devs[slot_id]->out_ctx, ep_index);
665 		in_ctx = xhci->devs[slot_id]->in_ctx;
666 		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
667 		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
668 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
669 
670 		/* Set up the input context flags for the command */
671 		/* FIXME: This won't work if a non-default control endpoint
672 		 * changes max packet sizes.
673 		 */
674 		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
675 		ctrl_ctx->add_flags = EP0_FLAG;
676 		ctrl_ctx->drop_flags = 0;
677 
678 		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
679 		xhci_dbg_ctx(xhci, in_ctx, ep_index);
680 		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
681 		xhci_dbg_ctx(xhci, out_ctx, ep_index);
682 
683 		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
684 				true, false);
685 
686 		/* Clean up the input context for later use by bandwidth
687 		 * functions.
688 		 */
689 		ctrl_ctx->add_flags = SLOT_FLAG;
690 	}
691 	return ret;
692 }
693 
694 /*
695  * non-error returns are a promise to giveback() the urb later
696  * we drop ownership so next owner (or urb unlink) can get it
697  */
698 int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
699 {
700 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
701 	unsigned long flags;
702 	int ret = 0;
703 	unsigned int slot_id, ep_index;
704 	struct urb_priv	*urb_priv;
705 	int size, i;
706 
707 	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
708 		return -EINVAL;
709 
710 	slot_id = urb->dev->slot_id;
711 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
712 
713 	if (!xhci->devs || !xhci->devs[slot_id]) {
714 		if (!in_interrupt())
715 			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
716 		ret = -EINVAL;
717 		goto exit;
718 	}
719 	if (!HCD_HW_ACCESSIBLE(hcd)) {
720 		if (!in_interrupt())
721 			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
722 		ret = -ESHUTDOWN;
723 		goto exit;
724 	}
725 
726 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
727 		size = urb->number_of_packets;
728 	else
729 		size = 1;
730 
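	/*
	 * One xhci_td is needed per isochronous packet; every other transfer
	 * type uses a single TD for the whole URB.
	 */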
731 	urb_priv = kzalloc(sizeof(struct urb_priv) +
732 				  size * sizeof(struct xhci_td *), mem_flags);
733 	if (!urb_priv)
734 		return -ENOMEM;
735 
736 	for (i = 0; i < size; i++) {
737 		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
738 		if (!urb_priv->td[i]) {
739 			urb_priv->length = i;
740 			xhci_urb_free_priv(xhci, urb_priv);
741 			return -ENOMEM;
742 		}
743 	}
744 
745 	urb_priv->length = size;
746 	urb_priv->td_cnt = 0;
747 	urb->hcpriv = urb_priv;
748 
749 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
750 		/* Check to see if the max packet size for the default control
751 		 * endpoint changed during FS device enumeration
752 		 */
753 		if (urb->dev->speed == USB_SPEED_FULL) {
754 			ret = xhci_check_maxpacket(xhci, slot_id,
755 					ep_index, urb);
756 			if (ret < 0)
757 				return ret;
758 		}
759 
760 		/* We have a spinlock and interrupts disabled, so we must pass
761 		 * atomic context to this function, which may allocate memory.
762 		 */
763 		spin_lock_irqsave(&xhci->lock, flags);
764 		if (xhci->xhc_state & XHCI_STATE_DYING)
765 			goto dying;
766 		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
767 				slot_id, ep_index);
768 		spin_unlock_irqrestore(&xhci->lock, flags);
769 	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
770 		spin_lock_irqsave(&xhci->lock, flags);
771 		if (xhci->xhc_state & XHCI_STATE_DYING)
772 			goto dying;
773 		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
774 				EP_GETTING_STREAMS) {
775 			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
776 					"is transitioning to using streams.\n");
777 			ret = -EINVAL;
778 		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
779 				EP_GETTING_NO_STREAMS) {
780 			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
781 					"is transitioning to "
782 					"not having streams.\n");
783 			ret = -EINVAL;
784 		} else {
785 			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
786 					slot_id, ep_index);
787 		}
788 		spin_unlock_irqrestore(&xhci->lock, flags);
789 	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
790 		spin_lock_irqsave(&xhci->lock, flags);
791 		if (xhci->xhc_state & XHCI_STATE_DYING)
792 			goto dying;
793 		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
794 				slot_id, ep_index);
795 		spin_unlock_irqrestore(&xhci->lock, flags);
796 	} else {
797 		spin_lock_irqsave(&xhci->lock, flags);
798 		if (xhci->xhc_state & XHCI_STATE_DYING)
799 			goto dying;
800 		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
801 				slot_id, ep_index);
802 		spin_unlock_irqrestore(&xhci->lock, flags);
803 	}
804 exit:
805 	return ret;
806 dying:
807 	xhci_urb_free_priv(xhci, urb_priv);
808 	urb->hcpriv = NULL;
809 	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
810 			"non-responsive xHCI host.\n",
811 			urb->ep->desc.bEndpointAddress, urb);
812 	spin_unlock_irqrestore(&xhci->lock, flags);
813 	return -ESHUTDOWN;
814 }
815 
816 /* Get the right ring for the given URB.
817  * If the endpoint supports streams, boundary check the URB's stream ID.
818  * If the endpoint doesn't support streams, return the singular endpoint ring.
819  */
820 static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
821 		struct urb *urb)
822 {
823 	unsigned int slot_id;
824 	unsigned int ep_index;
825 	unsigned int stream_id;
826 	struct xhci_virt_ep *ep;
827 
828 	slot_id = urb->dev->slot_id;
829 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
830 	stream_id = urb->stream_id;
831 	ep = &xhci->devs[slot_id]->eps[ep_index];
832 	/* Common case: no streams */
833 	if (!(ep->ep_state & EP_HAS_STREAMS))
834 		return ep->ring;
835 
836 	if (stream_id == 0) {
837 		xhci_warn(xhci,
838 				"WARN: Slot ID %u, ep index %u has streams, "
839 				"but URB has no stream ID.\n",
840 				slot_id, ep_index);
841 		return NULL;
842 	}
843 
844 	if (stream_id < ep->stream_info->num_streams)
845 		return ep->stream_info->stream_rings[stream_id];
846 
847 	xhci_warn(xhci,
848 			"WARN: Slot ID %u, ep index %u has "
849 			"stream IDs 1 to %u allocated, "
850 			"but stream ID %u is requested.\n",
851 			slot_id, ep_index,
852 			ep->stream_info->num_streams - 1,
853 			stream_id);
854 	return NULL;
855 }
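
/*
 * For example, an endpoint set up with num_streams == 4 accepts URBs with
 * stream IDs 1 through 3: stream ID 0 is rejected above because it is
 * reserved, and IDs at or above num_streams fall outside the allocated
 * stream rings.
 */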
856 
857 /*
858  * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
859  * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
860  * should pick up where it left off in the TD, unless a Set Transfer Ring
861  * Dequeue Pointer is issued.
862  *
863  * The TRBs that make up the buffers for the canceled URB will be "removed" from
864  * the ring.  Since the ring is a contiguous structure, they can't be physically
865  * removed.  Instead, there are two options:
866  *
867  *  1) If the HC is in the middle of processing the URB to be canceled, we
868  *     simply move the ring's dequeue pointer past those TRBs using the Set
869  *     Transfer Ring Dequeue Pointer command.  This will be the common case,
870  *     when drivers timeout on the last submitted URB and attempt to cancel.
871  *
872  *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
873  *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
874  *     HC will need to invalidate the any TRBs it has cached after the stop
875  *     HC will need to invalidate any TRBs it has cached after the stop
876  *
877  *  3) The TD may have completed by the time the Stop Endpoint Command
878  *     completes, so software needs to handle that case too.
879  *
880  * This function should protect against the TD enqueueing code ringing the
881  * doorbell while this code is waiting for a Stop Endpoint command to complete.
882  * It also needs to account for multiple cancellations happening at the same
883  * time for the same endpoint.
884  *
885  * Note that this function can be called in any context, or so says
886  * usb_hcd_unlink_urb()
887  */
888 int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
889 {
890 	unsigned long flags;
891 	int ret, i;
892 	u32 temp;
893 	struct xhci_hcd *xhci;
894 	struct urb_priv	*urb_priv;
895 	struct xhci_td *td;
896 	unsigned int ep_index;
897 	struct xhci_ring *ep_ring;
898 	struct xhci_virt_ep *ep;
899 
900 	xhci = hcd_to_xhci(hcd);
901 	spin_lock_irqsave(&xhci->lock, flags);
902 	/* Make sure the URB hasn't completed or been unlinked already */
903 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
904 	if (ret || !urb->hcpriv)
905 		goto done;
906 	temp = xhci_readl(xhci, &xhci->op_regs->status);
907 	if (temp == 0xffffffff) {
908 		xhci_dbg(xhci, "HW died, freeing TD.\n");
909 		urb_priv = urb->hcpriv;
910 
911 		usb_hcd_unlink_urb_from_ep(hcd, urb);
912 		spin_unlock_irqrestore(&xhci->lock, flags);
913 		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
914 		xhci_urb_free_priv(xhci, urb_priv);
915 		return ret;
916 	}
917 	if (xhci->xhc_state & XHCI_STATE_DYING) {
918 		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
919 				"non-responsive xHCI host.\n",
920 				urb->ep->desc.bEndpointAddress, urb);
921 		/* Let the stop endpoint command watchdog timer (which set this
922 		 * state) finish cleaning up the endpoint TD lists.  We must
923 		 * have caught it in the middle of dropping a lock and giving
924 		 * back an URB.
925 		 */
926 		goto done;
927 	}
928 
929 	xhci_dbg(xhci, "Cancel URB %p\n", urb);
930 	xhci_dbg(xhci, "Event ring:\n");
931 	xhci_debug_ring(xhci, xhci->event_ring);
932 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
933 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
934 	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
935 	if (!ep_ring) {
936 		ret = -EINVAL;
937 		goto done;
938 	}
939 
940 	xhci_dbg(xhci, "Endpoint ring:\n");
941 	xhci_debug_ring(xhci, ep_ring);
942 
943 	urb_priv = urb->hcpriv;
944 
945 	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
946 		td = urb_priv->td[i];
947 		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
948 	}
949 
950 	/* Queue a stop endpoint command, but only if this is
951 	 * the first cancellation to be handled.
952 	 */
953 	if (!(ep->ep_state & EP_HALT_PENDING)) {
954 		ep->ep_state |= EP_HALT_PENDING;
955 		ep->stop_cmds_pending++;
956 		ep->stop_cmd_timer.expires = jiffies +
957 			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
958 		add_timer(&ep->stop_cmd_timer);
959 		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
960 		xhci_ring_cmd_db(xhci);
961 	}
962 done:
963 	spin_unlock_irqrestore(&xhci->lock, flags);
964 	return ret;
965 }
966 
967 /* Drop an endpoint from a new bandwidth configuration for this device.
968  * Only one call to this function is allowed per endpoint before
969  * check_bandwidth() or reset_bandwidth() must be called.
970  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
971  * add the endpoint to the schedule with possibly new parameters denoted by a
972  * different endpoint descriptor in usb_host_endpoint.
973  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
974  * not allowed.
975  *
976  * The USB core will not allow URBs to be queued to an endpoint that is being
977  * disabled, so there's no need for mutual exclusion to protect
978  * the xhci->devs[slot_id] structure.
979  */
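/*
 * For example, dropping bulk IN endpoint 0x81 sets bit 3 (0x8) in the input
 * control context's drop flags, clears that same bit in the add flags, and
 * may trim the slot context's "last valid context" field accordingly.
 */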
980 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
981 		struct usb_host_endpoint *ep)
982 {
983 	struct xhci_hcd *xhci;
984 	struct xhci_container_ctx *in_ctx, *out_ctx;
985 	struct xhci_input_control_ctx *ctrl_ctx;
986 	struct xhci_slot_ctx *slot_ctx;
987 	unsigned int last_ctx;
988 	unsigned int ep_index;
989 	struct xhci_ep_ctx *ep_ctx;
990 	u32 drop_flag;
991 	u32 new_add_flags, new_drop_flags, new_slot_info;
992 	int ret;
993 
994 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
995 	if (ret <= 0)
996 		return ret;
997 	xhci = hcd_to_xhci(hcd);
998 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
999 
1000 	drop_flag = xhci_get_endpoint_flag(&ep->desc);
1001 	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1002 		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1003 				__func__, drop_flag);
1004 		return 0;
1005 	}
1006 
1007 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1008 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1009 				__func__);
1010 		return -EINVAL;
1011 	}
1012 
1013 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1014 	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1015 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1016 	ep_index = xhci_get_endpoint_index(&ep->desc);
1017 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1018 	/* If the HC already knows the endpoint is disabled,
1019 	 * or the HCD has noted it is disabled, ignore this request
1020 	 */
1021 	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
1022 			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
1023 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1024 				__func__, ep);
1025 		return 0;
1026 	}
1027 
1028 	ctrl_ctx->drop_flags |= drop_flag;
1029 	new_drop_flags = ctrl_ctx->drop_flags;
1030 
1031 	ctrl_ctx->add_flags &= ~drop_flag;
1032 	new_add_flags = ctrl_ctx->add_flags;
1033 
1034 	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
1035 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1036 	/* Update the last valid endpoint context, if we deleted the last one */
1037 	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
1038 		slot_ctx->dev_info &= ~LAST_CTX_MASK;
1039 		slot_ctx->dev_info |= LAST_CTX(last_ctx);
1040 	}
1041 	new_slot_info = slot_ctx->dev_info;
1042 
1043 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1044 
1045 	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1046 			(unsigned int) ep->desc.bEndpointAddress,
1047 			udev->slot_id,
1048 			(unsigned int) new_drop_flags,
1049 			(unsigned int) new_add_flags,
1050 			(unsigned int) new_slot_info);
1051 	return 0;
1052 }
1053 
1054 /* Add an endpoint to a new possible bandwidth configuration for this device.
1055  * Only one call to this function is allowed per endpoint before
1056  * check_bandwidth() or reset_bandwidth() must be called.
1057  * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1058  * add the endpoint to the schedule with possibly new parameters denoted by a
1059  * different endpoint descriptor in usb_host_endpoint.
1060  * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1061  * not allowed.
1062  *
1063  * The USB core will not allow URBs to be queued to an endpoint until the
1064  * configuration or alt setting is installed in the device, so there's no need
1065  * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1066  */
1067 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1068 		struct usb_host_endpoint *ep)
1069 {
1070 	struct xhci_hcd *xhci;
1071 	struct xhci_container_ctx *in_ctx, *out_ctx;
1072 	unsigned int ep_index;
1073 	struct xhci_ep_ctx *ep_ctx;
1074 	struct xhci_slot_ctx *slot_ctx;
1075 	struct xhci_input_control_ctx *ctrl_ctx;
1076 	u32 added_ctxs;
1077 	unsigned int last_ctx;
1078 	u32 new_add_flags, new_drop_flags, new_slot_info;
1079 	int ret = 0;
1080 
1081 	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
1082 	if (ret <= 0) {
1083 		/* So we won't queue a reset ep command for a root hub */
1084 		ep->hcpriv = NULL;
1085 		return ret;
1086 	}
1087 	xhci = hcd_to_xhci(hcd);
1088 
1089 	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1090 	last_ctx = xhci_last_valid_endpoint(added_ctxs);
1091 	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1092 		/* FIXME when we have to issue an evaluate endpoint command to
1093 		 * deal with ep0 max packet size changing once we get the
1094 		 * descriptors
1095 		 */
1096 		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1097 				__func__, added_ctxs);
1098 		return 0;
1099 	}
1100 
1101 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1102 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1103 				__func__);
1104 		return -EINVAL;
1105 	}
1106 
1107 	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1108 	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1109 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1110 	ep_index = xhci_get_endpoint_index(&ep->desc);
1111 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1112 	/* If the HCD has already noted the endpoint is enabled,
1113 	 * ignore this request.
1114 	 */
1115 	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
1116 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
1117 				__func__, ep);
1118 		return 0;
1119 	}
1120 
1121 	/*
1122 	 * Configuration and alternate setting changes must be done in
1123 	 * process context, not interrupt context (or so the documentation
1124 	 * for usb_set_interface() and usb_set_configuration() claims).
1125 	 */
1126 	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
1127 				udev, ep, GFP_NOIO) < 0) {
1128 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
1129 				__func__, ep->desc.bEndpointAddress);
1130 		return -ENOMEM;
1131 	}
1132 
1133 	ctrl_ctx->add_flags |= added_ctxs;
1134 	new_add_flags = ctrl_ctx->add_flags;
1135 
1136 	/* If xhci_endpoint_disable() was called for this endpoint, but the
1137 	 * xHC hasn't been notified yet through the check_bandwidth() call,
1138 	 * this re-adds a new state for the endpoint from the new endpoint
1139 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
1140 	 * drop flags alone.
1141 	 */
1142 	new_drop_flags = ctrl_ctx->drop_flags;
1143 
1144 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
1145 	/* Update the last valid endpoint context, if we just added one past */
1146 	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
1147 		slot_ctx->dev_info &= ~LAST_CTX_MASK;
1148 		slot_ctx->dev_info |= LAST_CTX(last_ctx);
1149 	}
1150 	new_slot_info = slot_ctx->dev_info;
1151 
1152 	/* Store the usb_device pointer for later use */
1153 	ep->hcpriv = udev;
1154 
1155 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
1156 			(unsigned int) ep->desc.bEndpointAddress,
1157 			udev->slot_id,
1158 			(unsigned int) new_drop_flags,
1159 			(unsigned int) new_add_flags,
1160 			(unsigned int) new_slot_info);
1161 	return 0;
1162 }
1163 
1164 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
1165 {
1166 	struct xhci_input_control_ctx *ctrl_ctx;
1167 	struct xhci_ep_ctx *ep_ctx;
1168 	struct xhci_slot_ctx *slot_ctx;
1169 	int i;
1170 
1171 	/* When a device's add flag and drop flag are zero, any subsequent
1172 	 * configure endpoint command will leave that endpoint's state
1173 	 * untouched.  Make sure we don't leave any old state in the input
1174 	 * endpoint contexts.
1175 	 */
1176 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1177 	ctrl_ctx->drop_flags = 0;
1178 	ctrl_ctx->add_flags = 0;
1179 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1180 	slot_ctx->dev_info &= ~LAST_CTX_MASK;
1181 	/* Endpoint 0 is always valid */
1182 	slot_ctx->dev_info |= LAST_CTX(1);
1183 	for (i = 1; i < 31; ++i) {
1184 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
1185 		ep_ctx->ep_info = 0;
1186 		ep_ctx->ep_info2 = 0;
1187 		ep_ctx->deq = 0;
1188 		ep_ctx->tx_info = 0;
1189 	}
1190 }
1191 
1192 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
1193 		struct usb_device *udev, int *cmd_status)
1194 {
1195 	int ret;
1196 
1197 	switch (*cmd_status) {
1198 	case COMP_ENOMEM:
1199 		dev_warn(&udev->dev, "Not enough host controller resources "
1200 				"for new device state.\n");
1201 		ret = -ENOMEM;
1202 		/* FIXME: can we allocate more resources for the HC? */
1203 		break;
1204 	case COMP_BW_ERR:
1205 		dev_warn(&udev->dev, "Not enough bandwidth "
1206 				"for new device state.\n");
1207 		ret = -ENOSPC;
1208 		/* FIXME: can we go back to the old state? */
1209 		break;
1210 	case COMP_TRB_ERR:
1211 		/* the HCD set up something wrong */
1212 		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
1213 				"add flag = 1, "
1214 				"and endpoint is not disabled.\n");
1215 		ret = -EINVAL;
1216 		break;
1217 	case COMP_SUCCESS:
1218 		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
1219 		ret = 0;
1220 		break;
1221 	default:
1222 		xhci_err(xhci, "ERROR: unexpected command completion "
1223 				"code 0x%x.\n", *cmd_status);
1224 		ret = -EINVAL;
1225 		break;
1226 	}
1227 	return ret;
1228 }
1229 
1230 static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
1231 		struct usb_device *udev, int *cmd_status)
1232 {
1233 	int ret;
1234 	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
1235 
1236 	switch (*cmd_status) {
1237 	case COMP_EINVAL:
1238 		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
1239 				"context command.\n");
1240 		ret = -EINVAL;
1241 		break;
1242 	case COMP_EBADSLT:
1243 		dev_warn(&udev->dev, "WARN: slot not enabled for "
1244 				"evaluate context command.\n");
1245 	case COMP_CTX_STATE:
1246 		dev_warn(&udev->dev, "WARN: invalid context state for "
1247 				"evaluate context command.\n");
1248 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
1249 		ret = -EINVAL;
1250 		break;
1251 	case COMP_SUCCESS:
1252 		dev_dbg(&udev->dev, "Successful evaluate context command\n");
1253 		ret = 0;
1254 		break;
1255 	default:
1256 		xhci_err(xhci, "ERROR: unexpected command completion "
1257 				"code 0x%x.\n", *cmd_status);
1258 		ret = -EINVAL;
1259 		break;
1260 	}
1261 	return ret;
1262 }
1263 
1264 /* Issue a configure endpoint command or evaluate context command
1265  * and wait for it to finish.
1266  */
1267 static int xhci_configure_endpoint(struct xhci_hcd *xhci,
1268 		struct usb_device *udev,
1269 		struct xhci_command *command,
1270 		bool ctx_change, bool must_succeed)
1271 {
1272 	int ret;
1273 	int timeleft;
1274 	unsigned long flags;
1275 	struct xhci_container_ctx *in_ctx;
1276 	struct completion *cmd_completion;
1277 	int *cmd_status;
1278 	struct xhci_virt_device *virt_dev;
1279 
1280 	spin_lock_irqsave(&xhci->lock, flags);
1281 	virt_dev = xhci->devs[udev->slot_id];
1282 	if (command) {
1283 		in_ctx = command->in_ctx;
1284 		cmd_completion = command->completion;
1285 		cmd_status = &command->status;
1286 		command->command_trb = xhci->cmd_ring->enqueue;
1287 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
1288 	} else {
1289 		in_ctx = virt_dev->in_ctx;
1290 		cmd_completion = &virt_dev->cmd_completion;
1291 		cmd_status = &virt_dev->cmd_status;
1292 	}
1293 	init_completion(cmd_completion);
1294 
1295 	if (!ctx_change)
1296 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
1297 				udev->slot_id, must_succeed);
1298 	else
1299 		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
1300 				udev->slot_id);
1301 	if (ret < 0) {
1302 		if (command)
1303 			list_del(&command->cmd_list);
1304 		spin_unlock_irqrestore(&xhci->lock, flags);
1305 		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
1306 		return -ENOMEM;
1307 	}
1308 	xhci_ring_cmd_db(xhci);
1309 	spin_unlock_irqrestore(&xhci->lock, flags);
1310 
1311 	/* Wait for the configure endpoint command to complete */
1312 	timeleft = wait_for_completion_interruptible_timeout(
1313 			cmd_completion,
1314 			USB_CTRL_SET_TIMEOUT);
1315 	if (timeleft <= 0) {
1316 		xhci_warn(xhci, "%s while waiting for %s command\n",
1317 				timeleft == 0 ? "Timeout" : "Signal",
1318 				ctx_change == 0 ?
1319 					"configure endpoint" :
1320 					"evaluate context");
1321 		/* FIXME cancel the configure endpoint command */
1322 		return -ETIME;
1323 	}
1324 
1325 	if (!ctx_change)
1326 		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
1327 	return xhci_evaluate_context_result(xhci, udev, cmd_status);
1328 }
1329 
1330 /* Called after one or more calls to xhci_add_endpoint() or
1331  * xhci_drop_endpoint().  If this call fails, the USB core is expected
1332  * to call xhci_reset_bandwidth().
1333  *
1334  * Since we are in the middle of changing either configuration or
1335  * installing a new alt setting, the USB core won't allow URBs to be
1336  * enqueued for any endpoint on the old config or interface.  Nothing
1337  * else should be touching the xhci->devs[slot_id] structure, so we
1338  * don't need to take the xhci->lock for manipulating that.
1339  */
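/* The USB core normally invokes this via usb_hcd_alloc_bandwidth() when a
 * new configuration or alternate setting is being installed.
 */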
1340 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1341 {
1342 	int i;
1343 	int ret = 0;
1344 	struct xhci_hcd *xhci;
1345 	struct xhci_virt_device	*virt_dev;
1346 	struct xhci_input_control_ctx *ctrl_ctx;
1347 	struct xhci_slot_ctx *slot_ctx;
1348 
1349 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1350 	if (ret <= 0)
1351 		return ret;
1352 	xhci = hcd_to_xhci(hcd);
1353 
1354 	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
1355 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1356 				__func__);
1357 		return -EINVAL;
1358 	}
1359 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1360 	virt_dev = xhci->devs[udev->slot_id];
1361 
1362 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
1363 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
1364 	ctrl_ctx->add_flags |= SLOT_FLAG;
1365 	ctrl_ctx->add_flags &= ~EP0_FLAG;
1366 	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
1367 	ctrl_ctx->drop_flags &= ~EP0_FLAG;
1368 	xhci_dbg(xhci, "New Input Control Context:\n");
1369 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
1370 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
1371 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1372 
1373 	ret = xhci_configure_endpoint(xhci, udev, NULL,
1374 			false, false);
1375 	if (ret) {
1376 		/* Callee should call reset_bandwidth() */
1377 		return ret;
1378 	}
1379 
1380 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
1381 	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
1382 			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1383 
1384 	xhci_zero_in_ctx(xhci, virt_dev);
1385 	/* Install new rings and free or cache any old rings */
1386 	for (i = 1; i < 31; ++i) {
1387 		if (!virt_dev->eps[i].new_ring)
1388 			continue;
1389 		/* Only cache or free the old ring if it exists.
1390 		 * It may not if this is the first add of an endpoint.
1391 		 */
1392 		if (virt_dev->eps[i].ring) {
1393 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1394 		}
1395 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1396 		virt_dev->eps[i].new_ring = NULL;
1397 	}
1398 
1399 	return ret;
1400 }
1401 
1402 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1403 {
1404 	struct xhci_hcd *xhci;
1405 	struct xhci_virt_device	*virt_dev;
1406 	int i, ret;
1407 
1408 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1409 	if (ret <= 0)
1410 		return;
1411 	xhci = hcd_to_xhci(hcd);
1412 
1413 	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
1414 		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
1415 				__func__);
1416 		return;
1417 	}
1418 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1419 	virt_dev = xhci->devs[udev->slot_id];
1420 	/* Free any rings allocated for added endpoints */
1421 	for (i = 0; i < 31; ++i) {
1422 		if (virt_dev->eps[i].new_ring) {
1423 			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
1424 			virt_dev->eps[i].new_ring = NULL;
1425 		}
1426 	}
1427 	xhci_zero_in_ctx(xhci, virt_dev);
1428 }
1429 
1430 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1431 		struct xhci_container_ctx *in_ctx,
1432 		struct xhci_container_ctx *out_ctx,
1433 		u32 add_flags, u32 drop_flags)
1434 {
1435 	struct xhci_input_control_ctx *ctrl_ctx;
1436 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
1437 	ctrl_ctx->add_flags = add_flags;
1438 	ctrl_ctx->drop_flags = drop_flags;
1439 	xhci_slot_copy(xhci, in_ctx, out_ctx);
1440 	ctrl_ctx->add_flags |= SLOT_FLAG;
1441 
1442 	xhci_dbg(xhci, "Input Context:\n");
1443 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1444 }
1445 
1446 void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1447 		unsigned int slot_id, unsigned int ep_index,
1448 		struct xhci_dequeue_state *deq_state)
1449 {
1450 	struct xhci_container_ctx *in_ctx;
1451 	struct xhci_ep_ctx *ep_ctx;
1452 	u32 added_ctxs;
1453 	dma_addr_t addr;
1454 
1455 	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
1456 			xhci->devs[slot_id]->out_ctx, ep_index);
1457 	in_ctx = xhci->devs[slot_id]->in_ctx;
1458 	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
1459 	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
1460 			deq_state->new_deq_ptr);
1461 	if (addr == 0) {
1462 		xhci_warn(xhci, "WARN Cannot submit config ep after "
1463 				"reset ep command\n");
1464 		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
1465 				deq_state->new_deq_seg,
1466 				deq_state->new_deq_ptr);
1467 		return;
1468 	}
1469 	ep_ctx->deq = addr | deq_state->new_cycle_state;
1470 
1471 	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
1472 	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
1473 			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
1474 }
1475 
1476 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1477 		struct usb_device *udev, unsigned int ep_index)
1478 {
1479 	struct xhci_dequeue_state deq_state;
1480 	struct xhci_virt_ep *ep;
1481 
1482 	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
1483 	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1484 	/* We need to move the HW's dequeue pointer past this TD,
1485 	 * or it will attempt to resend it on the next doorbell ring.
1486 	 */
1487 	xhci_find_new_dequeue_state(xhci, udev->slot_id,
1488 			ep_index, ep->stopped_stream, ep->stopped_td,
1489 			&deq_state);
1490 
1491 	/* HW with the reset endpoint quirk will use the saved dequeue state to
1492 	 * issue a configure endpoint command later.
1493 	 */
1494 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
1495 		xhci_dbg(xhci, "Queueing new dequeue state\n");
1496 		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
1497 				ep_index, ep->stopped_stream, &deq_state);
1498 	} else {
1499 		/* Better hope no one uses the input context between now and the
1500 		 * reset endpoint completion!
1501 		 * XXX: No idea how this hardware will react when stream rings
1502 		 * are enabled.
1503 		 */
1504 		xhci_dbg(xhci, "Setting up input context for "
1505 				"configure endpoint command\n");
1506 		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
1507 				ep_index, &deq_state);
1508 	}
1509 }
1510 
1511 /* Deal with stalled endpoints.  The core should have sent the control message
1512  * to clear the halt condition.  However, we need to make the xHCI hardware
1513  * reset its sequence number, since a device will expect a sequence number of
1514  * zero after the halt condition is cleared.
1515  * Context: in_interrupt
1516  */
1517 void xhci_endpoint_reset(struct usb_hcd *hcd,
1518 		struct usb_host_endpoint *ep)
1519 {
1520 	struct xhci_hcd *xhci;
1521 	struct usb_device *udev;
1522 	unsigned int ep_index;
1523 	unsigned long flags;
1524 	int ret;
1525 	struct xhci_virt_ep *virt_ep;
1526 
1527 	xhci = hcd_to_xhci(hcd);
1528 	udev = (struct usb_device *) ep->hcpriv;
1529 	/* Called with a root hub endpoint (or an endpoint that wasn't added
1530 	 * with xhci_add_endpoint()).
1531 	 */
1532 	if (!ep->hcpriv)
1533 		return;
1534 	ep_index = xhci_get_endpoint_index(&ep->desc);
1535 	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
1536 	if (!virt_ep->stopped_td) {
1537 		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
1538 				ep->desc.bEndpointAddress);
1539 		return;
1540 	}
1541 	if (usb_endpoint_xfer_control(&ep->desc)) {
1542 		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
1543 		return;
1544 	}
1545 
1546 	xhci_dbg(xhci, "Queueing reset endpoint command\n");
1547 	spin_lock_irqsave(&xhci->lock, flags);
1548 	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
1549 	/*
1550 	 * Can't change the ring dequeue pointer until it's transitioned to the
1551 	 * stopped state, which is only upon a successful reset endpoint
1552 	 * command.  Better hope that last command worked!
1553 	 */
1554 	if (!ret) {
1555 		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
1556 		kfree(virt_ep->stopped_td);
1557 		xhci_ring_cmd_db(xhci);
1558 	}
1559 	virt_ep->stopped_td = NULL;
1560 	virt_ep->stopped_trb = NULL;
1561 	virt_ep->stopped_stream = 0;
1562 	spin_unlock_irqrestore(&xhci->lock, flags);
1563 
1564 	if (ret)
1565 		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
1566 }
1567 
1568 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
1569 		struct usb_device *udev, struct usb_host_endpoint *ep,
1570 		unsigned int slot_id)
1571 {
1572 	int ret;
1573 	unsigned int ep_index;
1574 	unsigned int ep_state;
1575 
1576 	if (!ep)
1577 		return -EINVAL;
1578 	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, __func__);
1579 	if (ret <= 0)
1580 		return -EINVAL;
1581 	if (ep->ss_ep_comp.bmAttributes == 0) {
1582 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
1583 				" descriptor for ep 0x%x does not support streams\n",
1584 				ep->desc.bEndpointAddress);
1585 		return -EINVAL;
1586 	}
1587 
1588 	ep_index = xhci_get_endpoint_index(&ep->desc);
1589 	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1590 	if (ep_state & EP_HAS_STREAMS ||
1591 			ep_state & EP_GETTING_STREAMS) {
1592 		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
1593 				"already has streams set up.\n",
1594 				ep->desc.bEndpointAddress);
1595 		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
1596 				"dynamic stream context array reallocation.\n");
1597 		return -EINVAL;
1598 	}
1599 	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
1600 		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
1601 				"endpoint 0x%x; URBs are pending.\n",
1602 				ep->desc.bEndpointAddress);
1603 		return -EINVAL;
1604 	}
1605 	return 0;
1606 }
1607 
1608 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
1609 		unsigned int *num_streams, unsigned int *num_stream_ctxs)
1610 {
1611 	unsigned int max_streams;
1612 
1613 	/* The stream context array size must be a power of two */
1614 	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
1615 	/*
1616 	 * Find out how many primary stream array entries the host controller
1617 	 * supports.  Later we may use secondary stream arrays (similar to 2nd
1618 	 * level page entries), but that's an optional feature for xHCI host
1619 	 * controllers. xHCs must support at least 4 stream IDs.
1620 	 */
1621 	max_streams = HCC_MAX_PSA(xhci->hcc_params);
1622 	if (*num_stream_ctxs > max_streams) {
1623 		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
1624 				max_streams);
1625 		*num_stream_ctxs = max_streams;
1626 		*num_streams = max_streams;
1627 	}
1628 }
1629 
1630 /* Returns an error code if one of the endpoints already has streams.
1631  * This does not change any data structures, it only checks and gathers
1632  * information.
1633  */
1634 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
1635 		struct usb_device *udev,
1636 		struct usb_host_endpoint **eps, unsigned int num_eps,
1637 		unsigned int *num_streams, u32 *changed_ep_bitmask)
1638 {
1639 	unsigned int max_streams;
1640 	unsigned int endpoint_flag;
1641 	int i;
1642 	int ret;
1643 
1644 	for (i = 0; i < num_eps; i++) {
1645 		ret = xhci_check_streams_endpoint(xhci, udev,
1646 				eps[i], udev->slot_id);
1647 		if (ret < 0)
1648 			return ret;
1649 
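		/* bmAttributes[4:0] holds MaxStreams as a power-of-two exponent;
		 * USB_SS_MAX_STREAMS() expands it to the number of stream IDs
		 * the endpoint supports (not counting stream 0).
		 */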
1650 		max_streams = USB_SS_MAX_STREAMS(
1651 				eps[i]->ss_ep_comp.bmAttributes);
1652 		if (max_streams < (*num_streams - 1)) {
1653 			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
1654 					eps[i]->desc.bEndpointAddress,
1655 					max_streams);
1656 			*num_streams = max_streams+1;
1657 		}
1658 
1659 		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
1660 		if (*changed_ep_bitmask & endpoint_flag)
1661 			return -EINVAL;
1662 		*changed_ep_bitmask |= endpoint_flag;
1663 	}
1664 	return 0;
1665 }
1666 
1667 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
1668 		struct usb_device *udev,
1669 		struct usb_host_endpoint **eps, unsigned int num_eps)
1670 {
1671 	u32 changed_ep_bitmask = 0;
1672 	unsigned int slot_id;
1673 	unsigned int ep_index;
1674 	unsigned int ep_state;
1675 	int i;
1676 
1677 	slot_id = udev->slot_id;
1678 	if (!xhci->devs[slot_id])
1679 		return 0;
1680 
1681 	for (i = 0; i < num_eps; i++) {
1682 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1683 		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
1684 		/* Are streams already being freed for the endpoint? */
1685 		if (ep_state & EP_GETTING_NO_STREAMS) {
1686 			xhci_warn(xhci, "WARN Can't disable streams for "
1687 					"endpoint 0x%x, "
1688 					"streams are being disabled already\n",
1689 					eps[i]->desc.bEndpointAddress);
1690 			return 0;
1691 		}
1692 		/* Are there actually any streams to free? */
1693 		if (!(ep_state & EP_HAS_STREAMS) &&
1694 				!(ep_state & EP_GETTING_STREAMS)) {
1695 			xhci_warn(xhci, "WARN Can't disable streams for "
1696 					"endpoint 0x%x, "
1697 					"streams are already disabled!\n",
1698 					eps[i]->desc.bEndpointAddress);
1699 			xhci_warn(xhci, "WARN xhci_free_streams() called "
1700 					"with non-streams endpoint\n");
1701 			return 0;
1702 		}
1703 		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
1704 	}
1705 	return changed_ep_bitmask;
1706 }
1707 
1708 /*
1709  * The USB device drivers use this function (though the HCD interface in USB
1710  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
1711  * coordinate mass storage command queueing across multiple endpoints (basically
1712  * a stream ID == a task ID).
1713  *
1714  * Setting up streams involves allocating the same size stream context array
1715  * for each endpoint and issuing a configure endpoint command for all endpoints.
1716  *
1717  * Don't allow the call to succeed if one endpoint only supports one stream
1718  * (which means it doesn't support streams at all).
1719  *
1720  * Drivers may get less stream IDs than they asked for, if the host controller
1721  * hardware or endpoints claim they can't support the number of requested
1722  * stream IDs.
1723  */
1724 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
1725 		struct usb_host_endpoint **eps, unsigned int num_eps,
1726 		unsigned int num_streams, gfp_t mem_flags)
1727 {
1728 	int i, ret;
1729 	struct xhci_hcd *xhci;
1730 	struct xhci_virt_device *vdev;
1731 	struct xhci_command *config_cmd;
1732 	unsigned int ep_index;
1733 	unsigned int num_stream_ctxs;
1734 	unsigned long flags;
1735 	u32 changed_ep_bitmask = 0;
1736 
1737 	if (!eps)
1738 		return -EINVAL;
1739 
1740 	/* Add one to the number of streams requested to account for
1741 	 * stream 0 that is reserved for xHCI usage.
1742 	 */
1743 	num_streams += 1;
1744 	xhci = hcd_to_xhci(hcd);
1745 	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
1746 			num_streams);
1747 
1748 	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
1749 	if (!config_cmd) {
1750 		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
1751 		return -ENOMEM;
1752 	}
1753 
1754 	/* Check to make sure none of the endpoints are already configured for
1755 	 * streams.  While we're at it, find the maximum number of streams that
1756 	 * all the endpoints will support and check for duplicate endpoints.
1757 	 */
1758 	spin_lock_irqsave(&xhci->lock, flags);
1759 	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
1760 			num_eps, &num_streams, &changed_ep_bitmask);
1761 	if (ret < 0) {
1762 		xhci_free_command(xhci, config_cmd);
1763 		spin_unlock_irqrestore(&xhci->lock, flags);
1764 		return ret;
1765 	}
1766 	if (num_streams <= 1) {
1767 		xhci_warn(xhci, "WARN: endpoints can't handle "
1768 				"more than one stream.\n");
1769 		xhci_free_command(xhci, config_cmd);
1770 		spin_unlock_irqrestore(&xhci->lock, flags);
1771 		return -EINVAL;
1772 	}
1773 	vdev = xhci->devs[udev->slot_id];
1774 	/* Mark each endpoint as being in transition, so
1775 	 * xhci_urb_enqueue() will reject all URBs.
1776 	 */
1777 	for (i = 0; i < num_eps; i++) {
1778 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1779 		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
1780 	}
1781 	spin_unlock_irqrestore(&xhci->lock, flags);
1782 
1783 	/* Setup internal data structures and allocate HW data structures for
1784 	 * streams (but don't install the HW structures in the input context
1785 	 * until we're sure all memory allocation succeeded).
1786 	 */
1787 	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
1788 	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
1789 			num_stream_ctxs, num_streams);
1790 
1791 	for (i = 0; i < num_eps; i++) {
1792 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1793 		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
1794 				num_stream_ctxs,
1795 				num_streams, mem_flags);
1796 		if (!vdev->eps[ep_index].stream_info)
1797 			goto cleanup;
1798 		/* Set maxPstreams in endpoint context and update deq ptr to
1799 		 * point to stream context array. FIXME
1800 		 */
1801 	}
1802 
1803 	/* Set up the input context for a configure endpoint command. */
1804 	for (i = 0; i < num_eps; i++) {
1805 		struct xhci_ep_ctx *ep_ctx;
1806 
1807 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1808 		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
1809 
1810 		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
1811 				vdev->out_ctx, ep_index);
1812 		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
1813 				vdev->eps[ep_index].stream_info);
1814 	}
1815 	/* Tell the HW to drop its old copy of the endpoint context info
1816 	 * and add the updated copy from the input context.
1817 	 */
1818 	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
1819 			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
1820 
1821 	/* Issue and wait for the configure endpoint command */
1822 	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
1823 			false, false);
1824 
1825 	/* xHC rejected the configure endpoint command for some reason, so we
1826 	 * leave the old ring intact and free our internal streams data
1827 	 * structure.
1828 	 */
1829 	if (ret < 0)
1830 		goto cleanup;
1831 
1832 	spin_lock_irqsave(&xhci->lock, flags);
1833 	for (i = 0; i < num_eps; i++) {
1834 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1835 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
1836 		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
1837 			 udev->slot_id, ep_index);
1838 		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
1839 	}
1840 	xhci_free_command(xhci, config_cmd);
1841 	spin_unlock_irqrestore(&xhci->lock, flags);
1842 
1843 	/* Subtract 1 for stream 0, which drivers can't use */
1844 	return num_streams - 1;
1845 
1846 cleanup:
1847 	/* If it didn't work, free the streams! */
1848 	for (i = 0; i < num_eps; i++) {
1849 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1850 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1851 		vdev->eps[ep_index].stream_info = NULL;
1852 		/* FIXME Unset maxPstreams in endpoint context and
1853 		 * update deq ptr to point to the normal endpoint ring.
1854 		 */
1855 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
1856 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
1857 		xhci_endpoint_zero(xhci, vdev, eps[i]);
1858 	}
1859 	xhci_free_command(xhci, config_cmd);
1860 	return -ENOMEM;
1861 }
1862 
1863 /* Transition the endpoint from using streams to being a "normal" endpoint
1864  * without streams.
1865  *
1866  * Modify the endpoint context state, submit a configure endpoint command,
1867  * and free all endpoint rings for streams if that completes successfully.
1868  */
1869 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
1870 		struct usb_host_endpoint **eps, unsigned int num_eps,
1871 		gfp_t mem_flags)
1872 {
1873 	int i, ret;
1874 	struct xhci_hcd *xhci;
1875 	struct xhci_virt_device *vdev;
1876 	struct xhci_command *command;
1877 	unsigned int ep_index;
1878 	unsigned long flags;
1879 	u32 changed_ep_bitmask;
1880 
1881 	xhci = hcd_to_xhci(hcd);
1882 	vdev = xhci->devs[udev->slot_id];
1883 
1884 	/* Set up a configure endpoint command to remove the streams rings */
1885 	spin_lock_irqsave(&xhci->lock, flags);
1886 	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
1887 			udev, eps, num_eps);
1888 	if (changed_ep_bitmask == 0) {
1889 		spin_unlock_irqrestore(&xhci->lock, flags);
1890 		return -EINVAL;
1891 	}
1892 
1893 	/* Use the xhci_command structure from the first endpoint.  We may have
1894 	 * allocated too many, but the driver may call xhci_free_streams() for
1895 	 * each endpoint it grouped into one call to xhci_alloc_streams().
1896 	 */
1897 	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
1898 	command = vdev->eps[ep_index].stream_info->free_streams_command;
1899 	for (i = 0; i < num_eps; i++) {
1900 		struct xhci_ep_ctx *ep_ctx;
1901 
1902 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1903 		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
1904 		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
1905 			EP_GETTING_NO_STREAMS;
1906 
1907 		xhci_endpoint_copy(xhci, command->in_ctx,
1908 				vdev->out_ctx, ep_index);
1909 		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
1910 				&vdev->eps[ep_index]);
1911 	}
1912 	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
1913 			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
1914 	spin_unlock_irqrestore(&xhci->lock, flags);
1915 
1916 	/* Issue and wait for the configure endpoint command,
1917 	 * which must succeed.
1918 	 */
1919 	ret = xhci_configure_endpoint(xhci, udev, command,
1920 			false, true);
1921 
1922 	/* xHC rejected the configure endpoint command for some reason, so we
1923 	 * leave the streams rings intact.
1924 	 */
1925 	if (ret < 0)
1926 		return ret;
1927 
1928 	spin_lock_irqsave(&xhci->lock, flags);
1929 	for (i = 0; i < num_eps; i++) {
1930 		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
1931 		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
1932 		vdev->eps[ep_index].stream_info = NULL;
1933 		/* FIXME Unset maxPstreams in endpoint context and
1934 		 * update deq ptr to point to the normal endpoint ring.
1935 		 */
1936 		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
1937 		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
1938 	}
1939 	spin_unlock_irqrestore(&xhci->lock, flags);
1940 
1941 	return 0;
1942 }
1943 
1944 /*
1945  * This submits a Reset Device Command, which will set the device state to 0,
1946  * set the device address to 0, and disable all the endpoints except the default
1947  * control endpoint.  The USB core should come back and call
1948  * xhci_address_device(), and then re-set up the configuration.  If this is
1949  * called because of a usb_reset_and_verify_device(), then the old alternate
1950  * settings will be re-installed through the normal bandwidth allocation
1951  * functions.
1952  *
1953  * Wait for the Reset Device command to finish.  Remove all structures
1954  * associated with the endpoints that were disabled.  Clear the input device
1955  * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
1956  */
1957 int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
1958 {
1959 	int ret, i;
1960 	unsigned long flags;
1961 	struct xhci_hcd *xhci;
1962 	unsigned int slot_id;
1963 	struct xhci_virt_device *virt_dev;
1964 	struct xhci_command *reset_device_cmd;
1965 	int timeleft;
1966 	int last_freed_endpoint;
1967 
1968 	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
1969 	if (ret <= 0)
1970 		return ret;
1971 	xhci = hcd_to_xhci(hcd);
1972 	slot_id = udev->slot_id;
1973 	virt_dev = xhci->devs[slot_id];
1974 	if (!virt_dev) {
1975 		xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
1976 				__func__, slot_id);
1977 		return -EINVAL;
1978 	}
1979 
1980 	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
1981 	/* Allocate the command structure that holds the struct completion.
1982 	 * Assume we're in process context, since the normal device reset
1983 	 * process has to wait for the device anyway.  Storage devices are
1984 	 * reset as part of error handling, so use GFP_NOIO instead of
1985 	 * GFP_KERNEL.
1986 	 */
1987 	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
1988 	if (!reset_device_cmd) {
1989 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
1990 		return -ENOMEM;
1991 	}
1992 
1993 	/* Attempt to submit the Reset Device command to the command ring */
1994 	spin_lock_irqsave(&xhci->lock, flags);
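	/* Remember which TRB this command is queued on, so the command
	 * completion handler can match the completion event to this command.
	 */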
1995 	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
1996 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
1997 	ret = xhci_queue_reset_device(xhci, slot_id);
1998 	if (ret) {
1999 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2000 		list_del(&reset_device_cmd->cmd_list);
2001 		spin_unlock_irqrestore(&xhci->lock, flags);
2002 		goto command_cleanup;
2003 	}
2004 	xhci_ring_cmd_db(xhci);
2005 	spin_unlock_irqrestore(&xhci->lock, flags);
2006 
2007 	/* Wait for the Reset Device command to finish */
2008 	timeleft = wait_for_completion_interruptible_timeout(
2009 			reset_device_cmd->completion,
2010 			USB_CTRL_SET_TIMEOUT);
2011 	if (timeleft <= 0) {
2012 		xhci_warn(xhci, "%s while waiting for reset device command\n",
2013 				timeleft == 0 ? "Timeout" : "Signal");
2014 		spin_lock_irqsave(&xhci->lock, flags);
2015 		/* The timeout might have raced with the event ring handler, so
2016 		 * only delete from the list if the item isn't poisoned.
2017 		 */
2018 		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
2019 			list_del(&reset_device_cmd->cmd_list);
2020 		spin_unlock_irqrestore(&xhci->lock, flags);
2021 		ret = -ETIME;
2022 		goto command_cleanup;
2023 	}
2024 
2025 	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
2026 	 * unless we tried to reset a slot ID that wasn't enabled,
2027 	 * or the device wasn't in the addressed or configured state.
2028 	 */
2029 	ret = reset_device_cmd->status;
2030 	switch (ret) {
2031 	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
2032 	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
2033 		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
2034 				slot_id,
2035 				xhci_get_slot_state(xhci, virt_dev->out_ctx));
2036 		xhci_info(xhci, "Not freeing device rings.\n");
2037 		/* Don't treat this as an error.  May change my mind later. */
2038 		ret = 0;
2039 		goto command_cleanup;
2040 	case COMP_SUCCESS:
2041 		xhci_dbg(xhci, "Successful reset device command.\n");
2042 		break;
2043 	default:
2044 		if (xhci_is_vendor_info_code(xhci, ret))
2045 			break;
2046 		xhci_warn(xhci, "Unknown completion code %u for "
2047 				"reset device command.\n", ret);
2048 		ret = -EINVAL;
2049 		goto command_cleanup;
2050 	}
2051 
2052 	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
2053 	last_freed_endpoint = 1;
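	/* Track the highest endpoint index freed so the context dump below
	 * covers all of the affected endpoint contexts.
	 */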
2054 	for (i = 1; i < 31; ++i) {
2055 		if (!virt_dev->eps[i].ring)
2056 			continue;
2057 		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
2058 		last_freed_endpoint = i;
2059 	}
2060 	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
2061 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
2062 	ret = 0;
2063 
2064 command_cleanup:
2065 	xhci_free_command(xhci, reset_device_cmd);
2066 	return ret;
2067 }
2068 
2069 /*
2070  * At this point, the struct usb_device is about to go away, the device has
2071  * disconnected, all traffic has been stopped, and the endpoints have been
2072  * disabled.  Free any HC data structures associated with that device.
2073  */
2074 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
2075 {
2076 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2077 	struct xhci_virt_device *virt_dev;
2078 	unsigned long flags;
2079 	u32 state;
2080 	int i;
2081 
2082 	if (udev->slot_id == 0)
2083 		return;
2084 	virt_dev = xhci->devs[udev->slot_id];
2085 	if (!virt_dev)
2086 		return;
2087 
2088 	/* Stop any wayward timer functions (which may grab the lock) */
2089 	for (i = 0; i < 31; ++i) {
2090 		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
2091 		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
2092 	}
2093 
2094 	spin_lock_irqsave(&xhci->lock, flags);
2095 	/* Don't disable the slot if the host controller is dead. */
2096 	state = xhci_readl(xhci, &xhci->op_regs->status);
2097 	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
2098 		xhci_free_virt_device(xhci, udev->slot_id);
2099 		spin_unlock_irqrestore(&xhci->lock, flags);
2100 		return;
2101 	}
2102 
2103 	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
2104 		spin_unlock_irqrestore(&xhci->lock, flags);
2105 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2106 		return;
2107 	}
2108 	xhci_ring_cmd_db(xhci);
2109 	spin_unlock_irqrestore(&xhci->lock, flags);
2110 	/*
2111 	 * Event command completion handler will free any data structures
2112 	 * associated with the slot.  XXX Can free sleep?
2113 	 */
2114 }
2115 
2116 /*
2117  * Returns 0 if the xHC ran out of device slots, the Enable Slot command
2118  * timed out, or allocating memory failed.  Returns 1 on success.
2119  */
2120 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2121 {
2122 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2123 	unsigned long flags;
2124 	int timeleft;
2125 	int ret;
2126 
2127 	spin_lock_irqsave(&xhci->lock, flags);
2128 	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
2129 	if (ret) {
2130 		spin_unlock_irqrestore(&xhci->lock, flags);
2131 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2132 		return 0;
2133 	}
2134 	xhci_ring_cmd_db(xhci);
2135 	spin_unlock_irqrestore(&xhci->lock, flags);
2136 
2137 	/* XXX: how much time for xHC slot assignment? */
2138 	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2139 			USB_CTRL_SET_TIMEOUT);
2140 	if (timeleft <= 0) {
2141 		xhci_warn(xhci, "%s while waiting for a slot\n",
2142 				timeleft == 0 ? "Timeout" : "Signal");
2143 		/* FIXME cancel the enable slot request */
2144 		return 0;
2145 	}
2146 
2147 	if (!xhci->slot_id) {
2148 		xhci_err(xhci, "Error while assigning device slot ID\n");
2149 		return 0;
2150 	}
2151 	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
2152 	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
2153 		/* Disable slot, if we can do it without mem alloc */
2154 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2155 		spin_lock_irqsave(&xhci->lock, flags);
2156 		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
2157 			xhci_ring_cmd_db(xhci);
2158 		spin_unlock_irqrestore(&xhci->lock, flags);
2159 		return 0;
2160 	}
2161 	udev->slot_id = xhci->slot_id;
2162 	/* Is this a LS or FS device under a HS hub? */
2163 	/* Hub or peripheral? */
2164 	return 1;
2165 }
2166 
2167 /*
2168  * Issue an Address Device command (which will issue a SetAddress request to
2169  * the device).
2170  * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
2171  * we should only issue and wait on one address command at a time.
2172  *
2173  * We add one to the device address issued by the hardware because the USB core
2174  * uses address 1 for the root hubs (even though they're not really devices).
2175  */
2176 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2177 {
2178 	unsigned long flags;
2179 	int timeleft;
2180 	struct xhci_virt_device *virt_dev;
2181 	int ret = 0;
2182 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2183 	struct xhci_slot_ctx *slot_ctx;
2184 	struct xhci_input_control_ctx *ctrl_ctx;
2185 	u64 temp_64;
2186 
2187 	if (!udev->slot_id) {
2188 		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
2189 		return -EINVAL;
2190 	}
2191 
2192 	virt_dev = xhci->devs[udev->slot_id];
2193 
2194 	/* If this is a Set Address to an unconfigured device, setup ep 0 */
2195 	if (!udev->config)
2196 		xhci_setup_addressable_virt_dev(xhci, udev);
2197 	else
2198 		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
2199 	/* Otherwise, assume the core has the device configured how it wants */
2200 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2201 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2202 
2203 	spin_lock_irqsave(&xhci->lock, flags);
2204 	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
2205 					udev->slot_id);
2206 	if (ret) {
2207 		spin_unlock_irqrestore(&xhci->lock, flags);
2208 		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2209 		return ret;
2210 	}
2211 	xhci_ring_cmd_db(xhci);
2212 	spin_unlock_irqrestore(&xhci->lock, flags);
2213 
2214 	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
2215 	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
2216 			USB_CTRL_SET_TIMEOUT);
2217 	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
2218 	 * the SetAddress() "recovery interval" required by USB and aborting the
2219 	 * command on a timeout."
2220 	 */
2221 	if (timeleft <= 0) {
2222 		xhci_warn(xhci, "%s while waiting for address device command\n",
2223 				timeleft == 0 ? "Timeout" : "Signal");
2224 		/* FIXME cancel the address device command */
2225 		return -ETIME;
2226 	}
2227 
2228 	switch (virt_dev->cmd_status) {
2229 	case COMP_CTX_STATE:
2230 	case COMP_EBADSLT:
2231 		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
2232 				udev->slot_id);
2233 		ret = -EINVAL;
2234 		break;
2235 	case COMP_TX_ERR:
2236 		dev_warn(&udev->dev, "Device not responding to set address.\n");
2237 		ret = -EPROTO;
2238 		break;
2239 	case COMP_SUCCESS:
2240 		xhci_dbg(xhci, "Successful Address Device command\n");
2241 		break;
2242 	default:
2243 		xhci_err(xhci, "ERROR: unexpected command completion "
2244 				"code 0x%x.\n", virt_dev->cmd_status);
2245 		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2246 		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2247 		ret = -EINVAL;
2248 		break;
2249 	}
2250 	if (ret)
2251 		return ret;
2252 
2253 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2254 	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
2255 	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
2256 			udev->slot_id,
2257 			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
2258 			(unsigned long long)
2259 				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
2260 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
2261 			(unsigned long long)virt_dev->out_ctx->dma);
2262 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
2263 	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
2264 	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
2265 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
2266 	/*
2267 	 * USB core uses address 1 for the roothubs, so we add one to the
2268 	 * address given back to us by the HC.
2269 	 */
2270 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2271 	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
2272 	/* Zero the input context control for later use */
2273 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
2274 	ctrl_ctx->add_flags = 0;
2275 	ctrl_ctx->drop_flags = 0;
2276 
2277 	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
2278 	/* XXX Meh, not sure if anyone else but choose_address uses this. */
2279 	set_bit(udev->devnum, udev->bus->devmap.devicemap);
2280 
2281 	return 0;
2282 }
2283 
2284 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
2285  * internal data structures for the device.
2286  */
2287 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
2288 			struct usb_tt *tt, gfp_t mem_flags)
2289 {
2290 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2291 	struct xhci_virt_device *vdev;
2292 	struct xhci_command *config_cmd;
2293 	struct xhci_input_control_ctx *ctrl_ctx;
2294 	struct xhci_slot_ctx *slot_ctx;
2295 	unsigned long flags;
2296 	unsigned think_time;
2297 	int ret;
2298 
2299 	/* Ignore root hubs */
2300 	if (!hdev->parent)
2301 		return 0;
2302 
2303 	vdev = xhci->devs[hdev->slot_id];
2304 	if (!vdev) {
2305 		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
2306 		return -EINVAL;
2307 	}
2308 	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
2309 	if (!config_cmd) {
2310 		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
2311 		return -ENOMEM;
2312 	}
2313 
2314 	spin_lock_irqsave(&xhci->lock, flags);
2315 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
2316 	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
2317 	ctrl_ctx->add_flags |= SLOT_FLAG;
2318 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
2319 	slot_ctx->dev_info |= DEV_HUB;
2320 	if (tt->multi)
2321 		slot_ctx->dev_info |= DEV_MTT;
2322 	if (xhci->hci_version > 0x95) {
2323 		xhci_dbg(xhci, "xHCI version %x needs hub "
2324 				"TT think time and number of ports\n",
2325 				(unsigned int) xhci->hci_version);
2326 		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
2327 		/* Set TT think time - convert from ns to FS bit times.
2328 		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
2329 		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
2330 		 */
2331 		think_time = tt->think_time;
2332 		if (think_time != 0)
2333 			think_time = (think_time / 666) - 1;
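		/* e.g. a think_time of 666 ns (8 FS bit times) encodes as 0,
		 * and 2664 ns (32 FS bit times) encodes as 3.
		 */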
2334 		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
2335 	} else {
2336 		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
2337 				"TT think time or number of ports\n",
2338 				(unsigned int) xhci->hci_version);
2339 	}
2340 	slot_ctx->dev_state = 0;
2341 	spin_unlock_irqrestore(&xhci->lock, flags);
2342 
2343 	xhci_dbg(xhci, "Set up %s for hub device.\n",
2344 			(xhci->hci_version > 0x95) ?
2345 			"configure endpoint" : "evaluate context");
2346 	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
2347 	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
2348 
2349 	/* Issue and wait for the configure endpoint or
2350 	 * evaluate context command.
2351 	 */
2352 	if (xhci->hci_version > 0x95)
2353 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2354 				false, false);
2355 	else
2356 		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
2357 				true, false);
2358 
2359 	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
2360 	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
2361 
2362 	xhci_free_command(xhci, config_cmd);
2363 	return ret;
2364 }
2365 
2366 int xhci_get_frame(struct usb_hcd *hcd)
2367 {
2368 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2369 	/* EHCI mods by the periodic size.  Why? */
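	/* MFINDEX counts 125 us microframes; shift by 3 to get 1 ms frames. */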
2370 	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
2371 }
2372 
2373 MODULE_DESCRIPTION(DRIVER_DESC);
2374 MODULE_AUTHOR(DRIVER_AUTHOR);
2375 MODULE_LICENSE("GPL");
2376 
2377 static int __init xhci_hcd_init(void)
2378 {
2379 #ifdef CONFIG_PCI
2380 	int retval = 0;
2381 
2382 	retval = xhci_register_pci();
2383 
2384 	if (retval < 0) {
2385 		printk(KERN_DEBUG "Problem registering PCI driver.\n");
2386 		return retval;
2387 	}
2388 #endif
2389 	/*
2390 	 * Check the compiler generated sizes of structures that must be laid
2391 	 * out in specific ways for hardware access.
2392 	 */
2393 	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
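	/* 256 doorbell registers (doorbell 0 for the host controller,
	 * doorbells 1-255 for device slots), 32 bits each.
	 */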
2394 	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
2395 	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
2396 	/* xhci_device_control has eight fields, and also
2397 	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
2398 	 */
2399 	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
2400 	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
2401 	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
2402 	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
2403 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
2404 	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
2405 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
2406 	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
2407 	return 0;
2408 }
2409 module_init(xhci_hcd_init);
2410 
2411 static void __exit xhci_hcd_cleanup(void)
2412 {
2413 #ifdef CONFIG_PCI
2414 	xhci_unregister_pci();
2415 #endif
2416 }
2417 module_exit(xhci_hcd_cleanup);
2418