/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

/*
 * Free IRQs
 * free all requested IRQs
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host
	 *   supports, from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus
	 *   one additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
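 *
 * Most of the allocation is done in xhci_mem_init(); xhci_init() itself
 * mainly initializes the spinlock and applies the link TRB quirk before
 * calling it.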
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	u32 ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to msi*/
		ret = xhci_setup_msi(xhci);

	if (ret) {
		/* fall back to legacy interrupt*/
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
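 *
 * This is called once for each roothub's hcd; for the shared (USB3) hcd we
 * only halt the controller and clear xhci->shared_hcd (see
 * xhci_only_stop_hcd()).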
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
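 *
 * We stop the controller, save its register state, and set the Controller
 * Save State (CSS) flag so the xHC preserves its internal state across the
 * power transition.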
 *
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 *
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (retval)
			goto failed_restart;

		xhci_dbg(xhci, "Start the secondary HCD\n");
		retval = xhci_run(secondary_hcd);
		if (!retval) {
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
			set_bit(HCD_FLAG_HW_ACCESSIBLE,
					&xhci->shared_hcd->flags);
		}
failed_restart:
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	if (check_virt_dev) {
		xhci = hcd_to_xhci(hcd);
		if (!udev->slot_id || !xhci->devs
			|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
						"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
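 *
 * We compare the max packet size the xHC has cached in the output endpoint 0
 * context against the value the USB core read from the device descriptor, and
 * only issue the command when they differ.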
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				  size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
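	 * Sleeping is therefore allowed, which is why xhci_endpoint_init()
	 * below can use GFP_NOIO rather than GFP_ATOMIC.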
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
			udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = ctrl_ctx->add_flags >> 2;
	valid_drop_flags = ctrl_ctx->drop_flags >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

/*
 * We need to reserve the new number of endpoints before the configure endpoint
 * command completes.  We can't subtract the dropped endpoints from the number
 * of active endpoints until the command completes because we can oversubscribe
 * the host in this case:
 *
 *  - the first configure endpoint command drops more endpoints than it adds
 *  - a second configure endpoint command that adds more endpoints is queued
 *  - the first configure endpoint command fails, so the config is unchanged
 *  - the second command may succeed, even though there aren't enough resources
 *
 * Must be called with xhci->lock held.
 */
static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 added_eps;

	added_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
		xhci_dbg(xhci, "Not enough ep ctxs: "
				"%u active, need to add %u, limit is %u.\n",
				xhci->num_active_eps, added_eps,
				xhci->limit_active_eps);
		return -ENOMEM;
	}
	xhci->num_active_eps += added_eps;
	xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
			xhci->num_active_eps);
	return 0;
}

/*
 * The configure endpoint command was failed by the xHC for some other reason,
 * so we need to revert the resources that the failed configuration would have
 * used.
 *
 * Must be called with xhci->lock held.
 */
static void xhci_free_host_resources(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_failed_eps;

	num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_failed_eps;
	xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
			num_failed_eps,
			xhci->num_active_eps);
}

/*
 * Now that the command has completed, clean up the active endpoint count by
 * subtracting out the endpoints that were dropped (but not changed).
 *
 * Must be called with xhci->lock held.
 */
static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	u32 num_dropped_eps;

	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx);
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
				num_dropped_eps,
				xhci->num_active_eps);
}

/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
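 * Must be called in process context: it sleeps waiting for the command to
 * complete, with the xhci lock released.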
1694 */ 1695 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 1696 struct usb_device *udev, 1697 struct xhci_command *command, 1698 bool ctx_change, bool must_succeed) 1699 { 1700 int ret; 1701 int timeleft; 1702 unsigned long flags; 1703 struct xhci_container_ctx *in_ctx; 1704 struct completion *cmd_completion; 1705 u32 *cmd_status; 1706 struct xhci_virt_device *virt_dev; 1707 1708 spin_lock_irqsave(&xhci->lock, flags); 1709 virt_dev = xhci->devs[udev->slot_id]; 1710 if (command) { 1711 in_ctx = command->in_ctx; 1712 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 1713 xhci_reserve_host_resources(xhci, in_ctx)) { 1714 spin_unlock_irqrestore(&xhci->lock, flags); 1715 xhci_warn(xhci, "Not enough host resources, " 1716 "active endpoint contexts = %u\n", 1717 xhci->num_active_eps); 1718 return -ENOMEM; 1719 } 1720 1721 cmd_completion = command->completion; 1722 cmd_status = &command->status; 1723 command->command_trb = xhci->cmd_ring->enqueue; 1724 1725 /* Enqueue pointer can be left pointing to the link TRB, 1726 * we must handle that 1727 */ 1728 if ((le32_to_cpu(command->command_trb->link.control) 1729 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) 1730 command->command_trb = 1731 xhci->cmd_ring->enq_seg->next->trbs; 1732 1733 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 1734 } else { 1735 in_ctx = virt_dev->in_ctx; 1736 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 1737 xhci_reserve_host_resources(xhci, in_ctx)) { 1738 spin_unlock_irqrestore(&xhci->lock, flags); 1739 xhci_warn(xhci, "Not enough host resources, " 1740 "active endpoint contexts = %u\n", 1741 xhci->num_active_eps); 1742 return -ENOMEM; 1743 } 1744 cmd_completion = &virt_dev->cmd_completion; 1745 cmd_status = &virt_dev->cmd_status; 1746 } 1747 init_completion(cmd_completion); 1748 1749 if (!ctx_change) 1750 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 1751 udev->slot_id, must_succeed); 1752 else 1753 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 1754 udev->slot_id); 1755 if (ret < 0) { 1756 if (command) 1757 list_del(&command->cmd_list); 1758 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 1759 xhci_free_host_resources(xhci, in_ctx); 1760 spin_unlock_irqrestore(&xhci->lock, flags); 1761 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 1762 return -ENOMEM; 1763 } 1764 xhci_ring_cmd_db(xhci); 1765 spin_unlock_irqrestore(&xhci->lock, flags); 1766 1767 /* Wait for the configure endpoint command to complete */ 1768 timeleft = wait_for_completion_interruptible_timeout( 1769 cmd_completion, 1770 USB_CTRL_SET_TIMEOUT); 1771 if (timeleft <= 0) { 1772 xhci_warn(xhci, "%s while waiting for %s command\n", 1773 timeleft == 0 ? "Timeout" : "Signal", 1774 ctx_change == 0 ? 1775 "configure endpoint" : 1776 "evaluate context"); 1777 /* FIXME cancel the configure endpoint command */ 1778 return -ETIME; 1779 } 1780 1781 if (!ctx_change) 1782 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); 1783 else 1784 ret = xhci_evaluate_context_result(xhci, udev, cmd_status); 1785 1786 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 1787 spin_lock_irqsave(&xhci->lock, flags); 1788 /* If the command failed, remove the reserved resources. 1789 * Otherwise, clean up the estimate to include dropped eps. 1790 */ 1791 if (ret) 1792 xhci_free_host_resources(xhci, in_ctx); 1793 else 1794 xhci_finish_resource_reservation(xhci, in_ctx); 1795 spin_unlock_irqrestore(&xhci->lock, flags); 1796 } 1797 return ret; 1798 } 1799 1800 /* Called after one or more calls to xhci_add_endpoint() or 1801 * xhci_drop_endpoint(). 
If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
				!(ctrl_ctx->add_flags & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
1866 */ 1867 if (virt_dev->eps[i].ring) { 1868 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 1869 } 1870 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 1871 virt_dev->eps[i].new_ring = NULL; 1872 } 1873 1874 return ret; 1875 } 1876 1877 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 1878 { 1879 struct xhci_hcd *xhci; 1880 struct xhci_virt_device *virt_dev; 1881 int i, ret; 1882 1883 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 1884 if (ret <= 0) 1885 return; 1886 xhci = hcd_to_xhci(hcd); 1887 1888 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1889 virt_dev = xhci->devs[udev->slot_id]; 1890 /* Free any rings allocated for added endpoints */ 1891 for (i = 0; i < 31; ++i) { 1892 if (virt_dev->eps[i].new_ring) { 1893 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 1894 virt_dev->eps[i].new_ring = NULL; 1895 } 1896 } 1897 xhci_zero_in_ctx(xhci, virt_dev); 1898 } 1899 1900 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 1901 struct xhci_container_ctx *in_ctx, 1902 struct xhci_container_ctx *out_ctx, 1903 u32 add_flags, u32 drop_flags) 1904 { 1905 struct xhci_input_control_ctx *ctrl_ctx; 1906 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 1907 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 1908 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 1909 xhci_slot_copy(xhci, in_ctx, out_ctx); 1910 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 1911 1912 xhci_dbg(xhci, "Input Context:\n"); 1913 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 1914 } 1915 1916 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 1917 unsigned int slot_id, unsigned int ep_index, 1918 struct xhci_dequeue_state *deq_state) 1919 { 1920 struct xhci_container_ctx *in_ctx; 1921 struct xhci_ep_ctx *ep_ctx; 1922 u32 added_ctxs; 1923 dma_addr_t addr; 1924 1925 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 1926 xhci->devs[slot_id]->out_ctx, ep_index); 1927 in_ctx = xhci->devs[slot_id]->in_ctx; 1928 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 1929 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 1930 deq_state->new_deq_ptr); 1931 if (addr == 0) { 1932 xhci_warn(xhci, "WARN Cannot submit config ep after " 1933 "reset ep command\n"); 1934 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 1935 deq_state->new_deq_seg, 1936 deq_state->new_deq_ptr); 1937 return; 1938 } 1939 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 1940 1941 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 1942 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 1943 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); 1944 } 1945 1946 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 1947 struct usb_device *udev, unsigned int ep_index) 1948 { 1949 struct xhci_dequeue_state deq_state; 1950 struct xhci_virt_ep *ep; 1951 1952 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 1953 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 1954 /* We need to move the HW's dequeue pointer past this TD, 1955 * or it will attempt to resend it on the next doorbell ring. 1956 */ 1957 xhci_find_new_dequeue_state(xhci, udev->slot_id, 1958 ep_index, ep->stopped_stream, ep->stopped_td, 1959 &deq_state); 1960 1961 /* HW with the reset endpoint quirk will use the saved dequeue state to 1962 * issue a configure endpoint command later. 
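 * Hardware without the quirk simply gets a Set TR Dequeue Pointer command
 * queued for the stopped endpoint instead.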
1963 */ 1964 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 1965 xhci_dbg(xhci, "Queueing new dequeue state\n"); 1966 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 1967 ep_index, ep->stopped_stream, &deq_state); 1968 } else { 1969 /* Better hope no one uses the input context between now and the 1970 * reset endpoint completion! 1971 * XXX: No idea how this hardware will react when stream rings 1972 * are enabled. 1973 */ 1974 xhci_dbg(xhci, "Setting up input context for " 1975 "configure endpoint command\n"); 1976 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 1977 ep_index, &deq_state); 1978 } 1979 } 1980 1981 /* Deal with stalled endpoints. The core should have sent the control message 1982 * to clear the halt condition. However, we need to make the xHCI hardware 1983 * reset its sequence number, since a device will expect a sequence number of 1984 * zero after the halt condition is cleared. 1985 * Context: in_interrupt 1986 */ 1987 void xhci_endpoint_reset(struct usb_hcd *hcd, 1988 struct usb_host_endpoint *ep) 1989 { 1990 struct xhci_hcd *xhci; 1991 struct usb_device *udev; 1992 unsigned int ep_index; 1993 unsigned long flags; 1994 int ret; 1995 struct xhci_virt_ep *virt_ep; 1996 1997 xhci = hcd_to_xhci(hcd); 1998 udev = (struct usb_device *) ep->hcpriv; 1999 /* Called with a root hub endpoint (or an endpoint that wasn't added 2000 * with xhci_add_endpoint() 2001 */ 2002 if (!ep->hcpriv) 2003 return; 2004 ep_index = xhci_get_endpoint_index(&ep->desc); 2005 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2006 if (!virt_ep->stopped_td) { 2007 xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n", 2008 ep->desc.bEndpointAddress); 2009 return; 2010 } 2011 if (usb_endpoint_xfer_control(&ep->desc)) { 2012 xhci_dbg(xhci, "Control endpoint stall already handled.\n"); 2013 return; 2014 } 2015 2016 xhci_dbg(xhci, "Queueing reset endpoint command\n"); 2017 spin_lock_irqsave(&xhci->lock, flags); 2018 ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index); 2019 /* 2020 * Can't change the ring dequeue pointer until it's transitioned to the 2021 * stopped state, which is only upon a successful reset endpoint 2022 * command. Better hope that last command worked! 
2023 */ 2024 if (!ret) { 2025 xhci_cleanup_stalled_ring(xhci, udev, ep_index); 2026 kfree(virt_ep->stopped_td); 2027 xhci_ring_cmd_db(xhci); 2028 } 2029 virt_ep->stopped_td = NULL; 2030 virt_ep->stopped_trb = NULL; 2031 virt_ep->stopped_stream = 0; 2032 spin_unlock_irqrestore(&xhci->lock, flags); 2033 2034 if (ret) 2035 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2036 } 2037 2038 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2039 struct usb_device *udev, struct usb_host_endpoint *ep, 2040 unsigned int slot_id) 2041 { 2042 int ret; 2043 unsigned int ep_index; 2044 unsigned int ep_state; 2045 2046 if (!ep) 2047 return -EINVAL; 2048 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 2049 if (ret <= 0) 2050 return -EINVAL; 2051 if (ep->ss_ep_comp.bmAttributes == 0) { 2052 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 2053 " descriptor for ep 0x%x does not support streams\n", 2054 ep->desc.bEndpointAddress); 2055 return -EINVAL; 2056 } 2057 2058 ep_index = xhci_get_endpoint_index(&ep->desc); 2059 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 2060 if (ep_state & EP_HAS_STREAMS || 2061 ep_state & EP_GETTING_STREAMS) { 2062 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 2063 "already has streams set up.\n", 2064 ep->desc.bEndpointAddress); 2065 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 2066 "dynamic stream context array reallocation.\n"); 2067 return -EINVAL; 2068 } 2069 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 2070 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 2071 "endpoint 0x%x; URBs are pending.\n", 2072 ep->desc.bEndpointAddress); 2073 return -EINVAL; 2074 } 2075 return 0; 2076 } 2077 2078 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 2079 unsigned int *num_streams, unsigned int *num_stream_ctxs) 2080 { 2081 unsigned int max_streams; 2082 2083 /* The stream context array size must be a power of two */ 2084 *num_stream_ctxs = roundup_pow_of_two(*num_streams); 2085 /* 2086 * Find out how many primary stream array entries the host controller 2087 * supports. Later we may use secondary stream arrays (similar to 2nd 2088 * level page entries), but that's an optional feature for xHCI host 2089 * controllers. xHCs must support at least 4 stream IDs. 2090 */ 2091 max_streams = HCC_MAX_PSA(xhci->hcc_params); 2092 if (*num_stream_ctxs > max_streams) { 2093 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 2094 max_streams); 2095 *num_stream_ctxs = max_streams; 2096 *num_streams = max_streams; 2097 } 2098 } 2099 2100 /* Returns an error code if one of the endpoint already has streams. 2101 * This does not change any data structures, it only checks and gathers 2102 * information. 
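 * It also clamps *num_streams down to what the least capable endpoint can
 * support and builds a bitmask of the endpoint contexts that will change.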
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * USB device drivers use this function (through the HCD interface in the USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
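 *
 * For illustration only (not code from this driver): a class driver would
 * normally reach this entry point through the USB core wrapper, roughly
 *
 *	ret = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *
 * where a negative value is an error and a smaller positive value means the
 * host granted fewer stream IDs than were asked for.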
2193 */ 2194 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 2195 struct usb_host_endpoint **eps, unsigned int num_eps, 2196 unsigned int num_streams, gfp_t mem_flags) 2197 { 2198 int i, ret; 2199 struct xhci_hcd *xhci; 2200 struct xhci_virt_device *vdev; 2201 struct xhci_command *config_cmd; 2202 unsigned int ep_index; 2203 unsigned int num_stream_ctxs; 2204 unsigned long flags; 2205 u32 changed_ep_bitmask = 0; 2206 2207 if (!eps) 2208 return -EINVAL; 2209 2210 /* Add one to the number of streams requested to account for 2211 * stream 0 that is reserved for xHCI usage. 2212 */ 2213 num_streams += 1; 2214 xhci = hcd_to_xhci(hcd); 2215 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 2216 num_streams); 2217 2218 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 2219 if (!config_cmd) { 2220 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 2221 return -ENOMEM; 2222 } 2223 2224 /* Check to make sure all endpoints are not already configured for 2225 * streams. While we're at it, find the maximum number of streams that 2226 * all the endpoints will support and check for duplicate endpoints. 2227 */ 2228 spin_lock_irqsave(&xhci->lock, flags); 2229 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 2230 num_eps, &num_streams, &changed_ep_bitmask); 2231 if (ret < 0) { 2232 xhci_free_command(xhci, config_cmd); 2233 spin_unlock_irqrestore(&xhci->lock, flags); 2234 return ret; 2235 } 2236 if (num_streams <= 1) { 2237 xhci_warn(xhci, "WARN: endpoints can't handle " 2238 "more than one stream.\n"); 2239 xhci_free_command(xhci, config_cmd); 2240 spin_unlock_irqrestore(&xhci->lock, flags); 2241 return -EINVAL; 2242 } 2243 vdev = xhci->devs[udev->slot_id]; 2244 /* Mark each endpoint as being in transition, so 2245 * xhci_urb_enqueue() will reject all URBs. 2246 */ 2247 for (i = 0; i < num_eps; i++) { 2248 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2249 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 2250 } 2251 spin_unlock_irqrestore(&xhci->lock, flags); 2252 2253 /* Setup internal data structures and allocate HW data structures for 2254 * streams (but don't install the HW structures in the input context 2255 * until we're sure all memory allocation succeeded). 2256 */ 2257 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 2258 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 2259 num_stream_ctxs, num_streams); 2260 2261 for (i = 0; i < num_eps; i++) { 2262 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2263 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 2264 num_stream_ctxs, 2265 num_streams, mem_flags); 2266 if (!vdev->eps[ep_index].stream_info) 2267 goto cleanup; 2268 /* Set maxPstreams in endpoint context and update deq ptr to 2269 * point to stream context array. FIXME 2270 */ 2271 } 2272 2273 /* Set up the input context for a configure endpoint command. */ 2274 for (i = 0; i < num_eps; i++) { 2275 struct xhci_ep_ctx *ep_ctx; 2276 2277 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2278 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 2279 2280 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 2281 vdev->out_ctx, ep_index); 2282 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 2283 vdev->eps[ep_index].stream_info); 2284 } 2285 /* Tell the HW to drop its old copy of the endpoint context info 2286 * and add the updated copy from the input context. 
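 * Passing the same bitmask as both the drop flags and the add flags makes the
 * hardware drop and then re-add each changed endpoint in a single command.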
2287 */ 2288 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 2289 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 2290 2291 /* Issue and wait for the configure endpoint command */ 2292 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 2293 false, false); 2294 2295 /* xHC rejected the configure endpoint command for some reason, so we 2296 * leave the old ring intact and free our internal streams data 2297 * structure. 2298 */ 2299 if (ret < 0) 2300 goto cleanup; 2301 2302 spin_lock_irqsave(&xhci->lock, flags); 2303 for (i = 0; i < num_eps; i++) { 2304 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2305 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 2306 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 2307 udev->slot_id, ep_index); 2308 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 2309 } 2310 xhci_free_command(xhci, config_cmd); 2311 spin_unlock_irqrestore(&xhci->lock, flags); 2312 2313 /* Subtract 1 for stream 0, which drivers can't use */ 2314 return num_streams - 1; 2315 2316 cleanup: 2317 /* If it didn't work, free the streams! */ 2318 for (i = 0; i < num_eps; i++) { 2319 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2320 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 2321 vdev->eps[ep_index].stream_info = NULL; 2322 /* FIXME Unset maxPstreams in endpoint context and 2323 * update deq ptr to point to normal string ring. 2324 */ 2325 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 2326 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 2327 xhci_endpoint_zero(xhci, vdev, eps[i]); 2328 } 2329 xhci_free_command(xhci, config_cmd); 2330 return -ENOMEM; 2331 } 2332 2333 /* Transition the endpoint from using streams to being a "normal" endpoint 2334 * without streams. 2335 * 2336 * Modify the endpoint context state, submit a configure endpoint command, 2337 * and free all endpoint rings for streams if that completes successfully. 2338 */ 2339 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 2340 struct usb_host_endpoint **eps, unsigned int num_eps, 2341 gfp_t mem_flags) 2342 { 2343 int i, ret; 2344 struct xhci_hcd *xhci; 2345 struct xhci_virt_device *vdev; 2346 struct xhci_command *command; 2347 unsigned int ep_index; 2348 unsigned long flags; 2349 u32 changed_ep_bitmask; 2350 2351 xhci = hcd_to_xhci(hcd); 2352 vdev = xhci->devs[udev->slot_id]; 2353 2354 /* Set up a configure endpoint command to remove the streams rings */ 2355 spin_lock_irqsave(&xhci->lock, flags); 2356 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 2357 udev, eps, num_eps); 2358 if (changed_ep_bitmask == 0) { 2359 spin_unlock_irqrestore(&xhci->lock, flags); 2360 return -EINVAL; 2361 } 2362 2363 /* Use the xhci_command structure from the first endpoint. We may have 2364 * allocated too many, but the driver may call xhci_free_streams() for 2365 * each endpoint it grouped into one call to xhci_alloc_streams(). 
2366 */ 2367 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 2368 command = vdev->eps[ep_index].stream_info->free_streams_command; 2369 for (i = 0; i < num_eps; i++) { 2370 struct xhci_ep_ctx *ep_ctx; 2371 2372 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2373 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 2374 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 2375 EP_GETTING_NO_STREAMS; 2376 2377 xhci_endpoint_copy(xhci, command->in_ctx, 2378 vdev->out_ctx, ep_index); 2379 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, 2380 &vdev->eps[ep_index]); 2381 } 2382 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 2383 vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask); 2384 spin_unlock_irqrestore(&xhci->lock, flags); 2385 2386 /* Issue and wait for the configure endpoint command, 2387 * which must succeed. 2388 */ 2389 ret = xhci_configure_endpoint(xhci, udev, command, 2390 false, true); 2391 2392 /* xHC rejected the configure endpoint command for some reason, so we 2393 * leave the streams rings intact. 2394 */ 2395 if (ret < 0) 2396 return ret; 2397 2398 spin_lock_irqsave(&xhci->lock, flags); 2399 for (i = 0; i < num_eps; i++) { 2400 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2401 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 2402 vdev->eps[ep_index].stream_info = NULL; 2403 /* FIXME Unset maxPstreams in endpoint context and 2404 * update deq ptr to point to normal string ring. 2405 */ 2406 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 2407 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 2408 } 2409 spin_unlock_irqrestore(&xhci->lock, flags); 2410 2411 return 0; 2412 } 2413 2414 /* 2415 * Deletes endpoint resources for endpoints that were active before a Reset 2416 * Device command, or a Disable Slot command. The Reset Device command leaves 2417 * the control endpoint intact, whereas the Disable Slot command deletes it. 2418 * 2419 * Must be called with xhci->lock held. 2420 */ 2421 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 2422 struct xhci_virt_device *virt_dev, bool drop_control_ep) 2423 { 2424 int i; 2425 unsigned int num_dropped_eps = 0; 2426 unsigned int drop_flags = 0; 2427 2428 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { 2429 if (virt_dev->eps[i].ring) { 2430 drop_flags |= 1 << i; 2431 num_dropped_eps++; 2432 } 2433 } 2434 xhci->num_active_eps -= num_dropped_eps; 2435 if (num_dropped_eps) 2436 xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, " 2437 "%u now active.\n", 2438 num_dropped_eps, drop_flags, 2439 xhci->num_active_eps); 2440 } 2441 2442 /* 2443 * This submits a Reset Device Command, which will set the device state to 0, 2444 * set the device address to 0, and disable all the endpoints except the default 2445 * control endpoint. The USB core should come back and call 2446 * xhci_address_device(), and then re-set up the configuration. If this is 2447 * called because of a usb_reset_and_verify_device(), then the old alternate 2448 * settings will be re-installed through the normal bandwidth allocation 2449 * functions. 2450 * 2451 * Wait for the Reset Device command to finish. Remove all structures 2452 * associated with the endpoints that were disabled. Clear the input device 2453 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 2454 * 2455 * If the virt_dev to be reset does not exist or does not match the udev, 2456 * it means the device is lost, possibly due to the xHC restore error and 2457 * re-initialization during S3/S4. 
In this case, call xhci_alloc_dev() to 2458 * re-allocate the device. 2459 */ 2460 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 2461 { 2462 int ret, i; 2463 unsigned long flags; 2464 struct xhci_hcd *xhci; 2465 unsigned int slot_id; 2466 struct xhci_virt_device *virt_dev; 2467 struct xhci_command *reset_device_cmd; 2468 int timeleft; 2469 int last_freed_endpoint; 2470 2471 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 2472 if (ret <= 0) 2473 return ret; 2474 xhci = hcd_to_xhci(hcd); 2475 slot_id = udev->slot_id; 2476 virt_dev = xhci->devs[slot_id]; 2477 if (!virt_dev) { 2478 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 2479 "not exist. Re-allocate the device\n", slot_id); 2480 ret = xhci_alloc_dev(hcd, udev); 2481 if (ret == 1) 2482 return 0; 2483 else 2484 return -EINVAL; 2485 } 2486 2487 if (virt_dev->udev != udev) { 2488 /* If the virt_dev and the udev does not match, this virt_dev 2489 * may belong to another udev. 2490 * Re-allocate the device. 2491 */ 2492 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 2493 "not match the udev. Re-allocate the device\n", 2494 slot_id); 2495 ret = xhci_alloc_dev(hcd, udev); 2496 if (ret == 1) 2497 return 0; 2498 else 2499 return -EINVAL; 2500 } 2501 2502 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 2503 /* Allocate the command structure that holds the struct completion. 2504 * Assume we're in process context, since the normal device reset 2505 * process has to wait for the device anyway. Storage devices are 2506 * reset as part of error handling, so use GFP_NOIO instead of 2507 * GFP_KERNEL. 2508 */ 2509 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 2510 if (!reset_device_cmd) { 2511 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 2512 return -ENOMEM; 2513 } 2514 2515 /* Attempt to submit the Reset Device command to the command ring */ 2516 spin_lock_irqsave(&xhci->lock, flags); 2517 reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; 2518 2519 /* Enqueue pointer can be left pointing to the link TRB, 2520 * we must handle that 2521 */ 2522 if ((le32_to_cpu(reset_device_cmd->command_trb->link.control) 2523 & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK)) 2524 reset_device_cmd->command_trb = 2525 xhci->cmd_ring->enq_seg->next->trbs; 2526 2527 list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); 2528 ret = xhci_queue_reset_device(xhci, slot_id); 2529 if (ret) { 2530 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 2531 list_del(&reset_device_cmd->cmd_list); 2532 spin_unlock_irqrestore(&xhci->lock, flags); 2533 goto command_cleanup; 2534 } 2535 xhci_ring_cmd_db(xhci); 2536 spin_unlock_irqrestore(&xhci->lock, flags); 2537 2538 /* Wait for the Reset Device command to finish */ 2539 timeleft = wait_for_completion_interruptible_timeout( 2540 reset_device_cmd->completion, 2541 USB_CTRL_SET_TIMEOUT); 2542 if (timeleft <= 0) { 2543 xhci_warn(xhci, "%s while waiting for reset device command\n", 2544 timeleft == 0 ? "Timeout" : "Signal"); 2545 spin_lock_irqsave(&xhci->lock, flags); 2546 /* The timeout might have raced with the event ring handler, so 2547 * only delete from the list if the item isn't poisoned. 
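 * (list_del() poisons the entry's next pointer, which is exactly what the
 * check below looks for.)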
2548 */ 2549 if (reset_device_cmd->cmd_list.next != LIST_POISON1) 2550 list_del(&reset_device_cmd->cmd_list); 2551 spin_unlock_irqrestore(&xhci->lock, flags); 2552 ret = -ETIME; 2553 goto command_cleanup; 2554 } 2555 2556 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 2557 * unless we tried to reset a slot ID that wasn't enabled, 2558 * or the device wasn't in the addressed or configured state. 2559 */ 2560 ret = reset_device_cmd->status; 2561 switch (ret) { 2562 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 2563 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 2564 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", 2565 slot_id, 2566 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 2567 xhci_info(xhci, "Not freeing device rings.\n"); 2568 /* Don't treat this as an error. May change my mind later. */ 2569 ret = 0; 2570 goto command_cleanup; 2571 case COMP_SUCCESS: 2572 xhci_dbg(xhci, "Successful reset device command.\n"); 2573 break; 2574 default: 2575 if (xhci_is_vendor_info_code(xhci, ret)) 2576 break; 2577 xhci_warn(xhci, "Unknown completion code %u for " 2578 "reset device command.\n", ret); 2579 ret = -EINVAL; 2580 goto command_cleanup; 2581 } 2582 2583 /* Free up host controller endpoint resources */ 2584 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2585 spin_lock_irqsave(&xhci->lock, flags); 2586 /* Don't delete the default control endpoint resources */ 2587 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 2588 spin_unlock_irqrestore(&xhci->lock, flags); 2589 } 2590 2591 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 2592 last_freed_endpoint = 1; 2593 for (i = 1; i < 31; ++i) { 2594 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 2595 2596 if (ep->ep_state & EP_HAS_STREAMS) { 2597 xhci_free_stream_info(xhci, ep->stream_info); 2598 ep->stream_info = NULL; 2599 ep->ep_state &= ~EP_HAS_STREAMS; 2600 } 2601 2602 if (ep->ring) { 2603 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2604 last_freed_endpoint = i; 2605 } 2606 } 2607 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 2608 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 2609 ret = 0; 2610 2611 command_cleanup: 2612 xhci_free_command(xhci, reset_device_cmd); 2613 return ret; 2614 } 2615 2616 /* 2617 * At this point, the struct usb_device is about to go away, the device has 2618 * disconnected, and all traffic has been stopped and the endpoints have been 2619 * disabled. Free any HC data structures associated with that device. 2620 */ 2621 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 2622 { 2623 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2624 struct xhci_virt_device *virt_dev; 2625 unsigned long flags; 2626 u32 state; 2627 int i, ret; 2628 2629 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2630 if (ret <= 0) 2631 return; 2632 2633 virt_dev = xhci->devs[udev->slot_id]; 2634 2635 /* Stop any wayward timer functions (which may grab the lock) */ 2636 for (i = 0; i < 31; ++i) { 2637 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 2638 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 2639 } 2640 2641 spin_lock_irqsave(&xhci->lock, flags); 2642 /* Don't disable the slot if the host controller is dead. 
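 * A status register that reads as all ones means the controller has been
 * removed; in either case just free the driver's bookkeeping for the slot.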
*/ 2643 state = xhci_readl(xhci, &xhci->op_regs->status); 2644 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) { 2645 xhci_free_virt_device(xhci, udev->slot_id); 2646 spin_unlock_irqrestore(&xhci->lock, flags); 2647 return; 2648 } 2649 2650 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 2651 spin_unlock_irqrestore(&xhci->lock, flags); 2652 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 2653 return; 2654 } 2655 xhci_ring_cmd_db(xhci); 2656 spin_unlock_irqrestore(&xhci->lock, flags); 2657 /* 2658 * Event command completion handler will free any data structures 2659 * associated with the slot. XXX Can free sleep? 2660 */ 2661 } 2662 2663 /* 2664 * Checks if we have enough host controller resources for the default control 2665 * endpoint. 2666 * 2667 * Must be called with xhci->lock held. 2668 */ 2669 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 2670 { 2671 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 2672 xhci_dbg(xhci, "Not enough ep ctxs: " 2673 "%u active, need to add 1, limit is %u.\n", 2674 xhci->num_active_eps, xhci->limit_active_eps); 2675 return -ENOMEM; 2676 } 2677 xhci->num_active_eps += 1; 2678 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", 2679 xhci->num_active_eps); 2680 return 0; 2681 } 2682 2683 2684 /* 2685 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 2686 * timed out, or allocating memory failed. Returns 1 on success. 2687 */ 2688 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 2689 { 2690 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2691 unsigned long flags; 2692 int timeleft; 2693 int ret; 2694 2695 spin_lock_irqsave(&xhci->lock, flags); 2696 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 2697 if (ret) { 2698 spin_unlock_irqrestore(&xhci->lock, flags); 2699 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 2700 return 0; 2701 } 2702 xhci_ring_cmd_db(xhci); 2703 spin_unlock_irqrestore(&xhci->lock, flags); 2704 2705 /* XXX: how much time for xHC slot assignment? */ 2706 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 2707 USB_CTRL_SET_TIMEOUT); 2708 if (timeleft <= 0) { 2709 xhci_warn(xhci, "%s while waiting for a slot\n", 2710 timeleft == 0 ? "Timeout" : "Signal"); 2711 /* FIXME cancel the enable slot request */ 2712 return 0; 2713 } 2714 2715 if (!xhci->slot_id) { 2716 xhci_err(xhci, "Error while assigning device slot ID\n"); 2717 return 0; 2718 } 2719 2720 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2721 spin_lock_irqsave(&xhci->lock, flags); 2722 ret = xhci_reserve_host_control_ep_resources(xhci); 2723 if (ret) { 2724 spin_unlock_irqrestore(&xhci->lock, flags); 2725 xhci_warn(xhci, "Not enough host resources, " 2726 "active endpoint contexts = %u\n", 2727 xhci->num_active_eps); 2728 goto disable_slot; 2729 } 2730 spin_unlock_irqrestore(&xhci->lock, flags); 2731 } 2732 /* Use GFP_NOIO, since this function can be called from 2733 * xhci_discover_or_reset_device(), which may be called as part of 2734 * mass storage driver error handling. 2735 */ 2736 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 2737 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 2738 goto disable_slot; 2739 } 2740 udev->slot_id = xhci->slot_id; 2741 /* Is this a LS or FS device under a HS hub? */ 2742 /* Hub or peripherial? 
*/ 2743 return 1; 2744 2745 disable_slot: 2746 /* Disable slot, if we can do it without mem alloc */ 2747 spin_lock_irqsave(&xhci->lock, flags); 2748 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) 2749 xhci_ring_cmd_db(xhci); 2750 spin_unlock_irqrestore(&xhci->lock, flags); 2751 return 0; 2752 } 2753 2754 /* 2755 * Issue an Address Device command (which will issue a SetAddress request to 2756 * the device). 2757 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 2758 * we should only issue and wait on one address command at the same time. 2759 * 2760 * We add one to the device address issued by the hardware because the USB core 2761 * uses address 1 for the root hubs (even though they're not really devices). 2762 */ 2763 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 2764 { 2765 unsigned long flags; 2766 int timeleft; 2767 struct xhci_virt_device *virt_dev; 2768 int ret = 0; 2769 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2770 struct xhci_slot_ctx *slot_ctx; 2771 struct xhci_input_control_ctx *ctrl_ctx; 2772 u64 temp_64; 2773 2774 if (!udev->slot_id) { 2775 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 2776 return -EINVAL; 2777 } 2778 2779 virt_dev = xhci->devs[udev->slot_id]; 2780 2781 if (WARN_ON(!virt_dev)) { 2782 /* 2783 * In plug/unplug torture test with an NEC controller, 2784 * a zero-dereference was observed once due to virt_dev = 0. 2785 * Print useful debug rather than crash if it is observed again! 2786 */ 2787 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 2788 udev->slot_id); 2789 return -EINVAL; 2790 } 2791 2792 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2793 /* 2794 * If this is the first Set Address since device plug-in or 2795 * virt_device realloaction after a resume with an xHCI power loss, 2796 * then set up the slot context. 2797 */ 2798 if (!slot_ctx->dev_info) 2799 xhci_setup_addressable_virt_dev(xhci, udev); 2800 /* Otherwise, update the control endpoint ring enqueue pointer. */ 2801 else 2802 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 2803 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2804 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 2805 2806 spin_lock_irqsave(&xhci->lock, flags); 2807 ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 2808 udev->slot_id); 2809 if (ret) { 2810 spin_unlock_irqrestore(&xhci->lock, flags); 2811 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 2812 return ret; 2813 } 2814 xhci_ring_cmd_db(xhci); 2815 spin_unlock_irqrestore(&xhci->lock, flags); 2816 2817 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 2818 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 2819 USB_CTRL_SET_TIMEOUT); 2820 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 2821 * the SetAddress() "recovery interval" required by USB and aborting the 2822 * command on a timeout. 2823 */ 2824 if (timeleft <= 0) { 2825 xhci_warn(xhci, "%s while waiting for a slot\n", 2826 timeleft == 0 ? 
"Timeout" : "Signal"); 2827 /* FIXME cancel the address device command */ 2828 return -ETIME; 2829 } 2830 2831 switch (virt_dev->cmd_status) { 2832 case COMP_CTX_STATE: 2833 case COMP_EBADSLT: 2834 xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", 2835 udev->slot_id); 2836 ret = -EINVAL; 2837 break; 2838 case COMP_TX_ERR: 2839 dev_warn(&udev->dev, "Device not responding to set address.\n"); 2840 ret = -EPROTO; 2841 break; 2842 case COMP_SUCCESS: 2843 xhci_dbg(xhci, "Successful Address Device command\n"); 2844 break; 2845 default: 2846 xhci_err(xhci, "ERROR: unexpected command completion " 2847 "code 0x%x.\n", virt_dev->cmd_status); 2848 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 2849 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 2850 ret = -EINVAL; 2851 break; 2852 } 2853 if (ret) { 2854 return ret; 2855 } 2856 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 2857 xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64); 2858 xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n", 2859 udev->slot_id, 2860 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 2861 (unsigned long long) 2862 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 2863 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 2864 (unsigned long long)virt_dev->out_ctx->dma); 2865 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 2866 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 2867 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 2868 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 2869 /* 2870 * USB core uses address 1 for the roothubs, so we add one to the 2871 * address given back to us by the HC. 2872 */ 2873 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 2874 /* Use kernel assigned address for devices; store xHC assigned 2875 * address locally. */ 2876 virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) 2877 + 1; 2878 /* Zero the input context control for later use */ 2879 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2880 ctrl_ctx->add_flags = 0; 2881 ctrl_ctx->drop_flags = 0; 2882 2883 xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address); 2884 2885 return 0; 2886 } 2887 2888 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 2889 * internal data structures for the device. 
2890 */ 2891 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 2892 struct usb_tt *tt, gfp_t mem_flags) 2893 { 2894 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2895 struct xhci_virt_device *vdev; 2896 struct xhci_command *config_cmd; 2897 struct xhci_input_control_ctx *ctrl_ctx; 2898 struct xhci_slot_ctx *slot_ctx; 2899 unsigned long flags; 2900 unsigned think_time; 2901 int ret; 2902 2903 /* Ignore root hubs */ 2904 if (!hdev->parent) 2905 return 0; 2906 2907 vdev = xhci->devs[hdev->slot_id]; 2908 if (!vdev) { 2909 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 2910 return -EINVAL; 2911 } 2912 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 2913 if (!config_cmd) { 2914 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 2915 return -ENOMEM; 2916 } 2917 2918 spin_lock_irqsave(&xhci->lock, flags); 2919 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 2920 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 2921 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2922 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 2923 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 2924 if (tt->multi) 2925 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 2926 if (xhci->hci_version > 0x95) { 2927 xhci_dbg(xhci, "xHCI version %x needs hub " 2928 "TT think time and number of ports\n", 2929 (unsigned int) xhci->hci_version); 2930 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 2931 /* Set TT think time - convert from ns to FS bit times. 2932 * 0 = 8 FS bit times, 1 = 16 FS bit times, 2933 * 2 = 24 FS bit times, 3 = 32 FS bit times. 2934 * 2935 * xHCI 1.0: this field shall be 0 if the device is not a 2936 * High-spped hub. 2937 */ 2938 think_time = tt->think_time; 2939 if (think_time != 0) 2940 think_time = (think_time / 666) - 1; 2941 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 2942 slot_ctx->tt_info |= 2943 cpu_to_le32(TT_THINK_TIME(think_time)); 2944 } else { 2945 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 2946 "TT think time or number of ports\n", 2947 (unsigned int) xhci->hci_version); 2948 } 2949 slot_ctx->dev_state = 0; 2950 spin_unlock_irqrestore(&xhci->lock, flags); 2951 2952 xhci_dbg(xhci, "Set up %s for hub device.\n", 2953 (xhci->hci_version > 0x95) ? 2954 "configure endpoint" : "evaluate context"); 2955 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); 2956 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); 2957 2958 /* Issue and wait for the configure endpoint or 2959 * evaluate context command. 2960 */ 2961 if (xhci->hci_version > 0x95) 2962 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 2963 false, false); 2964 else 2965 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 2966 true, false); 2967 2968 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); 2969 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); 2970 2971 xhci_free_command(xhci, config_cmd); 2972 return ret; 2973 } 2974 2975 int xhci_get_frame(struct usb_hcd *hcd) 2976 { 2977 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 2978 /* EHCI mods by the periodic size. Why? 
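 * The xHC's microframe index register counts in microframes, so shift right
 * by three (eight microframes per frame) to return a frame number.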
 */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}
#endif
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);