/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
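/*
 * Note on usage (illustrative only): because the loop above does udelay(1)
 * per iteration, @usec doubles as the iteration count and as the timeout in
 * microseconds.  A typical caller in this file waits for a status bit to
 * settle, e.g.:
 *
 *	ret = handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */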
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state |= XHCI_STATE_HALTED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * free all IRQs that have been requested
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}
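/*
 * Illustrative arithmetic for the min() in xhci_setup_msix() below (just an
 * example, not a requirement): on a 4-CPU machine whose host controller
 * reports HCS_MAX_INTRS of 8, we ask for min(4 + 1, 8) = 5 MSI-X vectors.
 */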
/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host can
	 *   handle, from the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		return 0;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is -1, we have MSI */
		return 0;

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*-------------------------------------------------------------------------*/


#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
	return 0;
}
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg(xhci, "xhci_run\n");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n", temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
	return 0;
}
static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		  (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
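/*
 * Note on the memset in xhci_clear_command_ring() above: it deliberately
 * covers only the first TRBS_PER_SEGMENT - 1 TRBs of each segment.  The
 * final TRB is the link TRB that chains segments together; it is left
 * intact apart from its cycle bit, so the ring topology survives the wipe.
 */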
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
		      STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state by setting the CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
			      STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		usb_hcd_resume_root_hub(hcd);
		usb_hcd_resume_root_hub(xhci->shared_hcd);
	}
	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}
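/*
 * Worked example tying the three helpers above together (illustrative
 * only): ep 0x81 is epnum 1, direction IN, so its endpoint context index
 * is (1 * 2) + 1 - 1 = 2, its control-context flag is 1 << (2 + 1) = 0x8
 * (the added_ctxs = 0b1000 case above), and the last valid endpoint
 * context index is fls(0b1000) - 1 = 3.
 */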
/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called with udev and "
					"virt_dev that do not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
	}
	return ret;
}
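/*
 * Background for the check above: per the USB 2.0 spec, a full-speed
 * device's ep0 wMaxPacketSize may be 8, 16, 32, or 64 bytes, and the ep0
 * context typically starts out programmed for the minimum of 8 until the
 * real value is read from the device descriptor -- hence the mismatch
 * handled here.
 */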
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
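/*
 * The bounds check in xhci_urb_to_transfer_ring() above, by example
 * (illustrative): if num_streams is 4, stream_rings[] holds rings for
 * stream IDs 1..3; ID 0 is reserved, and anything >= 4 falls through to
 * the final warning.
 */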
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed"
 * from the ring.  Since the ring is a contiguous structure, they can't be
 * physically removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers time out on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 * Note that the TD may also have completed by the time the Stop Endpoint
 * Command completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
	     cpu_to_le32(EP_STATE_DISABLED)) ||
	    le32_to_cpu(ctrl_ctx->drop_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
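/*
 * For context (a sketch of the call flow, which lives outside this file):
 * the USB core's usb_hcd_alloc_bandwidth() is what pairs these hooks up --
 * on a configuration or alt-setting change it calls drop_endpoint() for
 * each endpoint going away, add_endpoint() for each new one, and then
 * check_bandwidth() to commit the whole batch to the xHC.
 */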
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);

	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
	    xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
	    LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint "
				"configure command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, u32 *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
		ret = -EINVAL;
		break;
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate "
				"context command.\n");
		ret = -ENODEV;
		break;
	case COMP_MEL_ERR:
		/* Max Exit Latency too large error */
		dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	/* Ignore the slot flag (bit 0), and the default control endpoint flag
	 * (bit 1).  The default control endpoint is added during the Address
	 * Device command and is never removed until the slot is disabled.
	 */
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	/* Use hweight32 to count the number of ones in the add flags, or
	 * number of endpoints added.  Don't count endpoints that are changed
	 * (both added and dropped).
	 */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}

static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 valid_add_flags;
	u32 valid_drop_flags;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;

	return hweight32(valid_drop_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}
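/*
 * Worked example for the two counts above (illustrative): if ep index 1 is
 * being changed (dropped and re-added) and ep index 2 is newly added, then
 * add_flags = 0b1100 and drop_flags = 0b0100.  After the >> 2,
 * valid_add_flags = 0b11 and valid_drop_flags = 0b01, so the command needs
 * hweight32(0b11) - hweight32(0b01) = 1 new endpoint resource, and frees
 * hweight32(0b01) - hweight32(0b01) = 0 endpoints on completion.
 */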
We can't subtract the dropped endpoints from the number 1738 * of active endpoints until the command completes because we can oversubscribe 1739 * the host in this case: 1740 * 1741 * - the first configure endpoint command drops more endpoints than it adds 1742 * - a second configure endpoint command that adds more endpoints is queued 1743 * - the first configure endpoint command fails, so the config is unchanged 1744 * - the second command may succeed, even though there isn't enough resources 1745 * 1746 * Must be called with xhci->lock held. 1747 */ 1748 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 1749 struct xhci_container_ctx *in_ctx) 1750 { 1751 u32 added_eps; 1752 1753 added_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1754 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 1755 xhci_dbg(xhci, "Not enough ep ctxs: " 1756 "%u active, need to add %u, limit is %u.\n", 1757 xhci->num_active_eps, added_eps, 1758 xhci->limit_active_eps); 1759 return -ENOMEM; 1760 } 1761 xhci->num_active_eps += added_eps; 1762 xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps, 1763 xhci->num_active_eps); 1764 return 0; 1765 } 1766 1767 /* 1768 * The configure endpoint was failed by the xHC for some other reason, so we 1769 * need to revert the resources that failed configuration would have used. 1770 * 1771 * Must be called with xhci->lock held. 1772 */ 1773 static void xhci_free_host_resources(struct xhci_hcd *xhci, 1774 struct xhci_container_ctx *in_ctx) 1775 { 1776 u32 num_failed_eps; 1777 1778 num_failed_eps = xhci_count_num_new_endpoints(xhci, in_ctx); 1779 xhci->num_active_eps -= num_failed_eps; 1780 xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n", 1781 num_failed_eps, 1782 xhci->num_active_eps); 1783 } 1784 1785 /* 1786 * Now that the command has completed, clean up the active endpoint count by 1787 * subtracting out the endpoints that were dropped (but not changed). 1788 * 1789 * Must be called with xhci->lock held. 1790 */ 1791 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 1792 struct xhci_container_ctx *in_ctx) 1793 { 1794 u32 num_dropped_eps; 1795 1796 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, in_ctx); 1797 xhci->num_active_eps -= num_dropped_eps; 1798 if (num_dropped_eps) 1799 xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n", 1800 num_dropped_eps, 1801 xhci->num_active_eps); 1802 } 1803 1804 unsigned int xhci_get_block_size(struct usb_device *udev) 1805 { 1806 switch (udev->speed) { 1807 case USB_SPEED_LOW: 1808 case USB_SPEED_FULL: 1809 return FS_BLOCK; 1810 case USB_SPEED_HIGH: 1811 return HS_BLOCK; 1812 case USB_SPEED_SUPER: 1813 return SS_BLOCK; 1814 case USB_SPEED_UNKNOWN: 1815 case USB_SPEED_WIRELESS: 1816 default: 1817 /* Should never happen */ 1818 return 1; 1819 } 1820 } 1821 1822 unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 1823 { 1824 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 1825 return LS_OVERHEAD; 1826 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 1827 return FS_OVERHEAD; 1828 return HS_OVERHEAD; 1829 } 1830 1831 /* If we are changing a LS/FS device under a HS hub, 1832 * make sure (if we are activating a new TT) that the HS bus has enough 1833 * bandwidth for this new TT. 
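 *
 * A worked example of the check below, with made-up numbers (the real
 * budget and cost are the HS_BW_LIMIT and TT_HS_OVERHEAD constants): if
 * this root port's bw_table->bw_used were 900 blocks against a
 * 1000-block limit and a newly activated TT cost 80 blocks of overhead,
 * then 900 + 80 <= 1000 and the TT fits; at 950 blocks already in use,
 * the same TT would be refused with -ENOMEM.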
 */
static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	struct xhci_interval_bw_table *bw_table;
	struct xhci_tt_bw_info *tt_info;

	/* Find the bandwidth table for the root port this TT is attached to. */
	bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
	tt_info = virt_dev->tt_info;
	/* If this TT already had active endpoints, the bandwidth for this TT
	 * has already been added.  Removing all periodic endpoints (and thus
	 * making the TT inactive) will only decrease the bandwidth used.
	 */
	if (old_active_eps)
		return 0;
	if (old_active_eps == 0 && tt_info->active_eps != 0) {
		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
			return -ENOMEM;
		return 0;
	}
	/* Not sure why we would have no new active endpoints...
	 *
	 * Maybe because of an Evaluate Context change for a hub update or a
	 * control endpoint 0 max packet size change?
	 * FIXME: skip the bandwidth calculation in that case.
	 */
	return 0;
}

static int xhci_check_ss_bw(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev)
{
	unsigned int bw_reserved;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
		return -ENOMEM;

	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
		return -ENOMEM;

	return 0;
}

/*
 * This algorithm is a very conservative estimate of the worst-case scheduling
 * scenario for any one interval.  The hardware dynamically schedules the
 * packets, so we can't tell which microframe could be the limiting factor in
 * the bandwidth scheduling.  This only takes into account periodic endpoints.
 *
 * Obviously, we can't solve an NP complete problem to find the minimum worst
 * case scenario.  Instead, we come up with an estimate that is no less than
 * the worst case bandwidth used for any one microframe, but may be an
 * over-estimate.
 *
 * We walk the requirements for each endpoint by interval, starting with the
 * smallest interval, and place packets in the schedule where there is only one
 * possible way to schedule packets for that interval.  In order to simplify
 * this algorithm, we record the largest max packet size for each interval, and
 * assume all packets will be that size.
 *
 * For interval 0, we obviously must schedule all packets for each interval.
 * The bandwidth for interval 0 is just the amount of data to be transmitted
 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
 * the number of packets).
 *
 * For interval 1, we have two possible microframes to schedule those packets
 * in.  For this algorithm, if we can schedule the same number of packets for
 * each possible scheduling opportunity (each microframe), we will do so.  The
 * remaining number of packets will be saved to be transmitted in the gaps in
 * the next interval's scheduling sequence.
 *
 * As we move those remaining packets to be scheduled with interval 2 packets,
 * we have to double the number of remaining packets to transmit.  This is
 * because the intervals are actually powers of 2, and we would be transmitting
 * the previous interval's packets twice in this interval.  We also have to be
 * sure that when we look at the largest max packet size for this interval, we
 * also look at the largest max packet size for the remaining packets and take
 * the greater of the two.
 *
 * The algorithm continues to evenly distribute packets in each scheduling
 * opportunity, and push the remaining packets out, until we get to the last
 * interval.  Then those packets and their associated overhead are just added
 * to the bandwidth used.
 */
static int xhci_check_bw_table(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps)
{
	unsigned int bw_reserved;
	unsigned int max_bandwidth;
	unsigned int bw_used;
	unsigned int block_size;
	struct xhci_interval_bw_table *bw_table;
	unsigned int packet_size = 0;
	unsigned int overhead = 0;
	unsigned int packets_transmitted = 0;
	unsigned int packets_remaining = 0;
	unsigned int i;

	if (virt_dev->udev->speed == USB_SPEED_SUPER)
		return xhci_check_ss_bw(xhci, virt_dev);

	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
		max_bandwidth = HS_BW_LIMIT;
		/* Convert percent of bus BW reserved to blocks reserved */
		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
	} else {
		max_bandwidth = FS_BW_LIMIT;
		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
	}

	bw_table = virt_dev->bw_table;
	/* We need to translate the max packet size and max ESIT payloads into
	 * the units the hardware uses.
	 */
	block_size = xhci_get_block_size(virt_dev->udev);

	/* If we are manipulating a LS/FS device under a HS hub, double check
	 * that the HS bus has enough bandwidth if we are activating a new TT.
	 */
	if (virt_dev->tt_info) {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
		if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
			xhci_warn(xhci, "Not enough bandwidth on HS bus for "
					"newly activated TT.\n");
			return -ENOMEM;
		}
		xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
				virt_dev->tt_info->slot_id,
				virt_dev->tt_info->ttport);
	} else {
		xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
				virt_dev->real_port);
	}

	/* Add in how much bandwidth will be used for interval zero, or the
	 * rounded max ESIT payload + number of packets * largest overhead.
	 */
	bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
		bw_table->interval_bw[0].num_packets *
		xhci_get_largest_overhead(&bw_table->interval_bw[0]);

	for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
		unsigned int bw_added;
		unsigned int largest_mps;
		unsigned int interval_overhead;

		/*
		 * How many packets could we transmit in this interval?
		 * If packets didn't fit in the previous interval, we will need
		 * to transmit that many packets twice within this interval.
		 */
		packets_remaining = 2 * packets_remaining +
			bw_table->interval_bw[i].num_packets;

		/* Find the largest max packet size of this or the previous
		 * interval.
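		 *
		 * Hypothetical example of the carry-over: if interval 1 had 5
		 * packets, 5 >> 2 = 1 packet went into each of its four
		 * scheduling slots and 5 % 4 = 1 packet was left over.  Here
		 * at interval 2 that leftover counts twice (the 2 * above),
		 * and if it was 512 bytes while this interval's largest
		 * endpoint packet is only 64 bytes, we must still budget
		 * 512 bytes per scheduling opportunity.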
1996 */ 1997 if (list_empty(&bw_table->interval_bw[i].endpoints)) 1998 largest_mps = 0; 1999 else { 2000 struct xhci_virt_ep *virt_ep; 2001 struct list_head *ep_entry; 2002 2003 ep_entry = bw_table->interval_bw[i].endpoints.next; 2004 virt_ep = list_entry(ep_entry, 2005 struct xhci_virt_ep, bw_endpoint_list); 2006 /* Convert to blocks, rounding up */ 2007 largest_mps = DIV_ROUND_UP( 2008 virt_ep->bw_info.max_packet_size, 2009 block_size); 2010 } 2011 if (largest_mps > packet_size) 2012 packet_size = largest_mps; 2013 2014 /* Use the larger overhead of this or the previous interval. */ 2015 interval_overhead = xhci_get_largest_overhead( 2016 &bw_table->interval_bw[i]); 2017 if (interval_overhead > overhead) 2018 overhead = interval_overhead; 2019 2020 /* How many packets can we evenly distribute across 2021 * (1 << (i + 1)) possible scheduling opportunities? 2022 */ 2023 packets_transmitted = packets_remaining >> (i + 1); 2024 2025 /* Add in the bandwidth used for those scheduled packets */ 2026 bw_added = packets_transmitted * (overhead + packet_size); 2027 2028 /* How many packets do we have remaining to transmit? */ 2029 packets_remaining = packets_remaining % (1 << (i + 1)); 2030 2031 /* What largest max packet size should those packets have? */ 2032 /* If we've transmitted all packets, don't carry over the 2033 * largest packet size. 2034 */ 2035 if (packets_remaining == 0) { 2036 packet_size = 0; 2037 overhead = 0; 2038 } else if (packets_transmitted > 0) { 2039 /* Otherwise if we do have remaining packets, and we've 2040 * scheduled some packets in this interval, take the 2041 * largest max packet size from endpoints with this 2042 * interval. 2043 */ 2044 packet_size = largest_mps; 2045 overhead = interval_overhead; 2046 } 2047 /* Otherwise carry over packet_size and overhead from the last 2048 * time we had a remainder. 2049 */ 2050 bw_used += bw_added; 2051 if (bw_used > max_bandwidth) { 2052 xhci_warn(xhci, "Not enough bandwidth. " 2053 "Proposed: %u, Max: %u\n", 2054 bw_used, max_bandwidth); 2055 return -ENOMEM; 2056 } 2057 } 2058 /* 2059 * Ok, we know we have some packets left over after even-handedly 2060 * scheduling interval 15. We don't know which microframes they will 2061 * fit into, so we over-schedule and say they will be scheduled every 2062 * microframe. 2063 */ 2064 if (packets_remaining > 0) 2065 bw_used += overhead + packet_size; 2066 2067 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2068 unsigned int port_index = virt_dev->real_port - 1; 2069 2070 /* OK, we're manipulating a HS device attached to a 2071 * root port bandwidth domain. Include the number of active TTs 2072 * in the bandwidth used. 2073 */ 2074 bw_used += TT_HS_OVERHEAD * 2075 xhci->rh_bw[port_index].num_active_tts; 2076 } 2077 2078 xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2079 "Available: %u " "percent\n", 2080 bw_used, max_bandwidth, bw_reserved, 2081 (max_bandwidth - bw_used - bw_reserved) * 100 / 2082 max_bandwidth); 2083 2084 bw_used += bw_reserved; 2085 if (bw_used > max_bandwidth) { 2086 xhci_warn(xhci, "Not enough bandwidth. 
Proposed: %u, Max: %u\n",
				bw_used, max_bandwidth);
		return -ENOMEM;
	}

	bw_table->bw_used = bw_used;
	return 0;
}

static bool xhci_is_async_ep(unsigned int ep_type)
{
	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
			ep_type != ISOC_IN_EP &&
			ep_type != INT_IN_EP);
}

static bool xhci_is_sync_in_ep(unsigned int ep_type)
{
	return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}

static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
{
	unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);

	if (ep_bw->ep_interval == 0)
		return SS_OVERHEAD_BURST +
			(ep_bw->mult * ep_bw->num_packets *
					(SS_OVERHEAD + mps));
	return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
			(SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
			1 << ep_bw->ep_interval);
}

void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info)
{
	struct xhci_interval_bw *interval_bw;
	int normalized_interval;

	if (xhci_is_async_ep(ep_bw->type))
		return;

	if (udev->speed == USB_SPEED_SUPER) {
		if (xhci_is_sync_in_ep(ep_bw->type))
			xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
				xhci_get_ss_bw_consumed(ep_bw);
		else
			xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
				xhci_get_ss_bw_consumed(ep_bw);
		return;
	}

	/* SuperSpeed endpoints never get added to intervals in the table, so
	 * this check is only valid for HS/FS/LS devices.
	 */
	if (list_empty(&virt_ep->bw_endpoint_list))
		return;
	/* For LS/FS devices, we need to translate the interval expressed in
	 * microframes to frames.
	 */
	if (udev->speed == USB_SPEED_HIGH)
		normalized_interval = ep_bw->ep_interval;
	else
		normalized_interval = ep_bw->ep_interval - 3;

	if (normalized_interval == 0)
		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
	interval_bw = &bw_table->interval_bw[normalized_interval];
	interval_bw->num_packets -= ep_bw->num_packets;
	switch (udev->speed) {
	case USB_SPEED_LOW:
		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_FULL:
		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_HIGH:
		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
		break;
	case USB_SPEED_SUPER:
	case USB_SPEED_UNKNOWN:
	case USB_SPEED_WIRELESS:
		/* Should never happen because only LS/FS/HS endpoints will get
		 * added to the endpoint list.
2176 */ 2177 return; 2178 } 2179 if (tt_info) 2180 tt_info->active_eps -= 1; 2181 list_del_init(&virt_ep->bw_endpoint_list); 2182 } 2183 2184 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2185 struct xhci_bw_info *ep_bw, 2186 struct xhci_interval_bw_table *bw_table, 2187 struct usb_device *udev, 2188 struct xhci_virt_ep *virt_ep, 2189 struct xhci_tt_bw_info *tt_info) 2190 { 2191 struct xhci_interval_bw *interval_bw; 2192 struct xhci_virt_ep *smaller_ep; 2193 int normalized_interval; 2194 2195 if (xhci_is_async_ep(ep_bw->type)) 2196 return; 2197 2198 if (udev->speed == USB_SPEED_SUPER) { 2199 if (xhci_is_sync_in_ep(ep_bw->type)) 2200 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2201 xhci_get_ss_bw_consumed(ep_bw); 2202 else 2203 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2204 xhci_get_ss_bw_consumed(ep_bw); 2205 return; 2206 } 2207 2208 /* For LS/FS devices, we need to translate the interval expressed in 2209 * microframes to frames. 2210 */ 2211 if (udev->speed == USB_SPEED_HIGH) 2212 normalized_interval = ep_bw->ep_interval; 2213 else 2214 normalized_interval = ep_bw->ep_interval - 3; 2215 2216 if (normalized_interval == 0) 2217 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2218 interval_bw = &bw_table->interval_bw[normalized_interval]; 2219 interval_bw->num_packets += ep_bw->num_packets; 2220 switch (udev->speed) { 2221 case USB_SPEED_LOW: 2222 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2223 break; 2224 case USB_SPEED_FULL: 2225 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2226 break; 2227 case USB_SPEED_HIGH: 2228 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2229 break; 2230 case USB_SPEED_SUPER: 2231 case USB_SPEED_UNKNOWN: 2232 case USB_SPEED_WIRELESS: 2233 /* Should never happen because only LS/FS/HS endpoints will get 2234 * added to the endpoint list. 2235 */ 2236 return; 2237 } 2238 2239 if (tt_info) 2240 tt_info->active_eps += 1; 2241 /* Insert the endpoint into the list, largest max packet size first. */ 2242 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2243 bw_endpoint_list) { 2244 if (ep_bw->max_packet_size >= 2245 smaller_ep->bw_info.max_packet_size) { 2246 /* Add the new ep before the smaller endpoint */ 2247 list_add_tail(&virt_ep->bw_endpoint_list, 2248 &smaller_ep->bw_endpoint_list); 2249 return; 2250 } 2251 } 2252 /* Add the new endpoint at the end of the list. 
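	 *
	 * For instance (hypothetical sizes): with endpoints of max packet
	 * size 512, 256 and 64 already listed in that order, a new 300-byte
	 * endpoint is inserted before the 256-byte one by the loop above,
	 * and only an endpoint smaller than every entry (say 32 bytes)
	 * falls through to the tail add below.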
*/ 2253 list_add_tail(&virt_ep->bw_endpoint_list, 2254 &interval_bw->endpoints); 2255 } 2256 2257 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2258 struct xhci_virt_device *virt_dev, 2259 int old_active_eps) 2260 { 2261 struct xhci_root_port_bw_info *rh_bw_info; 2262 if (!virt_dev->tt_info) 2263 return; 2264 2265 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2266 if (old_active_eps == 0 && 2267 virt_dev->tt_info->active_eps != 0) { 2268 rh_bw_info->num_active_tts += 1; 2269 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2270 } else if (old_active_eps != 0 && 2271 virt_dev->tt_info->active_eps == 0) { 2272 rh_bw_info->num_active_tts -= 1; 2273 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2274 } 2275 } 2276 2277 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2278 struct xhci_virt_device *virt_dev, 2279 struct xhci_container_ctx *in_ctx) 2280 { 2281 struct xhci_bw_info ep_bw_info[31]; 2282 int i; 2283 struct xhci_input_control_ctx *ctrl_ctx; 2284 int old_active_eps = 0; 2285 2286 if (virt_dev->tt_info) 2287 old_active_eps = virt_dev->tt_info->active_eps; 2288 2289 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2290 2291 for (i = 0; i < 31; i++) { 2292 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2293 continue; 2294 2295 /* Make a copy of the BW info in case we need to revert this */ 2296 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2297 sizeof(ep_bw_info[i])); 2298 /* Drop the endpoint from the interval table if the endpoint is 2299 * being dropped or changed. 2300 */ 2301 if (EP_IS_DROPPED(ctrl_ctx, i)) 2302 xhci_drop_ep_from_interval_table(xhci, 2303 &virt_dev->eps[i].bw_info, 2304 virt_dev->bw_table, 2305 virt_dev->udev, 2306 &virt_dev->eps[i], 2307 virt_dev->tt_info); 2308 } 2309 /* Overwrite the information stored in the endpoints' bw_info */ 2310 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2311 for (i = 0; i < 31; i++) { 2312 /* Add any changed or added endpoints to the interval table */ 2313 if (EP_IS_ADDED(ctrl_ctx, i)) 2314 xhci_add_ep_to_interval_table(xhci, 2315 &virt_dev->eps[i].bw_info, 2316 virt_dev->bw_table, 2317 virt_dev->udev, 2318 &virt_dev->eps[i], 2319 virt_dev->tt_info); 2320 } 2321 2322 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2323 /* Ok, this fits in the bandwidth we have. 2324 * Update the number of active TTs. 2325 */ 2326 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2327 return 0; 2328 } 2329 2330 /* We don't have enough bandwidth for this, revert the stored info. */ 2331 for (i = 0; i < 31; i++) { 2332 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2333 continue; 2334 2335 /* Drop the new copies of any added or changed endpoints from 2336 * the interval table. 2337 */ 2338 if (EP_IS_ADDED(ctrl_ctx, i)) { 2339 xhci_drop_ep_from_interval_table(xhci, 2340 &virt_dev->eps[i].bw_info, 2341 virt_dev->bw_table, 2342 virt_dev->udev, 2343 &virt_dev->eps[i], 2344 virt_dev->tt_info); 2345 } 2346 /* Revert the endpoint back to its old information */ 2347 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2348 sizeof(ep_bw_info[i])); 2349 /* Add any changed or dropped endpoints back into the table */ 2350 if (EP_IS_DROPPED(ctrl_ctx, i)) 2351 xhci_add_ep_to_interval_table(xhci, 2352 &virt_dev->eps[i].bw_info, 2353 virt_dev->bw_table, 2354 virt_dev->udev, 2355 &virt_dev->eps[i], 2356 virt_dev->tt_info); 2357 } 2358 return -ENOMEM; 2359 } 2360 2361 2362 /* Issue a configure endpoint command or evaluate context command 2363 * and wait for it to finish. 
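 *
 * (ctx_change selects between the two: xhci_check_bandwidth() below
 * passes ctx_change = false to get a Configure Endpoint command, while
 * a caller that only needs to update slot or ep0 context fields --
 * presumably a hub update or a max packet size fixup -- passes true to
 * get Evaluate Context.  must_succeed is forwarded to the Configure
 * Endpoint queueing call.)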
2364 */ 2365 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2366 struct usb_device *udev, 2367 struct xhci_command *command, 2368 bool ctx_change, bool must_succeed) 2369 { 2370 int ret; 2371 int timeleft; 2372 unsigned long flags; 2373 struct xhci_container_ctx *in_ctx; 2374 struct completion *cmd_completion; 2375 u32 *cmd_status; 2376 struct xhci_virt_device *virt_dev; 2377 2378 spin_lock_irqsave(&xhci->lock, flags); 2379 virt_dev = xhci->devs[udev->slot_id]; 2380 2381 if (command) 2382 in_ctx = command->in_ctx; 2383 else 2384 in_ctx = virt_dev->in_ctx; 2385 2386 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2387 xhci_reserve_host_resources(xhci, in_ctx)) { 2388 spin_unlock_irqrestore(&xhci->lock, flags); 2389 xhci_warn(xhci, "Not enough host resources, " 2390 "active endpoint contexts = %u\n", 2391 xhci->num_active_eps); 2392 return -ENOMEM; 2393 } 2394 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2395 xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) { 2396 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2397 xhci_free_host_resources(xhci, in_ctx); 2398 spin_unlock_irqrestore(&xhci->lock, flags); 2399 xhci_warn(xhci, "Not enough bandwidth\n"); 2400 return -ENOMEM; 2401 } 2402 2403 if (command) { 2404 cmd_completion = command->completion; 2405 cmd_status = &command->status; 2406 command->command_trb = xhci->cmd_ring->enqueue; 2407 2408 /* Enqueue pointer can be left pointing to the link TRB, 2409 * we must handle that 2410 */ 2411 if (TRB_TYPE_LINK_LE32(command->command_trb->link.control)) 2412 command->command_trb = 2413 xhci->cmd_ring->enq_seg->next->trbs; 2414 2415 list_add_tail(&command->cmd_list, &virt_dev->cmd_list); 2416 } else { 2417 cmd_completion = &virt_dev->cmd_completion; 2418 cmd_status = &virt_dev->cmd_status; 2419 } 2420 init_completion(cmd_completion); 2421 2422 if (!ctx_change) 2423 ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, 2424 udev->slot_id, must_succeed); 2425 else 2426 ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, 2427 udev->slot_id); 2428 if (ret < 0) { 2429 if (command) 2430 list_del(&command->cmd_list); 2431 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2432 xhci_free_host_resources(xhci, in_ctx); 2433 spin_unlock_irqrestore(&xhci->lock, flags); 2434 xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); 2435 return -ENOMEM; 2436 } 2437 xhci_ring_cmd_db(xhci); 2438 spin_unlock_irqrestore(&xhci->lock, flags); 2439 2440 /* Wait for the configure endpoint command to complete */ 2441 timeleft = wait_for_completion_interruptible_timeout( 2442 cmd_completion, 2443 USB_CTRL_SET_TIMEOUT); 2444 if (timeleft <= 0) { 2445 xhci_warn(xhci, "%s while waiting for %s command\n", 2446 timeleft == 0 ? "Timeout" : "Signal", 2447 ctx_change == 0 ? 2448 "configure endpoint" : 2449 "evaluate context"); 2450 /* FIXME cancel the configure endpoint command */ 2451 return -ETIME; 2452 } 2453 2454 if (!ctx_change) 2455 ret = xhci_configure_endpoint_result(xhci, udev, cmd_status); 2456 else 2457 ret = xhci_evaluate_context_result(xhci, udev, cmd_status); 2458 2459 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2460 spin_lock_irqsave(&xhci->lock, flags); 2461 /* If the command failed, remove the reserved resources. 2462 * Otherwise, clean up the estimate to include dropped eps. 
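	 *
	 * Worked example with made-up counts: a command that adds 2
	 * endpoints and drops 3 reserved 2 contexts up front in
	 * xhci_reserve_host_resources().  If it fails, those 2 are handed
	 * back and num_active_eps is unchanged overall; if it succeeds,
	 * the 3 dropped contexts are subtracted here, a net change of -1.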
2463 */ 2464 if (ret) 2465 xhci_free_host_resources(xhci, in_ctx); 2466 else 2467 xhci_finish_resource_reservation(xhci, in_ctx); 2468 spin_unlock_irqrestore(&xhci->lock, flags); 2469 } 2470 return ret; 2471 } 2472 2473 /* Called after one or more calls to xhci_add_endpoint() or 2474 * xhci_drop_endpoint(). If this call fails, the USB core is expected 2475 * to call xhci_reset_bandwidth(). 2476 * 2477 * Since we are in the middle of changing either configuration or 2478 * installing a new alt setting, the USB core won't allow URBs to be 2479 * enqueued for any endpoint on the old config or interface. Nothing 2480 * else should be touching the xhci->devs[slot_id] structure, so we 2481 * don't need to take the xhci->lock for manipulating that. 2482 */ 2483 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2484 { 2485 int i; 2486 int ret = 0; 2487 struct xhci_hcd *xhci; 2488 struct xhci_virt_device *virt_dev; 2489 struct xhci_input_control_ctx *ctrl_ctx; 2490 struct xhci_slot_ctx *slot_ctx; 2491 2492 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2493 if (ret <= 0) 2494 return ret; 2495 xhci = hcd_to_xhci(hcd); 2496 if (xhci->xhc_state & XHCI_STATE_DYING) 2497 return -ENODEV; 2498 2499 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2500 virt_dev = xhci->devs[udev->slot_id]; 2501 2502 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 2503 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 2504 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2505 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 2506 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 2507 2508 /* Don't issue the command if there's no endpoints to update. */ 2509 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 2510 ctrl_ctx->drop_flags == 0) 2511 return 0; 2512 2513 xhci_dbg(xhci, "New Input Control Context:\n"); 2514 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2515 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2516 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2517 2518 ret = xhci_configure_endpoint(xhci, udev, NULL, 2519 false, false); 2520 if (ret) { 2521 /* Callee should call reset_bandwidth() */ 2522 return ret; 2523 } 2524 2525 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 2526 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2527 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2528 2529 /* Free any rings that were dropped, but not changed. */ 2530 for (i = 1; i < 31; ++i) { 2531 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2532 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) 2533 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2534 } 2535 xhci_zero_in_ctx(xhci, virt_dev); 2536 /* 2537 * Install any rings for completely new endpoints or changed endpoints, 2538 * and free or cache any old rings from changed endpoints. 2539 */ 2540 for (i = 1; i < 31; ++i) { 2541 if (!virt_dev->eps[i].new_ring) 2542 continue; 2543 /* Only cache or free the old ring if it exists. 2544 * It may not if this is the first add of an endpoint. 
2545 */ 2546 if (virt_dev->eps[i].ring) { 2547 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2548 } 2549 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2550 virt_dev->eps[i].new_ring = NULL; 2551 } 2552 2553 return ret; 2554 } 2555 2556 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2557 { 2558 struct xhci_hcd *xhci; 2559 struct xhci_virt_device *virt_dev; 2560 int i, ret; 2561 2562 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2563 if (ret <= 0) 2564 return; 2565 xhci = hcd_to_xhci(hcd); 2566 2567 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2568 virt_dev = xhci->devs[udev->slot_id]; 2569 /* Free any rings allocated for added endpoints */ 2570 for (i = 0; i < 31; ++i) { 2571 if (virt_dev->eps[i].new_ring) { 2572 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2573 virt_dev->eps[i].new_ring = NULL; 2574 } 2575 } 2576 xhci_zero_in_ctx(xhci, virt_dev); 2577 } 2578 2579 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2580 struct xhci_container_ctx *in_ctx, 2581 struct xhci_container_ctx *out_ctx, 2582 u32 add_flags, u32 drop_flags) 2583 { 2584 struct xhci_input_control_ctx *ctrl_ctx; 2585 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2586 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2587 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2588 xhci_slot_copy(xhci, in_ctx, out_ctx); 2589 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2590 2591 xhci_dbg(xhci, "Input Context:\n"); 2592 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 2593 } 2594 2595 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2596 unsigned int slot_id, unsigned int ep_index, 2597 struct xhci_dequeue_state *deq_state) 2598 { 2599 struct xhci_container_ctx *in_ctx; 2600 struct xhci_ep_ctx *ep_ctx; 2601 u32 added_ctxs; 2602 dma_addr_t addr; 2603 2604 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 2605 xhci->devs[slot_id]->out_ctx, ep_index); 2606 in_ctx = xhci->devs[slot_id]->in_ctx; 2607 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 2608 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 2609 deq_state->new_deq_ptr); 2610 if (addr == 0) { 2611 xhci_warn(xhci, "WARN Cannot submit config ep after " 2612 "reset ep command\n"); 2613 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 2614 deq_state->new_deq_seg, 2615 deq_state->new_deq_ptr); 2616 return; 2617 } 2618 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 2619 2620 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 2621 xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 2622 xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs); 2623 } 2624 2625 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 2626 struct usb_device *udev, unsigned int ep_index) 2627 { 2628 struct xhci_dequeue_state deq_state; 2629 struct xhci_virt_ep *ep; 2630 2631 xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n"); 2632 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2633 /* We need to move the HW's dequeue pointer past this TD, 2634 * or it will attempt to resend it on the next doorbell ring. 2635 */ 2636 xhci_find_new_dequeue_state(xhci, udev->slot_id, 2637 ep_index, ep->stopped_stream, ep->stopped_td, 2638 &deq_state); 2639 2640 /* HW with the reset endpoint quirk will use the saved dequeue state to 2641 * issue a configure endpoint command later. 
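	 *
	 * (In the quirk path, xhci_setup_input_ctx_for_quirk() above stores
	 * the new dequeue pointer in the input endpoint context with the
	 * ring cycle state in the low-order bit of the deq field -- note
	 * the "addr | deq_state->new_cycle_state" -- so the controller
	 * resumes the ring with the correct cycle.)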
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are enabled.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}

/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()).
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}

static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers. xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}

/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}

static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already.\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
 * coordinate mass storage command queueing across multiple endpoints (basically
 * a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * stream IDs.
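 *
 * Worked example (all numbers hypothetical): a driver asks for 256
 * streams on an endpoint whose companion descriptor only advertises 64.
 * num_streams is first bumped to 257 to cover stream 0, clamped to
 * 64 + 1 = 65 by xhci_calculate_streams_and_bitmask(), and rounded up
 * to a 128-entry stream context array by xhci_calculate_streams_entries();
 * the driver is told it got 64 usable stream IDs.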
2872 */ 2873 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 2874 struct usb_host_endpoint **eps, unsigned int num_eps, 2875 unsigned int num_streams, gfp_t mem_flags) 2876 { 2877 int i, ret; 2878 struct xhci_hcd *xhci; 2879 struct xhci_virt_device *vdev; 2880 struct xhci_command *config_cmd; 2881 unsigned int ep_index; 2882 unsigned int num_stream_ctxs; 2883 unsigned long flags; 2884 u32 changed_ep_bitmask = 0; 2885 2886 if (!eps) 2887 return -EINVAL; 2888 2889 /* Add one to the number of streams requested to account for 2890 * stream 0 that is reserved for xHCI usage. 2891 */ 2892 num_streams += 1; 2893 xhci = hcd_to_xhci(hcd); 2894 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 2895 num_streams); 2896 2897 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 2898 if (!config_cmd) { 2899 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 2900 return -ENOMEM; 2901 } 2902 2903 /* Check to make sure all endpoints are not already configured for 2904 * streams. While we're at it, find the maximum number of streams that 2905 * all the endpoints will support and check for duplicate endpoints. 2906 */ 2907 spin_lock_irqsave(&xhci->lock, flags); 2908 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 2909 num_eps, &num_streams, &changed_ep_bitmask); 2910 if (ret < 0) { 2911 xhci_free_command(xhci, config_cmd); 2912 spin_unlock_irqrestore(&xhci->lock, flags); 2913 return ret; 2914 } 2915 if (num_streams <= 1) { 2916 xhci_warn(xhci, "WARN: endpoints can't handle " 2917 "more than one stream.\n"); 2918 xhci_free_command(xhci, config_cmd); 2919 spin_unlock_irqrestore(&xhci->lock, flags); 2920 return -EINVAL; 2921 } 2922 vdev = xhci->devs[udev->slot_id]; 2923 /* Mark each endpoint as being in transition, so 2924 * xhci_urb_enqueue() will reject all URBs. 2925 */ 2926 for (i = 0; i < num_eps; i++) { 2927 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2928 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 2929 } 2930 spin_unlock_irqrestore(&xhci->lock, flags); 2931 2932 /* Setup internal data structures and allocate HW data structures for 2933 * streams (but don't install the HW structures in the input context 2934 * until we're sure all memory allocation succeeded). 2935 */ 2936 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 2937 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 2938 num_stream_ctxs, num_streams); 2939 2940 for (i = 0; i < num_eps; i++) { 2941 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2942 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 2943 num_stream_ctxs, 2944 num_streams, mem_flags); 2945 if (!vdev->eps[ep_index].stream_info) 2946 goto cleanup; 2947 /* Set maxPstreams in endpoint context and update deq ptr to 2948 * point to stream context array. FIXME 2949 */ 2950 } 2951 2952 /* Set up the input context for a configure endpoint command. */ 2953 for (i = 0; i < num_eps; i++) { 2954 struct xhci_ep_ctx *ep_ctx; 2955 2956 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 2957 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 2958 2959 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 2960 vdev->out_ctx, ep_index); 2961 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 2962 vdev->eps[ep_index].stream_info); 2963 } 2964 /* Tell the HW to drop its old copy of the endpoint context info 2965 * and add the updated copy from the input context. 
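	 *
	 * (Setting the same bit in both drop_flags and add_flags is how a
	 * live endpoint is reconfigured in place: the call below passes
	 * changed_ep_bitmask for both, so each streams endpoint is dropped
	 * and immediately re-added with its new context.)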
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Deletes endpoint resources for endpoints that were active before a Reset
 * Device command, or a Disable Slot command.  The Reset Device command leaves
 * the control endpoint intact, whereas the Disable Slot command deletes it.
 *
 * Must be called with xhci->lock held.
 */
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev, bool drop_control_ep)
{
	int i;
	unsigned int num_dropped_eps = 0;
	unsigned int drop_flags = 0;

	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
		if (virt_dev->eps[i].ring) {
			drop_flags |= 1 << i;
			num_dropped_eps++;
		}
	}
	xhci->num_active_eps -= num_dropped_eps;
	if (num_dropped_eps)
		xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
				"%u now active.\n",
				num_dropped_eps, drop_flags,
				xhci->num_active_eps);
}

/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the default
 * control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure? Cache the rings? Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to an xHC restore error and
 * re-initialization during S3/S4.
 * In this case, call xhci_alloc_dev() to re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;
	int old_active_eps = 0;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist.  Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev.  Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if (TRB_TYPE_LINK_LE32(reset_device_cmd->command_trb->link.control))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
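		 *
		 * (list_del() writes LIST_POISON1 into the ->next pointer of
		 * the entry it removes, so seeing it here means the command
		 * completion handler already unlinked this command and we
		 * must not unlink it twice.)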
3234 */ 3235 if (reset_device_cmd->cmd_list.next != LIST_POISON1) 3236 list_del(&reset_device_cmd->cmd_list); 3237 spin_unlock_irqrestore(&xhci->lock, flags); 3238 ret = -ETIME; 3239 goto command_cleanup; 3240 } 3241 3242 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3243 * unless we tried to reset a slot ID that wasn't enabled, 3244 * or the device wasn't in the addressed or configured state. 3245 */ 3246 ret = reset_device_cmd->status; 3247 switch (ret) { 3248 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3249 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3250 xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", 3251 slot_id, 3252 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3253 xhci_info(xhci, "Not freeing device rings.\n"); 3254 /* Don't treat this as an error. May change my mind later. */ 3255 ret = 0; 3256 goto command_cleanup; 3257 case COMP_SUCCESS: 3258 xhci_dbg(xhci, "Successful reset device command.\n"); 3259 break; 3260 default: 3261 if (xhci_is_vendor_info_code(xhci, ret)) 3262 break; 3263 xhci_warn(xhci, "Unknown completion code %u for " 3264 "reset device command.\n", ret); 3265 ret = -EINVAL; 3266 goto command_cleanup; 3267 } 3268 3269 /* Free up host controller endpoint resources */ 3270 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3271 spin_lock_irqsave(&xhci->lock, flags); 3272 /* Don't delete the default control endpoint resources */ 3273 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3274 spin_unlock_irqrestore(&xhci->lock, flags); 3275 } 3276 3277 /* Everything but endpoint 0 is disabled, so free or cache the rings. */ 3278 last_freed_endpoint = 1; 3279 for (i = 1; i < 31; ++i) { 3280 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3281 3282 if (ep->ep_state & EP_HAS_STREAMS) { 3283 xhci_free_stream_info(xhci, ep->stream_info); 3284 ep->stream_info = NULL; 3285 ep->ep_state &= ~EP_HAS_STREAMS; 3286 } 3287 3288 if (ep->ring) { 3289 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 3290 last_freed_endpoint = i; 3291 } 3292 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3293 xhci_drop_ep_from_interval_table(xhci, 3294 &virt_dev->eps[i].bw_info, 3295 virt_dev->bw_table, 3296 udev, 3297 &virt_dev->eps[i], 3298 virt_dev->tt_info); 3299 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3300 } 3301 /* If necessary, update the number of active TTs on this root port */ 3302 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3303 3304 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 3305 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 3306 ret = 0; 3307 3308 command_cleanup: 3309 xhci_free_command(xhci, reset_device_cmd); 3310 return ret; 3311 } 3312 3313 /* 3314 * At this point, the struct usb_device is about to go away, the device has 3315 * disconnected, and all traffic has been stopped and the endpoints have been 3316 * disabled. Free any HC data structures associated with that device. 3317 */ 3318 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3319 { 3320 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3321 struct xhci_virt_device *virt_dev; 3322 unsigned long flags; 3323 u32 state; 3324 int i, ret; 3325 3326 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3327 /* If the host is halted due to driver unload, we still need to free the 3328 * device. 
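	 *
	 * (That is why -ENODEV is special-cased below: xhci_check_args() is
	 * assumed to return it when the host controller is halted, and we
	 * fall through to free the virt device rather than bailing out.)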
3329 */ 3330 if (ret <= 0 && ret != -ENODEV) 3331 return; 3332 3333 virt_dev = xhci->devs[udev->slot_id]; 3334 3335 /* Stop any wayward timer functions (which may grab the lock) */ 3336 for (i = 0; i < 31; ++i) { 3337 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 3338 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3339 } 3340 3341 if (udev->usb2_hw_lpm_enabled) { 3342 xhci_set_usb2_hardware_lpm(hcd, udev, 0); 3343 udev->usb2_hw_lpm_enabled = 0; 3344 } 3345 3346 spin_lock_irqsave(&xhci->lock, flags); 3347 /* Don't disable the slot if the host controller is dead. */ 3348 state = xhci_readl(xhci, &xhci->op_regs->status); 3349 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3350 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3351 xhci_free_virt_device(xhci, udev->slot_id); 3352 spin_unlock_irqrestore(&xhci->lock, flags); 3353 return; 3354 } 3355 3356 if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 3357 spin_unlock_irqrestore(&xhci->lock, flags); 3358 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3359 return; 3360 } 3361 xhci_ring_cmd_db(xhci); 3362 spin_unlock_irqrestore(&xhci->lock, flags); 3363 /* 3364 * Event command completion handler will free any data structures 3365 * associated with the slot. XXX Can free sleep? 3366 */ 3367 } 3368 3369 /* 3370 * Checks if we have enough host controller resources for the default control 3371 * endpoint. 3372 * 3373 * Must be called with xhci->lock held. 3374 */ 3375 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3376 { 3377 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3378 xhci_dbg(xhci, "Not enough ep ctxs: " 3379 "%u active, need to add 1, limit is %u.\n", 3380 xhci->num_active_eps, xhci->limit_active_eps); 3381 return -ENOMEM; 3382 } 3383 xhci->num_active_eps += 1; 3384 xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n", 3385 xhci->num_active_eps); 3386 return 0; 3387 } 3388 3389 3390 /* 3391 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3392 * timed out, or allocating memory failed. Returns 1 on success. 3393 */ 3394 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3395 { 3396 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3397 unsigned long flags; 3398 int timeleft; 3399 int ret; 3400 3401 spin_lock_irqsave(&xhci->lock, flags); 3402 ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 3403 if (ret) { 3404 spin_unlock_irqrestore(&xhci->lock, flags); 3405 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3406 return 0; 3407 } 3408 xhci_ring_cmd_db(xhci); 3409 spin_unlock_irqrestore(&xhci->lock, flags); 3410 3411 /* XXX: how much time for xHC slot assignment? */ 3412 timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 3413 USB_CTRL_SET_TIMEOUT); 3414 if (timeleft <= 0) { 3415 xhci_warn(xhci, "%s while waiting for a slot\n", 3416 timeleft == 0 ? 
"Timeout" : "Signal"); 3417 /* FIXME cancel the enable slot request */ 3418 return 0; 3419 } 3420 3421 if (!xhci->slot_id) { 3422 xhci_err(xhci, "Error while assigning device slot ID\n"); 3423 return 0; 3424 } 3425 3426 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3427 spin_lock_irqsave(&xhci->lock, flags); 3428 ret = xhci_reserve_host_control_ep_resources(xhci); 3429 if (ret) { 3430 spin_unlock_irqrestore(&xhci->lock, flags); 3431 xhci_warn(xhci, "Not enough host resources, " 3432 "active endpoint contexts = %u\n", 3433 xhci->num_active_eps); 3434 goto disable_slot; 3435 } 3436 spin_unlock_irqrestore(&xhci->lock, flags); 3437 } 3438 /* Use GFP_NOIO, since this function can be called from 3439 * xhci_discover_or_reset_device(), which may be called as part of 3440 * mass storage driver error handling. 3441 */ 3442 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 3443 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3444 goto disable_slot; 3445 } 3446 udev->slot_id = xhci->slot_id; 3447 /* Is this a LS or FS device under a HS hub? */ 3448 /* Hub or peripherial? */ 3449 return 1; 3450 3451 disable_slot: 3452 /* Disable slot, if we can do it without mem alloc */ 3453 spin_lock_irqsave(&xhci->lock, flags); 3454 if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) 3455 xhci_ring_cmd_db(xhci); 3456 spin_unlock_irqrestore(&xhci->lock, flags); 3457 return 0; 3458 } 3459 3460 /* 3461 * Issue an Address Device command (which will issue a SetAddress request to 3462 * the device). 3463 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 3464 * we should only issue and wait on one address command at the same time. 3465 * 3466 * We add one to the device address issued by the hardware because the USB core 3467 * uses address 1 for the root hubs (even though they're not really devices). 3468 */ 3469 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3470 { 3471 unsigned long flags; 3472 int timeleft; 3473 struct xhci_virt_device *virt_dev; 3474 int ret = 0; 3475 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3476 struct xhci_slot_ctx *slot_ctx; 3477 struct xhci_input_control_ctx *ctrl_ctx; 3478 u64 temp_64; 3479 3480 if (!udev->slot_id) { 3481 xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 3482 return -EINVAL; 3483 } 3484 3485 virt_dev = xhci->devs[udev->slot_id]; 3486 3487 if (WARN_ON(!virt_dev)) { 3488 /* 3489 * In plug/unplug torture test with an NEC controller, 3490 * a zero-dereference was observed once due to virt_dev = 0. 3491 * Print useful debug rather than crash if it is observed again! 3492 */ 3493 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3494 udev->slot_id); 3495 return -EINVAL; 3496 } 3497 3498 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3499 /* 3500 * If this is the first Set Address since device plug-in or 3501 * virt_device realloaction after a resume with an xHCI power loss, 3502 * then set up the slot context. 3503 */ 3504 if (!slot_ctx->dev_info) 3505 xhci_setup_addressable_virt_dev(xhci, udev); 3506 /* Otherwise, update the control endpoint ring enqueue pointer. 
/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init,
 * so we should only issue and wait on one address command at a time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	if (WARN_ON(!virt_dev)) {
		/*
		 * In plug/unplug torture tests with an NEC controller, a NULL
		 * pointer dereference was observed once because virt_dev was
		 * NULL.  Print useful debug info rather than crash if it is
		 * observed again!
		 */
		xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
				udev->slot_id);
		return -EINVAL;
	}

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() 'recovery interval' required by USB and aborting
	 * the command on a timeout."
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_DEV_ERR:
		dev_warn(&udev->dev, "ERROR: Incompatible device for address "
				"device command.\n");
		ret = -ENODEV;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use the kernel-assigned address for the device; store the
	 * xHC-assigned address locally.
	 */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
		+ 1;
	/* Zero the input context control for later use */
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}
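/*
 * Illustrative sketch, not driver code: the internal address computed above
 * is the xHC-assigned address from the slot context's dev_state field, plus
 * one because the USB core reserves address 1 for root hubs.  The 0xff mask
 * width here is an assumption standing in for DEV_ADDR_MASK:
 */
#if 0
#include <stdint.h>

static int demo_internal_address(uint32_t dev_state)
{
	/* e.g. dev_state 0x00000005 -> internal device address 6 */
	return (int)(dev_state & 0xff) + 1;
}
#endif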
#ifdef CONFIG_USB_SUSPEND

/* BESL to HIRD Encoding array for USB2 LPM */
static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
	3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};

/* Calculate the HIRD/BESL value for the USB 2.0 PORTPMSC register */
static int xhci_calculate_hird_besl(int u2del, bool use_besl)
{
	int hird;

	if (use_besl) {
		for (hird = 0; hird < 16; hird++) {
			if (xhci_besl_encoding[hird] >= u2del)
				break;
		}
	} else {
		if (u2del <= 50)
			hird = 0;
		else
			hird = (u2del - 51) / 75 + 1;

		if (hird > 15)
			hird = 15;
	}

	return hird;
}
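/*
 * Worked examples for xhci_calculate_hird_besl() above, as a sketch rather
 * than driver code.  With u2del = 300 microseconds:
 *   BESL: the first table entry >= 300 is xhci_besl_encoding[3] = 300,
 *         so the function returns 3.
 *   HIRD: (300 - 51) / 75 + 1 = 4, so the function returns 4.
 */
#if 0
#include <assert.h>

static void demo_hird_besl_examples(void)
{
	assert(xhci_calculate_hird_besl(300, true) == 3);
	assert(xhci_calculate_hird_besl(300, false) == 4);
	assert(xhci_calculate_hird_besl(50, false) == 0);	/* <= 50us maps to 0 */
	assert(xhci_calculate_hird_besl(10000, false) == 15);	/* clamped at 15 */
}
#endif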
static int xhci_usb2_software_lpm_test(struct usb_hcd *hcd,
			struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct dev_info *dev_info;
	__le32 __iomem **port_array;
	__le32 __iomem *addr, *pm_addr;
	u32 temp, dev_id;
	unsigned int port_num;
	unsigned long flags;
	int u2del, hird;
	int ret;

	if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support ||
			!udev->lpm_capable)
		return -EINVAL;

	/* So far we only support LPM for non-hub devices connected directly
	 * to the root hub.
	 */
	if (!udev->parent || udev->parent->parent ||
			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
		return -EINVAL;

	spin_lock_irqsave(&xhci->lock, flags);

	/* Look for the device in the lpm_failed_devs list */
	dev_id = le16_to_cpu(udev->descriptor.idVendor) << 16 |
			le16_to_cpu(udev->descriptor.idProduct);
	list_for_each_entry(dev_info, &xhci->lpm_failed_devs, list) {
		if (dev_info->dev_id == dev_id) {
			ret = -EINVAL;
			goto finish;
		}
	}

	port_array = xhci->usb2_ports;
	port_num = udev->portnum - 1;

	if (port_num >= HCS_MAX_PORTS(xhci->hcs_params1)) {
		xhci_dbg(xhci, "invalid port number %d\n", udev->portnum);
		ret = -EINVAL;
		goto finish;
	}

	/*
	 * Test USB 2.0 software LPM.
	 * FIXME: some xHCI 1.0 hosts may implement a new register to set up
	 * hardware-controlled USB 2.0 LPM.  See section 5.4.11 and 4.23.5.1.1.1
	 * in the June 2011 errata release.
	 */
	xhci_dbg(xhci, "test port %d software LPM\n", port_num);
	/*
	 * Set L1 Device Slot and HIRD/BESL.
	 * Check the device's USB 2.0 extension descriptor to determine whether
	 * HIRD or BESL should be used.  See the USB 2.0 LPM errata.
	 */
	pm_addr = port_array[port_num] + 1;
	u2del = HCS_U2_LATENCY(xhci->hcs_params3);
	if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2))
		hird = xhci_calculate_hird_besl(u2del, true);
	else
		hird = xhci_calculate_hird_besl(u2del, false);

	temp = PORT_L1DS(udev->slot_id) | PORT_HIRD(hird);
	xhci_writel(xhci, temp, pm_addr);

	/* Set port link state to U2(L1) */
	addr = port_array[port_num];
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U2);

	/* wait for ACK */
	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Check L1 Status */
	ret = handshake(xhci, pm_addr, PORT_L1S_MASK, PORT_L1S_SUCCESS, 125);
	if (ret != -ETIMEDOUT) {
		/* entered L1 successfully */
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "port %d entered L1 state, port status 0x%x\n",
				port_num, temp);
		ret = 0;
	} else {
		temp = xhci_readl(xhci, pm_addr);
		xhci_dbg(xhci, "port %d software lpm failed, L1 status %d\n",
				port_num, temp & PORT_L1S_MASK);
		ret = -EINVAL;
	}

	/* Resume the port */
	xhci_set_link_state(xhci, port_array, port_num, XDEV_U0);

	spin_unlock_irqrestore(&xhci->lock, flags);
	msleep(10);
	spin_lock_irqsave(&xhci->lock, flags);

	/* Clear PLC */
	xhci_test_and_clear_bit(xhci, port_array, port_num, PORT_PLC);

	/* Check PORTSC to make sure the device is in the right state */
	if (!ret) {
		temp = xhci_readl(xhci, addr);
		xhci_dbg(xhci, "resumed port %d status 0x%x\n", port_num, temp);
		if (!(temp & PORT_CONNECT) || !(temp & PORT_PE) ||
				(temp & PORT_PLS_MASK) != XDEV_U0) {
			xhci_dbg(xhci, "port L1 resume fail\n");
			ret = -EINVAL;
		}
	}

	if (ret) {
		/* Add the device to the lpm_failed_devs list */
		xhci_warn(xhci, "device LPM test failed, may disconnect and "
				"re-enumerate\n");
		dev_info = kzalloc(sizeof(struct dev_info), GFP_ATOMIC);
		if (!dev_info) {
			ret = -ENOMEM;
			goto finish;
		}
		dev_info->dev_id = dev_id;
		INIT_LIST_HEAD(&dev_info->list);
		list_add(&dev_info->list, &xhci->lpm_failed_devs);
	} else {
		xhci_ring_device(xhci, udev->slot_id);
	}

finish:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
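/*
 * Illustrative sketch, not driver code: the lpm_failed_devs list above keys
 * each device by packing its 16-bit vendor and product IDs into one 32-bit
 * value, exactly as dev_id is built from idVendor and idProduct in the
 * function.  The helper name is hypothetical:
 */
#if 0
#include <stdint.h>

static uint32_t demo_pack_dev_id(uint16_t vendor, uint16_t product)
{
	/* e.g. vendor 0x0951, product 0x1642 -> dev_id 0x09511642 */
	return ((uint32_t)vendor << 16) | product;
}
#endif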
"enable" : "disable", port_num); 3793 3794 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 3795 if (le32_to_cpu(udev->bos->ext_cap->bmAttributes) & (1 << 2)) 3796 hird = xhci_calculate_hird_besl(u2del, 1); 3797 else 3798 hird = xhci_calculate_hird_besl(u2del, 0); 3799 3800 if (enable) { 3801 temp &= ~PORT_HIRD_MASK; 3802 temp |= PORT_HIRD(hird) | PORT_RWE; 3803 xhci_writel(xhci, temp, pm_addr); 3804 temp = xhci_readl(xhci, pm_addr); 3805 temp |= PORT_HLE; 3806 xhci_writel(xhci, temp, pm_addr); 3807 } else { 3808 temp &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK); 3809 xhci_writel(xhci, temp, pm_addr); 3810 } 3811 3812 spin_unlock_irqrestore(&xhci->lock, flags); 3813 return 0; 3814 } 3815 3816 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 3817 { 3818 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3819 int ret; 3820 3821 ret = xhci_usb2_software_lpm_test(hcd, udev); 3822 if (!ret) { 3823 xhci_dbg(xhci, "software LPM test succeed\n"); 3824 if (xhci->hw_lpm_support == 1) { 3825 udev->usb2_hw_lpm_capable = 1; 3826 ret = xhci_set_usb2_hardware_lpm(hcd, udev, 1); 3827 if (!ret) 3828 udev->usb2_hw_lpm_enabled = 1; 3829 } 3830 } 3831 3832 return 0; 3833 } 3834 3835 #else 3836 3837 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 3838 struct usb_device *udev, int enable) 3839 { 3840 return 0; 3841 } 3842 3843 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 3844 { 3845 return 0; 3846 } 3847 3848 #endif /* CONFIG_USB_SUSPEND */ 3849 3850 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 3851 * internal data structures for the device. 3852 */ 3853 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 3854 struct usb_tt *tt, gfp_t mem_flags) 3855 { 3856 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3857 struct xhci_virt_device *vdev; 3858 struct xhci_command *config_cmd; 3859 struct xhci_input_control_ctx *ctrl_ctx; 3860 struct xhci_slot_ctx *slot_ctx; 3861 unsigned long flags; 3862 unsigned think_time; 3863 int ret; 3864 3865 /* Ignore root hubs */ 3866 if (!hdev->parent) 3867 return 0; 3868 3869 vdev = xhci->devs[hdev->slot_id]; 3870 if (!vdev) { 3871 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 3872 return -EINVAL; 3873 } 3874 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 3875 if (!config_cmd) { 3876 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 3877 return -ENOMEM; 3878 } 3879 3880 spin_lock_irqsave(&xhci->lock, flags); 3881 if (hdev->speed == USB_SPEED_HIGH && 3882 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 3883 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 3884 xhci_free_command(xhci, config_cmd); 3885 spin_unlock_irqrestore(&xhci->lock, flags); 3886 return -ENOMEM; 3887 } 3888 3889 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 3890 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 3891 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 3892 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 3893 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 3894 if (tt->multi) 3895 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 3896 if (xhci->hci_version > 0x95) { 3897 xhci_dbg(xhci, "xHCI version %x needs hub " 3898 "TT think time and number of ports\n", 3899 (unsigned int) xhci->hci_version); 3900 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 3901 /* Set TT think time - convert from ns to FS bit times. 3902 * 0 = 8 FS bit times, 1 = 16 FS bit times, 3903 * 2 = 24 FS bit times, 3 = 32 FS bit times. 
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
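/*
 * Worked example for the TT think-time conversion above, as a sketch rather
 * than driver code.  One full-speed bit time is roughly 83ns, so 8 bit times
 * is roughly 666ns; usb_tt think_time is in nanoseconds and the context
 * field counts units of 8 FS bit times, minus one:
 *   666ns  -> 666/666 - 1 = 0  (8 FS bit times)
 *   1332ns -> 1332/666 - 1 = 1 (16 FS bit times)
 *   2664ns -> 2664/666 - 1 = 3 (32 FS bit times)
 */
#if 0
static unsigned int demo_tt_think_time_field(unsigned int think_time_ns)
{
	if (think_time_ns == 0)
		return 0;
	return think_time_ns / 666 - 1;
}
#endif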
3981 */ 3982 xhci = hcd_to_xhci(hcd); 3983 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 3984 if (HCC_64BIT_ADDR(temp)) { 3985 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 3986 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 3987 } else { 3988 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 3989 } 3990 return 0; 3991 } 3992 3993 xhci->cap_regs = hcd->regs; 3994 xhci->op_regs = hcd->regs + 3995 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); 3996 xhci->run_regs = hcd->regs + 3997 (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 3998 /* Cache read-only capability registers */ 3999 xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1); 4000 xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2); 4001 xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3); 4002 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase); 4003 xhci->hci_version = HC_VERSION(xhci->hcc_params); 4004 xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4005 xhci_print_registers(xhci); 4006 4007 get_quirks(dev, xhci); 4008 4009 /* Make sure the HC is halted. */ 4010 retval = xhci_halt(xhci); 4011 if (retval) 4012 goto error; 4013 4014 xhci_dbg(xhci, "Resetting HCD\n"); 4015 /* Reset the internal HC memory state and registers. */ 4016 retval = xhci_reset(xhci); 4017 if (retval) 4018 goto error; 4019 xhci_dbg(xhci, "Reset complete\n"); 4020 4021 temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); 4022 if (HCC_64BIT_ADDR(temp)) { 4023 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4024 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)); 4025 } else { 4026 dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32)); 4027 } 4028 4029 xhci_dbg(xhci, "Calling HCD init\n"); 4030 /* Initialize HCD and host controller data structures. */ 4031 retval = xhci_init(hcd); 4032 if (retval) 4033 goto error; 4034 xhci_dbg(xhci, "Called HCD init\n"); 4035 return 0; 4036 error: 4037 kfree(xhci); 4038 return retval; 4039 } 4040 4041 MODULE_DESCRIPTION(DRIVER_DESC); 4042 MODULE_AUTHOR(DRIVER_AUTHOR); 4043 MODULE_LICENSE("GPL"); 4044 4045 static int __init xhci_hcd_init(void) 4046 { 4047 int retval; 4048 4049 retval = xhci_register_pci(); 4050 if (retval < 0) { 4051 printk(KERN_DEBUG "Problem registering PCI driver."); 4052 return retval; 4053 } 4054 /* 4055 * Check the compiler generated sizes of structures that must be laid 4056 * out in specific ways for hardware access. 4057 */ 4058 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4059 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 4060 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 4061 /* xhci_device_control has eight fields, and also 4062 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 4063 */ 4064 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 4065 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 4066 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 4067 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); 4068 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 4069 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 4070 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 4071 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4072 return 0; 4073 } 4074 module_init(xhci_hcd_init); 4075 4076 static void __exit xhci_hcd_cleanup(void) 4077 { 4078 xhci_unregister_pci(); 4079 } 4080 module_exit(xhci_hcd_cleanup); 4081