/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned int quirks;
module_param(quirks, uint, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
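
/*
 * Typical usage, as a sketch (the real callers appear below): poll the
 * status register until the HCHalted bit is set, giving up after
 * XHCI_MAX_HALT_USEC microseconds:
 *
 *	ret = xhci_handshake(xhci, &xhci->op_regs->status,
 *			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 */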

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	if (!ret)
		xhci->xhc_state &= ~XHCI_STATE_HALTED;
	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret, i;

	state = readl(&xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	ret = xhci_handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(xhci, &xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	for (i = 0; i < 2; ++i) {
		xhci->bus_state[i].port_c_suspend = 0;
		xhci->bus_state[i].suspended_ports = 0;
		xhci->bus_state[i].resuming_ports = 0;
	}

	return ret;
}

#ifdef CONFIG_PCI
static int xhci_free_msi(struct xhci_hcd *xhci)
{
	int i;

	if (!xhci->msix_entries)
		return -EINVAL;

	for (i = 0; i < xhci->msix_count; i++)
		if (xhci->msix_entries[i].vector)
			free_irq(xhci->msix_entries[i].vector,
					xhci_to_hcd(xhci));
	return 0;
}

/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_disable_msi(pdev);
	}

	return ret;
}

/*
 * Free IRQs
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int ret;

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq > 0)
		return;

	ret = xhci_free_msi(xhci);
	if (!ret)
		return;
	if (pdev->irq > 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));

	return;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the max number of interrupts the host can handle,
	 *   based on the number of interrupters in xhci HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
	 *   additional vector so an interrupt is always available.
	 *   For example, with 4 online CPUs and HCS_MAX_INTRS of 8 this
	 *   yields min(4 + 1, 8) = 5 vectors.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));

	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix_exact(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
	return;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	int i;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts.  Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret)
		/* hcd->irq is 0, we have MSI */
		return 0;

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
				hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
				pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	u32 temp;
	int i;

	xhci = (struct xhci_hcd *)arg;

	for (i = 0; i < xhci->num_usb3_ports; i++) {
		temp = readl(xhci->usb3_ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}
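
/*
 * port_status_u0 is a bitmask with one bit per USB3 root hub port.  Once
 * every port has been seen in U0 it equals (1 << num_usb3_ports) - 1
 * (e.g. 0x7 for three ports), and the recovery timer above is no longer
 * re-armed.
 */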

/*
 * Quirk to work around an issue with the SN65LVPE502CP USB3.0 re-driver
 * that sometimes causes ports behind it to enter compliance mode.
 * The quirk creates a timer that polls the link state of each host
 * controller port every 2 seconds and recovers a port by issuing a Warm
 * Reset if compliance mode is detected; otherwise the port would become
 * "dead" (no device connections or disconnections would be detected any
 * more).  Because no port status event is generated when entering
 * compliance mode (per the xHCI spec), this quirk is needed on systems
 * that have the failing hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	init_timer(&xhci->comp_mode_recovery_timer);

	xhci->comp_mode_recovery_timer.data = (unsigned long) xhci;
	xhci->comp_mode_recovery_timer.function = compliance_mode_recovery;
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	set_timer_slack(&xhci->comp_mode_recovery_timer,
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
			strstr(dmi_product_name, "Z620") ||
			strstr(dmi_product_name, "Z820") ||
			strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initialize Compliance Mode Recovery data if needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0 roothub
	 * is setup.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;
		command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;
		xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");
	return 0;
}

static void xhci_only_stop_hcd(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/* The shared_hcd is going to be deallocated shortly (the USB core only
	 * calls this function when allocation fails in usb_add_hcd(), or
	 * usb_remove_hcd() is called).  So we need to unset xHCI's pointer.
	 */
	xhci->shared_hcd = NULL;
	spin_unlock_irq(&xhci->lock);
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (!usb_hcd_is_primary_hcd(hcd)) {
		xhci_only_stop_hcd(xhci->shared_hcd);
		return;
	}

	spin_lock_irq(&xhci->lock);
	/* Make sure the xHC is halted for a USB3 roothub
	 * (xhci_stop() could be called as part of failed init).
	 */
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel(temp & ~STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));

	/* Yet another workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
}

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				      xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(long unsigned long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
			cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
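
/*
 * On the free-count arithmetic above: the last TRB of every segment is a
 * link TRB (only TRBS_PER_SEGMENT - 1 TRBs per segment carry work), so an
 * empty ring has num_segs * (TRBS_PER_SEGMENT - 1) usable TRBs; the extra
 * "- 1" keeps the count consistent with how the ring is set up when it is
 * first allocated, effectively holding one TRB slot in reserve.
 */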

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped, assuming port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_SAVE, 0, 10 * 1000)) {
		xhci_warn(xhci, "WARN: xHC save state timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0, status;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->bus_state[0].next_statechange) ||
			time_before(jiffies,
				xhci->bus_state[1].next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if (xhci->quirks & XHCI_RESET_ON_RESUME)
		hibernated = true;

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		if (xhci_handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
				!(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel(temp & ~STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

done:
	if (retval == 0) {
		/* Resume root hubs only when there are pending events. */
		status = readl(&xhci->op_regs->status);
		if (status & STS_EINT) {
			usb_hcd_resume_root_hub(hcd);
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
	}

	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: the ports can
	 * suffer the compliance mode issue again, regardless of whether they
	 * had already reached U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
#endif	/* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
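
/*
 * Worked examples of the index formula above: ep 0x81 (IN, epnum 1) maps to
 * index 1 * 2 + 1 - 1 = 2; ep 0x02 (OUT, epnum 2) maps to 2 * 2 + 0 - 1 = 3;
 * the default control endpoint 0 maps to index 0.
 */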

/* The reverse operation to xhci_get_endpoint_index.  Calculate the USB endpoint
 * address from the xHCI endpoint index.
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}

/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd	*xhci;
	struct xhci_virt_device	*virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
					func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev and "
					"virt_dev does not match\n", func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
					__func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, command->in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_td *buffer;
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv	*urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
				size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	buffer = kzalloc(size * sizeof(struct xhci_td), mem_flags);
	if (!buffer) {
		kfree(urb_priv);
		return -ENOMEM;
	}

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = buffer;
		buffer++;
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0) {
				xhci_urb_free_priv(xhci, urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		if (ret)
			goto free_priv;
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	ret = -ESHUTDOWN;
free_priv:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
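
/*
 * For example, an endpoint set up with num_streams = 4 has stream_rings[1]
 * through stream_rings[3] valid: stream ID 0 is reserved, and any ID of
 * num_streams or above is rejected above.
 */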

/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are a few cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb().
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = readl(&xhci->op_regs->status);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"HW died, freeing TD.");
		urb_priv = urb->hcpriv;
		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
			td = urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
			(xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	urb_priv = urb->hcpriv;
	i = urb_priv->td_cnt;
	if (i < urb_priv->length)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				"Cancel URB %p, dev %s, ep 0x%x, "
				"starting at offset 0x%llx",
				urb, urb->dev->devpath,
				urb->ep->desc.bEndpointAddress,
				(unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i]->start_seg,
					urb_priv->td[i]->first_trb));

	for (; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
				ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
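
/*
 * In other words, the first cancellation on an endpoint queues the Stop
 * Endpoint command and arms the stop command watchdog timer; any further
 * cancellations that arrive before that command completes only add their
 * TDs to cancelled_td_list and are cleaned up when the same command
 * completes.
 */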

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if (((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
			cpu_to_le32(EP_STATE_DISABLED)) ||
			le32_to_cpu(ctrl_ctx->drop_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);

	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
			LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
	}
	new_slot_info = le32_to_cpu(slot_ctx->dev_info);

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
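
/*
 * As a concrete example of the flag handling above: dropping ep 0x81
 * (endpoint index 2) sets bit 3 of drop_flags and clears bit 3 of add_flags;
 * if that endpoint was the highest one configured, the Context Entries field
 * in the slot context is reduced to match the new last valid endpoint.
 */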

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	struct xhci_virt_device *virt_dev;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		return -ENODEV;

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	in_ctx = virt_dev->in_ctx;
	out_ctx = virt_dev->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		return 0;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	/* If this endpoint is already in use, and the upper layers are trying
	 * to add it again without dropping it, reject the addition.
	 */
	if (virt_dev->eps[ep_index].ring &&
			!(le32_to_cpu(ctrl_ctx->drop_flags) &
				xhci_get_endpoint_flag(&ep->desc))) {
		xhci_warn(xhci, "Trying to add endpoint 0x%x "
				"without dropping it.\n",
				(unsigned int) ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (le32_to_cpu(ctrl_ctx->add_flags) &
			xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
1763 */ 1764 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); 1765 1766 slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 1767 /* Update the last valid endpoint context, if we just added one past */ 1768 if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) < 1769 LAST_CTX(last_ctx)) { 1770 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 1771 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx)); 1772 } 1773 new_slot_info = le32_to_cpu(slot_ctx->dev_info); 1774 1775 /* Store the usb_device pointer for later use */ 1776 ep->hcpriv = udev; 1777 1778 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", 1779 (unsigned int) ep->desc.bEndpointAddress, 1780 udev->slot_id, 1781 (unsigned int) new_drop_flags, 1782 (unsigned int) new_add_flags, 1783 (unsigned int) new_slot_info); 1784 return 0; 1785 } 1786 1787 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) 1788 { 1789 struct xhci_input_control_ctx *ctrl_ctx; 1790 struct xhci_ep_ctx *ep_ctx; 1791 struct xhci_slot_ctx *slot_ctx; 1792 int i; 1793 1794 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 1795 if (!ctrl_ctx) { 1796 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1797 __func__); 1798 return; 1799 } 1800 1801 /* When a device's add flag and drop flag are zero, any subsequent 1802 * configure endpoint command will leave that endpoint's state 1803 * untouched. Make sure we don't leave any old state in the input 1804 * endpoint contexts. 1805 */ 1806 ctrl_ctx->drop_flags = 0; 1807 ctrl_ctx->add_flags = 0; 1808 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 1809 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 1810 /* Endpoint 0 is always valid */ 1811 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); 1812 for (i = 1; i < 31; ++i) { 1813 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 1814 ep_ctx->ep_info = 0; 1815 ep_ctx->ep_info2 = 0; 1816 ep_ctx->deq = 0; 1817 ep_ctx->tx_info = 0; 1818 } 1819 } 1820 1821 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, 1822 struct usb_device *udev, u32 *cmd_status) 1823 { 1824 int ret; 1825 1826 switch (*cmd_status) { 1827 case COMP_CMD_ABORT: 1828 case COMP_CMD_STOP: 1829 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); 1830 ret = -ETIME; 1831 break; 1832 case COMP_ENOMEM: 1833 dev_warn(&udev->dev, "Not enough host controller resources " 1834 "for new device state.\n"); 1835 ret = -ENOMEM; 1836 /* FIXME: can we allocate more resources for the HC? */ 1837 break; 1838 case COMP_BW_ERR: 1839 case COMP_2ND_BW_ERR: 1840 dev_warn(&udev->dev, "Not enough bandwidth " 1841 "for new device state.\n"); 1842 ret = -ENOSPC; 1843 /* FIXME: can we go back to the old state? 
*/ 1844 break; 1845 case COMP_TRB_ERR: 1846 /* the HCD set up something wrong */ 1847 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 1848 "add flag = 1, " 1849 "and endpoint is not disabled.\n"); 1850 ret = -EINVAL; 1851 break; 1852 case COMP_DEV_ERR: 1853 dev_warn(&udev->dev, "ERROR: Incompatible device for endpoint " 1854 "configure command.\n"); 1855 ret = -ENODEV; 1856 break; 1857 case COMP_SUCCESS: 1858 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1859 "Successful Endpoint Configure command"); 1860 ret = 0; 1861 break; 1862 default: 1863 xhci_err(xhci, "ERROR: unexpected command completion " 1864 "code 0x%x.\n", *cmd_status); 1865 ret = -EINVAL; 1866 break; 1867 } 1868 return ret; 1869 } 1870 1871 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 1872 struct usb_device *udev, u32 *cmd_status) 1873 { 1874 int ret; 1875 struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; 1876 1877 switch (*cmd_status) { 1878 case COMP_CMD_ABORT: 1879 case COMP_CMD_STOP: 1880 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 1881 ret = -ETIME; 1882 break; 1883 case COMP_EINVAL: 1884 dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate " 1885 "context command.\n"); 1886 ret = -EINVAL; 1887 break; 1888 case COMP_EBADSLT: 1889 dev_warn(&udev->dev, "WARN: slot not enabled for " 1890 "evaluate context command.\n"); 1891 ret = -EINVAL; 1892 break; 1893 case COMP_CTX_STATE: 1894 dev_warn(&udev->dev, "WARN: invalid context state for " 1895 "evaluate context command.\n"); 1896 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1); 1897 ret = -EINVAL; 1898 break; 1899 case COMP_DEV_ERR: 1900 dev_warn(&udev->dev, "ERROR: Incompatible device for evaluate " 1901 "context command.\n"); 1902 ret = -ENODEV; 1903 break; 1904 case COMP_MEL_ERR: 1905 /* Max Exit Latency too large error */ 1906 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 1907 ret = -EINVAL; 1908 break; 1909 case COMP_SUCCESS: 1910 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 1911 "Successful evaluate context command"); 1912 ret = 0; 1913 break; 1914 default: 1915 xhci_err(xhci, "ERROR: unexpected command completion " 1916 "code 0x%x.\n", *cmd_status); 1917 ret = -EINVAL; 1918 break; 1919 } 1920 return ret; 1921 } 1922 1923 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 1924 struct xhci_input_control_ctx *ctrl_ctx) 1925 { 1926 u32 valid_add_flags; 1927 u32 valid_drop_flags; 1928 1929 /* Ignore the slot flag (bit 0), and the default control endpoint flag 1930 * (bit 1). The default control endpoint is added during the Address 1931 * Device command and is never removed until the slot is disabled. 1932 */ 1933 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 1934 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 1935 1936 /* Use hweight32 to count the number of ones in the add flags, or 1937 * number of endpoints added. Don't count endpoints that are changed 1938 * (both added and dropped).
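 *
 * Worked example (added for clarity, not part of the original comment):
 * with add_flags = 0xd (slot, ep ctx 2, ep ctx 3) and drop_flags = 0x4
 * (ep ctx 2), valid_add_flags = 0x3 and valid_drop_flags = 0x1, so
 * hweight32(0x3) - hweight32(0x3 & 0x1) = 2 - 1 = 1: context 3 is truly
 * new, while context 2 is only changed (dropped and re-added).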
*/ 1940 return hweight32(valid_add_flags) - 1941 hweight32(valid_add_flags & valid_drop_flags); 1942 } 1943 1944 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, 1945 struct xhci_input_control_ctx *ctrl_ctx) 1946 { 1947 u32 valid_add_flags; 1948 u32 valid_drop_flags; 1949 1950 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 1951 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 1952 1953 return hweight32(valid_drop_flags) - 1954 hweight32(valid_add_flags & valid_drop_flags); 1955 } 1956 1957 /* 1958 * We need to reserve the new number of endpoints before the configure endpoint 1959 * command completes. We can't subtract the dropped endpoints from the number 1960 * of active endpoints until the command completes because we can oversubscribe 1961 * the host in this case: 1962 * 1963 * - the first configure endpoint command drops more endpoints than it adds 1964 * - a second configure endpoint command that adds more endpoints is queued 1965 * - the first configure endpoint command fails, so the config is unchanged 1966 * - the second command may succeed, even though there aren't enough resources 1967 * 1968 * Must be called with xhci->lock held. 1969 */ 1970 static int xhci_reserve_host_resources(struct xhci_hcd *xhci, 1971 struct xhci_input_control_ctx *ctrl_ctx) 1972 { 1973 u32 added_eps; 1974 1975 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 1976 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) { 1977 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1978 "Not enough ep ctxs: " 1979 "%u active, need to add %u, limit is %u.", 1980 xhci->num_active_eps, added_eps, 1981 xhci->limit_active_eps); 1982 return -ENOMEM; 1983 } 1984 xhci->num_active_eps += added_eps; 1985 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 1986 "Adding %u ep ctxs, %u now active.", added_eps, 1987 xhci->num_active_eps); 1988 return 0; 1989 } 1990 1991 /* 1992 * The xHC failed the configure endpoint command for some other reason, so we 1993 * need to revert the resources that the failed configuration would have used. 1994 * 1995 * Must be called with xhci->lock held. 1996 */ 1997 static void xhci_free_host_resources(struct xhci_hcd *xhci, 1998 struct xhci_input_control_ctx *ctrl_ctx) 1999 { 2000 u32 num_failed_eps; 2001 2002 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx); 2003 xhci->num_active_eps -= num_failed_eps; 2004 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2005 "Removing %u failed ep ctxs, %u now active.", 2006 num_failed_eps, 2007 xhci->num_active_eps); 2008 } 2009 2010 /* 2011 * Now that the command has completed, clean up the active endpoint count by 2012 * subtracting out the endpoints that were dropped (but not changed). 2013 * 2014 * Must be called with xhci->lock held.
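 *
 * Added sketch of the intended lifecycle, based only on how these helpers
 * are used in xhci_configure_endpoint() further down:
 *
 *	spin_lock_irqsave(&xhci->lock, flags);
 *	xhci_reserve_host_resources(xhci, ctrl_ctx);	before queuing
 *	... queue the command, unlock, wait for completion, re-lock ...
 *	if (the command failed)
 *		xhci_free_host_resources(xhci, ctrl_ctx);
 *	else
 *		xhci_finish_resource_reservation(xhci, ctrl_ctx);
 *	spin_unlock_irqrestore(&xhci->lock, flags);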
2015 */ 2016 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci, 2017 struct xhci_input_control_ctx *ctrl_ctx) 2018 { 2019 u32 num_dropped_eps; 2020 2021 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx); 2022 xhci->num_active_eps -= num_dropped_eps; 2023 if (num_dropped_eps) 2024 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2025 "Removing %u dropped ep ctxs, %u now active.", 2026 num_dropped_eps, 2027 xhci->num_active_eps); 2028 } 2029 2030 static unsigned int xhci_get_block_size(struct usb_device *udev) 2031 { 2032 switch (udev->speed) { 2033 case USB_SPEED_LOW: 2034 case USB_SPEED_FULL: 2035 return FS_BLOCK; 2036 case USB_SPEED_HIGH: 2037 return HS_BLOCK; 2038 case USB_SPEED_SUPER: 2039 return SS_BLOCK; 2040 case USB_SPEED_UNKNOWN: 2041 case USB_SPEED_WIRELESS: 2042 default: 2043 /* Should never happen */ 2044 return 1; 2045 } 2046 } 2047 2048 static unsigned int 2049 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw) 2050 { 2051 if (interval_bw->overhead[LS_OVERHEAD_TYPE]) 2052 return LS_OVERHEAD; 2053 if (interval_bw->overhead[FS_OVERHEAD_TYPE]) 2054 return FS_OVERHEAD; 2055 return HS_OVERHEAD; 2056 } 2057 2058 /* If we are changing a LS/FS device under a HS hub, 2059 * make sure (if we are activating a new TT) that the HS bus has enough 2060 * bandwidth for this new TT. 2061 */ 2062 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci, 2063 struct xhci_virt_device *virt_dev, 2064 int old_active_eps) 2065 { 2066 struct xhci_interval_bw_table *bw_table; 2067 struct xhci_tt_bw_info *tt_info; 2068 2069 /* Find the bandwidth table for the root port this TT is attached to. */ 2070 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table; 2071 tt_info = virt_dev->tt_info; 2072 /* If this TT already had active endpoints, the bandwidth for this TT 2073 * has already been added. Removing all periodic endpoints (and thus 2074 * making the TT enactive) will only decrease the bandwidth used. 2075 */ 2076 if (old_active_eps) 2077 return 0; 2078 if (old_active_eps == 0 && tt_info->active_eps != 0) { 2079 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT) 2080 return -ENOMEM; 2081 return 0; 2082 } 2083 /* Not sure why we would have no new active endpoints... 2084 * 2085 * Maybe because of an Evaluate Context change for a hub update or a 2086 * control endpoint 0 max packet size change? 2087 * FIXME: skip the bandwidth calculation in that case. 2088 */ 2089 return 0; 2090 } 2091 2092 static int xhci_check_ss_bw(struct xhci_hcd *xhci, 2093 struct xhci_virt_device *virt_dev) 2094 { 2095 unsigned int bw_reserved; 2096 2097 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100); 2098 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved)) 2099 return -ENOMEM; 2100 2101 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100); 2102 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved)) 2103 return -ENOMEM; 2104 2105 return 0; 2106 } 2107 2108 /* 2109 * This algorithm is a very conservative estimate of the worst-case scheduling 2110 * scenario for any one interval. The hardware dynamically schedules the 2111 * packets, so we can't tell which microframe could be the limiting factor in 2112 * the bandwidth scheduling. This only takes into account periodic endpoints. 2113 * 2114 * Obviously, we can't solve an NP complete problem to find the minimum worst 2115 * case scenario. 
Instead, we come up with an estimate that is no less than 2116 * the worst case bandwidth used for any one microframe, but may be an 2117 * over-estimate. 2118 * 2119 * We walk the requirements for each endpoint by interval, starting with the 2120 * smallest interval, and place packets in the schedule where there is only one 2121 * possible way to schedule packets for that interval. In order to simplify 2122 * this algorithm, we record the largest max packet size for each interval, and 2123 * assume all packets will be that size. 2124 * 2125 * For interval 0, we obviously must schedule all packets in every microframe. 2126 * The bandwidth for interval 0 is just the amount of data to be transmitted 2127 * (the sum of all max ESIT payload sizes, plus any overhead per packet times 2128 * the number of packets). 2129 * 2130 * For interval 1, we have two possible microframes to schedule those packets 2131 * in. For this algorithm, if we can schedule the same number of packets for 2132 * each possible scheduling opportunity (each microframe), we will do so. The 2133 * remaining number of packets will be saved to be transmitted in the gaps in 2134 * the next interval's scheduling sequence. 2135 * 2136 * As we move those remaining packets to be scheduled with interval 2 packets, 2137 * we have to double the number of remaining packets to transmit. This is 2138 * because the intervals are actually powers of 2, and we would be transmitting 2139 * the previous interval's packets twice in this interval. We also have to be 2140 * sure that when we look at the largest max packet size for this interval, we 2141 * also look at the largest max packet size for the remaining packets and take 2142 * the greater of the two. 2143 * 2144 * The algorithm continues to evenly distribute packets in each scheduling 2145 * opportunity, and push the remaining packets out, until we get to the last 2146 * interval. Then those packets and their associated overhead are just added 2147 * to the bandwidth used. 2148 */ 2149 static int xhci_check_bw_table(struct xhci_hcd *xhci, 2150 struct xhci_virt_device *virt_dev, 2151 int old_active_eps) 2152 { 2153 unsigned int bw_reserved; 2154 unsigned int max_bandwidth; 2155 unsigned int bw_used; 2156 unsigned int block_size; 2157 struct xhci_interval_bw_table *bw_table; 2158 unsigned int packet_size = 0; 2159 unsigned int overhead = 0; 2160 unsigned int packets_transmitted = 0; 2161 unsigned int packets_remaining = 0; 2162 unsigned int i; 2163 2164 if (virt_dev->udev->speed == USB_SPEED_SUPER) 2165 return xhci_check_ss_bw(xhci, virt_dev); 2166 2167 if (virt_dev->udev->speed == USB_SPEED_HIGH) { 2168 max_bandwidth = HS_BW_LIMIT; 2169 /* Convert percent of bus BW reserved to blocks reserved */ 2170 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100); 2171 } else { 2172 max_bandwidth = FS_BW_LIMIT; 2173 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100); 2174 } 2175 2176 bw_table = virt_dev->bw_table; 2177 /* We need to translate the max packet size and max ESIT payloads into 2178 * the units the hardware uses. 2179 */ 2180 block_size = xhci_get_block_size(virt_dev->udev); 2181 2182 /* If we are manipulating a LS/FS device under a HS hub, double check 2183 * that the HS bus has enough bandwidth if we are activating a new TT.
2184 */ 2185 if (virt_dev->tt_info) { 2186 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2187 "Recalculating BW for rootport %u", 2188 virt_dev->real_port); 2189 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2190 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2191 "newly activated TT.\n"); 2192 return -ENOMEM; 2193 } 2194 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2195 "Recalculating BW for TT slot %u port %u", 2196 virt_dev->tt_info->slot_id, 2197 virt_dev->tt_info->ttport); 2198 } else { 2199 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2200 "Recalculating BW for rootport %u", 2201 virt_dev->real_port); 2202 } 2203 2204 /* Add in how much bandwidth will be used for interval zero, or the 2205 * rounded max ESIT payload + number of packets * largest overhead. 2206 */ 2207 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2208 bw_table->interval_bw[0].num_packets * 2209 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2210 2211 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2212 unsigned int bw_added; 2213 unsigned int largest_mps; 2214 unsigned int interval_overhead; 2215 2216 /* 2217 * How many packets could we transmit in this interval? 2218 * If packets didn't fit in the previous interval, we will need 2219 * to transmit that many packets twice within this interval. 2220 */ 2221 packets_remaining = 2 * packets_remaining + 2222 bw_table->interval_bw[i].num_packets; 2223 2224 /* Find the largest max packet size of this or the previous 2225 * interval. 2226 */ 2227 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2228 largest_mps = 0; 2229 else { 2230 struct xhci_virt_ep *virt_ep; 2231 struct list_head *ep_entry; 2232 2233 ep_entry = bw_table->interval_bw[i].endpoints.next; 2234 virt_ep = list_entry(ep_entry, 2235 struct xhci_virt_ep, bw_endpoint_list); 2236 /* Convert to blocks, rounding up */ 2237 largest_mps = DIV_ROUND_UP( 2238 virt_ep->bw_info.max_packet_size, 2239 block_size); 2240 } 2241 if (largest_mps > packet_size) 2242 packet_size = largest_mps; 2243 2244 /* Use the larger overhead of this or the previous interval. */ 2245 interval_overhead = xhci_get_largest_overhead( 2246 &bw_table->interval_bw[i]); 2247 if (interval_overhead > overhead) 2248 overhead = interval_overhead; 2249 2250 /* How many packets can we evenly distribute across 2251 * (1 << (i + 1)) possible scheduling opportunities? 2252 */ 2253 packets_transmitted = packets_remaining >> (i + 1); 2254 2255 /* Add in the bandwidth used for those scheduled packets */ 2256 bw_added = packets_transmitted * (overhead + packet_size); 2257 2258 /* How many packets do we have remaining to transmit? */ 2259 packets_remaining = packets_remaining % (1 << (i + 1)); 2260 2261 /* What largest max packet size should those packets have? */ 2262 /* If we've transmitted all packets, don't carry over the 2263 * largest packet size. 2264 */ 2265 if (packets_remaining == 0) { 2266 packet_size = 0; 2267 overhead = 0; 2268 } else if (packets_transmitted > 0) { 2269 /* Otherwise if we do have remaining packets, and we've 2270 * scheduled some packets in this interval, take the 2271 * largest max packet size from endpoints with this 2272 * interval. 2273 */ 2274 packet_size = largest_mps; 2275 overhead = interval_overhead; 2276 } 2277 /* Otherwise carry over packet_size and overhead from the last 2278 * time we had a remainder. 2279 */ 2280 bw_used += bw_added; 2281 if (bw_used > max_bandwidth) { 2282 xhci_warn(xhci, "Not enough bandwidth. 
" 2283 "Proposed: %u, Max: %u\n", 2284 bw_used, max_bandwidth); 2285 return -ENOMEM; 2286 } 2287 } 2288 /* 2289 * Ok, we know we have some packets left over after even-handedly 2290 * scheduling interval 15. We don't know which microframes they will 2291 * fit into, so we over-schedule and say they will be scheduled every 2292 * microframe. 2293 */ 2294 if (packets_remaining > 0) 2295 bw_used += overhead + packet_size; 2296 2297 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2298 unsigned int port_index = virt_dev->real_port - 1; 2299 2300 /* OK, we're manipulating a HS device attached to a 2301 * root port bandwidth domain. Include the number of active TTs 2302 * in the bandwidth used. 2303 */ 2304 bw_used += TT_HS_OVERHEAD * 2305 xhci->rh_bw[port_index].num_active_tts; 2306 } 2307 2308 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2309 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2310 "Available: %u " "percent", 2311 bw_used, max_bandwidth, bw_reserved, 2312 (max_bandwidth - bw_used - bw_reserved) * 100 / 2313 max_bandwidth); 2314 2315 bw_used += bw_reserved; 2316 if (bw_used > max_bandwidth) { 2317 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2318 bw_used, max_bandwidth); 2319 return -ENOMEM; 2320 } 2321 2322 bw_table->bw_used = bw_used; 2323 return 0; 2324 } 2325 2326 static bool xhci_is_async_ep(unsigned int ep_type) 2327 { 2328 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2329 ep_type != ISOC_IN_EP && 2330 ep_type != INT_IN_EP); 2331 } 2332 2333 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2334 { 2335 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2336 } 2337 2338 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2339 { 2340 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2341 2342 if (ep_bw->ep_interval == 0) 2343 return SS_OVERHEAD_BURST + 2344 (ep_bw->mult * ep_bw->num_packets * 2345 (SS_OVERHEAD + mps)); 2346 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2347 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2348 1 << ep_bw->ep_interval); 2349 2350 } 2351 2352 void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2353 struct xhci_bw_info *ep_bw, 2354 struct xhci_interval_bw_table *bw_table, 2355 struct usb_device *udev, 2356 struct xhci_virt_ep *virt_ep, 2357 struct xhci_tt_bw_info *tt_info) 2358 { 2359 struct xhci_interval_bw *interval_bw; 2360 int normalized_interval; 2361 2362 if (xhci_is_async_ep(ep_bw->type)) 2363 return; 2364 2365 if (udev->speed == USB_SPEED_SUPER) { 2366 if (xhci_is_sync_in_ep(ep_bw->type)) 2367 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2368 xhci_get_ss_bw_consumed(ep_bw); 2369 else 2370 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2371 xhci_get_ss_bw_consumed(ep_bw); 2372 return; 2373 } 2374 2375 /* SuperSpeed endpoints never get added to intervals in the table, so 2376 * this check is only valid for HS/FS/LS devices. 2377 */ 2378 if (list_empty(&virt_ep->bw_endpoint_list)) 2379 return; 2380 /* For LS/FS devices, we need to translate the interval expressed in 2381 * microframes to frames. 
2382 */ 2383 if (udev->speed == USB_SPEED_HIGH) 2384 normalized_interval = ep_bw->ep_interval; 2385 else 2386 normalized_interval = ep_bw->ep_interval - 3; 2387 2388 if (normalized_interval == 0) 2389 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2390 interval_bw = &bw_table->interval_bw[normalized_interval]; 2391 interval_bw->num_packets -= ep_bw->num_packets; 2392 switch (udev->speed) { 2393 case USB_SPEED_LOW: 2394 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2395 break; 2396 case USB_SPEED_FULL: 2397 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2398 break; 2399 case USB_SPEED_HIGH: 2400 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2401 break; 2402 case USB_SPEED_SUPER: 2403 case USB_SPEED_UNKNOWN: 2404 case USB_SPEED_WIRELESS: 2405 /* Should never happen because only LS/FS/HS endpoints will get 2406 * added to the endpoint list. 2407 */ 2408 return; 2409 } 2410 if (tt_info) 2411 tt_info->active_eps -= 1; 2412 list_del_init(&virt_ep->bw_endpoint_list); 2413 } 2414 2415 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2416 struct xhci_bw_info *ep_bw, 2417 struct xhci_interval_bw_table *bw_table, 2418 struct usb_device *udev, 2419 struct xhci_virt_ep *virt_ep, 2420 struct xhci_tt_bw_info *tt_info) 2421 { 2422 struct xhci_interval_bw *interval_bw; 2423 struct xhci_virt_ep *smaller_ep; 2424 int normalized_interval; 2425 2426 if (xhci_is_async_ep(ep_bw->type)) 2427 return; 2428 2429 if (udev->speed == USB_SPEED_SUPER) { 2430 if (xhci_is_sync_in_ep(ep_bw->type)) 2431 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2432 xhci_get_ss_bw_consumed(ep_bw); 2433 else 2434 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2435 xhci_get_ss_bw_consumed(ep_bw); 2436 return; 2437 } 2438 2439 /* For LS/FS devices, we need to translate the interval expressed in 2440 * microframes to frames. 2441 */ 2442 if (udev->speed == USB_SPEED_HIGH) 2443 normalized_interval = ep_bw->ep_interval; 2444 else 2445 normalized_interval = ep_bw->ep_interval - 3; 2446 2447 if (normalized_interval == 0) 2448 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2449 interval_bw = &bw_table->interval_bw[normalized_interval]; 2450 interval_bw->num_packets += ep_bw->num_packets; 2451 switch (udev->speed) { 2452 case USB_SPEED_LOW: 2453 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2454 break; 2455 case USB_SPEED_FULL: 2456 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2457 break; 2458 case USB_SPEED_HIGH: 2459 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2460 break; 2461 case USB_SPEED_SUPER: 2462 case USB_SPEED_UNKNOWN: 2463 case USB_SPEED_WIRELESS: 2464 /* Should never happen because only LS/FS/HS endpoints will get 2465 * added to the endpoint list. 2466 */ 2467 return; 2468 } 2469 2470 if (tt_info) 2471 tt_info->active_eps += 1; 2472 /* Insert the endpoint into the list, largest max packet size first. */ 2473 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2474 bw_endpoint_list) { 2475 if (ep_bw->max_packet_size >= 2476 smaller_ep->bw_info.max_packet_size) { 2477 /* Add the new ep before the smaller endpoint */ 2478 list_add_tail(&virt_ep->bw_endpoint_list, 2479 &smaller_ep->bw_endpoint_list); 2480 return; 2481 } 2482 } 2483 /* Add the new endpoint at the end of the list. 
*/ 2484 list_add_tail(&virt_ep->bw_endpoint_list, 2485 &interval_bw->endpoints); 2486 } 2487 2488 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2489 struct xhci_virt_device *virt_dev, 2490 int old_active_eps) 2491 { 2492 struct xhci_root_port_bw_info *rh_bw_info; 2493 if (!virt_dev->tt_info) 2494 return; 2495 2496 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1]; 2497 if (old_active_eps == 0 && 2498 virt_dev->tt_info->active_eps != 0) { 2499 rh_bw_info->num_active_tts += 1; 2500 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2501 } else if (old_active_eps != 0 && 2502 virt_dev->tt_info->active_eps == 0) { 2503 rh_bw_info->num_active_tts -= 1; 2504 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2505 } 2506 } 2507 2508 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2509 struct xhci_virt_device *virt_dev, 2510 struct xhci_container_ctx *in_ctx) 2511 { 2512 struct xhci_bw_info ep_bw_info[31]; 2513 int i; 2514 struct xhci_input_control_ctx *ctrl_ctx; 2515 int old_active_eps = 0; 2516 2517 if (virt_dev->tt_info) 2518 old_active_eps = virt_dev->tt_info->active_eps; 2519 2520 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2521 if (!ctrl_ctx) { 2522 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2523 __func__); 2524 return -ENOMEM; 2525 } 2526 2527 for (i = 0; i < 31; i++) { 2528 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2529 continue; 2530 2531 /* Make a copy of the BW info in case we need to revert this */ 2532 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2533 sizeof(ep_bw_info[i])); 2534 /* Drop the endpoint from the interval table if the endpoint is 2535 * being dropped or changed. 2536 */ 2537 if (EP_IS_DROPPED(ctrl_ctx, i)) 2538 xhci_drop_ep_from_interval_table(xhci, 2539 &virt_dev->eps[i].bw_info, 2540 virt_dev->bw_table, 2541 virt_dev->udev, 2542 &virt_dev->eps[i], 2543 virt_dev->tt_info); 2544 } 2545 /* Overwrite the information stored in the endpoints' bw_info */ 2546 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2547 for (i = 0; i < 31; i++) { 2548 /* Add any changed or added endpoints to the interval table */ 2549 if (EP_IS_ADDED(ctrl_ctx, i)) 2550 xhci_add_ep_to_interval_table(xhci, 2551 &virt_dev->eps[i].bw_info, 2552 virt_dev->bw_table, 2553 virt_dev->udev, 2554 &virt_dev->eps[i], 2555 virt_dev->tt_info); 2556 } 2557 2558 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2559 /* Ok, this fits in the bandwidth we have. 2560 * Update the number of active TTs. 2561 */ 2562 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2563 return 0; 2564 } 2565 2566 /* We don't have enough bandwidth for this, revert the stored info. */ 2567 for (i = 0; i < 31; i++) { 2568 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2569 continue; 2570 2571 /* Drop the new copies of any added or changed endpoints from 2572 * the interval table. 
2573 */ 2574 if (EP_IS_ADDED(ctrl_ctx, i)) { 2575 xhci_drop_ep_from_interval_table(xhci, 2576 &virt_dev->eps[i].bw_info, 2577 virt_dev->bw_table, 2578 virt_dev->udev, 2579 &virt_dev->eps[i], 2580 virt_dev->tt_info); 2581 } 2582 /* Revert the endpoint back to its old information */ 2583 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2584 sizeof(ep_bw_info[i])); 2585 /* Add any changed or dropped endpoints back into the table */ 2586 if (EP_IS_DROPPED(ctrl_ctx, i)) 2587 xhci_add_ep_to_interval_table(xhci, 2588 &virt_dev->eps[i].bw_info, 2589 virt_dev->bw_table, 2590 virt_dev->udev, 2591 &virt_dev->eps[i], 2592 virt_dev->tt_info); 2593 } 2594 return -ENOMEM; 2595 } 2596 2597 2598 /* Issue a configure endpoint command or evaluate context command 2599 * and wait for it to finish. 2600 */ 2601 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2602 struct usb_device *udev, 2603 struct xhci_command *command, 2604 bool ctx_change, bool must_succeed) 2605 { 2606 int ret; 2607 unsigned long flags; 2608 struct xhci_input_control_ctx *ctrl_ctx; 2609 struct xhci_virt_device *virt_dev; 2610 2611 if (!command) 2612 return -EINVAL; 2613 2614 spin_lock_irqsave(&xhci->lock, flags); 2615 virt_dev = xhci->devs[udev->slot_id]; 2616 2617 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); 2618 if (!ctrl_ctx) { 2619 spin_unlock_irqrestore(&xhci->lock, flags); 2620 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2621 __func__); 2622 return -ENOMEM; 2623 } 2624 2625 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2626 xhci_reserve_host_resources(xhci, ctrl_ctx)) { 2627 spin_unlock_irqrestore(&xhci->lock, flags); 2628 xhci_warn(xhci, "Not enough host resources, " 2629 "active endpoint contexts = %u\n", 2630 xhci->num_active_eps); 2631 return -ENOMEM; 2632 } 2633 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && 2634 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { 2635 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2636 xhci_free_host_resources(xhci, ctrl_ctx); 2637 spin_unlock_irqrestore(&xhci->lock, flags); 2638 xhci_warn(xhci, "Not enough bandwidth\n"); 2639 return -ENOMEM; 2640 } 2641 2642 if (!ctx_change) 2643 ret = xhci_queue_configure_endpoint(xhci, command, 2644 command->in_ctx->dma, 2645 udev->slot_id, must_succeed); 2646 else 2647 ret = xhci_queue_evaluate_context(xhci, command, 2648 command->in_ctx->dma, 2649 udev->slot_id, must_succeed); 2650 if (ret < 0) { 2651 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 2652 xhci_free_host_resources(xhci, ctrl_ctx); 2653 spin_unlock_irqrestore(&xhci->lock, flags); 2654 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2655 "FIXME allocate a new ring segment"); 2656 return -ENOMEM; 2657 } 2658 xhci_ring_cmd_db(xhci); 2659 spin_unlock_irqrestore(&xhci->lock, flags); 2660 2661 /* Wait for the configure endpoint command to complete */ 2662 wait_for_completion(command->completion); 2663 2664 if (!ctx_change) 2665 ret = xhci_configure_endpoint_result(xhci, udev, 2666 &command->status); 2667 else 2668 ret = xhci_evaluate_context_result(xhci, udev, 2669 &command->status); 2670 2671 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 2672 spin_lock_irqsave(&xhci->lock, flags); 2673 /* If the command failed, remove the reserved resources. 2674 * Otherwise, clean up the estimate to include dropped eps. 
2675 */ 2676 if (ret) 2677 xhci_free_host_resources(xhci, ctrl_ctx); 2678 else 2679 xhci_finish_resource_reservation(xhci, ctrl_ctx); 2680 spin_unlock_irqrestore(&xhci->lock, flags); 2681 } 2682 return ret; 2683 } 2684 2685 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, 2686 struct xhci_virt_device *vdev, int i) 2687 { 2688 struct xhci_virt_ep *ep = &vdev->eps[i]; 2689 2690 if (ep->ep_state & EP_HAS_STREAMS) { 2691 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", 2692 xhci_get_endpoint_address(i)); 2693 xhci_free_stream_info(xhci, ep->stream_info); 2694 ep->stream_info = NULL; 2695 ep->ep_state &= ~EP_HAS_STREAMS; 2696 } 2697 } 2698 2699 /* Called after one or more calls to xhci_add_endpoint() or 2700 * xhci_drop_endpoint(). If this call fails, the USB core is expected 2701 * to call xhci_reset_bandwidth(). 2702 * 2703 * Since we are in the middle of changing either configuration or 2704 * installing a new alt setting, the USB core won't allow URBs to be 2705 * enqueued for any endpoint on the old config or interface. Nothing 2706 * else should be touching the xhci->devs[slot_id] structure, so we 2707 * don't need to take the xhci->lock for manipulating that. 2708 */ 2709 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2710 { 2711 int i; 2712 int ret = 0; 2713 struct xhci_hcd *xhci; 2714 struct xhci_virt_device *virt_dev; 2715 struct xhci_input_control_ctx *ctrl_ctx; 2716 struct xhci_slot_ctx *slot_ctx; 2717 struct xhci_command *command; 2718 2719 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2720 if (ret <= 0) 2721 return ret; 2722 xhci = hcd_to_xhci(hcd); 2723 if (xhci->xhc_state & XHCI_STATE_DYING) 2724 return -ENODEV; 2725 2726 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2727 virt_dev = xhci->devs[udev->slot_id]; 2728 2729 command = xhci_alloc_command(xhci, false, true, GFP_KERNEL); 2730 if (!command) 2731 return -ENOMEM; 2732 2733 command->in_ctx = virt_dev->in_ctx; 2734 2735 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 2736 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); 2737 if (!ctrl_ctx) { 2738 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2739 __func__); 2740 ret = -ENOMEM; 2741 goto command_cleanup; 2742 } 2743 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2744 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 2745 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 2746 2747 /* Don't issue the command if there's no endpoints to update. */ 2748 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 2749 ctrl_ctx->drop_flags == 0) { 2750 ret = 0; 2751 goto command_cleanup; 2752 } 2753 xhci_dbg(xhci, "New Input Control Context:\n"); 2754 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 2755 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2756 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2757 2758 ret = xhci_configure_endpoint(xhci, udev, command, 2759 false, false); 2760 if (ret) 2761 /* Callee should call reset_bandwidth() */ 2762 goto command_cleanup; 2763 2764 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 2765 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2766 LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info))); 2767 2768 /* Free any rings that were dropped, but not changed. 
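 *
 * Note added for clarity, derived from the loop below: endpoint index i
 * corresponds to add/drop flag bit (i + 1), so for example ep_index 2 is
 * tested against bit 3 of ctrl_ctx->drop_flags.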
*/ 2769 for (i = 1; i < 31; ++i) { 2770 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && 2771 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { 2772 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2773 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2774 } 2775 } 2776 xhci_zero_in_ctx(xhci, virt_dev); 2777 /* 2778 * Install any rings for completely new endpoints or changed endpoints, 2779 * and free or cache any old rings from changed endpoints. 2780 */ 2781 for (i = 1; i < 31; ++i) { 2782 if (!virt_dev->eps[i].new_ring) 2783 continue; 2784 /* Only cache or free the old ring if it exists. 2785 * It may not if this is the first add of an endpoint. 2786 */ 2787 if (virt_dev->eps[i].ring) { 2788 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 2789 } 2790 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); 2791 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 2792 virt_dev->eps[i].new_ring = NULL; 2793 } 2794 command_cleanup: 2795 kfree(command->completion); 2796 kfree(command); 2797 2798 return ret; 2799 } 2800 2801 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 2802 { 2803 struct xhci_hcd *xhci; 2804 struct xhci_virt_device *virt_dev; 2805 int i, ret; 2806 2807 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 2808 if (ret <= 0) 2809 return; 2810 xhci = hcd_to_xhci(hcd); 2811 2812 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 2813 virt_dev = xhci->devs[udev->slot_id]; 2814 /* Free any rings allocated for added endpoints */ 2815 for (i = 0; i < 31; ++i) { 2816 if (virt_dev->eps[i].new_ring) { 2817 xhci_ring_free(xhci, virt_dev->eps[i].new_ring); 2818 virt_dev->eps[i].new_ring = NULL; 2819 } 2820 } 2821 xhci_zero_in_ctx(xhci, virt_dev); 2822 } 2823 2824 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, 2825 struct xhci_container_ctx *in_ctx, 2826 struct xhci_container_ctx *out_ctx, 2827 struct xhci_input_control_ctx *ctrl_ctx, 2828 u32 add_flags, u32 drop_flags) 2829 { 2830 ctrl_ctx->add_flags = cpu_to_le32(add_flags); 2831 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); 2832 xhci_slot_copy(xhci, in_ctx, out_ctx); 2833 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 2834 2835 xhci_dbg(xhci, "Input Context:\n"); 2836 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 2837 } 2838 2839 static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 2840 unsigned int slot_id, unsigned int ep_index, 2841 struct xhci_dequeue_state *deq_state) 2842 { 2843 struct xhci_input_control_ctx *ctrl_ctx; 2844 struct xhci_container_ctx *in_ctx; 2845 struct xhci_ep_ctx *ep_ctx; 2846 u32 added_ctxs; 2847 dma_addr_t addr; 2848 2849 in_ctx = xhci->devs[slot_id]->in_ctx; 2850 ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 2851 if (!ctrl_ctx) { 2852 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2853 __func__); 2854 return; 2855 } 2856 2857 xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx, 2858 xhci->devs[slot_id]->out_ctx, ep_index); 2859 ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); 2860 addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg, 2861 deq_state->new_deq_ptr); 2862 if (addr == 0) { 2863 xhci_warn(xhci, "WARN Cannot submit config ep after " 2864 "reset ep command\n"); 2865 xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n", 2866 deq_state->new_deq_seg, 2867 deq_state->new_deq_ptr); 2868 return; 2869 } 2870 ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state); 2871 2872 added_ctxs = xhci_get_endpoint_flag_from_index(ep_index); 2873 
xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx, 2874 xhci->devs[slot_id]->out_ctx, ctrl_ctx, 2875 added_ctxs, added_ctxs); 2876 } 2877 2878 void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 2879 struct usb_device *udev, unsigned int ep_index) 2880 { 2881 struct xhci_dequeue_state deq_state; 2882 struct xhci_virt_ep *ep; 2883 2884 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 2885 "Cleaning up stalled endpoint ring"); 2886 ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2887 /* We need to move the HW's dequeue pointer past this TD, 2888 * or it will attempt to resend it on the next doorbell ring. 2889 */ 2890 xhci_find_new_dequeue_state(xhci, udev->slot_id, 2891 ep_index, ep->stopped_stream, ep->stopped_td, 2892 &deq_state); 2893 2894 /* HW with the reset endpoint quirk will use the saved dequeue state to 2895 * issue a configure endpoint command later. 2896 */ 2897 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 2898 struct xhci_command *command; 2899 /* Can't sleep if we're called from cleanup_halted_endpoint() */ 2900 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); 2901 if (!command) 2902 return; 2903 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 2904 "Queueing new dequeue state"); 2905 xhci_queue_new_dequeue_state(xhci, command, udev->slot_id, 2906 ep_index, ep->stopped_stream, &deq_state); 2907 } else { 2908 /* Better hope no one uses the input context between now and the 2909 * reset endpoint completion! 2910 * XXX: No idea how this hardware will react when stream rings 2911 * are enabled. 2912 */ 2913 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2914 "Setting up input context for " 2915 "configure endpoint command"); 2916 xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id, 2917 ep_index, &deq_state); 2918 } 2919 } 2920 2921 /* Deal with stalled endpoints. The core should have sent the control message 2922 * to clear the halt condition. However, we need to make the xHCI hardware 2923 * reset its sequence number, since a device will expect a sequence number of 2924 * zero after the halt condition is cleared. 
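 *
 * Clarifying note, inferred from the surrounding code rather than stated
 * here originally: the "reset" is performed by queueing a Reset Endpoint
 * command and then, via xhci_cleanup_stalled_ring(), moving the dequeue
 * pointer past the TD that stalled (or setting up the input context for
 * hosts with the reset endpoint quirk).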
2925 * Context: in_interrupt 2926 */ 2927 void xhci_endpoint_reset(struct usb_hcd *hcd, 2928 struct usb_host_endpoint *ep) 2929 { 2930 struct xhci_hcd *xhci; 2931 struct usb_device *udev; 2932 unsigned int ep_index; 2933 unsigned long flags; 2934 int ret; 2935 struct xhci_virt_ep *virt_ep; 2936 struct xhci_command *command; 2937 2938 xhci = hcd_to_xhci(hcd); 2939 udev = (struct usb_device *) ep->hcpriv; 2940 /* Called with a root hub endpoint (or an endpoint that wasn't added 2941 * with xhci_add_endpoint() 2942 */ 2943 if (!ep->hcpriv) 2944 return; 2945 ep_index = xhci_get_endpoint_index(&ep->desc); 2946 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; 2947 if (!virt_ep->stopped_td) { 2948 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 2949 "Endpoint 0x%x not halted, refusing to reset.", 2950 ep->desc.bEndpointAddress); 2951 return; 2952 } 2953 if (usb_endpoint_xfer_control(&ep->desc)) { 2954 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 2955 "Control endpoint stall already handled."); 2956 return; 2957 } 2958 2959 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); 2960 if (!command) 2961 return; 2962 2963 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, 2964 "Queueing reset endpoint command"); 2965 spin_lock_irqsave(&xhci->lock, flags); 2966 ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index); 2967 /* 2968 * Can't change the ring dequeue pointer until it's transitioned to the 2969 * stopped state, which is only upon a successful reset endpoint 2970 * command. Better hope that last command worked! 2971 */ 2972 if (!ret) { 2973 xhci_cleanup_stalled_ring(xhci, udev, ep_index); 2974 kfree(virt_ep->stopped_td); 2975 xhci_ring_cmd_db(xhci); 2976 } 2977 virt_ep->stopped_td = NULL; 2978 virt_ep->stopped_stream = 0; 2979 spin_unlock_irqrestore(&xhci->lock, flags); 2980 2981 if (ret) 2982 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2983 } 2984 2985 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2986 struct usb_device *udev, struct usb_host_endpoint *ep, 2987 unsigned int slot_id) 2988 { 2989 int ret; 2990 unsigned int ep_index; 2991 unsigned int ep_state; 2992 2993 if (!ep) 2994 return -EINVAL; 2995 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 2996 if (ret <= 0) 2997 return -EINVAL; 2998 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) { 2999 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion" 3000 " descriptor for ep 0x%x does not support streams\n", 3001 ep->desc.bEndpointAddress); 3002 return -EINVAL; 3003 } 3004 3005 ep_index = xhci_get_endpoint_index(&ep->desc); 3006 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3007 if (ep_state & EP_HAS_STREAMS || 3008 ep_state & EP_GETTING_STREAMS) { 3009 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x " 3010 "already has streams set up.\n", 3011 ep->desc.bEndpointAddress); 3012 xhci_warn(xhci, "Send email to xHCI maintainer and ask for " 3013 "dynamic stream context array reallocation.\n"); 3014 return -EINVAL; 3015 } 3016 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) { 3017 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk " 3018 "endpoint 0x%x; URBs are pending.\n", 3019 ep->desc.bEndpointAddress); 3020 return -EINVAL; 3021 } 3022 return 0; 3023 } 3024 3025 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci, 3026 unsigned int *num_streams, unsigned int *num_stream_ctxs) 3027 { 3028 unsigned int max_streams; 3029 3030 /* The stream context array size must be a power of two */ 3031 *num_stream_ctxs = 
roundup_pow_of_two(*num_streams); 3032 /* 3033 * Find out how many primary stream array entries the host controller 3034 * supports. Later we may use secondary stream arrays (similar to 2nd 3035 * level page entries), but that's an optional feature for xHCI host 3036 * controllers. xHCs must support at least 4 stream IDs. 3037 */ 3038 max_streams = HCC_MAX_PSA(xhci->hcc_params); 3039 if (*num_stream_ctxs > max_streams) { 3040 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n", 3041 max_streams); 3042 *num_stream_ctxs = max_streams; 3043 *num_streams = max_streams; 3044 } 3045 } 3046 3047 /* Returns an error code if one of the endpoint already has streams. 3048 * This does not change any data structures, it only checks and gathers 3049 * information. 3050 */ 3051 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, 3052 struct usb_device *udev, 3053 struct usb_host_endpoint **eps, unsigned int num_eps, 3054 unsigned int *num_streams, u32 *changed_ep_bitmask) 3055 { 3056 unsigned int max_streams; 3057 unsigned int endpoint_flag; 3058 int i; 3059 int ret; 3060 3061 for (i = 0; i < num_eps; i++) { 3062 ret = xhci_check_streams_endpoint(xhci, udev, 3063 eps[i], udev->slot_id); 3064 if (ret < 0) 3065 return ret; 3066 3067 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); 3068 if (max_streams < (*num_streams - 1)) { 3069 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", 3070 eps[i]->desc.bEndpointAddress, 3071 max_streams); 3072 *num_streams = max_streams+1; 3073 } 3074 3075 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc); 3076 if (*changed_ep_bitmask & endpoint_flag) 3077 return -EINVAL; 3078 *changed_ep_bitmask |= endpoint_flag; 3079 } 3080 return 0; 3081 } 3082 3083 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci, 3084 struct usb_device *udev, 3085 struct usb_host_endpoint **eps, unsigned int num_eps) 3086 { 3087 u32 changed_ep_bitmask = 0; 3088 unsigned int slot_id; 3089 unsigned int ep_index; 3090 unsigned int ep_state; 3091 int i; 3092 3093 slot_id = udev->slot_id; 3094 if (!xhci->devs[slot_id]) 3095 return 0; 3096 3097 for (i = 0; i < num_eps; i++) { 3098 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3099 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state; 3100 /* Are streams already being freed for the endpoint? */ 3101 if (ep_state & EP_GETTING_NO_STREAMS) { 3102 xhci_warn(xhci, "WARN Can't disable streams for " 3103 "endpoint 0x%x, " 3104 "streams are being disabled already\n", 3105 eps[i]->desc.bEndpointAddress); 3106 return 0; 3107 } 3108 /* Are there actually any streams to free? */ 3109 if (!(ep_state & EP_HAS_STREAMS) && 3110 !(ep_state & EP_GETTING_STREAMS)) { 3111 xhci_warn(xhci, "WARN Can't disable streams for " 3112 "endpoint 0x%x, " 3113 "streams are already disabled!\n", 3114 eps[i]->desc.bEndpointAddress); 3115 xhci_warn(xhci, "WARN xhci_free_streams() called " 3116 "with non-streams endpoint\n"); 3117 return 0; 3118 } 3119 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc); 3120 } 3121 return changed_ep_bitmask; 3122 } 3123 3124 /* 3125 * The USB device drivers use this function (though the HCD interface in USB 3126 * core) to prepare a set of bulk endpoints to use streams. Streams are used to 3127 * coordinate mass storage command queueing across multiple endpoints (basically 3128 * a stream ID == a task ID). 3129 * 3130 * Setting up streams involves allocating the same size stream context array 3131 * for each endpoint and issuing a configure endpoint command for all endpoints. 
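 *
 * Caller-side usage is an assumption based on the USB core API rather than
 * anything in this file: a class driver would typically ask for streams
 * with something like
 *
 *	ret = usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
 *	if (ret < 0)
 *		fall back to ordinary (non-stream) transfers
 *
 * and the USB core forwards that request to this function.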
3132 * 3133 * Don't allow the call to succeed if one endpoint only supports one stream 3134 * (which means it doesn't support streams at all). 3135 * 3136 * Drivers may get less stream IDs than they asked for, if the host controller 3137 * hardware or endpoints claim they can't support the number of requested 3138 * stream IDs. 3139 */ 3140 int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3141 struct usb_host_endpoint **eps, unsigned int num_eps, 3142 unsigned int num_streams, gfp_t mem_flags) 3143 { 3144 int i, ret; 3145 struct xhci_hcd *xhci; 3146 struct xhci_virt_device *vdev; 3147 struct xhci_command *config_cmd; 3148 struct xhci_input_control_ctx *ctrl_ctx; 3149 unsigned int ep_index; 3150 unsigned int num_stream_ctxs; 3151 unsigned long flags; 3152 u32 changed_ep_bitmask = 0; 3153 3154 if (!eps) 3155 return -EINVAL; 3156 3157 /* Add one to the number of streams requested to account for 3158 * stream 0 that is reserved for xHCI usage. 3159 */ 3160 num_streams += 1; 3161 xhci = hcd_to_xhci(hcd); 3162 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3163 num_streams); 3164 3165 /* MaxPSASize value 0 (2 streams) means streams are not supported */ 3166 if (HCC_MAX_PSA(xhci->hcc_params) < 4) { 3167 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); 3168 return -ENOSYS; 3169 } 3170 3171 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 3172 if (!config_cmd) { 3173 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 3174 return -ENOMEM; 3175 } 3176 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 3177 if (!ctrl_ctx) { 3178 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3179 __func__); 3180 xhci_free_command(xhci, config_cmd); 3181 return -ENOMEM; 3182 } 3183 3184 /* Check to make sure all endpoints are not already configured for 3185 * streams. While we're at it, find the maximum number of streams that 3186 * all the endpoints will support and check for duplicate endpoints. 3187 */ 3188 spin_lock_irqsave(&xhci->lock, flags); 3189 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3190 num_eps, &num_streams, &changed_ep_bitmask); 3191 if (ret < 0) { 3192 xhci_free_command(xhci, config_cmd); 3193 spin_unlock_irqrestore(&xhci->lock, flags); 3194 return ret; 3195 } 3196 if (num_streams <= 1) { 3197 xhci_warn(xhci, "WARN: endpoints can't handle " 3198 "more than one stream.\n"); 3199 xhci_free_command(xhci, config_cmd); 3200 spin_unlock_irqrestore(&xhci->lock, flags); 3201 return -EINVAL; 3202 } 3203 vdev = xhci->devs[udev->slot_id]; 3204 /* Mark each endpoint as being in transition, so 3205 * xhci_urb_enqueue() will reject all URBs. 3206 */ 3207 for (i = 0; i < num_eps; i++) { 3208 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3209 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3210 } 3211 spin_unlock_irqrestore(&xhci->lock, flags); 3212 3213 /* Setup internal data structures and allocate HW data structures for 3214 * streams (but don't install the HW structures in the input context 3215 * until we're sure all memory allocation succeeded). 
3216 */ 3217 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3218 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3219 num_stream_ctxs, num_streams); 3220 3221 for (i = 0; i < num_eps; i++) { 3222 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3223 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3224 num_stream_ctxs, 3225 num_streams, mem_flags); 3226 if (!vdev->eps[ep_index].stream_info) 3227 goto cleanup; 3228 /* Set maxPstreams in endpoint context and update deq ptr to 3229 * point to stream context array. FIXME 3230 */ 3231 } 3232 3233 /* Set up the input context for a configure endpoint command. */ 3234 for (i = 0; i < num_eps; i++) { 3235 struct xhci_ep_ctx *ep_ctx; 3236 3237 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3238 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index); 3239 3240 xhci_endpoint_copy(xhci, config_cmd->in_ctx, 3241 vdev->out_ctx, ep_index); 3242 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx, 3243 vdev->eps[ep_index].stream_info); 3244 } 3245 /* Tell the HW to drop its old copy of the endpoint context info 3246 * and add the updated copy from the input context. 3247 */ 3248 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx, 3249 vdev->out_ctx, ctrl_ctx, 3250 changed_ep_bitmask, changed_ep_bitmask); 3251 3252 /* Issue and wait for the configure endpoint command */ 3253 ret = xhci_configure_endpoint(xhci, udev, config_cmd, 3254 false, false); 3255 3256 /* xHC rejected the configure endpoint command for some reason, so we 3257 * leave the old ring intact and free our internal streams data 3258 * structure. 3259 */ 3260 if (ret < 0) 3261 goto cleanup; 3262 3263 spin_lock_irqsave(&xhci->lock, flags); 3264 for (i = 0; i < num_eps; i++) { 3265 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3266 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3267 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", 3268 udev->slot_id, ep_index); 3269 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; 3270 } 3271 xhci_free_command(xhci, config_cmd); 3272 spin_unlock_irqrestore(&xhci->lock, flags); 3273 3274 /* Subtract 1 for stream 0, which drivers can't use */ 3275 return num_streams - 1; 3276 3277 cleanup: 3278 /* If it didn't work, free the streams! */ 3279 for (i = 0; i < num_eps; i++) { 3280 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3281 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3282 vdev->eps[ep_index].stream_info = NULL; 3283 /* FIXME Unset maxPstreams in endpoint context and 3284 * update deq ptr to point to normal string ring. 3285 */ 3286 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS; 3287 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3288 xhci_endpoint_zero(xhci, vdev, eps[i]); 3289 } 3290 xhci_free_command(xhci, config_cmd); 3291 return -ENOMEM; 3292 } 3293 3294 /* Transition the endpoint from using streams to being a "normal" endpoint 3295 * without streams. 3296 * 3297 * Modify the endpoint context state, submit a configure endpoint command, 3298 * and free all endpoint rings for streams if that completes successfully. 
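 *
 * (Usage note, an assumption based on the USB core API rather than this
 * file: drivers normally reach this through usb_free_streams(intf, eps,
 * num_eps, GFP_KERNEL) once they stop using stream IDs, e.g. on error
 * recovery or before disconnect.)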
3299 */ 3300 int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev, 3301 struct usb_host_endpoint **eps, unsigned int num_eps, 3302 gfp_t mem_flags) 3303 { 3304 int i, ret; 3305 struct xhci_hcd *xhci; 3306 struct xhci_virt_device *vdev; 3307 struct xhci_command *command; 3308 struct xhci_input_control_ctx *ctrl_ctx; 3309 unsigned int ep_index; 3310 unsigned long flags; 3311 u32 changed_ep_bitmask; 3312 3313 xhci = hcd_to_xhci(hcd); 3314 vdev = xhci->devs[udev->slot_id]; 3315 3316 /* Set up a configure endpoint command to remove the streams rings */ 3317 spin_lock_irqsave(&xhci->lock, flags); 3318 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci, 3319 udev, eps, num_eps); 3320 if (changed_ep_bitmask == 0) { 3321 spin_unlock_irqrestore(&xhci->lock, flags); 3322 return -EINVAL; 3323 } 3324 3325 /* Use the xhci_command structure from the first endpoint. We may have 3326 * allocated too many, but the driver may call xhci_free_streams() for 3327 * each endpoint it grouped into one call to xhci_alloc_streams(). 3328 */ 3329 ep_index = xhci_get_endpoint_index(&eps[0]->desc); 3330 command = vdev->eps[ep_index].stream_info->free_streams_command; 3331 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); 3332 if (!ctrl_ctx) { 3333 spin_unlock_irqrestore(&xhci->lock, flags); 3334 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3335 __func__); 3336 return -EINVAL; 3337 } 3338 3339 for (i = 0; i < num_eps; i++) { 3340 struct xhci_ep_ctx *ep_ctx; 3341 3342 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3343 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index); 3344 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |= 3345 EP_GETTING_NO_STREAMS; 3346 3347 xhci_endpoint_copy(xhci, command->in_ctx, 3348 vdev->out_ctx, ep_index); 3349 xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx, 3350 &vdev->eps[ep_index]); 3351 } 3352 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx, 3353 vdev->out_ctx, ctrl_ctx, 3354 changed_ep_bitmask, changed_ep_bitmask); 3355 spin_unlock_irqrestore(&xhci->lock, flags); 3356 3357 /* Issue and wait for the configure endpoint command, 3358 * which must succeed. 3359 */ 3360 ret = xhci_configure_endpoint(xhci, udev, command, 3361 false, true); 3362 3363 /* xHC rejected the configure endpoint command for some reason, so we 3364 * leave the streams rings intact. 3365 */ 3366 if (ret < 0) 3367 return ret; 3368 3369 spin_lock_irqsave(&xhci->lock, flags); 3370 for (i = 0; i < num_eps; i++) { 3371 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3372 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info); 3373 vdev->eps[ep_index].stream_info = NULL; 3374 /* FIXME Unset maxPstreams in endpoint context and 3375 * update deq ptr to point to normal string ring. 3376 */ 3377 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS; 3378 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS; 3379 } 3380 spin_unlock_irqrestore(&xhci->lock, flags); 3381 3382 return 0; 3383 } 3384 3385 /* 3386 * Deletes endpoint resources for endpoints that were active before a Reset 3387 * Device command, or a Disable Slot command. The Reset Device command leaves 3388 * the control endpoint intact, whereas the Disable Slot command deletes it. 3389 * 3390 * Must be called with xhci->lock held. 
3391 */ 3392 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci, 3393 struct xhci_virt_device *virt_dev, bool drop_control_ep) 3394 { 3395 int i; 3396 unsigned int num_dropped_eps = 0; 3397 unsigned int drop_flags = 0; 3398 3399 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) { 3400 if (virt_dev->eps[i].ring) { 3401 drop_flags |= 1 << i; 3402 num_dropped_eps++; 3403 } 3404 } 3405 xhci->num_active_eps -= num_dropped_eps; 3406 if (num_dropped_eps) 3407 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3408 "Dropped %u ep ctxs, flags = 0x%x, " 3409 "%u now active.", 3410 num_dropped_eps, drop_flags, 3411 xhci->num_active_eps); 3412 } 3413 3414 /* 3415 * This submits a Reset Device Command, which will set the device state to 0, 3416 * set the device address to 0, and disable all the endpoints except the default 3417 * control endpoint. The USB core should come back and call 3418 * xhci_address_device(), and then re-set up the configuration. If this is 3419 * called because of a usb_reset_and_verify_device(), then the old alternate 3420 * settings will be re-installed through the normal bandwidth allocation 3421 * functions. 3422 * 3423 * Wait for the Reset Device command to finish. Remove all structures 3424 * associated with the endpoints that were disabled. Clear the input device 3425 * structure? Cache the rings? Reset the control endpoint 0 max packet size? 3426 * 3427 * If the virt_dev to be reset does not exist or does not match the udev, 3428 * it means the device is lost, possibly due to the xHC restore error and 3429 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to 3430 * re-allocate the device. 3431 */ 3432 int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 3433 { 3434 int ret, i; 3435 unsigned long flags; 3436 struct xhci_hcd *xhci; 3437 unsigned int slot_id; 3438 struct xhci_virt_device *virt_dev; 3439 struct xhci_command *reset_device_cmd; 3440 int last_freed_endpoint; 3441 struct xhci_slot_ctx *slot_ctx; 3442 int old_active_eps = 0; 3443 3444 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 3445 if (ret <= 0) 3446 return ret; 3447 xhci = hcd_to_xhci(hcd); 3448 slot_id = udev->slot_id; 3449 virt_dev = xhci->devs[slot_id]; 3450 if (!virt_dev) { 3451 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3452 "not exist. Re-allocate the device\n", slot_id); 3453 ret = xhci_alloc_dev(hcd, udev); 3454 if (ret == 1) 3455 return 0; 3456 else 3457 return -EINVAL; 3458 } 3459 3460 if (virt_dev->udev != udev) { 3461 /* If the virt_dev and the udev does not match, this virt_dev 3462 * may belong to another udev. 3463 * Re-allocate the device. 3464 */ 3465 xhci_dbg(xhci, "The device to be reset with slot ID %u does " 3466 "not match the udev. Re-allocate the device\n", 3467 slot_id); 3468 ret = xhci_alloc_dev(hcd, udev); 3469 if (ret == 1) 3470 return 0; 3471 else 3472 return -EINVAL; 3473 } 3474 3475 /* If device is not setup, there is no point in resetting it */ 3476 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3477 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 3478 SLOT_STATE_DISABLED) 3479 return 0; 3480 3481 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 3482 /* Allocate the command structure that holds the struct completion. 3483 * Assume we're in process context, since the normal device reset 3484 * process has to wait for the device anyway. Storage devices are 3485 * reset as part of error handling, so use GFP_NOIO instead of 3486 * GFP_KERNEL. 
3487 */ 3488 reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); 3489 if (!reset_device_cmd) { 3490 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3491 return -ENOMEM; 3492 } 3493 3494 /* Attempt to submit the Reset Device command to the command ring */ 3495 spin_lock_irqsave(&xhci->lock, flags); 3496 3497 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); 3498 if (ret) { 3499 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3500 spin_unlock_irqrestore(&xhci->lock, flags); 3501 goto command_cleanup; 3502 } 3503 xhci_ring_cmd_db(xhci); 3504 spin_unlock_irqrestore(&xhci->lock, flags); 3505 3506 /* Wait for the Reset Device command to finish */ 3507 wait_for_completion(reset_device_cmd->completion); 3508 3509 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 3510 * unless we tried to reset a slot ID that wasn't enabled, 3511 * or the device wasn't in the addressed or configured state. 3512 */ 3513 ret = reset_device_cmd->status; 3514 switch (ret) { 3515 case COMP_CMD_ABORT: 3516 case COMP_CMD_STOP: 3517 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 3518 ret = -ETIME; 3519 goto command_cleanup; 3520 case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ 3521 case COMP_CTX_STATE: /* 0.96 completion code for same thing */ 3522 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", 3523 slot_id, 3524 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 3525 xhci_dbg(xhci, "Not freeing device rings.\n"); 3526 /* Don't treat this as an error. May change my mind later. */ 3527 ret = 0; 3528 goto command_cleanup; 3529 case COMP_SUCCESS: 3530 xhci_dbg(xhci, "Successful reset device command.\n"); 3531 break; 3532 default: 3533 if (xhci_is_vendor_info_code(xhci, ret)) 3534 break; 3535 xhci_warn(xhci, "Unknown completion code %u for " 3536 "reset device command.\n", ret); 3537 ret = -EINVAL; 3538 goto command_cleanup; 3539 } 3540 3541 /* Free up host controller endpoint resources */ 3542 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3543 spin_lock_irqsave(&xhci->lock, flags); 3544 /* Don't delete the default control endpoint resources */ 3545 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 3546 spin_unlock_irqrestore(&xhci->lock, flags); 3547 } 3548 3549 /* Everything but endpoint 0 is disabled, so free or cache the rings. 
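 * For each endpoint index 1..30 the loop below frees any leftover stream
 * info, frees or caches the transfer ring, drops the endpoint from the
 * interval bandwidth table, and clears its bandwidth info.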
*/ 3550 last_freed_endpoint = 1; 3551 for (i = 1; i < 31; ++i) { 3552 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 3553 3554 if (ep->ep_state & EP_HAS_STREAMS) { 3555 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 3556 xhci_get_endpoint_address(i)); 3557 xhci_free_stream_info(xhci, ep->stream_info); 3558 ep->stream_info = NULL; 3559 ep->ep_state &= ~EP_HAS_STREAMS; 3560 } 3561 3562 if (ep->ring) { 3563 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 3564 last_freed_endpoint = i; 3565 } 3566 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 3567 xhci_drop_ep_from_interval_table(xhci, 3568 &virt_dev->eps[i].bw_info, 3569 virt_dev->bw_table, 3570 udev, 3571 &virt_dev->eps[i], 3572 virt_dev->tt_info); 3573 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 3574 } 3575 /* If necessary, update the number of active TTs on this root port */ 3576 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 3577 3578 xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); 3579 xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); 3580 ret = 0; 3581 3582 command_cleanup: 3583 xhci_free_command(xhci, reset_device_cmd); 3584 return ret; 3585 } 3586 3587 /* 3588 * At this point, the struct usb_device is about to go away, the device has 3589 * disconnected, and all traffic has been stopped and the endpoints have been 3590 * disabled. Free any HC data structures associated with that device. 3591 */ 3592 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 3593 { 3594 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3595 struct xhci_virt_device *virt_dev; 3596 unsigned long flags; 3597 u32 state; 3598 int i, ret; 3599 struct xhci_command *command; 3600 3601 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3602 if (!command) 3603 return; 3604 3605 #ifndef CONFIG_USB_DEFAULT_PERSIST 3606 /* 3607 * We called pm_runtime_get_noresume when the device was attached. 3608 * Decrement the counter here to allow controller to runtime suspend 3609 * if no devices remain. 3610 */ 3611 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3612 pm_runtime_put_noidle(hcd->self.controller); 3613 #endif 3614 3615 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3616 /* If the host is halted due to driver unload, we still need to free the 3617 * device. 3618 */ 3619 if (ret <= 0 && ret != -ENODEV) { 3620 kfree(command); 3621 return; 3622 } 3623 3624 virt_dev = xhci->devs[udev->slot_id]; 3625 3626 /* Stop any wayward timer functions (which may grab the lock) */ 3627 for (i = 0; i < 31; ++i) { 3628 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING; 3629 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer); 3630 } 3631 3632 spin_lock_irqsave(&xhci->lock, flags); 3633 /* Don't disable the slot if the host controller is dead. */ 3634 state = readl(&xhci->op_regs->status); 3635 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 3636 (xhci->xhc_state & XHCI_STATE_HALTED)) { 3637 xhci_free_virt_device(xhci, udev->slot_id); 3638 spin_unlock_irqrestore(&xhci->lock, flags); 3639 kfree(command); 3640 return; 3641 } 3642 3643 if (xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3644 udev->slot_id)) { 3645 spin_unlock_irqrestore(&xhci->lock, flags); 3646 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3647 return; 3648 } 3649 xhci_ring_cmd_db(xhci); 3650 spin_unlock_irqrestore(&xhci->lock, flags); 3651 3652 /* 3653 * Event command completion handler will free any data structures 3654 * associated with the slot. 
XXX Can free sleep? 3655 */ 3656 } 3657 3658 /* 3659 * Checks if we have enough host controller resources for the default control 3660 * endpoint. 3661 * 3662 * Must be called with xhci->lock held. 3663 */ 3664 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 3665 { 3666 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 3667 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3668 "Not enough ep ctxs: " 3669 "%u active, need to add 1, limit is %u.", 3670 xhci->num_active_eps, xhci->limit_active_eps); 3671 return -ENOMEM; 3672 } 3673 xhci->num_active_eps += 1; 3674 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 3675 "Adding 1 ep ctx, %u now active.", 3676 xhci->num_active_eps); 3677 return 0; 3678 } 3679 3680 3681 /* 3682 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 3683 * timed out, or allocating memory failed. Returns 1 on success. 3684 */ 3685 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 3686 { 3687 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3688 unsigned long flags; 3689 int ret; 3690 struct xhci_command *command; 3691 3692 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3693 if (!command) 3694 return 0; 3695 3696 spin_lock_irqsave(&xhci->lock, flags); 3697 command->completion = &xhci->addr_dev; 3698 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0); 3699 if (ret) { 3700 spin_unlock_irqrestore(&xhci->lock, flags); 3701 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 3702 kfree(command); 3703 return 0; 3704 } 3705 xhci_ring_cmd_db(xhci); 3706 spin_unlock_irqrestore(&xhci->lock, flags); 3707 3708 wait_for_completion(command->completion); 3709 3710 if (!xhci->slot_id || command->status != COMP_SUCCESS) { 3711 xhci_err(xhci, "Error while assigning device slot ID\n"); 3712 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n", 3713 HCS_MAX_SLOTS( 3714 readl(&xhci->cap_regs->hcs_params1))); 3715 kfree(command); 3716 return 0; 3717 } 3718 3719 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3720 spin_lock_irqsave(&xhci->lock, flags); 3721 ret = xhci_reserve_host_control_ep_resources(xhci); 3722 if (ret) { 3723 spin_unlock_irqrestore(&xhci->lock, flags); 3724 xhci_warn(xhci, "Not enough host resources, " 3725 "active endpoint contexts = %u\n", 3726 xhci->num_active_eps); 3727 goto disable_slot; 3728 } 3729 spin_unlock_irqrestore(&xhci->lock, flags); 3730 } 3731 /* Use GFP_NOIO, since this function can be called from 3732 * xhci_discover_or_reset_device(), which may be called as part of 3733 * mass storage driver error handling. 3734 */ 3735 if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) { 3736 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 3737 goto disable_slot; 3738 } 3739 udev->slot_id = xhci->slot_id; 3740 3741 #ifndef CONFIG_USB_DEFAULT_PERSIST 3742 /* 3743 * If resetting upon resume, we can't put the controller into runtime 3744 * suspend if there is a device attached. 3745 */ 3746 if (xhci->quirks & XHCI_RESET_ON_RESUME) 3747 pm_runtime_get_noresume(hcd->self.controller); 3748 #endif 3749 3750 3751 kfree(command); 3752 /* Is this a LS or FS device under a HS hub? */ 3753 /* Hub or peripherial? 
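 * (Neither question needs an answer at Enable Slot time: the route
 * string, speed and the TT fields for a LS/FS device behind a high speed
 * hub are filled in later by xhci_setup_addressable_virt_dev(), and hub
 * specific fields are set in xhci_update_hub_device().)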
*/ 3754 return 1; 3755 3756 disable_slot: 3757 /* Disable slot, if we can do it without mem alloc */ 3758 spin_lock_irqsave(&xhci->lock, flags); 3759 command->completion = NULL; 3760 command->status = 0; 3761 if (!xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 3762 udev->slot_id)) 3763 xhci_ring_cmd_db(xhci); 3764 spin_unlock_irqrestore(&xhci->lock, flags); 3765 return 0; 3766 } 3767 3768 /* 3769 * Issue an Address Device command and optionally send a corresponding 3770 * SetAddress request to the device. 3771 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 3772 * we should only issue and wait on one address command at the same time. 3773 */ 3774 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, 3775 enum xhci_setup_dev setup) 3776 { 3777 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address"; 3778 unsigned long flags; 3779 struct xhci_virt_device *virt_dev; 3780 int ret = 0; 3781 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3782 struct xhci_slot_ctx *slot_ctx; 3783 struct xhci_input_control_ctx *ctrl_ctx; 3784 u64 temp_64; 3785 struct xhci_command *command; 3786 3787 if (!udev->slot_id) { 3788 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3789 "Bad Slot ID %d", udev->slot_id); 3790 return -EINVAL; 3791 } 3792 3793 virt_dev = xhci->devs[udev->slot_id]; 3794 3795 if (WARN_ON(!virt_dev)) { 3796 /* 3797 * In plug/unplug torture test with an NEC controller, 3798 * a zero-dereference was observed once due to virt_dev = 0. 3799 * Print useful debug rather than crash if it is observed again! 3800 */ 3801 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 3802 udev->slot_id); 3803 return -EINVAL; 3804 } 3805 3806 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); 3807 if (!command) 3808 return -ENOMEM; 3809 3810 command->in_ctx = virt_dev->in_ctx; 3811 command->completion = &xhci->addr_dev; 3812 3813 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3814 ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 3815 if (!ctrl_ctx) { 3816 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3817 __func__); 3818 kfree(command); 3819 return -EINVAL; 3820 } 3821 /* 3822 * If this is the first Set Address since device plug-in or 3823 * virt_device realloaction after a resume with an xHCI power loss, 3824 * then set up the slot context. 3825 */ 3826 if (!slot_ctx->dev_info) 3827 xhci_setup_addressable_virt_dev(xhci, udev); 3828 /* Otherwise, update the control endpoint ring enqueue pointer. */ 3829 else 3830 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 3831 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 3832 ctrl_ctx->drop_flags = 0; 3833 3834 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3835 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3836 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 3837 le32_to_cpu(slot_ctx->dev_info) >> 27); 3838 3839 spin_lock_irqsave(&xhci->lock, flags); 3840 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 3841 udev->slot_id, setup); 3842 if (ret) { 3843 spin_unlock_irqrestore(&xhci->lock, flags); 3844 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3845 "FIXME: allocate a command ring segment"); 3846 kfree(command); 3847 return ret; 3848 } 3849 xhci_ring_cmd_db(xhci); 3850 spin_unlock_irqrestore(&xhci->lock, flags); 3851 3852 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? 
*/ 3853 wait_for_completion(command->completion); 3854 3855 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 3856 * the SetAddress() "recovery interval" required by USB and aborting the 3857 * command on a timeout." 3858 */ 3859 switch (command->status) { 3860 case COMP_CMD_ABORT: 3861 case COMP_CMD_STOP: 3862 xhci_warn(xhci, "Timeout while waiting for setup device command\n"); 3863 ret = -ETIME; 3864 break; 3865 case COMP_CTX_STATE: 3866 case COMP_EBADSLT: 3867 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n", 3868 act, udev->slot_id); 3869 ret = -EINVAL; 3870 break; 3871 case COMP_TX_ERR: 3872 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act); 3873 ret = -EPROTO; 3874 break; 3875 case COMP_DEV_ERR: 3876 dev_warn(&udev->dev, 3877 "ERROR: Incompatible device for setup %s command\n", act); 3878 ret = -ENODEV; 3879 break; 3880 case COMP_SUCCESS: 3881 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3882 "Successful setup %s command", act); 3883 break; 3884 default: 3885 xhci_err(xhci, 3886 "ERROR: unexpected setup %s command completion code 0x%x.\n", 3887 act, command->status); 3888 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3889 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3890 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1); 3891 ret = -EINVAL; 3892 break; 3893 } 3894 if (ret) { 3895 kfree(command); 3896 return ret; 3897 } 3898 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr); 3899 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3900 "Op regs DCBAA ptr = %#016llx", temp_64); 3901 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3902 "Slot ID %d dcbaa entry @%p = %#016llx", 3903 udev->slot_id, 3904 &xhci->dcbaa->dev_context_ptrs[udev->slot_id], 3905 (unsigned long long) 3906 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); 3907 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3908 "Output Context DMA address = %#08llx", 3909 (unsigned long long)virt_dev->out_ctx->dma); 3910 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 3911 xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 3912 trace_xhci_address_ctx(xhci, virt_dev->in_ctx, 3913 le32_to_cpu(slot_ctx->dev_info) >> 27); 3914 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 3915 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 3916 /* 3917 * USB core uses address 1 for the roothubs, so we add one to the 3918 * address given back to us by the HC. 3919 */ 3920 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 3921 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 3922 le32_to_cpu(slot_ctx->dev_info) >> 27); 3923 /* Zero the input context control for later use */ 3924 ctrl_ctx->add_flags = 0; 3925 ctrl_ctx->drop_flags = 0; 3926 3927 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 3928 "Internal device address = %d", 3929 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); 3930 kfree(command); 3931 return 0; 3932 } 3933 3934 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 3935 { 3936 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS); 3937 } 3938 3939 int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) 3940 { 3941 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY); 3942 } 3943 3944 /* 3945 * Translate the port index into the real index in the HW port status 3946 * registers. Calculate the offset between the port's PORTSC register 3947 * and the port status base, then divide by the number of per-port 3948 * registers to get the real index. Raw port numbers start at 1.
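 *
 * Worked example (illustrative, assuming NUM_PORT_REGS is 4): a port
 * whose PORTSC register sits 8 32-bit registers past port_status_base
 * gives (8 / 4) + 1 = 3, i.e. raw port number 3.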
3949 */ 3950 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) 3951 { 3952 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 3953 __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; 3954 __le32 __iomem *addr; 3955 int raw_port; 3956 3957 if (hcd->speed != HCD_USB3) 3958 addr = xhci->usb2_ports[port1 - 1]; 3959 else 3960 addr = xhci->usb3_ports[port1 - 1]; 3961 3962 raw_port = (addr - base_addr)/NUM_PORT_REGS + 1; 3963 return raw_port; 3964 } 3965 3966 /* 3967 * Issue an Evaluate Context command to change the Maximum Exit Latency in the 3968 * slot context. If that succeeds, store the new MEL in the xhci_virt_device. 3969 */ 3970 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, 3971 struct usb_device *udev, u16 max_exit_latency) 3972 { 3973 struct xhci_virt_device *virt_dev; 3974 struct xhci_command *command; 3975 struct xhci_input_control_ctx *ctrl_ctx; 3976 struct xhci_slot_ctx *slot_ctx; 3977 unsigned long flags; 3978 int ret; 3979 3980 spin_lock_irqsave(&xhci->lock, flags); 3981 if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { 3982 spin_unlock_irqrestore(&xhci->lock, flags); 3983 return 0; 3984 } 3985 3986 /* Attempt to issue an Evaluate Context command to change the MEL. */ 3987 virt_dev = xhci->devs[udev->slot_id]; 3988 command = xhci->lpm_command; 3989 ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); 3990 if (!ctrl_ctx) { 3991 spin_unlock_irqrestore(&xhci->lock, flags); 3992 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3993 __func__); 3994 return -ENOMEM; 3995 } 3996 3997 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx); 3998 spin_unlock_irqrestore(&xhci->lock, flags); 3999 4000 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4001 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 4002 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); 4003 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); 4004 4005 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 4006 "Set up evaluate context for LPM MEL change."); 4007 xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id); 4008 xhci_dbg_ctx(xhci, command->in_ctx, 0); 4009 4010 /* Issue and wait for the evaluate context command. 
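 * (Passing ctx_change == true below makes xhci_configure_endpoint()
 * queue an Evaluate Context command rather than a Configure Endpoint
 * command; only the slot context MEL field set up above changes.)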
*/ 4011 ret = xhci_configure_endpoint(xhci, udev, command, 4012 true, true); 4013 xhci_dbg(xhci, "Slot %u Output Context:\n", udev->slot_id); 4014 xhci_dbg_ctx(xhci, virt_dev->out_ctx, 0); 4015 4016 if (!ret) { 4017 spin_lock_irqsave(&xhci->lock, flags); 4018 virt_dev->current_mel = max_exit_latency; 4019 spin_unlock_irqrestore(&xhci->lock, flags); 4020 } 4021 return ret; 4022 } 4023 4024 #ifdef CONFIG_PM_RUNTIME 4025 4026 /* BESL to HIRD Encoding array for USB2 LPM */ 4027 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4028 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4029 4030 /* Calculate HIRD/BESL for USB2 PORTPMSC*/ 4031 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4032 struct usb_device *udev) 4033 { 4034 int u2del, besl, besl_host; 4035 int besl_device = 0; 4036 u32 field; 4037 4038 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4039 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4040 4041 if (field & USB_BESL_SUPPORT) { 4042 for (besl_host = 0; besl_host < 16; besl_host++) { 4043 if (xhci_besl_encoding[besl_host] >= u2del) 4044 break; 4045 } 4046 /* Use baseline BESL value as default */ 4047 if (field & USB_BESL_BASELINE_VALID) 4048 besl_device = USB_GET_BESL_BASELINE(field); 4049 else if (field & USB_BESL_DEEP_VALID) 4050 besl_device = USB_GET_BESL_DEEP(field); 4051 } else { 4052 if (u2del <= 50) 4053 besl_host = 0; 4054 else 4055 besl_host = (u2del - 51) / 75 + 1; 4056 } 4057 4058 besl = besl_host + besl_device; 4059 if (besl > 15) 4060 besl = 15; 4061 4062 return besl; 4063 } 4064 4065 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4066 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4067 { 4068 u32 field; 4069 int l1; 4070 int besld = 0; 4071 int hirdm = 0; 4072 4073 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4074 4075 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4076 l1 = udev->l1_params.timeout / 256; 4077 4078 /* device has preferred BESLD */ 4079 if (field & USB_BESL_DEEP_VALID) { 4080 besld = USB_GET_BESL_DEEP(field); 4081 hirdm = 1; 4082 } 4083 4084 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4085 } 4086 4087 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4088 struct usb_device *udev, int enable) 4089 { 4090 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4091 __le32 __iomem **port_array; 4092 __le32 __iomem *pm_addr, *hlpm_addr; 4093 u32 pm_val, hlpm_val, field; 4094 unsigned int port_num; 4095 unsigned long flags; 4096 int hird, exit_latency; 4097 int ret; 4098 4099 if (hcd->speed == HCD_USB3 || !xhci->hw_lpm_support || 4100 !udev->lpm_capable) 4101 return -EPERM; 4102 4103 if (!udev->parent || udev->parent->parent || 4104 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4105 return -EPERM; 4106 4107 if (udev->usb2_hw_lpm_capable != 1) 4108 return -EPERM; 4109 4110 spin_lock_irqsave(&xhci->lock, flags); 4111 4112 port_array = xhci->usb2_ports; 4113 port_num = udev->portnum - 1; 4114 pm_addr = port_array[port_num] + PORTPMSC; 4115 pm_val = readl(pm_addr); 4116 hlpm_addr = port_array[port_num] + PORTHLPMC; 4117 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4118 4119 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4120 enable ? "enable" : "disable", port_num + 1); 4121 4122 if (enable) { 4123 /* Host supports BESL timeout instead of HIRD */ 4124 if (udev->usb2_hw_lpm_besl_capable) { 4125 /* if device doesn't have a preferred BESL value use a 4126 * default one which works with mixed HIRD and BESL 4127 * systems. 
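 * (As a point of reference, the fallback index selects an exit latency
 * from the xhci_besl_encoding[] table above; index 4, for instance,
 * corresponds to 400 microseconds.)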
See XHCI_DEFAULT_BESL definition in xhci.h 4128 */ 4129 if ((field & USB_BESL_SUPPORT) && 4130 (field & USB_BESL_BASELINE_VALID)) 4131 hird = USB_GET_BESL_BASELINE(field); 4132 else 4133 hird = udev->l1_params.besl; 4134 4135 exit_latency = xhci_besl_encoding[hird]; 4136 spin_unlock_irqrestore(&xhci->lock, flags); 4137 4138 /* The USB 3.0 code dedicates one xhci->lpm_command->in_ctx 4139 * input context for link power management evaluate 4140 * context commands. It is protected by the hcd->bandwidth 4141 * mutex and is shared by all devices. We need to set 4142 * the max exit latency for USB 2 BESL LPM as well, so 4143 * use the same mutex and xhci_change_max_exit_latency(). 4144 */ 4145 mutex_lock(hcd->bandwidth_mutex); 4146 ret = xhci_change_max_exit_latency(xhci, udev, 4147 exit_latency); 4148 mutex_unlock(hcd->bandwidth_mutex); 4149 4150 if (ret < 0) 4151 return ret; 4152 spin_lock_irqsave(&xhci->lock, flags); 4153 4154 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev); 4155 writel(hlpm_val, hlpm_addr); 4156 /* flush write */ 4157 readl(hlpm_addr); 4158 } else { 4159 hird = xhci_calculate_hird_besl(xhci, udev); 4160 } 4161 4162 pm_val &= ~PORT_HIRD_MASK; 4163 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id); 4164 writel(pm_val, pm_addr); 4165 pm_val = readl(pm_addr); 4166 pm_val |= PORT_HLE; 4167 writel(pm_val, pm_addr); 4168 /* flush write */ 4169 readl(pm_addr); 4170 } else { 4171 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK); 4172 writel(pm_val, pm_addr); 4173 /* flush write */ 4174 readl(pm_addr); 4175 if (udev->usb2_hw_lpm_besl_capable) { 4176 spin_unlock_irqrestore(&xhci->lock, flags); 4177 mutex_lock(hcd->bandwidth_mutex); 4178 xhci_change_max_exit_latency(xhci, udev, 0); 4179 mutex_unlock(hcd->bandwidth_mutex); 4180 return 0; 4181 } 4182 } 4183 4184 spin_unlock_irqrestore(&xhci->lock, flags); 4185 return 0; 4186 } 4187 4188 /* Check if a USB2 port supports a given extended capability protocol. 4189 * Only USB2 ports' extended protocol capability values are cached.
4190 * Return 1 if capability is supported 4191 */ 4192 static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port, 4193 unsigned capability) 4194 { 4195 u32 port_offset, port_count; 4196 int i; 4197 4198 for (i = 0; i < xhci->num_ext_caps; i++) { 4199 if (xhci->ext_caps[i] & capability) { 4200 /* port offsets starts at 1 */ 4201 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1; 4202 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]); 4203 if (port >= port_offset && 4204 port < port_offset + port_count) 4205 return 1; 4206 } 4207 } 4208 return 0; 4209 } 4210 4211 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4212 { 4213 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4214 int portnum = udev->portnum - 1; 4215 4216 if (hcd->speed == HCD_USB3 || !xhci->sw_lpm_support || 4217 !udev->lpm_capable) 4218 return 0; 4219 4220 /* we only support lpm for non-hub device connected to root hub yet */ 4221 if (!udev->parent || udev->parent->parent || 4222 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4223 return 0; 4224 4225 if (xhci->hw_lpm_support == 1 && 4226 xhci_check_usb2_port_capability( 4227 xhci, portnum, XHCI_HLC)) { 4228 udev->usb2_hw_lpm_capable = 1; 4229 udev->l1_params.timeout = XHCI_L1_TIMEOUT; 4230 udev->l1_params.besl = XHCI_DEFAULT_BESL; 4231 if (xhci_check_usb2_port_capability(xhci, portnum, 4232 XHCI_BLC)) 4233 udev->usb2_hw_lpm_besl_capable = 1; 4234 } 4235 4236 return 0; 4237 } 4238 4239 #else 4240 4241 int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4242 struct usb_device *udev, int enable) 4243 { 4244 return 0; 4245 } 4246 4247 int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 4248 { 4249 return 0; 4250 } 4251 4252 #endif /* CONFIG_PM_RUNTIME */ 4253 4254 /*---------------------- USB 3.0 Link PM functions ------------------------*/ 4255 4256 #ifdef CONFIG_PM 4257 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */ 4258 static unsigned long long xhci_service_interval_to_ns( 4259 struct usb_endpoint_descriptor *desc) 4260 { 4261 return (1ULL << (desc->bInterval - 1)) * 125 * 1000; 4262 } 4263 4264 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev, 4265 enum usb3_link_state state) 4266 { 4267 unsigned long long sel; 4268 unsigned long long pel; 4269 unsigned int max_sel_pel; 4270 char *state_name; 4271 4272 switch (state) { 4273 case USB3_LPM_U1: 4274 /* Convert SEL and PEL stored in nanoseconds to microseconds */ 4275 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); 4276 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000); 4277 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL; 4278 state_name = "U1"; 4279 break; 4280 case USB3_LPM_U2: 4281 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); 4282 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000); 4283 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL; 4284 state_name = "U2"; 4285 break; 4286 default: 4287 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 4288 __func__); 4289 return USB3_LPM_DISABLED; 4290 } 4291 4292 if (sel <= max_sel_pel && pel <= max_sel_pel) 4293 return USB3_LPM_DEVICE_INITIATED; 4294 4295 if (sel > max_sel_pel) 4296 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4297 "due to long SEL %llu ms\n", 4298 state_name, sel); 4299 else 4300 dev_dbg(&udev->dev, "Device-initiated %s disabled " 4301 "due to long PEL %llu ms\n", 4302 state_name, pel); 4303 return USB3_LPM_DISABLED; 4304 } 4305 4306 /* Returns the hub-encoded U1 timeout value. 
4307 * The U1 timeout should be the maximum of the following values: 4308 * - For control endpoints, U1 system exit latency (SEL) * 3 4309 * - For bulk endpoints, U1 SEL * 5 4310 * - For interrupt endpoints: 4311 * - Notification EPs, U1 SEL * 3 4312 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2) 4313 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2) 4314 */ 4315 static u16 xhci_calculate_intel_u1_timeout(struct usb_device *udev, 4316 struct usb_endpoint_descriptor *desc) 4317 { 4318 unsigned long long timeout_ns; 4319 int ep_type; 4320 int intr_type; 4321 4322 ep_type = usb_endpoint_type(desc); 4323 switch (ep_type) { 4324 case USB_ENDPOINT_XFER_CONTROL: 4325 timeout_ns = udev->u1_params.sel * 3; 4326 break; 4327 case USB_ENDPOINT_XFER_BULK: 4328 timeout_ns = udev->u1_params.sel * 5; 4329 break; 4330 case USB_ENDPOINT_XFER_INT: 4331 intr_type = usb_endpoint_interrupt_type(desc); 4332 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) { 4333 timeout_ns = udev->u1_params.sel * 3; 4334 break; 4335 } 4336 /* Otherwise the calculation is the same as isoc eps */ 4337 case USB_ENDPOINT_XFER_ISOC: 4338 timeout_ns = xhci_service_interval_to_ns(desc); 4339 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100); 4340 if (timeout_ns < udev->u1_params.sel * 2) 4341 timeout_ns = udev->u1_params.sel * 2; 4342 break; 4343 default: 4344 return 0; 4345 } 4346 4347 /* The U1 timeout is encoded in 1us intervals. */ 4348 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4349 /* Don't return a timeout of zero, because that's USB3_LPM_DISABLED. */ 4350 if (timeout_ns == USB3_LPM_DISABLED) 4351 timeout_ns++; 4352 4353 /* If the necessary timeout value is bigger than what we can set in the 4354 * USB 3.0 hub, we have to disable hub-initiated U1. 4355 */ 4356 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4357 return timeout_ns; 4358 dev_dbg(&udev->dev, "Hub-initiated U1 disabled " 4359 "due to long timeout %llu ms\n", timeout_ns); 4360 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4361 } 4362 4363 /* Returns the hub-encoded U2 timeout value. 4364 * The U2 timeout should be the maximum of: 4365 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4366 * - largest bInterval of any active periodic endpoint (to avoid going 4367 * into lower power link states between intervals). 4368 * - the U2 Exit Latency of the device 4369 */ 4370 static u16 xhci_calculate_intel_u2_timeout(struct usb_device *udev, 4371 struct usb_endpoint_descriptor *desc) 4372 { 4373 unsigned long long timeout_ns; 4374 unsigned long long u2_del_ns; 4375 4376 timeout_ns = 10 * 1000 * 1000; 4377 4378 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4379 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4380 timeout_ns = xhci_service_interval_to_ns(desc); 4381 4382 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4383 if (u2_del_ns > timeout_ns) 4384 timeout_ns = u2_del_ns; 4385 4386 /* The U2 timeout is encoded in 256us intervals */ 4387 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4388 /* If the necessary timeout value is bigger than what we can set in the 4389 * USB 3.0 hub, we have to disable hub-initiated U2. 
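 *
 * Rough worked example: if every periodic service interval and the
 * device's U2 exit latency stay below the 10 ms floor, timeout_ns is
 * 10,000,000 and DIV_ROUND_UP(10000000, 256 * 1000) encodes to 40.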
4390 */ 4391 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4392 return timeout_ns; 4393 dev_dbg(&udev->dev, "Hub-initiated U2 disabled " 4394 "due to long timeout %llu ms\n", timeout_ns); 4395 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4396 } 4397 4398 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4399 struct usb_device *udev, 4400 struct usb_endpoint_descriptor *desc, 4401 enum usb3_link_state state, 4402 u16 *timeout) 4403 { 4404 if (state == USB3_LPM_U1) { 4405 if (xhci->quirks & XHCI_INTEL_HOST) 4406 return xhci_calculate_intel_u1_timeout(udev, desc); 4407 } else { 4408 if (xhci->quirks & XHCI_INTEL_HOST) 4409 return xhci_calculate_intel_u2_timeout(udev, desc); 4410 } 4411 4412 return USB3_LPM_DISABLED; 4413 } 4414 4415 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4416 struct usb_device *udev, 4417 struct usb_endpoint_descriptor *desc, 4418 enum usb3_link_state state, 4419 u16 *timeout) 4420 { 4421 u16 alt_timeout; 4422 4423 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 4424 desc, state, timeout); 4425 4426 /* If we found we can't enable hub-initiated LPM, or 4427 * the U1 or U2 exit latency was too high to allow 4428 * device-initiated LPM as well, just stop searching. 4429 */ 4430 if (alt_timeout == USB3_LPM_DISABLED || 4431 alt_timeout == USB3_LPM_DEVICE_INITIATED) { 4432 *timeout = alt_timeout; 4433 return -E2BIG; 4434 } 4435 if (alt_timeout > *timeout) 4436 *timeout = alt_timeout; 4437 return 0; 4438 } 4439 4440 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 4441 struct usb_device *udev, 4442 struct usb_host_interface *alt, 4443 enum usb3_link_state state, 4444 u16 *timeout) 4445 { 4446 int j; 4447 4448 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 4449 if (xhci_update_timeout_for_endpoint(xhci, udev, 4450 &alt->endpoint[j].desc, state, timeout)) 4451 return -E2BIG; 4452 continue; 4453 } 4454 return 0; 4455 } 4456 4457 static int xhci_check_intel_tier_policy(struct usb_device *udev, 4458 enum usb3_link_state state) 4459 { 4460 struct usb_device *parent; 4461 unsigned int num_hubs; 4462 4463 if (state == USB3_LPM_U2) 4464 return 0; 4465 4466 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */ 4467 for (parent = udev->parent, num_hubs = 0; parent->parent; 4468 parent = parent->parent) 4469 num_hubs++; 4470 4471 if (num_hubs < 2) 4472 return 0; 4473 4474 dev_dbg(&udev->dev, "Disabling U1 link state for device" 4475 " below second-tier hub.\n"); 4476 dev_dbg(&udev->dev, "Plug device into first-tier hub " 4477 "to decrease power consumption.\n"); 4478 return -E2BIG; 4479 } 4480 4481 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 4482 struct usb_device *udev, 4483 enum usb3_link_state state) 4484 { 4485 if (xhci->quirks & XHCI_INTEL_HOST) 4486 return xhci_check_intel_tier_policy(udev, state); 4487 return -EINVAL; 4488 } 4489 4490 /* Returns the U1 or U2 timeout that should be enabled. 4491 * If the tier check or timeout setting functions return with a non-zero exit 4492 * code, that means the timeout value has been finalized and we shouldn't look 4493 * at any more endpoints. 
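 *
 * The search below starts with ep0 and then walks every endpoint of each
 * interface's current altsetting in the active configuration; a bound
 * driver that sets disable_hub_initiated_lpm short-circuits the whole
 * device to the device-initiated-only timeout.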
4494 */ 4495 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 4496 struct usb_device *udev, enum usb3_link_state state) 4497 { 4498 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4499 struct usb_host_config *config; 4500 char *state_name; 4501 int i; 4502 u16 timeout = USB3_LPM_DISABLED; 4503 4504 if (state == USB3_LPM_U1) 4505 state_name = "U1"; 4506 else if (state == USB3_LPM_U2) 4507 state_name = "U2"; 4508 else { 4509 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 4510 state); 4511 return timeout; 4512 } 4513 4514 if (xhci_check_tier_policy(xhci, udev, state) < 0) 4515 return timeout; 4516 4517 /* Gather some information about the currently installed configuration 4518 * and alternate interface settings. 4519 */ 4520 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 4521 state, &timeout)) 4522 return timeout; 4523 4524 config = udev->actconfig; 4525 if (!config) 4526 return timeout; 4527 4528 for (i = 0; i < config->desc.bNumInterfaces; i++) { 4529 struct usb_driver *driver; 4530 struct usb_interface *intf = config->interface[i]; 4531 4532 if (!intf) 4533 continue; 4534 4535 /* Check if any currently bound drivers want hub-initiated LPM 4536 * disabled. 4537 */ 4538 if (intf->dev.driver) { 4539 driver = to_usb_driver(intf->dev.driver); 4540 if (driver && driver->disable_hub_initiated_lpm) { 4541 dev_dbg(&udev->dev, "Hub-initiated %s disabled " 4542 "at request of driver %s\n", 4543 state_name, driver->name); 4544 return xhci_get_timeout_no_hub_lpm(udev, state); 4545 } 4546 } 4547 4548 /* Not sure how this could happen... */ 4549 if (!intf->cur_altsetting) 4550 continue; 4551 4552 if (xhci_update_timeout_for_interface(xhci, udev, 4553 intf->cur_altsetting, 4554 state, &timeout)) 4555 return timeout; 4556 } 4557 return timeout; 4558 } 4559 4560 static int calculate_max_exit_latency(struct usb_device *udev, 4561 enum usb3_link_state state_changed, 4562 u16 hub_encoded_timeout) 4563 { 4564 unsigned long long u1_mel_us = 0; 4565 unsigned long long u2_mel_us = 0; 4566 unsigned long long mel_us = 0; 4567 bool disabling_u1; 4568 bool disabling_u2; 4569 bool enabling_u1; 4570 bool enabling_u2; 4571 4572 disabling_u1 = (state_changed == USB3_LPM_U1 && 4573 hub_encoded_timeout == USB3_LPM_DISABLED); 4574 disabling_u2 = (state_changed == USB3_LPM_U2 && 4575 hub_encoded_timeout == USB3_LPM_DISABLED); 4576 4577 enabling_u1 = (state_changed == USB3_LPM_U1 && 4578 hub_encoded_timeout != USB3_LPM_DISABLED); 4579 enabling_u2 = (state_changed == USB3_LPM_U2 && 4580 hub_encoded_timeout != USB3_LPM_DISABLED); 4581 4582 /* If U1 was already enabled and we're not disabling it, 4583 * or we're going to enable U1, account for the U1 max exit latency. 4584 */ 4585 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 4586 enabling_u1) 4587 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 4588 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 4589 enabling_u2) 4590 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 4591 4592 if (u1_mel_us > u2_mel_us) 4593 mel_us = u1_mel_us; 4594 else 4595 mel_us = u2_mel_us; 4596 /* xHCI host controller max exit latency field is only 16 bits wide. */ 4597 if (mel_us > MAX_EXIT) { 4598 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 4599 "is too big.\n", mel_us); 4600 return -E2BIG; 4601 } 4602 return mel_us; 4603 } 4604 4605 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. 
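 * The Max Exit Latency in the slot context is updated before the timeout
 * is handed back; if the combined U1/U2 exit latency would overflow the
 * 16-bit MEL field, the hub-initiated timeout is forced back to
 * USB3_LPM_DISABLED.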
*/ 4606 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4607 struct usb_device *udev, enum usb3_link_state state) 4608 { 4609 struct xhci_hcd *xhci; 4610 u16 hub_encoded_timeout; 4611 int mel; 4612 int ret; 4613 4614 xhci = hcd_to_xhci(hcd); 4615 /* The LPM timeout values are pretty host-controller specific, so don't 4616 * enable hub-initiated timeouts unless the vendor has provided 4617 * information about their timeout algorithm. 4618 */ 4619 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4620 !xhci->devs[udev->slot_id]) 4621 return USB3_LPM_DISABLED; 4622 4623 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 4624 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 4625 if (mel < 0) { 4626 /* Max Exit Latency is too big, disable LPM. */ 4627 hub_encoded_timeout = USB3_LPM_DISABLED; 4628 mel = 0; 4629 } 4630 4631 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4632 if (ret) 4633 return ret; 4634 return hub_encoded_timeout; 4635 } 4636 4637 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4638 struct usb_device *udev, enum usb3_link_state state) 4639 { 4640 struct xhci_hcd *xhci; 4641 u16 mel; 4642 int ret; 4643 4644 xhci = hcd_to_xhci(hcd); 4645 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 4646 !xhci->devs[udev->slot_id]) 4647 return 0; 4648 4649 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 4650 ret = xhci_change_max_exit_latency(xhci, udev, mel); 4651 if (ret) 4652 return ret; 4653 return 0; 4654 } 4655 #else /* CONFIG_PM */ 4656 4657 int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 4658 struct usb_device *udev, enum usb3_link_state state) 4659 { 4660 return USB3_LPM_DISABLED; 4661 } 4662 4663 int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 4664 struct usb_device *udev, enum usb3_link_state state) 4665 { 4666 return 0; 4667 } 4668 #endif /* CONFIG_PM */ 4669 4670 /*-------------------------------------------------------------------------*/ 4671 4672 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 4673 * internal data structures for the device. 
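 * The update marks the slot context as a hub (and multi-TT where
 * applicable), allocates TT info for high speed hubs, fills in the port
 * count and TT think time on hosts newer than 0.95, and then issues a
 * Configure Endpoint or Evaluate Context command as appropriate.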
4674 */ 4675 int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, 4676 struct usb_tt *tt, gfp_t mem_flags) 4677 { 4678 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4679 struct xhci_virt_device *vdev; 4680 struct xhci_command *config_cmd; 4681 struct xhci_input_control_ctx *ctrl_ctx; 4682 struct xhci_slot_ctx *slot_ctx; 4683 unsigned long flags; 4684 unsigned think_time; 4685 int ret; 4686 4687 /* Ignore root hubs */ 4688 if (!hdev->parent) 4689 return 0; 4690 4691 vdev = xhci->devs[hdev->slot_id]; 4692 if (!vdev) { 4693 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); 4694 return -EINVAL; 4695 } 4696 config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); 4697 if (!config_cmd) { 4698 xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); 4699 return -ENOMEM; 4700 } 4701 ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx); 4702 if (!ctrl_ctx) { 4703 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4704 __func__); 4705 xhci_free_command(xhci, config_cmd); 4706 return -ENOMEM; 4707 } 4708 4709 spin_lock_irqsave(&xhci->lock, flags); 4710 if (hdev->speed == USB_SPEED_HIGH && 4711 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) { 4712 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n"); 4713 xhci_free_command(xhci, config_cmd); 4714 spin_unlock_irqrestore(&xhci->lock, flags); 4715 return -ENOMEM; 4716 } 4717 4718 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); 4719 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 4720 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); 4721 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); 4722 if (tt->multi) 4723 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); 4724 if (xhci->hci_version > 0x95) { 4725 xhci_dbg(xhci, "xHCI version %x needs hub " 4726 "TT think time and number of ports\n", 4727 (unsigned int) xhci->hci_version); 4728 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild)); 4729 /* Set TT think time - convert from ns to FS bit times. 4730 * 0 = 8 FS bit times, 1 = 16 FS bit times, 4731 * 2 = 24 FS bit times, 3 = 32 FS bit times. 4732 * 4733 * xHCI 1.0: this field shall be 0 if the device is not a 4734 * High-spped hub. 4735 */ 4736 think_time = tt->think_time; 4737 if (think_time != 0) 4738 think_time = (think_time / 666) - 1; 4739 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH) 4740 slot_ctx->tt_info |= 4741 cpu_to_le32(TT_THINK_TIME(think_time)); 4742 } else { 4743 xhci_dbg(xhci, "xHCI version %x doesn't need hub " 4744 "TT think time or number of ports\n", 4745 (unsigned int) xhci->hci_version); 4746 } 4747 slot_ctx->dev_state = 0; 4748 spin_unlock_irqrestore(&xhci->lock, flags); 4749 4750 xhci_dbg(xhci, "Set up %s for hub device.\n", 4751 (xhci->hci_version > 0x95) ? 4752 "configure endpoint" : "evaluate context"); 4753 xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id); 4754 xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0); 4755 4756 /* Issue and wait for the configure endpoint or 4757 * evaluate context command. 
4758 */ 4759 if (xhci->hci_version > 0x95) 4760 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4761 false, false); 4762 else 4763 ret = xhci_configure_endpoint(xhci, hdev, config_cmd, 4764 true, false); 4765 4766 xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id); 4767 xhci_dbg_ctx(xhci, vdev->out_ctx, 0); 4768 4769 xhci_free_command(xhci, config_cmd); 4770 return ret; 4771 } 4772 4773 int xhci_get_frame(struct usb_hcd *hcd) 4774 { 4775 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4776 /* EHCI mods by the periodic size. Why? */ 4777 return readl(&xhci->run_regs->microframe_index) >> 3; 4778 } 4779 4780 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 4781 { 4782 struct xhci_hcd *xhci; 4783 struct device *dev = hcd->self.controller; 4784 int retval; 4785 4786 /* Accept arbitrarily long scatter-gather lists */ 4787 hcd->self.sg_tablesize = ~0; 4788 4789 /* support to build packet from discontinuous buffers */ 4790 hcd->self.no_sg_constraint = 1; 4791 4792 /* XHCI controllers don't stop the ep queue on short packets :| */ 4793 hcd->self.no_stop_on_short = 1; 4794 4795 if (usb_hcd_is_primary_hcd(hcd)) { 4796 xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); 4797 if (!xhci) 4798 return -ENOMEM; 4799 *((struct xhci_hcd **) hcd->hcd_priv) = xhci; 4800 xhci->main_hcd = hcd; 4801 /* Mark the first roothub as being USB 2.0. 4802 * The xHCI driver will register the USB 3.0 roothub. 4803 */ 4804 hcd->speed = HCD_USB2; 4805 hcd->self.root_hub->speed = USB_SPEED_HIGH; 4806 /* 4807 * USB 2.0 roothub under xHCI has an integrated TT, 4808 * (rate matching hub) as opposed to having an OHCI/UHCI 4809 * companion controller. 4810 */ 4811 hcd->has_tt = 1; 4812 } else { 4813 /* xHCI private pointer was set in xhci_pci_probe for the second 4814 * registered roothub. 4815 */ 4816 return 0; 4817 } 4818 4819 xhci->cap_regs = hcd->regs; 4820 xhci->op_regs = hcd->regs + 4821 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 4822 xhci->run_regs = hcd->regs + 4823 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 4824 /* Cache read-only capability registers */ 4825 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1); 4826 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); 4827 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); 4828 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase); 4829 xhci->hci_version = HC_VERSION(xhci->hcc_params); 4830 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); 4831 xhci_print_registers(xhci); 4832 4833 xhci->quirks = quirks; 4834 4835 get_quirks(dev, xhci); 4836 4837 /* In xhci controllers which follow xhci 1.0 spec gives a spurious 4838 * success event after a short transfer. This quirk will ignore such 4839 * spurious event. 4840 */ 4841 if (xhci->hci_version > 0x96) 4842 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 4843 4844 /* Make sure the HC is halted. */ 4845 retval = xhci_halt(xhci); 4846 if (retval) 4847 goto error; 4848 4849 xhci_dbg(xhci, "Resetting HCD\n"); 4850 /* Reset the internal HC memory state and registers. 
*/ 4851 retval = xhci_reset(xhci); 4852 if (retval) 4853 goto error; 4854 xhci_dbg(xhci, "Reset complete\n"); 4855 4856 /* Set dma_mask and coherent_dma_mask to 64-bits, 4857 * if xHC supports 64-bit addressing */ 4858 if (HCC_64BIT_ADDR(xhci->hcc_params) && 4859 !dma_set_mask(dev, DMA_BIT_MASK(64))) { 4860 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); 4861 dma_set_coherent_mask(dev, DMA_BIT_MASK(64)); 4862 } 4863 4864 xhci_dbg(xhci, "Calling HCD init\n"); 4865 /* Initialize HCD and host controller data structures. */ 4866 retval = xhci_init(hcd); 4867 if (retval) 4868 goto error; 4869 xhci_dbg(xhci, "Called HCD init\n"); 4870 return 0; 4871 error: 4872 kfree(xhci); 4873 return retval; 4874 } 4875 4876 MODULE_DESCRIPTION(DRIVER_DESC); 4877 MODULE_AUTHOR(DRIVER_AUTHOR); 4878 MODULE_LICENSE("GPL"); 4879 4880 static int __init xhci_hcd_init(void) 4881 { 4882 int retval; 4883 4884 retval = xhci_register_pci(); 4885 if (retval < 0) { 4886 pr_debug("Problem registering PCI driver.\n"); 4887 return retval; 4888 } 4889 retval = xhci_register_plat(); 4890 if (retval < 0) { 4891 pr_debug("Problem registering platform driver.\n"); 4892 goto unreg_pci; 4893 } 4894 /* 4895 * Check the compiler generated sizes of structures that must be laid 4896 * out in specific ways for hardware access. 4897 */ 4898 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8); 4899 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8); 4900 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8); 4901 /* xhci_device_control has eight fields, and also 4902 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 4903 */ 4904 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 4905 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 4906 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8); 4907 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8); 4908 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); 4909 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ 4910 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); 4911 return 0; 4912 unreg_pci: 4913 xhci_unregister_pci(); 4914 return retval; 4915 } 4916 module_init(xhci_hcd_init); 4917 4918 static void __exit xhci_hcd_cleanup(void) 4919 { 4920 xhci_unregister_pci(); 4921 xhci_unregister_plat(); 4922 } 4923 module_exit(xhci_hcd_cleanup); 4924