// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>
#include <linux/usb/xhci-sideband.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

void xhci_portsc_writel(struct xhci_port *port, u32 val)
{
	trace_xhci_portsc_writel(port, val);
	writel(val, &port->port_reg->portsc);
}
EXPORT_SYMBOL_GPL(xhci_portsc_writel);

u32 xhci_portsc_readl(struct xhci_port *port)
{
	return readl(&port->port_reg->portsc);
}
EXPORT_SYMBOL_GPL(xhci_portsc_readl);

static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg;

	if (!td || !td->start_seg)
		return false;

	xhci_for_each_ring_seg(ring->first_seg, seg) {
		if (seg == td->start_seg)
			return true;
	}

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @timeout_us: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "timeout_us" microseconds
 * have passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
{
	u32 result;
	int ret;

	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, timeout_us);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}
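
/*
 * Illustrative sketch (not part of the driver): callers wait on a status
 * bit with xhci_handshake() and tell the two failure modes apart by the
 * errno, e.g. when waiting for the HCHalted bit with the constants used
 * later in this file:
 *
 *	ret = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT,
 *			     XHCI_MAX_HALT_USEC);
 *	if (ret == -ENODEV)
 *		return ret;	// register read all-ones: host was removed
 *	if (ret == -ETIMEDOUT)
 *		return ret;	// bits never matched: hardware flakeout
 */
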
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		if (!(xhci->xhc_state & XHCI_STATE_DYING))
			xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}

	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;

	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
		       temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			     STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret) {
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;
		xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
	}

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		if (!(xhci->xhc_state & XHCI_STATE_DYING))
			xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/* Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit, and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may very rarely result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

	ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status, STS_CNR, 0, timeout_us);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}
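
/*
 * Illustrative note (a sketch, not new driver logic): xhci_reset() bails
 * out unless the host is already halted, so the re-initialization paths in
 * this file always pair the two calls, e.g. as xhci_stop() does below:
 *
 *	xhci_halt(xhci);
 *	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
 */
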
static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	struct iommu_domain *domain;
	int err, i;
	u64 val;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non-visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !domain ||
	    domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	for (i = 0; i < xhci->max_interrupters; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

int xhci_enable_interrupter(struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IP;
	iman |= IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	/* Read operation to guarantee the write has been flushed from posted buffers */
	readl(&ir->ir_set->iman);
	return 0;
}

int xhci_disable_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
	u32 iman;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	iman = readl(&ir->ir_set->iman);
	iman &= ~IMAN_IP;
	iman &= ~IMAN_IE;
	writel(iman, &ir->ir_set->iman);

	iman = readl(&ir->ir_set->iman);
	if (iman & IMAN_IP)
		xhci_dbg(xhci, "%s: Interrupt pending\n", __func__);

	return 0;
}

/* interrupt moderation interval imod_interval in nanoseconds */
int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
				    u32 imod_interval)
{
	u32 imod;

	if (!ir || !ir->ir_set)
		return -EINVAL;

	/* IMODI value in IMOD register is in 250ns increments */
	imod_interval = umin(imod_interval / 250, IMODI_MASK);

	imod = readl(&ir->ir_set->imod);
	imod &= ~IMODI_MASK;
	imod |= imod_interval;
	writel(imod, &ir->ir_set->imod);

	return 0;
}
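
/*
 * Worked example (illustrative): because the IMODI field counts in 250 ns
 * units, a requested interval of 5000 ns is written as 5000 / 250 = 20,
 * and any request above 250 * IMODI_MASK ns saturates at IMODI_MASK.
 */
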
static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = timer_container_of(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;
	hcd = rhub->hcd;

	if (!hcd)
		return;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = xhci_portsc_readl(rhub->ports[i]);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance mode detected->port %d",
				       i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Attempting compliance mode recovery");

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode. The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers the port by issuing
 * a Warm reset if Compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xhci spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}

static void xhci_hcd_page_size(struct xhci_hcd *xhci)
{
	u32 page_size;

	page_size = readl(&xhci->op_regs->page_size) & XHCI_PAGE_SIZE_MASK;
	if (!is_power_of_2(page_size)) {
		xhci_warn(xhci, "Invalid page size register = 0x%x\n", page_size);
		/* Fallback to 4K page size, since that's common */
		page_size = 1;
	}

	xhci->page_size = page_size << 12;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "HCD page size set to %iK",
		       xhci->page_size >> 10);
}
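
/*
 * Worked example (illustrative): the PAGESIZE register encodes the page
 * size as a single set bit n, meaning 2^(n + 12) bytes. A register value
 * of 0x1 therefore yields 1 << 12 = 4096 bytes, which is why the fallback
 * above simply forces the value back to 1.
 */
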
static void xhci_enable_max_dev_slots(struct xhci_hcd *xhci)
{
	u32 config_reg;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xHC can handle at most %d device slots",
		       xhci->max_slots);

	config_reg = readl(&xhci->op_regs->config_reg);
	config_reg &= ~HCS_SLOTS_MASK;
	config_reg |= xhci->max_slots;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting Max device slots reg = 0x%x",
		       config_reg);
	writel(config_reg, &xhci->op_regs->config_reg);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	dma_addr_t deq_dma;
	u64 crcr;

	deq_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, xhci->cmd_ring->dequeue);
	deq_dma &= CMD_RING_PTR_MASK;

	crcr = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	crcr &= ~CMD_RING_PTR_MASK;
	crcr |= deq_dma;

	crcr &= ~CMD_RING_CYCLE;
	crcr |= xhci->cmd_ring->cycle_state;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Setting command ring address to 0x%llx", crcr);
	xhci_write_64(xhci, crcr, &xhci->op_regs->cmd_ring);
}
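
/*
 * Worked example (illustrative): CRCR multiplexes the 64-byte-aligned ring
 * pointer with control bits in its low bits, so the sequence above masks
 * the DMA address with CMD_RING_PTR_MASK and then ORs in the current cycle
 * state; e.g. a dequeue pointer of 0x12340040 with cycle state 1 is
 * written as 0x12340041.
 */
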
static void xhci_set_doorbell_ptr(struct xhci_hcd *xhci)
{
	u32 offset;

	offset = readl(&xhci->cap_regs->db_off) & DBOFF_MASK;
	xhci->dba = (void __iomem *)xhci->cap_regs + offset;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Doorbell array is located at offset 0x%x from cap regs base addr", offset);
}

/*
 * Enable USB 3.0 device notifications for function remote wake, which is
 * necessary for allowing USB 3.0 devices to do remote wakeup from U3
 * (device suspend).
 */
static void xhci_set_dev_notifications(struct xhci_hcd *xhci)
{
	u32 dev_notf;

	dev_notf = readl(&xhci->op_regs->dev_notification);
	dev_notf &= ~DEV_NOTE_MASK;
	dev_notf |= DEV_NOTE_FWAKE;
	writel(dev_notf, &xhci->op_regs->dev_notification);
}

/* Setup basic xHCI registers */
static void xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Starting %s", __func__);

	/* Set the Number of Device Slots Enabled to the maximum supported value */
	xhci_enable_max_dev_slots(xhci);

	/* Initialize the Command ring */
	xhci_ring_init(xhci, xhci->cmd_ring);
	/*
	 * Reserve one command ring TRB for disabling LPM.
	 * Since the USB core grabs the shared usb_bus bandwidth mutex before
	 * disabling LPM, we only need to reserve one TRB for all devices.
	 */
	xhci->cmd_ring_reserved_trbs = 1;
	/* Set the address in the Command Ring Control register */
	xhci_set_cmd_ring_deq(xhci);

	/* Set Device Context Base Address Array pointer */
	xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

	/* Set Doorbell array pointer */
	xhci_set_doorbell_ptr(xhci);

	/* Set USB 3.0 device notifications for function remote wake */
	xhci_set_dev_notifications(xhci);

	/* Initialize the Primary interrupter */
	xhci_ring_init(xhci, xhci->interrupters[0]->event_ring);
	xhci_add_interrupter(xhci, 0);
	xhci->interrupters[0]->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished %s", __func__);
}

/*-------------------------------------------------------------------------*/

static int xhci_run_finished(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir = xhci->interrupters[0];
	unsigned long flags;
	u32 temp;

	/*
	 * Enable interrupts before starting the host (xhci 4.2 and 5.5.2).
	 * Protect the short window before host is running with a lock
	 */
	spin_lock_irqsave(&xhci->lock, flags);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts");
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	writel(temp, &xhci->op_regs->command);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
	xhci_enable_interrupter(ir);

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENODEV;
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	/*
	 * Start the xHCI host controller running only after the USB 2.0
	 * roothub is setup.
	 */
	hcd->uses_new_polling = 1;
	if (hcd->msi_enabled)
		ir->ip_autoclear = true;

	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
	temp_64 &= ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "ERST deq = 64'h%0lx", (long unsigned int) temp_64);

	xhci_set_interrupter_moderation(ir, xhci->imod_interval);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
						TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "Finished %s for main hcd", __func__);

	xhci_create_dbc_dev(xhci);

	xhci_debugfs_init(xhci);

	if (xhci_has_one_roothub(xhci))
		return xhci_run_finished(xhci);

	set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_interrupter *ir = xhci->interrupters[0];

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_remove_dbc_dev(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
	spin_unlock_irq(&xhci->lock);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	xhci_disable_interrupter(xhci, ir);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_stop completed - status = %x",
		       readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}
EXPORT_SYMBOL_GPL(xhci_stop);

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	/* Don't poll the roothubs after shutdown. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);

	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);

	/*
	 * Workaround for spurious wakeups at shutdown with HSW, and for boot
	 * firmware delay in ADL-P PCH if ports are left in U3 at shutdown
	 */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP ||
	    xhci->quirks & XHCI_RESET_TO_DEFAULT)
		xhci_reset(xhci, XHCI_RESET_SHORT_USEC);

	spin_unlock_irq(&xhci->lock);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		       "xhci_shutdown completed - status = %x",
		       readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);

	/* save both primary and all secondary interrupters */
	/* FIXME: should we lock to prevent a race with removing a secondary interrupter? */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		ir->s3_erst_size = readl(&ir->ir_set->erst_size);
		ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
		ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
		ir->s3_iman = readl(&ir->ir_set->iman);
		ir->s3_imod = readl(&ir->ir_set->imod);
	}
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	struct xhci_interrupter *ir;
	unsigned int i;

	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);

	/* FIXME should we lock to protect against freeing of interrupters */
	for (i = 0; i < xhci->max_interrupters; i++) {
		ir = xhci->interrupters[i];
		if (!ir)
			continue;

		writel(ir->s3_erst_size, &ir->ir_set->erst_size);
		xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
		xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
		writel(ir->s3_iman, &ir->ir_set->iman);
		writel(ir->s3_imod, &ir->ir_set->imod);
	}
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't reliably save the command ring pointer across suspend, so
 * we need to re-program it on resume. Unfortunately, the pointer must be
 * 64-byte aligned, because of the reserved bits in the command ring dequeue
 * pointer register. Therefore, we can't just set the dequeue pointer back in
 * the middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	xhci_for_each_ring_seg(ring->first_seg, seg) {
		/* erase all TRBs before the link */
		memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		/* clear link cycle bit */
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
	}

	xhci_initialize_ring_info(ring);
	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
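
/*
 * Worked example (illustrative): TRBs are 16 bytes, so a dequeue pointer
 * three TRBs into a segment sits at offset 0x30; that address is 16-byte
 * aligned but not 64-byte aligned, and thus can't be written back into the
 * command ring control register, whose low bits are reserved. Clearing the
 * ring and restarting from the segment base sidesteps the problem.
 */
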
/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected termination but never successfully enumerated (trained to 0U).
 * Internal wake causes immediate xHCI wake after suspend. PORT_CSC write done
 * at enumeration clears this wake, force one here as well for unconnected ports.
 */
static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = xhci_portsc_readl(rhub->ports[i]);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wakeup is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			xhci_portsc_writel(rhub->ports[i], t2);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = xhci_portsc_readl(ports[port_index]);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = xhci_portsc_readl(ports[port_index]);
		if (portsc & (PORT_CHANGE_MASK | PORT_CAS) ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int err;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 usbsts;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
	    (xhci->shared_hcd && xhci->shared_hcd->state != HC_STATE_SUSPENDED))
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	timer_delete_sync(&hcd->rh_timer);
	if (xhci->shared_hcd) {
		clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		timer_delete_sync(&xhci->shared_hcd->rh_timer);
	}

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	err = xhci_handshake(&xhci->op_regs->status, STS_HALT, STS_HALT, delay);
	if (err) {
		xhci_warn(xhci, "Clearing Run/Stop bit failed %d\n", err);
		goto handshake_error;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);

	err = xhci_handshake(&xhci->op_regs->status, STS_SAVE, 0, 20 * USEC_PER_MSEC);
	usbsts = readl(&xhci->op_regs->status);
	xhci->broken_suspend = 0;
	if (err) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the SSS bit of
		 * USBSTS; the driver then polls for BIT(8) to clear, which
		 * never happens, and eventually times out assuming the
		 * controller is not responding. To work around this, check
		 * that the SRE and HCE bits are not set (as per xhci
		 * Section 5.4.2) and bypass the timeout.
		 */
		if (!(xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND)) {
			xhci_warn(xhci, "Controller Save State failed %d\n", err);
			goto handshake_error;
		}

		if (usbsts & (STS_SRE | STS_HCE)) {
			xhci_warn(xhci, "Controller Save State failed, USBSTS 0x%08x\n", usbsts);
			goto handshake_error;
		}

		xhci_dbg(xhci, "SNPS broken suspend, save state unreliable\n");
		xhci->broken_suspend = 1;
	} else if (usbsts & STS_SRE) {
		xhci_warn(xhci, "Suspend Save Error (SRE), USBSTS 0x%08x\n", usbsts);
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
	    (!(xhci_all_ports_seen_u0(xhci)))) {
		timer_delete_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			       "%s: compliance mode recovery timer deleted",
			       __func__);
	}

	return 0;

handshake_error:
	spin_unlock_irq(&xhci->lock);
	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool power_lost, bool is_auto_resume)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct xhci_segment *seg;
	int retval = 0;
	bool pending_portevent = false;
	bool suspended_usb3_devs = false;
	bool reset_registers = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs needs to settle from the
	 * transition into bus suspend.
	 */
	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (xhci->shared_hcd)
		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);

	if (power_lost || xhci->broken_suspend || xhci->quirks & XHCI_RESET_ON_RESUME) {
		xhci_dbg(xhci, "HC state lost, performing host controller reset\n");
		reset_registers = true;
	} else {
		xhci_dbg(xhci, "HC state intact, continuing without reset\n");
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore registers */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the controller
		 * restore, so set the timeout to 100 ms. The xHCI specification
		 * doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}

		/* re-initialize the HC on Restore Error, or Host Controller Error */
		temp = readl(&xhci->op_regs->status);
		if ((temp & (STS_SRE | STS_HCE)) && !(xhci->xhc_state & XHCI_STATE_REMOVING)) {
			xhci_warn(xhci, "xHC error in resume, USBSTS 0x%x, Reinit\n", temp);
			reset_registers = true;
		}
	}

	if (reset_registers) {
		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			timer_delete_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				       "Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		if (xhci->shared_hcd)
			usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		if (xhci->xhc_state & XHCI_STATE_REMOVING)
			retval = -ENODEV;
		else
			retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;

		cancel_delayed_work_sync(&xhci->cmd_timer);

		/* Delete all remaining commands */
		xhci_cleanup_command_queue(xhci);

		/* Clear data which is re-initialized during runtime */
		xhci_for_each_ring_seg(xhci->interrupters[0]->event_ring->first_seg, seg)
			memset(seg->trbs, 0, sizeof(union xhci_trb) * TRBS_PER_SEGMENT);

		for (int i = xhci->max_slots; i > 0; i--)
			xhci_free_virt_devices_depth_first(xhci, i);

		xhci_rh_bw_cleanup(xhci);

		xhci->cmd_ring_reserved_trbs = 0;
		xhci_for_each_ring_seg(xhci->cmd_ring->first_seg, seg)
			memset(seg->trbs, 0, sizeof(union xhci_trb) * TRBS_PER_SEGMENT);

		xhci_debugfs_exit(xhci);

		xhci_init(hcd);

		/*
		 * USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd);
		if (!retval && xhci->shared_hcd) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(xhci->shared_hcd);
		}
		if (retval)
			return retval;
		/*
		 * Resume roothubs unconditionally as PORTSC change bits are not
		 * immediately visible after xHC reset
		 */
		hcd->state = HC_STATE_SUSPENDED;

		if (xhci->shared_hcd) {
			xhci->shared_hcd->state = HC_STATE_SUSPENDED;
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		}
		usb_hcd_resume_root_hub(hcd);

		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		       0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

	/*
	 * Resume roothubs only if there are pending events.
	 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
	 * the first wake signalling failed; give it that chance if
	 * there are suspended USB 3 devices.
	 */
	if (xhci->usb3_rhub.bus_state.suspended_ports || xhci->usb3_rhub.bus_state.bus_suspended)
		suspended_usb3_devs = true;

	pending_portevent = xhci_pending_portevent(xhci);
	if (suspended_usb3_devs && !pending_portevent && is_auto_resume) {
		msleep(120);
		pending_portevent = xhci_pending_portevent(xhci);
	}

	if (pending_portevent) {
		if (xhci->shared_hcd)
			usb_hcd_resume_root_hub(xhci->shared_hcd);
		usb_hcd_resume_root_hub(hcd);
	}

	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * always needs to be re-initialized after a system resume, since
	 * ports can suffer the Compliance Mode issue again. It doesn't matter
	 * whether ports previously entered U0 before the system was suspended.
	 */
	if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
		compliance_mode_recovery_timer_init(xhci);
done:
	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	if (xhci->shared_hcd) {
		set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
		usb_hcd_poll_rh_status(xhci->shared_hcd);
	}
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = xhci_usb_endpoint_maxp(urb->dev, urb->ep);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if URB is suitable for Immediate Transfer (IDT),
 * we'll copy the actual data into the TRB address register. This is limited to
 * transfers up to 8 bytes on output endpoints of any kind with wMaxPacketSize
 * >= 8 bytes. If suitable for IDT only one Transfer TRB per TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}
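
/*
 * Illustrative example: an 8-byte control or interrupt OUT transfer to an
 * endpoint with wMaxPacketSize >= 8 meets the IDT rules described above,
 * so xhci_urb_suitable_for_idt() lets it skip DMA mapping entirely; the
 * payload is later copied directly into the TRB's data pointer field
 * instead of being referenced by address.
 */
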
static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 * @desc: USB endpoint descriptor to determine index for
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc) * 2);
	else
		index = (unsigned int) (usb_endpoint_num(desc) * 2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);
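
/*
 * Worked examples (illustrative) of the mapping above:
 *	ep 0x81 (IN 1):    (1 * 2) + 1 - 1 = 2
 *	ep 0x02 (OUT 2):   (2 * 2) + 0 - 1 = 3
 *	ep 0x00 (control): (0 * 2)         = 0
 */
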
/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
 */
static unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
			   struct usb_host_endpoint *ep, int check_ep,
			   bool check_virt_dev, const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}
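
/*
 * Illustrative caller pattern: since 1 means "arguments OK", 0 means "root
 * hub" and negative values are errors, callers typically treat anything
 * <= 0 as a reason to bail out, as xhci_urb_enqueue() does below:
 *
 *	ret = xhci_check_args(hcd, urb->dev, urb->ep, true, true, __func__);
 *	if (ret <= 0)
 *		return ret ? ret : -EINVAL;
 */
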
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_ep0_maxpacket(struct xhci_hcd *xhci, struct xhci_virt_device *vdev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, 0);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&vdev->udev->ep0.desc);

	if (hw_max_packet_size == max_packet_size)
		return 0;

	switch (max_packet_size) {
	case 8: case 16: case 32: case 64: case 9:
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in usb_device = %d",
			       max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Max packet size in xHCI HW = %d",
			       hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
			       "Issuing evaluate context command.");

		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		command->in_ctx = vdev->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			break;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, vdev->in_ctx, vdev->out_ctx, 0);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, 0);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, vdev->udev, command,
					      true, false);
		/* Clean up the input context for later use by bandwidth functions */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
		break;
	default:
		dev_dbg(&vdev->udev->dev, "incorrect max packet size %d for ep0\n",
			max_packet_size);
		return -EINVAL;
	}

	kfree(command->completion);
	kfree(command);

	return ret;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv *urb_priv;
	int num_tds;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
		 urb->transfer_buffer_length > 0 &&
		 urb->transfer_flags & URB_ZERO_PACKET &&
		 !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc_flex(*urb_priv, td, num_tds, mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	spin_lock_irqsave(&xhci->lock, flags);

	ret = xhci_check_args(hcd, urb->dev, urb->ep,
			      true, true, __func__);
	if (ret <= 0) {
		ret = ret ? ret : -EINVAL;
		goto free_priv;
	}

	slot_id = urb->dev->slot_id;

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		ret = -ENODEV;
		goto free_priv;
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}

	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
						 slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are a few cases to handle:
 *
 * 1) If the HC is in the middle of processing the URB to be canceled, we
 *    simply move the ring's dequeue pointer past those TRBs using the Set
 *    Transfer Ring Dequeue Pointer command. This will be the common case,
 *    when drivers timeout on the last submitted URB and attempt to cancel.
 *
 * 2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *    series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *    HC will need to invalidate any TRBs it has cached after the stop
 *    endpoint command, as noted in the xHCI 0.95 errata.
 *
 * 3) The TD may have completed by the time the Stop Endpoint Command
 *    completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * check ring is not re-allocated since URB was enqueued. If it is, then
	 * make sure none of the ring related pointers in this URB private data
	 * are touched, such as td_list, otherwise we overwrite freed data
	 */
	if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
		xhci_err(xhci, "Canceled URB td not found on endpoint ring");
		for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "HC halted, freeing TD manually.");
		for (i = urb_priv->num_tds_done;
		     i < urb_priv->num_tds;
		     i++) {
			td = &urb_priv->td[i];
			if (!list_empty(&td->td_list))
				list_del_init(&td->td_list);
			if (!list_empty(&td->cancelled_td_list))
				list_del_init(&td->cancelled_td_list);
		}
		goto err_giveback;
	}

	i = urb_priv->num_tds_done;
	if (i < urb_priv->num_tds)
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Cancel URB %p, dev %s, ep 0x%x, starting at offset 0x%llx",
			       urb, urb->dev->devpath,
			       urb->ep->desc.bEndpointAddress,
			       (unsigned long long) xhci_trb_virt_to_dma(
					urb_priv->td[i].start_seg,
					urb_priv->td[i].start_trb));

	for (; i < urb_priv->num_tds; i++) {
		td = &urb_priv->td[i];
		/* TD can already be on cancelled list if ep halted on it */
		if (list_empty(&td->cancelled_td_list)) {
			td->cancel_status = TD_DIRTY;
			list_add_tail(&td->cancelled_td_list,
				      &ep->cancelled_td_list);
		}
	}

	/* These completion handlers will sort out cancelled TDs for us */
	if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
		xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
			 urb->dev->slot_id, ep_index, ep->ep_state);
		goto done;
	}

	/* In this case no commands are pending but the endpoint is stopped */
	if (ep->ep_state & EP_CLEARING_TT) {
		/* and cancelled TDs can be given back right away */
		xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
			 urb->dev->slot_id, ep_index, ep->ep_state);
		xhci_process_cancelled_tds(ep);
	} else {
		/* Otherwise, queue a new Stop Endpoint command */
		command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
		if (!command) {
			ret = -ENOMEM;
			goto done;
		}
		ep->stop_time = jiffies;
		ep->ep_state |= EP_STOP_CMD_PENDING;
		xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
					 ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;

err_giveback:
	if (urb_priv)
		xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
	return ret;
}
EP_CLEARING_TT) { 1859 /* and cancelled TDs can be given back right away */ 1860 xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", 1861 urb->dev->slot_id, ep_index, ep->ep_state); 1862 xhci_process_cancelled_tds(ep); 1863 } else { 1864 /* Otherwise, queue a new Stop Endpoint command */ 1865 command = xhci_alloc_command(xhci, false, GFP_ATOMIC); 1866 if (!command) { 1867 ret = -ENOMEM; 1868 goto done; 1869 } 1870 ep->stop_time = jiffies; 1871 ep->ep_state |= EP_STOP_CMD_PENDING; 1872 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, 1873 ep_index, 0); 1874 xhci_ring_cmd_db(xhci); 1875 } 1876 done: 1877 spin_unlock_irqrestore(&xhci->lock, flags); 1878 return ret; 1879 1880 err_giveback: 1881 if (urb_priv) 1882 xhci_urb_free_priv(urb_priv); 1883 usb_hcd_unlink_urb_from_ep(hcd, urb); 1884 spin_unlock_irqrestore(&xhci->lock, flags); 1885 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN); 1886 return ret; 1887 } 1888 1889 /* Drop an endpoint from a new bandwidth configuration for this device. 1890 * Only one call to this function is allowed per endpoint before 1891 * check_bandwidth() or reset_bandwidth() must be called. 1892 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will 1893 * add the endpoint to the schedule with possibly new parameters denoted by a 1894 * different endpoint descriptor in usb_host_endpoint. 1895 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is 1896 * not allowed. 1897 * 1898 * The USB core will not allow URBs to be queued to an endpoint that is being 1899 * disabled, so there's no need for mutual exclusion to protect 1900 * the xhci->devs[slot_id] structure. 1901 */ 1902 int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, 1903 struct usb_host_endpoint *ep) 1904 { 1905 struct xhci_hcd *xhci; 1906 struct xhci_container_ctx *in_ctx, *out_ctx; 1907 struct xhci_input_control_ctx *ctrl_ctx; 1908 unsigned int ep_index; 1909 struct xhci_ep_ctx *ep_ctx; 1910 u32 drop_flag; 1911 u32 new_add_flags, new_drop_flags; 1912 int ret; 1913 1914 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1915 if (ret <= 0) 1916 return ret; 1917 xhci = hcd_to_xhci(hcd); 1918 if (xhci->xhc_state & XHCI_STATE_DYING) 1919 return -ENODEV; 1920 1921 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 1922 drop_flag = xhci_get_endpoint_flag(&ep->desc); 1923 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) { 1924 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n", 1925 __func__, drop_flag); 1926 return 0; 1927 } 1928 1929 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 1930 out_ctx = xhci->devs[udev->slot_id]->out_ctx; 1931 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 1932 if (!ctrl_ctx) { 1933 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 1934 __func__); 1935 return 0; 1936 } 1937 1938 ep_index = xhci_get_endpoint_index(&ep->desc); 1939 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 1940 /* If the HC already knows the endpoint is disabled, 1941 * or the HCD has noted it is disabled, ignore this request 1942 */ 1943 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || 1944 le32_to_cpu(ctrl_ctx->drop_flags) & 1945 xhci_get_endpoint_flag(&ep->desc)) { 1946 /* Do not warn when called after a usb_device_reset */ 1947 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) 1948 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 1949 __func__, ep); 1950 return 0; 1951 } 1952 1953 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); 1954 new_drop_flags = 
le32_to_cpu(ctrl_ctx->drop_flags); 1955 1956 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); 1957 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); 1958 1959 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); 1960 1961 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 1962 1963 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", 1964 (unsigned int) ep->desc.bEndpointAddress, 1965 udev->slot_id, 1966 (unsigned int) new_drop_flags, 1967 (unsigned int) new_add_flags); 1968 return 0; 1969 } 1970 EXPORT_SYMBOL_GPL(xhci_drop_endpoint); 1971 1972 /* Add an endpoint to a new possible bandwidth configuration for this device. 1973 * Only one call to this function is allowed per endpoint before 1974 * check_bandwidth() or reset_bandwidth() must be called. 1975 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will 1976 * add the endpoint to the schedule with possibly new parameters denoted by a 1977 * different endpoint descriptor in usb_host_endpoint. 1978 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is 1979 * not allowed. 1980 * 1981 * The USB core will not allow URBs to be queued to an endpoint until the 1982 * configuration or alt setting is installed in the device, so there's no need 1983 * for mutual exclusion to protect the xhci->devs[slot_id] structure. 1984 */ 1985 int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, 1986 struct usb_host_endpoint *ep) 1987 { 1988 struct xhci_hcd *xhci; 1989 struct xhci_container_ctx *in_ctx; 1990 unsigned int ep_index; 1991 struct xhci_input_control_ctx *ctrl_ctx; 1992 struct xhci_ep_ctx *ep_ctx; 1993 u32 added_ctxs; 1994 u32 new_add_flags, new_drop_flags; 1995 struct xhci_virt_device *virt_dev; 1996 int ret = 0; 1997 1998 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); 1999 if (ret <= 0) { 2000 /* So we won't queue a reset ep command for a root hub */ 2001 ep->hcpriv = NULL; 2002 return ret; 2003 } 2004 xhci = hcd_to_xhci(hcd); 2005 if (xhci->xhc_state & XHCI_STATE_DYING) 2006 return -ENODEV; 2007 2008 added_ctxs = xhci_get_endpoint_flag(&ep->desc); 2009 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) { 2010 /* FIXME when we have to issue an evaluate endpoint command to 2011 * deal with ep0 max packet size changing once we get the 2012 * descriptors 2013 */ 2014 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n", 2015 __func__, added_ctxs); 2016 return 0; 2017 } 2018 2019 virt_dev = xhci->devs[udev->slot_id]; 2020 in_ctx = virt_dev->in_ctx; 2021 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2022 if (!ctrl_ctx) { 2023 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2024 __func__); 2025 return 0; 2026 } 2027 2028 ep_index = xhci_get_endpoint_index(&ep->desc); 2029 /* If this endpoint is already in use, and the upper layers are trying 2030 * to add it again without dropping it, reject the addition. 2031 */ 2032 if (virt_dev->eps[ep_index].ring && 2033 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { 2034 xhci_warn(xhci, "Trying to add endpoint 0x%x " 2035 "without dropping it.\n", 2036 (unsigned int) ep->desc.bEndpointAddress); 2037 return -EINVAL; 2038 } 2039 2040 /* If the HCD has already noted the endpoint is enabled, 2041 * ignore this request. 
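 * (This can happen e.g. if a driver adds the same endpoint twice without
 * an intervening drop: the add flag is already set in the input control
 * context, so there is nothing left to do.)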
2042 	 */
2043 	if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2044 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2045 				__func__, ep);
2046 		return 0;
2047 	}
2048 
2049 	/*
2050 	 * Configuration and alternate setting changes must be done in
2051 	 * process context, not interrupt context (or so the documentation
2052 	 * for usb_set_interface() and usb_set_configuration() claims).
2053 	 */
2054 	if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2055 		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2056 				__func__, ep->desc.bEndpointAddress);
2057 		return -ENOMEM;
2058 	}
2059 
2060 	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2061 	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2062 
2063 	/* If xhci_endpoint_disable() was called for this endpoint, but the
2064 	 * xHC hasn't been notified yet through the check_bandwidth() call,
2065 	 * this re-adds a new state for the endpoint from the new endpoint
2066 	 * descriptors. We must drop and re-add this endpoint, so we leave the
2067 	 * drop flags alone.
2068 	 */
2069 	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2070 
2071 	/* Store the usb_device pointer for later use */
2072 	ep->hcpriv = udev;
2073 
2074 	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2075 	trace_xhci_add_endpoint(ep_ctx);
2076 
2077 	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2078 			(unsigned int) ep->desc.bEndpointAddress,
2079 			udev->slot_id,
2080 			(unsigned int) new_drop_flags,
2081 			(unsigned int) new_add_flags);
2082 	return 0;
2083 }
2084 EXPORT_SYMBOL_GPL(xhci_add_endpoint);
2085 
2086 static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2087 {
2088 	struct xhci_input_control_ctx *ctrl_ctx;
2089 	struct xhci_ep_ctx *ep_ctx;
2090 	struct xhci_slot_ctx *slot_ctx;
2091 	int i;
2092 
2093 	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2094 	if (!ctrl_ctx) {
2095 		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2096 				__func__);
2097 		return;
2098 	}
2099 
2100 	/* When a device's add flag and drop flag are zero, any subsequent
2101 	 * configure endpoint command will leave that endpoint's state
2102 	 * untouched. Make sure we don't leave any old state in the input
2103 	 * endpoint contexts.
2104 	 */
2105 	ctrl_ctx->drop_flags = 0;
2106 	ctrl_ctx->add_flags = 0;
2107 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2108 	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2109 	/* Endpoint 0 is always valid */
2110 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2111 	for (i = 1; i < 31; i++) {
2112 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2113 		ep_ctx->ep_info = 0;
2114 		ep_ctx->ep_info2 = 0;
2115 		ep_ctx->deq = 0;
2116 		ep_ctx->tx_info = 0;
2117 	}
2118 }
2119 
2120 static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2121 		struct usb_device *udev, u32 *cmd_status)
2122 {
2123 	int ret;
2124 
2125 	switch (*cmd_status) {
2126 	case COMP_COMMAND_ABORTED:
2127 	case COMP_COMMAND_RING_STOPPED:
2128 		xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2129 		ret = -ETIME;
2130 		break;
2131 	case COMP_RESOURCE_ERROR:
2132 		dev_warn(&udev->dev,
2133 			 "Not enough host controller resources for new device state.\n");
2134 		ret = -ENOMEM;
2135 		/* FIXME: can we allocate more resources for the HC? */
2136 		break;
2137 	case COMP_BANDWIDTH_ERROR:
2138 	case COMP_SECONDARY_BANDWIDTH_ERROR:
2139 		dev_warn(&udev->dev,
2140 			 "Not enough bandwidth for new device state.\n");
2141 		ret = -ENOSPC;
2142 		/* FIXME: can we go back to the old state?
*/ 2143 break; 2144 case COMP_TRB_ERROR: 2145 /* the HCD set up something wrong */ 2146 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, " 2147 "add flag = 1, " 2148 "and endpoint is not disabled.\n"); 2149 ret = -EINVAL; 2150 break; 2151 case COMP_INCOMPATIBLE_DEVICE_ERROR: 2152 dev_warn(&udev->dev, 2153 "ERROR: Incompatible device for endpoint configure command.\n"); 2154 ret = -ENODEV; 2155 break; 2156 case COMP_SUCCESS: 2157 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2158 "Successful Endpoint Configure command"); 2159 ret = 0; 2160 break; 2161 default: 2162 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2163 *cmd_status); 2164 ret = -EINVAL; 2165 break; 2166 } 2167 return ret; 2168 } 2169 2170 static int xhci_evaluate_context_result(struct xhci_hcd *xhci, 2171 struct usb_device *udev, u32 *cmd_status) 2172 { 2173 int ret; 2174 2175 switch (*cmd_status) { 2176 case COMP_COMMAND_ABORTED: 2177 case COMP_COMMAND_RING_STOPPED: 2178 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); 2179 ret = -ETIME; 2180 break; 2181 case COMP_PARAMETER_ERROR: 2182 dev_warn(&udev->dev, 2183 "WARN: xHCI driver setup invalid evaluate context command.\n"); 2184 ret = -EINVAL; 2185 break; 2186 case COMP_SLOT_NOT_ENABLED_ERROR: 2187 dev_warn(&udev->dev, 2188 "WARN: slot not enabled for evaluate context command.\n"); 2189 ret = -EINVAL; 2190 break; 2191 case COMP_CONTEXT_STATE_ERROR: 2192 dev_warn(&udev->dev, 2193 "WARN: invalid context state for evaluate context command.\n"); 2194 ret = -EINVAL; 2195 break; 2196 case COMP_INCOMPATIBLE_DEVICE_ERROR: 2197 dev_warn(&udev->dev, 2198 "ERROR: Incompatible device for evaluate context command.\n"); 2199 ret = -ENODEV; 2200 break; 2201 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR: 2202 /* Max Exit Latency too large error */ 2203 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n"); 2204 ret = -EINVAL; 2205 break; 2206 case COMP_SUCCESS: 2207 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 2208 "Successful evaluate context command"); 2209 ret = 0; 2210 break; 2211 default: 2212 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n", 2213 *cmd_status); 2214 ret = -EINVAL; 2215 break; 2216 } 2217 return ret; 2218 } 2219 2220 static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, 2221 struct xhci_input_control_ctx *ctrl_ctx) 2222 { 2223 u32 valid_add_flags; 2224 u32 valid_drop_flags; 2225 2226 /* Ignore the slot flag (bit 0), and the default control endpoint flag 2227 * (bit 1). The default control endpoint is added during the Address 2228 * Device command and is never removed until the slot is disabled. 2229 */ 2230 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; 2231 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; 2232 2233 /* Use hweight32 to count the number of ones in the add flags, or 2234 * number of endpoints added. Don't count endpoints that are changed 2235 * (both added and dropped). 
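 *
 * A worked example (illustrative numbers only): with valid_add_flags of
 * 0b0110 and valid_drop_flags of 0b0010, one endpoint is changed (both
 * added and dropped) and one is purely new, so the count below is
 * hweight32(0b0110) - hweight32(0b0110 & 0b0010) = 2 - 1 = 1.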
2236 	 */
2237 	return hweight32(valid_add_flags) -
2238 		hweight32(valid_add_flags & valid_drop_flags);
2239 }
2240 
2241 static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2242 		struct xhci_input_control_ctx *ctrl_ctx)
2243 {
2244 	u32 valid_add_flags;
2245 	u32 valid_drop_flags;
2246 
2247 	valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2248 	valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2249 
2250 	return hweight32(valid_drop_flags) -
2251 		hweight32(valid_add_flags & valid_drop_flags);
2252 }
2253 
2254 /*
2255  * We need to reserve the new number of endpoints before the configure endpoint
2256  * command completes. We can't subtract the dropped endpoints from the number
2257  * of active endpoints until the command completes because we can oversubscribe
2258  * the host in this case:
2259  *
2260  * - the first configure endpoint command drops more endpoints than it adds
2261  * - a second configure endpoint command that adds more endpoints is queued
2262  * - the first configure endpoint command fails, so the config is unchanged
2263  * - the second command may succeed, even though there aren't enough resources
2264  *
2265  * Must be called with xhci->lock held.
2266  */
2267 static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2268 		struct xhci_input_control_ctx *ctrl_ctx)
2269 {
2270 	u32 added_eps;
2271 
2272 	added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2273 	if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2274 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2275 				"Not enough ep ctxs: "
2276 				"%u active, need to add %u, limit is %u.",
2277 				xhci->num_active_eps, added_eps,
2278 				xhci->limit_active_eps);
2279 		return -ENOMEM;
2280 	}
2281 	xhci->num_active_eps += added_eps;
2282 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2283 			"Adding %u ep ctxs, %u now active.", added_eps,
2284 			xhci->num_active_eps);
2285 	return 0;
2286 }
2287 
2288 /*
2289  * The xHC failed the configure endpoint command for some other reason, so we
2290  * need to revert the resources that the failed configuration would have used.
2291  *
2292  * Must be called with xhci->lock held.
2293  */
2294 static void xhci_free_host_resources(struct xhci_hcd *xhci,
2295 		struct xhci_input_control_ctx *ctrl_ctx)
2296 {
2297 	u32 num_failed_eps;
2298 
2299 	num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2300 	xhci->num_active_eps -= num_failed_eps;
2301 	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2302 			"Removing %u failed ep ctxs, %u now active.",
2303 			num_failed_eps,
2304 			xhci->num_active_eps);
2305 }
2306 
2307 /*
2308  * Now that the command has completed, clean up the active endpoint count by
2309  * subtracting out the endpoints that were dropped (but not changed).
2310  *
2311  * Must be called with xhci->lock held.
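 *
 * For example (illustrative): if the command dropped three endpoints and
 * re-added one of them (i.e. changed it), only the two purely-dropped
 * endpoints are subtracted from xhci->num_active_eps here.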
2312  */
2313 static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2314 		struct xhci_input_control_ctx *ctrl_ctx)
2315 {
2316 	u32 num_dropped_eps;
2317 
2318 	num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2319 	xhci->num_active_eps -= num_dropped_eps;
2320 	if (num_dropped_eps)
2321 		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2322 				"Removing %u dropped ep ctxs, %u now active.",
2323 				num_dropped_eps,
2324 				xhci->num_active_eps);
2325 }
2326 
2327 static unsigned int xhci_get_block_size(struct usb_device *udev)
2328 {
2329 	switch (udev->speed) {
2330 	case USB_SPEED_LOW:
2331 	case USB_SPEED_FULL:
2332 		return FS_BLOCK;
2333 	case USB_SPEED_HIGH:
2334 		return HS_BLOCK;
2335 	case USB_SPEED_SUPER:
2336 	case USB_SPEED_SUPER_PLUS:
2337 		return SS_BLOCK;
2338 	case USB_SPEED_UNKNOWN:
2339 	default:
2340 		/* Should never happen */
2341 		return 1;
2342 	}
2343 }
2344 
2345 static unsigned int
2346 xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2347 {
2348 	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2349 		return LS_OVERHEAD;
2350 	if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2351 		return FS_OVERHEAD;
2352 	return HS_OVERHEAD;
2353 }
2354 
2355 /* If we are changing a LS/FS device under a HS hub,
2356  * make sure (if we are activating a new TT) that the HS bus has enough
2357  * bandwidth for this new TT.
2358  */
2359 static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2360 		struct xhci_virt_device *virt_dev,
2361 		int old_active_eps)
2362 {
2363 	struct xhci_interval_bw_table *bw_table;
2364 	struct xhci_tt_bw_info *tt_info;
2365 
2366 	/* Find the bandwidth table for the root port this TT is attached to. */
2367 	bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
2368 	tt_info = virt_dev->tt_info;
2369 	/* If this TT already had active endpoints, the bandwidth for this TT
2370 	 * has already been added. Removing all periodic endpoints (and thus
2371 	 * making the TT inactive) will only decrease the bandwidth used.
2372 	 */
2373 	if (old_active_eps)
2374 		return 0;
2375 	if (old_active_eps == 0 && tt_info->active_eps != 0) {
2376 		if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2377 			return -ENOMEM;
2378 		return 0;
2379 	}
2380 	/* Not sure why we would have no new active endpoints...
2381 	 *
2382 	 * Maybe because of an Evaluate Context change for a hub update or a
2383 	 * control endpoint 0 max packet size change?
2384 	 * FIXME: skip the bandwidth calculation in that case.
2385 	 */
2386 	return 0;
2387 }
2388 
2389 static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2390 		struct xhci_virt_device *virt_dev)
2391 {
2392 	unsigned int bw_reserved;
2393 
2394 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2395 	if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2396 		return -ENOMEM;
2397 
2398 	bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2399 	if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2400 		return -ENOMEM;
2401 
2402 	return 0;
2403 }
2404 
2405 /*
2406  * This algorithm is a very conservative estimate of the worst-case scheduling
2407  * scenario for any one interval. The hardware dynamically schedules the
2408  * packets, so we can't tell which microframe could be the limiting factor in
2409  * the bandwidth scheduling. This only takes into account periodic endpoints.
2410  *
2411  * Obviously, we can't solve an NP-complete problem to find the minimum worst
2412  * case scenario. Instead, we come up with an estimate that is no less than
2413  * the worst case bandwidth used for any one microframe, but may be an
2414  * over-estimate.
2415  *
2416  * We walk the requirements for each endpoint by interval, starting with the
2417  * smallest interval, and place packets in the schedule where there is only one
2418  * possible way to schedule packets for that interval. In order to simplify
2419  * this algorithm, we record the largest max packet size for each interval, and
2420  * assume all packets will be that size.
2421  *
2422  * For interval 0, we obviously must schedule all packets in each microframe.
2423  * The bandwidth for interval 0 is just the amount of data to be transmitted
2424  * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2425  * the number of packets).
2426  *
2427  * For interval 1, we have two possible microframes to schedule those packets
2428  * in. For this algorithm, if we can schedule the same number of packets for
2429  * each possible scheduling opportunity (each microframe), we will do so. The
2430  * remaining number of packets will be saved to be transmitted in the gaps in
2431  * the next interval's scheduling sequence.
2432  *
2433  * As we move those remaining packets to be scheduled with interval 2 packets,
2434  * we have to double the number of remaining packets to transmit. This is
2435  * because the intervals are actually powers of 2, and we would be transmitting
2436  * the previous interval's packets twice in this interval. We also have to be
2437  * sure that when we look at the largest max packet size for this interval, we
2438  * also look at the largest max packet size for the remaining packets and take
2439  * the greater of the two.
2440  *
2441  * The algorithm continues to evenly distribute packets in each scheduling
2442  * opportunity, and push the remaining packets out, until we get to the last
2443  * interval. Then those packets and their associated overhead are just added
2444  * to the bandwidth used.
2445  */
2446 static int xhci_check_bw_table(struct xhci_hcd *xhci,
2447 		struct xhci_virt_device *virt_dev,
2448 		int old_active_eps)
2449 {
2450 	unsigned int bw_reserved;
2451 	unsigned int max_bandwidth;
2452 	unsigned int bw_used;
2453 	unsigned int block_size;
2454 	struct xhci_interval_bw_table *bw_table;
2455 	unsigned int packet_size = 0;
2456 	unsigned int overhead = 0;
2457 	unsigned int packets_transmitted = 0;
2458 	unsigned int packets_remaining = 0;
2459 	unsigned int i;
2460 
2461 	if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2462 		return xhci_check_ss_bw(xhci, virt_dev);
2463 
2464 	if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2465 		max_bandwidth = HS_BW_LIMIT;
2466 		/* Convert percent of bus BW reserved to blocks reserved */
2467 		bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2468 	} else {
2469 		max_bandwidth = FS_BW_LIMIT;
2470 		bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2471 	}
2472 
2473 	bw_table = virt_dev->bw_table;
2474 	/* We need to translate the max packet size and max ESIT payloads into
2475 	 * the units the hardware uses.
2476 	 */
2477 	block_size = xhci_get_block_size(virt_dev->udev);
2478 
2479 	/* If we are manipulating a LS/FS device under a HS hub, double-check
2480 	 * that the HS bus has enough bandwidth if we are activating a new TT.
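 * (A newly activated TT costs TT_HS_OVERHEAD blocks on the high-speed
 * bus; xhci_check_tt_bw_table() above performs that check, and
 * xhci_update_tt_active_eps() below charges it once the change sticks.)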
2481 */ 2482 if (virt_dev->tt_info) { 2483 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2484 "Recalculating BW for rootport %u", 2485 virt_dev->rhub_port->hw_portnum + 1); 2486 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) { 2487 xhci_warn(xhci, "Not enough bandwidth on HS bus for " 2488 "newly activated TT.\n"); 2489 return -ENOMEM; 2490 } 2491 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2492 "Recalculating BW for TT slot %u port %u", 2493 virt_dev->tt_info->slot_id, 2494 virt_dev->tt_info->ttport); 2495 } else { 2496 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2497 "Recalculating BW for rootport %u", 2498 virt_dev->rhub_port->hw_portnum + 1); 2499 } 2500 2501 /* Add in how much bandwidth will be used for interval zero, or the 2502 * rounded max ESIT payload + number of packets * largest overhead. 2503 */ 2504 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) + 2505 bw_table->interval_bw[0].num_packets * 2506 xhci_get_largest_overhead(&bw_table->interval_bw[0]); 2507 2508 for (i = 1; i < XHCI_MAX_INTERVAL; i++) { 2509 unsigned int bw_added; 2510 unsigned int largest_mps; 2511 unsigned int interval_overhead; 2512 2513 /* 2514 * How many packets could we transmit in this interval? 2515 * If packets didn't fit in the previous interval, we will need 2516 * to transmit that many packets twice within this interval. 2517 */ 2518 packets_remaining = 2 * packets_remaining + 2519 bw_table->interval_bw[i].num_packets; 2520 2521 /* Find the largest max packet size of this or the previous 2522 * interval. 2523 */ 2524 if (list_empty(&bw_table->interval_bw[i].endpoints)) 2525 largest_mps = 0; 2526 else { 2527 struct xhci_virt_ep *virt_ep; 2528 struct list_head *ep_entry; 2529 2530 ep_entry = bw_table->interval_bw[i].endpoints.next; 2531 virt_ep = list_entry(ep_entry, 2532 struct xhci_virt_ep, bw_endpoint_list); 2533 /* Convert to blocks, rounding up */ 2534 largest_mps = DIV_ROUND_UP( 2535 virt_ep->bw_info.max_packet_size, 2536 block_size); 2537 } 2538 if (largest_mps > packet_size) 2539 packet_size = largest_mps; 2540 2541 /* Use the larger overhead of this or the previous interval. */ 2542 interval_overhead = xhci_get_largest_overhead( 2543 &bw_table->interval_bw[i]); 2544 if (interval_overhead > overhead) 2545 overhead = interval_overhead; 2546 2547 /* How many packets can we evenly distribute across 2548 * (1 << (i + 1)) possible scheduling opportunities? 2549 */ 2550 packets_transmitted = packets_remaining >> (i + 1); 2551 2552 /* Add in the bandwidth used for those scheduled packets */ 2553 bw_added = packets_transmitted * (overhead + packet_size); 2554 2555 /* How many packets do we have remaining to transmit? */ 2556 packets_remaining = packets_remaining % (1 << (i + 1)); 2557 2558 /* What largest max packet size should those packets have? */ 2559 /* If we've transmitted all packets, don't carry over the 2560 * largest packet size. 2561 */ 2562 if (packets_remaining == 0) { 2563 packet_size = 0; 2564 overhead = 0; 2565 } else if (packets_transmitted > 0) { 2566 /* Otherwise if we do have remaining packets, and we've 2567 * scheduled some packets in this interval, take the 2568 * largest max packet size from endpoints with this 2569 * interval. 2570 */ 2571 packet_size = largest_mps; 2572 overhead = interval_overhead; 2573 } 2574 /* Otherwise carry over packet_size and overhead from the last 2575 * time we had a remainder. 2576 */ 2577 bw_used += bw_added; 2578 if (bw_used > max_bandwidth) { 2579 xhci_warn(xhci, "Not enough bandwidth. 
" 2580 "Proposed: %u, Max: %u\n", 2581 bw_used, max_bandwidth); 2582 return -ENOMEM; 2583 } 2584 } 2585 /* 2586 * Ok, we know we have some packets left over after even-handedly 2587 * scheduling interval 15. We don't know which microframes they will 2588 * fit into, so we over-schedule and say they will be scheduled every 2589 * microframe. 2590 */ 2591 if (packets_remaining > 0) 2592 bw_used += overhead + packet_size; 2593 2594 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) { 2595 /* OK, we're manipulating a HS device attached to a 2596 * root port bandwidth domain. Include the number of active TTs 2597 * in the bandwidth used. 2598 */ 2599 bw_used += TT_HS_OVERHEAD * 2600 xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts; 2601 } 2602 2603 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 2604 "Final bandwidth: %u, Limit: %u, Reserved: %u, " 2605 "Available: %u " "percent", 2606 bw_used, max_bandwidth, bw_reserved, 2607 (max_bandwidth - bw_used - bw_reserved) * 100 / 2608 max_bandwidth); 2609 2610 bw_used += bw_reserved; 2611 if (bw_used > max_bandwidth) { 2612 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n", 2613 bw_used, max_bandwidth); 2614 return -ENOMEM; 2615 } 2616 2617 bw_table->bw_used = bw_used; 2618 return 0; 2619 } 2620 2621 static bool xhci_is_async_ep(unsigned int ep_type) 2622 { 2623 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && 2624 ep_type != ISOC_IN_EP && 2625 ep_type != INT_IN_EP); 2626 } 2627 2628 static bool xhci_is_sync_in_ep(unsigned int ep_type) 2629 { 2630 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP); 2631 } 2632 2633 static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw) 2634 { 2635 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK); 2636 2637 if (ep_bw->ep_interval == 0) 2638 return SS_OVERHEAD_BURST + 2639 (ep_bw->mult * ep_bw->num_packets * 2640 (SS_OVERHEAD + mps)); 2641 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets * 2642 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST), 2643 1 << ep_bw->ep_interval); 2644 2645 } 2646 2647 static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci, 2648 struct xhci_bw_info *ep_bw, 2649 struct xhci_interval_bw_table *bw_table, 2650 struct usb_device *udev, 2651 struct xhci_virt_ep *virt_ep, 2652 struct xhci_tt_bw_info *tt_info) 2653 { 2654 struct xhci_interval_bw *interval_bw; 2655 int normalized_interval; 2656 2657 if (xhci_is_async_ep(ep_bw->type)) 2658 return; 2659 2660 if (udev->speed >= USB_SPEED_SUPER) { 2661 if (xhci_is_sync_in_ep(ep_bw->type)) 2662 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -= 2663 xhci_get_ss_bw_consumed(ep_bw); 2664 else 2665 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -= 2666 xhci_get_ss_bw_consumed(ep_bw); 2667 return; 2668 } 2669 2670 /* SuperSpeed endpoints never get added to intervals in the table, so 2671 * this check is only valid for HS/FS/LS devices. 2672 */ 2673 if (list_empty(&virt_ep->bw_endpoint_list)) 2674 return; 2675 /* For LS/FS devices, we need to translate the interval expressed in 2676 * microframes to frames. 
2677 */ 2678 if (udev->speed == USB_SPEED_HIGH) 2679 normalized_interval = ep_bw->ep_interval; 2680 else 2681 normalized_interval = ep_bw->ep_interval - 3; 2682 2683 if (normalized_interval == 0) 2684 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload; 2685 interval_bw = &bw_table->interval_bw[normalized_interval]; 2686 interval_bw->num_packets -= ep_bw->num_packets; 2687 switch (udev->speed) { 2688 case USB_SPEED_LOW: 2689 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1; 2690 break; 2691 case USB_SPEED_FULL: 2692 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1; 2693 break; 2694 case USB_SPEED_HIGH: 2695 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1; 2696 break; 2697 default: 2698 /* Should never happen because only LS/FS/HS endpoints will get 2699 * added to the endpoint list. 2700 */ 2701 return; 2702 } 2703 if (tt_info) 2704 tt_info->active_eps -= 1; 2705 list_del_init(&virt_ep->bw_endpoint_list); 2706 } 2707 2708 static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci, 2709 struct xhci_bw_info *ep_bw, 2710 struct xhci_interval_bw_table *bw_table, 2711 struct usb_device *udev, 2712 struct xhci_virt_ep *virt_ep, 2713 struct xhci_tt_bw_info *tt_info) 2714 { 2715 struct xhci_interval_bw *interval_bw; 2716 struct xhci_virt_ep *smaller_ep; 2717 int normalized_interval; 2718 2719 if (xhci_is_async_ep(ep_bw->type)) 2720 return; 2721 2722 if (udev->speed == USB_SPEED_SUPER) { 2723 if (xhci_is_sync_in_ep(ep_bw->type)) 2724 xhci->devs[udev->slot_id]->bw_table->ss_bw_in += 2725 xhci_get_ss_bw_consumed(ep_bw); 2726 else 2727 xhci->devs[udev->slot_id]->bw_table->ss_bw_out += 2728 xhci_get_ss_bw_consumed(ep_bw); 2729 return; 2730 } 2731 2732 /* For LS/FS devices, we need to translate the interval expressed in 2733 * microframes to frames. 2734 */ 2735 if (udev->speed == USB_SPEED_HIGH) 2736 normalized_interval = ep_bw->ep_interval; 2737 else 2738 normalized_interval = ep_bw->ep_interval - 3; 2739 2740 if (normalized_interval == 0) 2741 bw_table->interval0_esit_payload += ep_bw->max_esit_payload; 2742 interval_bw = &bw_table->interval_bw[normalized_interval]; 2743 interval_bw->num_packets += ep_bw->num_packets; 2744 switch (udev->speed) { 2745 case USB_SPEED_LOW: 2746 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1; 2747 break; 2748 case USB_SPEED_FULL: 2749 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1; 2750 break; 2751 case USB_SPEED_HIGH: 2752 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1; 2753 break; 2754 default: 2755 /* Should never happen because only LS/FS/HS endpoints will get 2756 * added to the endpoint list. 2757 */ 2758 return; 2759 } 2760 2761 if (tt_info) 2762 tt_info->active_eps += 1; 2763 /* Insert the endpoint into the list, largest max packet size first. */ 2764 list_for_each_entry(smaller_ep, &interval_bw->endpoints, 2765 bw_endpoint_list) { 2766 if (ep_bw->max_packet_size >= 2767 smaller_ep->bw_info.max_packet_size) { 2768 /* Add the new ep before the smaller endpoint */ 2769 list_add_tail(&virt_ep->bw_endpoint_list, 2770 &smaller_ep->bw_endpoint_list); 2771 return; 2772 } 2773 } 2774 /* Add the new endpoint at the end of the list. 
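 * (Keeping each interval's list sorted this way means the head entry
 * always has the largest max packet size, which is the only entry
 * xhci_check_bw_table() above samples when sizing an interval.)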
*/ 2775 list_add_tail(&virt_ep->bw_endpoint_list, 2776 &interval_bw->endpoints); 2777 } 2778 2779 void xhci_update_tt_active_eps(struct xhci_hcd *xhci, 2780 struct xhci_virt_device *virt_dev, 2781 int old_active_eps) 2782 { 2783 struct xhci_root_port_bw_info *rh_bw_info; 2784 if (!virt_dev->tt_info) 2785 return; 2786 2787 rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum]; 2788 if (old_active_eps == 0 && 2789 virt_dev->tt_info->active_eps != 0) { 2790 rh_bw_info->num_active_tts += 1; 2791 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD; 2792 } else if (old_active_eps != 0 && 2793 virt_dev->tt_info->active_eps == 0) { 2794 rh_bw_info->num_active_tts -= 1; 2795 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD; 2796 } 2797 } 2798 2799 static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, 2800 struct xhci_virt_device *virt_dev, 2801 struct xhci_container_ctx *in_ctx) 2802 { 2803 struct xhci_bw_info ep_bw_info[31]; 2804 int i; 2805 struct xhci_input_control_ctx *ctrl_ctx; 2806 int old_active_eps = 0; 2807 2808 if (virt_dev->tt_info) 2809 old_active_eps = virt_dev->tt_info->active_eps; 2810 2811 ctrl_ctx = xhci_get_input_control_ctx(in_ctx); 2812 if (!ctrl_ctx) { 2813 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2814 __func__); 2815 return -ENOMEM; 2816 } 2817 2818 for (i = 0; i < 31; i++) { 2819 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2820 continue; 2821 2822 /* Make a copy of the BW info in case we need to revert this */ 2823 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, 2824 sizeof(ep_bw_info[i])); 2825 /* Drop the endpoint from the interval table if the endpoint is 2826 * being dropped or changed. 2827 */ 2828 if (EP_IS_DROPPED(ctrl_ctx, i)) 2829 xhci_drop_ep_from_interval_table(xhci, 2830 &virt_dev->eps[i].bw_info, 2831 virt_dev->bw_table, 2832 virt_dev->udev, 2833 &virt_dev->eps[i], 2834 virt_dev->tt_info); 2835 } 2836 /* Overwrite the information stored in the endpoints' bw_info */ 2837 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); 2838 for (i = 0; i < 31; i++) { 2839 /* Add any changed or added endpoints to the interval table */ 2840 if (EP_IS_ADDED(ctrl_ctx, i)) 2841 xhci_add_ep_to_interval_table(xhci, 2842 &virt_dev->eps[i].bw_info, 2843 virt_dev->bw_table, 2844 virt_dev->udev, 2845 &virt_dev->eps[i], 2846 virt_dev->tt_info); 2847 } 2848 2849 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { 2850 /* Ok, this fits in the bandwidth we have. 2851 * Update the number of active TTs. 2852 */ 2853 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 2854 return 0; 2855 } 2856 2857 /* We don't have enough bandwidth for this, revert the stored info. */ 2858 for (i = 0; i < 31; i++) { 2859 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) 2860 continue; 2861 2862 /* Drop the new copies of any added or changed endpoints from 2863 * the interval table. 
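 * (The revert below mirrors the speculative update above: endpoints that
 * were added come back out of the table, the saved bw_info copies are
 * restored, and endpoints that were dropped go back in.)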
2864 */ 2865 if (EP_IS_ADDED(ctrl_ctx, i)) { 2866 xhci_drop_ep_from_interval_table(xhci, 2867 &virt_dev->eps[i].bw_info, 2868 virt_dev->bw_table, 2869 virt_dev->udev, 2870 &virt_dev->eps[i], 2871 virt_dev->tt_info); 2872 } 2873 /* Revert the endpoint back to its old information */ 2874 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], 2875 sizeof(ep_bw_info[i])); 2876 /* Add any changed or dropped endpoints back into the table */ 2877 if (EP_IS_DROPPED(ctrl_ctx, i)) 2878 xhci_add_ep_to_interval_table(xhci, 2879 &virt_dev->eps[i].bw_info, 2880 virt_dev->bw_table, 2881 virt_dev->udev, 2882 &virt_dev->eps[i], 2883 virt_dev->tt_info); 2884 } 2885 return -ENOMEM; 2886 } 2887 2888 /* 2889 * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and 2890 * waits for the command completion before returning. This does not call 2891 * xhci_handle_cmd_stop_ep(), which has additional handling for 'context error' 2892 * cases, along with transfer ring cleanup. 2893 * 2894 * xhci_stop_endpoint_sync() is intended to be utilized by clients that manage 2895 * their own transfer ring, such as offload situations. 2896 */ 2897 int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend, 2898 gfp_t gfp_flags) 2899 { 2900 struct xhci_command *command; 2901 struct xhci_ep_ctx *ep_ctx; 2902 unsigned long flags; 2903 int ret = -ENODEV; 2904 2905 command = xhci_alloc_command(xhci, true, gfp_flags); 2906 if (!command) 2907 return -ENOMEM; 2908 2909 spin_lock_irqsave(&xhci->lock, flags); 2910 2911 /* make sure endpoint exists and is running before stopping it */ 2912 if (ep->ring) { 2913 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); 2914 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) 2915 ret = xhci_queue_stop_endpoint(xhci, command, 2916 ep->vdev->slot_id, 2917 ep->ep_index, suspend); 2918 } 2919 2920 if (ret < 0) { 2921 spin_unlock_irqrestore(&xhci->lock, flags); 2922 goto out; 2923 } 2924 2925 xhci_ring_cmd_db(xhci); 2926 spin_unlock_irqrestore(&xhci->lock, flags); 2927 2928 wait_for_completion(command->completion); 2929 2930 /* No handling for COMP_CONTEXT_STATE_ERROR done at command completion*/ 2931 if (command->status == COMP_COMMAND_ABORTED || 2932 command->status == COMP_COMMAND_RING_STOPPED) { 2933 xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); 2934 ret = -ETIME; 2935 } 2936 out: 2937 xhci_free_command(xhci, command); 2938 2939 return ret; 2940 } 2941 EXPORT_SYMBOL_GPL(xhci_stop_endpoint_sync); 2942 2943 /* 2944 * xhci_usb_endpoint_maxp - get endpoint max packet size 2945 * @host_ep: USB host endpoint to be checked 2946 * 2947 * Returns max packet from the correct descriptor 2948 */ 2949 int xhci_usb_endpoint_maxp(struct usb_device *udev, 2950 struct usb_host_endpoint *host_ep) 2951 { 2952 if (usb_endpoint_is_hs_isoc_double(udev, host_ep)) 2953 return le16_to_cpu(host_ep->eusb2_isoc_ep_comp.wMaxPacketSize); 2954 return usb_endpoint_maxp(&host_ep->desc); 2955 } 2956 2957 /* Issue a configure endpoint command or evaluate context command 2958 * and wait for it to finish. 
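 * (ctx_change selects the command: false issues Configure Endpoint, true
 * issues Evaluate Context; see e.g. xhci_check_bandwidth() below for a
 * typical Configure Endpoint call.)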
2959 */ 2960 static int xhci_configure_endpoint(struct xhci_hcd *xhci, 2961 struct usb_device *udev, 2962 struct xhci_command *command, 2963 bool ctx_change, bool must_succeed) 2964 { 2965 int ret; 2966 unsigned long flags; 2967 struct xhci_input_control_ctx *ctrl_ctx; 2968 struct xhci_virt_device *virt_dev; 2969 struct xhci_slot_ctx *slot_ctx; 2970 2971 if (!command) 2972 return -EINVAL; 2973 2974 spin_lock_irqsave(&xhci->lock, flags); 2975 2976 if (xhci->xhc_state & XHCI_STATE_DYING) { 2977 spin_unlock_irqrestore(&xhci->lock, flags); 2978 return -ESHUTDOWN; 2979 } 2980 2981 virt_dev = xhci->devs[udev->slot_id]; 2982 2983 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 2984 if (!ctrl_ctx) { 2985 spin_unlock_irqrestore(&xhci->lock, flags); 2986 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 2987 __func__); 2988 return -ENOMEM; 2989 } 2990 2991 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) && 2992 xhci_reserve_host_resources(xhci, ctrl_ctx)) { 2993 spin_unlock_irqrestore(&xhci->lock, flags); 2994 xhci_warn(xhci, "Not enough host resources, " 2995 "active endpoint contexts = %u\n", 2996 xhci->num_active_eps); 2997 return -ENOMEM; 2998 } 2999 if ((xhci->quirks & XHCI_SW_BW_CHECKING) && !ctx_change && 3000 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) { 3001 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 3002 xhci_free_host_resources(xhci, ctrl_ctx); 3003 spin_unlock_irqrestore(&xhci->lock, flags); 3004 xhci_warn(xhci, "Not enough bandwidth\n"); 3005 return -ENOMEM; 3006 } 3007 3008 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); 3009 3010 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); 3011 trace_xhci_configure_endpoint(slot_ctx); 3012 3013 if (!ctx_change) 3014 ret = xhci_queue_configure_endpoint(xhci, command, 3015 command->in_ctx->dma, 3016 udev->slot_id, must_succeed); 3017 else 3018 ret = xhci_queue_evaluate_context(xhci, command, 3019 command->in_ctx->dma, 3020 udev->slot_id, must_succeed); 3021 if (ret < 0) { 3022 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) 3023 xhci_free_host_resources(xhci, ctrl_ctx); 3024 spin_unlock_irqrestore(&xhci->lock, flags); 3025 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, 3026 "FIXME allocate a new ring segment"); 3027 return -ENOMEM; 3028 } 3029 xhci_ring_cmd_db(xhci); 3030 spin_unlock_irqrestore(&xhci->lock, flags); 3031 3032 /* Wait for the configure endpoint command to complete */ 3033 wait_for_completion(command->completion); 3034 3035 if (!ctx_change) 3036 ret = xhci_configure_endpoint_result(xhci, udev, 3037 &command->status); 3038 else 3039 ret = xhci_evaluate_context_result(xhci, udev, 3040 &command->status); 3041 3042 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 3043 spin_lock_irqsave(&xhci->lock, flags); 3044 /* If the command failed, remove the reserved resources. 3045 * Otherwise, clean up the estimate to include dropped eps. 
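 * (This is the deferred half of xhci_reserve_host_resources(): dropped
 * endpoints could not be subtracted from the estimate until the
 * command's outcome was known.)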
3046 */ 3047 if (ret) 3048 xhci_free_host_resources(xhci, ctrl_ctx); 3049 else 3050 xhci_finish_resource_reservation(xhci, ctrl_ctx); 3051 spin_unlock_irqrestore(&xhci->lock, flags); 3052 } 3053 return ret; 3054 } 3055 3056 static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, 3057 struct xhci_virt_device *vdev, int i) 3058 { 3059 struct xhci_virt_ep *ep = &vdev->eps[i]; 3060 3061 if (ep->ep_state & EP_HAS_STREAMS) { 3062 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n", 3063 xhci_get_endpoint_address(i)); 3064 xhci_free_stream_info(xhci, ep->stream_info); 3065 ep->stream_info = NULL; 3066 ep->ep_state &= ~EP_HAS_STREAMS; 3067 } 3068 } 3069 3070 /* Called after one or more calls to xhci_add_endpoint() or 3071 * xhci_drop_endpoint(). If this call fails, the USB core is expected 3072 * to call xhci_reset_bandwidth(). 3073 * 3074 * Since we are in the middle of changing either configuration or 3075 * installing a new alt setting, the USB core won't allow URBs to be 3076 * enqueued for any endpoint on the old config or interface. Nothing 3077 * else should be touching the xhci->devs[slot_id] structure, so we 3078 * don't need to take the xhci->lock for manipulating that. 3079 */ 3080 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) 3081 { 3082 int i; 3083 int ret = 0; 3084 struct xhci_hcd *xhci; 3085 struct xhci_virt_device *virt_dev; 3086 struct xhci_input_control_ctx *ctrl_ctx; 3087 struct xhci_slot_ctx *slot_ctx; 3088 struct xhci_command *command; 3089 3090 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 3091 if (ret <= 0) 3092 return ret; 3093 xhci = hcd_to_xhci(hcd); 3094 if ((xhci->xhc_state & XHCI_STATE_DYING) || 3095 (xhci->xhc_state & XHCI_STATE_REMOVING)) 3096 return -ENODEV; 3097 3098 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); 3099 virt_dev = xhci->devs[udev->slot_id]; 3100 3101 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 3102 if (!command) 3103 return -ENOMEM; 3104 3105 command->in_ctx = virt_dev->in_ctx; 3106 3107 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 3108 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx); 3109 if (!ctrl_ctx) { 3110 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3111 __func__); 3112 ret = -ENOMEM; 3113 goto command_cleanup; 3114 } 3115 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); 3116 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); 3117 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); 3118 3119 /* Don't issue the command if there's no endpoints to update. */ 3120 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && 3121 ctrl_ctx->drop_flags == 0) { 3122 ret = 0; 3123 goto command_cleanup; 3124 } 3125 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */ 3126 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 3127 for (i = 31; i >= 1; i--) { 3128 __le32 le32 = cpu_to_le32(BIT(i)); 3129 3130 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) 3131 || (ctrl_ctx->add_flags & le32) || i == 1) { 3132 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); 3133 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); 3134 break; 3135 } 3136 } 3137 3138 ret = xhci_configure_endpoint(xhci, udev, command, 3139 false, false); 3140 if (ret) 3141 /* Callee should call reset_bandwidth() */ 3142 goto command_cleanup; 3143 3144 /* Free any rings that were dropped, but not changed. 
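 * (A changed endpoint, i.e. one both dropped and re-added, keeps its old
 * ring at this point; it is freed in the install loop further down once
 * the new ring replaces it.)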
	 */
3145 	for (i = 1; i < 31; i++) {
3146 		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3147 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3148 			xhci_free_endpoint_ring(xhci, virt_dev, i);
3149 			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3150 		}
3151 	}
3152 	xhci_zero_in_ctx(xhci, virt_dev);
3153 	/*
3154 	 * Install any rings for completely new endpoints or changed endpoints,
3155 	 * and free any old rings from changed endpoints.
3156 	 */
3157 	for (i = 1; i < 31; i++) {
3158 		if (!virt_dev->eps[i].new_ring)
3159 			continue;
3160 		/* Only free the old ring if it exists.
3161 		 * It may not if this is the first add of an endpoint.
3162 		 */
3163 		if (virt_dev->eps[i].ring) {
3164 			xhci_free_endpoint_ring(xhci, virt_dev, i);
3165 		}
3166 		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3167 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3168 		virt_dev->eps[i].new_ring = NULL;
3169 		xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3170 	}
3171 command_cleanup:
3172 	kfree(command->completion);
3173 	kfree(command);
3174 
3175 	return ret;
3176 }
3177 EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3178 
3179 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3180 {
3181 	struct xhci_hcd *xhci;
3182 	struct xhci_virt_device *virt_dev;
3183 	int i, ret;
3184 
3185 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3186 	if (ret <= 0)
3187 		return;
3188 	xhci = hcd_to_xhci(hcd);
3189 
3190 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3191 	virt_dev = xhci->devs[udev->slot_id];
3192 	/* Free any rings allocated for added endpoints */
3193 	for (i = 0; i < 31; i++) {
3194 		if (virt_dev->eps[i].new_ring) {
3195 			xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3196 			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3197 			virt_dev->eps[i].new_ring = NULL;
3198 		}
3199 	}
3200 	xhci_zero_in_ctx(xhci, virt_dev);
3201 }
3202 EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3203 
3204 /*
3205  * Get the available bandwidth of the ports under the xhci roothub.
3206  * -EIO means the command failed: the command is not implemented, the
3207  * speed is unsupported (TRB Error), some ASMedia hosts complete with a
3208  * Parameter Error when querying the root hub (slot_id = 0), or another error or timeout occurred.
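 * (On success the caller's context holds a Port Bandwidth Context,
 * roughly one byte per root port giving the available bandwidth as a
 * percentage; see the xHCI spec, section 6.2.6.)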
3209  */
3210 int xhci_get_port_bandwidth(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
3211 			    u8 dev_speed)
3212 {
3213 	struct xhci_command *cmd;
3214 	unsigned long flags;
3215 	int ret;
3216 
3217 	if (!ctx || !xhci)
3218 		return -EINVAL;
3219 
3220 	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
3221 	if (!cmd)
3222 		return -ENOMEM;
3223 
3224 	cmd->in_ctx = ctx;
3225 
3226 	/* get xhci port bandwidth, refer to xhci rev1_2 protocol 4.6.15 */
3227 	spin_lock_irqsave(&xhci->lock, flags);
3228 
3229 	ret = xhci_queue_get_port_bw(xhci, cmd, ctx->dma, dev_speed, 0);
3230 	if (ret) {
3231 		spin_unlock_irqrestore(&xhci->lock, flags);
3232 		goto err_out;
3233 	}
3234 	xhci_ring_cmd_db(xhci);
3235 	spin_unlock_irqrestore(&xhci->lock, flags);
3236 
3237 	wait_for_completion(cmd->completion);
3238 	if (cmd->status != COMP_SUCCESS)
3239 		ret = -EIO;
3240 err_out:
3241 	kfree(cmd->completion);
3242 	kfree(cmd);
3243 
3244 	return ret;
3245 }
3246 
3247 static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3248 		struct xhci_container_ctx *in_ctx,
3249 		struct xhci_container_ctx *out_ctx,
3250 		struct xhci_input_control_ctx *ctrl_ctx,
3251 		u32 add_flags, u32 drop_flags)
3252 {
3253 	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3254 	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3255 	xhci_slot_copy(xhci, in_ctx, out_ctx);
3256 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3257 }
3258 
3259 static void xhci_endpoint_disable(struct usb_hcd *hcd,
3260 		struct usb_host_endpoint *host_ep)
3261 {
3262 	struct xhci_hcd *xhci;
3263 	struct xhci_virt_device *vdev;
3264 	struct xhci_virt_ep *ep;
3265 	struct usb_device *udev;
3266 	unsigned long flags;
3267 	unsigned int ep_index;
3268 
3269 	xhci = hcd_to_xhci(hcd);
3270 rescan:
3271 	spin_lock_irqsave(&xhci->lock, flags);
3272 
3273 	udev = (struct usb_device *)host_ep->hcpriv;
3274 	if (!udev || !udev->slot_id)
3275 		goto done;
3276 
3277 	vdev = xhci->devs[udev->slot_id];
3278 	if (!vdev)
3279 		goto done;
3280 
3281 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
3282 	ep = &vdev->eps[ep_index];
3283 
3284 	/* wait for hub_tt_work to finish clearing hub TT */
3285 	if (ep->ep_state & EP_CLEARING_TT) {
3286 		spin_unlock_irqrestore(&xhci->lock, flags);
3287 		schedule_timeout_uninterruptible(1);
3288 		goto rescan;
3289 	}
3290 
3291 	if (ep->ep_state)
3292 		xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3293 			 ep->ep_state);
3294 done:
3295 	spin_unlock_irqrestore(&xhci->lock, flags);
3296 }
3297 
3298 /*
3299  * Called after usb core issues a clear halt control message.
3300  * The host side of the halt should already be cleared by a reset endpoint
3301  * command issued when the STALL event was received.
3302  *
3303  * The reset endpoint command may only be issued to endpoints in the halted
3304  * state. For software that wishes to reset the data toggle or sequence number
3305  * of an endpoint that isn't in the halted state, this function will issue a
3306  * configure endpoint command with the Drop and Add bits set for the target
3307  * endpoint. Refer to the additional note in xhci specification section 4.6.8.
3308  *
3309  * vdev may be lost due to xHC restore error and re-initialization during S3/S4
3310  * resume. A new vdev will be allocated later by xhci_discover_or_reset_device()
3311  */
3312 
3313 static void xhci_endpoint_reset(struct usb_hcd *hcd,
3314 		struct usb_host_endpoint *host_ep)
3315 {
3316 	struct xhci_hcd *xhci;
3317 	struct usb_device *udev;
3318 	struct xhci_virt_device *vdev;
3319 	struct xhci_virt_ep *ep;
3320 	struct xhci_input_control_ctx *ctrl_ctx;
3321 	struct xhci_command *stop_cmd, *cfg_cmd;
3322 	unsigned int ep_index;
3323 	unsigned long flags;
3324 	u32 ep_flag;
3325 	int err;
3326 
3327 	xhci = hcd_to_xhci(hcd);
3328 	ep_index = xhci_get_endpoint_index(&host_ep->desc);
3329 
3330 	/*
3331 	 * USB core assumes a max packet value for ep0 on FS devices until the
3332 	 * real value is read from the descriptor. Core resets Ep0 if values
3333 	 * mismatch. Reconfigure the xhci ep0 endpoint context here in that case.
3334 	 */
3335 	if (usb_endpoint_xfer_control(&host_ep->desc) && ep_index == 0) {
3336 
3337 		udev = container_of(host_ep, struct usb_device, ep0);
3338 		if (udev->speed != USB_SPEED_FULL || !udev->slot_id)
3339 			return;
3340 
3341 		vdev = xhci->devs[udev->slot_id];
3342 		if (!vdev || vdev->udev != udev)
3343 			return;
3344 
3345 		xhci_check_ep0_maxpacket(xhci, vdev);
3346 
3347 		/* Nothing else should be done here for ep0 during ep reset */
3348 		return;
3349 	}
3350 
3351 	if (!host_ep->hcpriv)
3352 		return;
3353 	udev = (struct usb_device *) host_ep->hcpriv;
3354 	vdev = xhci->devs[udev->slot_id];
3355 
3356 	if (!udev->slot_id || !vdev)
3357 		return;
3358 
3359 	ep = &vdev->eps[ep_index];
3360 
3361 	/* Bail out if toggle is already being cleared by an endpoint reset */
3362 	spin_lock_irqsave(&xhci->lock, flags);
3363 	if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3364 		ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3365 		spin_unlock_irqrestore(&xhci->lock, flags);
3366 		return;
3367 	}
3368 	spin_unlock_irqrestore(&xhci->lock, flags);
3369 	/* Only interrupt and bulk ep's use data toggle, USB2 spec 5.5.4-> */
3370 	if (usb_endpoint_xfer_control(&host_ep->desc) ||
3371 	    usb_endpoint_xfer_isoc(&host_ep->desc))
3372 		return;
3373 
3374 	ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3375 
3376 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3377 		return;
3378 
3379 	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3380 	if (!stop_cmd)
3381 		return;
3382 
3383 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3384 	if (!cfg_cmd)
3385 		goto cleanup;
3386 
3387 	spin_lock_irqsave(&xhci->lock, flags);
3388 
3389 	/* block queuing new trbs and ringing ep doorbell */
3390 	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3391 
3392 	/*
3393 	 * Make sure endpoint ring is empty before resetting the toggle/seq.
3394 	 * Driver is required to synchronously cancel all transfer requests.
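 * (presumably because resetting the sequence number with TDs still
 * queued would corrupt an in-flight transfer; the code below refuses
 * the reset in that case)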
3395 * Stop the endpoint to force xHC to update the output context 3396 */ 3397 3398 if (!list_empty(&ep->ring->td_list)) { 3399 dev_err(&udev->dev, "EP not empty, refuse reset\n"); 3400 spin_unlock_irqrestore(&xhci->lock, flags); 3401 xhci_free_command(xhci, cfg_cmd); 3402 goto cleanup; 3403 } 3404 3405 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, 3406 ep_index, 0); 3407 if (err < 0) { 3408 spin_unlock_irqrestore(&xhci->lock, flags); 3409 xhci_free_command(xhci, cfg_cmd); 3410 xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ", 3411 __func__, err); 3412 goto cleanup; 3413 } 3414 3415 xhci_ring_cmd_db(xhci); 3416 spin_unlock_irqrestore(&xhci->lock, flags); 3417 3418 wait_for_completion(stop_cmd->completion); 3419 3420 spin_lock_irqsave(&xhci->lock, flags); 3421 3422 /* config ep command clears toggle if add and drop ep flags are set */ 3423 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx); 3424 if (!ctrl_ctx) { 3425 spin_unlock_irqrestore(&xhci->lock, flags); 3426 xhci_free_command(xhci, cfg_cmd); 3427 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3428 __func__); 3429 goto cleanup; 3430 } 3431 3432 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx, 3433 ctrl_ctx, ep_flag, ep_flag); 3434 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index); 3435 3436 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma, 3437 udev->slot_id, false); 3438 if (err < 0) { 3439 spin_unlock_irqrestore(&xhci->lock, flags); 3440 xhci_free_command(xhci, cfg_cmd); 3441 xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ", 3442 __func__, err); 3443 goto cleanup; 3444 } 3445 3446 xhci_ring_cmd_db(xhci); 3447 spin_unlock_irqrestore(&xhci->lock, flags); 3448 3449 wait_for_completion(cfg_cmd->completion); 3450 3451 xhci_free_command(xhci, cfg_cmd); 3452 cleanup: 3453 xhci_free_command(xhci, stop_cmd); 3454 spin_lock_irqsave(&xhci->lock, flags); 3455 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE) 3456 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE; 3457 spin_unlock_irqrestore(&xhci->lock, flags); 3458 } 3459 3460 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 3461 struct usb_device *udev, struct usb_host_endpoint *ep, 3462 unsigned int slot_id) 3463 { 3464 int ret; 3465 unsigned int ep_index; 3466 unsigned int ep_state; 3467 3468 if (!ep) 3469 return -EINVAL; 3470 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__); 3471 if (ret <= 0) 3472 return ret ? 
ret : -EINVAL;
3473 	if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3474 		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3475 				" descriptor for ep 0x%x does not support streams\n",
3476 				ep->desc.bEndpointAddress);
3477 		return -EINVAL;
3478 	}
3479 
3480 	ep_index = xhci_get_endpoint_index(&ep->desc);
3481 	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3482 	if (ep_state & EP_HAS_STREAMS ||
3483 	    ep_state & EP_GETTING_STREAMS) {
3484 		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3485 				"already has streams set up.\n",
3486 				ep->desc.bEndpointAddress);
3487 		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3488 				"dynamic stream context array reallocation.\n");
3489 		return -EINVAL;
3490 	}
3491 	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3492 		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3493 				"endpoint 0x%x; URBs are pending.\n",
3494 				ep->desc.bEndpointAddress);
3495 		return -EINVAL;
3496 	}
3497 	return 0;
3498 }
3499 
3500 static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3501 		unsigned int *num_streams, unsigned int *num_stream_ctxs)
3502 {
3503 	unsigned int max_streams;
3504 
3505 	/* The stream context array size must be a power of two */
3506 	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
3507 	/*
3508 	 * Find out how many primary stream array entries the host controller
3509 	 * supports. Later we may use secondary stream arrays (similar to 2nd
3510 	 * level page entries), but that's an optional feature for xHCI host
3511 	 * controllers. xHCs must support at least 4 stream IDs.
3512 	 */
3513 	max_streams = HCC_MAX_PSA(xhci->hcc_params);
3514 	if (*num_stream_ctxs > max_streams) {
3515 		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3516 				max_streams);
3517 		*num_stream_ctxs = max_streams;
3518 		*num_streams = max_streams;
3519 	}
3520 }
3521 
3522 /* Returns an error code if one of the endpoints already has streams.
3523  * This does not change any data structures, it only checks and gathers
3524  * information.
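 * (It may also shrink *num_streams: each endpoint's companion descriptor
 * caps how many stream IDs that endpoint supports, and the smallest cap
 * across all endpoints wins; see the usb_ss_max_streams() check below.)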
3525  */
3526 static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3527		struct usb_device *udev,
3528		struct usb_host_endpoint **eps, unsigned int num_eps,
3529		unsigned int *num_streams, u32 *changed_ep_bitmask)
3530 {
3531	unsigned int max_streams;
3532	unsigned int endpoint_flag;
3533	int i;
3534	int ret;
3535
3536	for (i = 0; i < num_eps; i++) {
3537		ret = xhci_check_streams_endpoint(xhci, udev,
3538				eps[i], udev->slot_id);
3539		if (ret < 0)
3540			return ret;
3541
3542		max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3543		if (max_streams < (*num_streams - 1)) {
3544			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3545					eps[i]->desc.bEndpointAddress,
3546					max_streams);
3547			*num_streams = max_streams + 1;
3548		}
3549
3550		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3551		if (*changed_ep_bitmask & endpoint_flag)
3552			return -EINVAL;
3553		*changed_ep_bitmask |= endpoint_flag;
3554	}
3555	return 0;
3556 }
3557
3558 static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3559		struct usb_device *udev,
3560		struct usb_host_endpoint **eps, unsigned int num_eps)
3561 {
3562	u32 changed_ep_bitmask = 0;
3563	unsigned int slot_id;
3564	unsigned int ep_index;
3565	unsigned int ep_state;
3566	int i;
3567
3568	slot_id = udev->slot_id;
3569	if (!xhci->devs[slot_id])
3570		return 0;
3571
3572	for (i = 0; i < num_eps; i++) {
3573		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3574		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3575		/* Are streams already being freed for the endpoint? */
3576		if (ep_state & EP_GETTING_NO_STREAMS) {
3577			xhci_warn(xhci, "WARN Can't disable streams for "
3578					"endpoint 0x%x, "
3579					"streams are being disabled already\n",
3580					eps[i]->desc.bEndpointAddress);
3581			return 0;
3582		}
3583		/* Are there actually any streams to free? */
3584		if (!(ep_state & EP_HAS_STREAMS) &&
3585				!(ep_state & EP_GETTING_STREAMS)) {
3586			xhci_warn(xhci, "WARN Can't disable streams for "
3587					"endpoint 0x%x, "
3588					"streams are already disabled!\n",
3589					eps[i]->desc.bEndpointAddress);
3590			xhci_warn(xhci, "WARN xhci_free_streams() called "
3591					"with non-streams endpoint\n");
3592			return 0;
3593		}
3594		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3595	}
3596	return changed_ep_bitmask;
3597 }
3598
3599 /*
3600  * The USB device drivers use this function (through the HCD interface in USB
3601  * core) to prepare a set of bulk endpoints to use streams.  Streams are used to
3602  * coordinate mass storage command queueing across multiple endpoints (basically
3603  * a stream ID == a task ID).
3604  *
3605  * Setting up streams involves allocating the same size stream context array
3606  * for each endpoint and issuing a configure endpoint command for all endpoints.
3607  *
3608  * Don't allow the call to succeed if one endpoint only supports one stream
3609  * (which means it doesn't support streams at all).
3610  *
3611  * Drivers may get fewer stream IDs than they asked for, if the host controller
3612  * hardware or endpoints claim they can't support the number of requested
3613  * stream IDs.
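 *
 * As an illustration only (hypothetical class-driver usage, not code from
 * this file), a driver such as uas normally reaches this function through
 * the USB core rather than calling it directly:
 *
 *	num = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *	if (num <= 0)
 *		return num;	// streams unsupported or resources exhausted
 *	// stream IDs 1..num may then be used as task tags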
3614 */ 3615 static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, 3616 struct usb_host_endpoint **eps, unsigned int num_eps, 3617 unsigned int num_streams, gfp_t mem_flags) 3618 { 3619 int i, ret; 3620 struct xhci_hcd *xhci; 3621 struct xhci_virt_device *vdev; 3622 struct xhci_command *config_cmd; 3623 struct xhci_input_control_ctx *ctrl_ctx; 3624 unsigned int ep_index; 3625 unsigned int num_stream_ctxs; 3626 unsigned int max_packet; 3627 unsigned long flags; 3628 u32 changed_ep_bitmask = 0; 3629 3630 if (!eps) 3631 return -EINVAL; 3632 3633 /* Add one to the number of streams requested to account for 3634 * stream 0 that is reserved for xHCI usage. 3635 */ 3636 num_streams += 1; 3637 xhci = hcd_to_xhci(hcd); 3638 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n", 3639 num_streams); 3640 3641 /* MaxPSASize value 0 (2 streams) means streams are not supported */ 3642 if ((xhci->quirks & XHCI_BROKEN_STREAMS) || 3643 HCC_MAX_PSA(xhci->hcc_params) < 4) { 3644 xhci_dbg(xhci, "xHCI controller does not support streams.\n"); 3645 return -ENOSYS; 3646 } 3647 3648 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags); 3649 if (!config_cmd) 3650 return -ENOMEM; 3651 3652 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx); 3653 if (!ctrl_ctx) { 3654 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 3655 __func__); 3656 xhci_free_command(xhci, config_cmd); 3657 return -ENOMEM; 3658 } 3659 3660 /* Check to make sure all endpoints are not already configured for 3661 * streams. While we're at it, find the maximum number of streams that 3662 * all the endpoints will support and check for duplicate endpoints. 3663 */ 3664 spin_lock_irqsave(&xhci->lock, flags); 3665 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps, 3666 num_eps, &num_streams, &changed_ep_bitmask); 3667 if (ret < 0) { 3668 xhci_free_command(xhci, config_cmd); 3669 spin_unlock_irqrestore(&xhci->lock, flags); 3670 return ret; 3671 } 3672 if (num_streams <= 1) { 3673 xhci_warn(xhci, "WARN: endpoints can't handle " 3674 "more than one stream.\n"); 3675 xhci_free_command(xhci, config_cmd); 3676 spin_unlock_irqrestore(&xhci->lock, flags); 3677 return -EINVAL; 3678 } 3679 vdev = xhci->devs[udev->slot_id]; 3680 /* Mark each endpoint as being in transition, so 3681 * xhci_urb_enqueue() will reject all URBs. 3682 */ 3683 for (i = 0; i < num_eps; i++) { 3684 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3685 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS; 3686 } 3687 spin_unlock_irqrestore(&xhci->lock, flags); 3688 3689 /* Setup internal data structures and allocate HW data structures for 3690 * streams (but don't install the HW structures in the input context 3691 * until we're sure all memory allocation succeeded). 3692 */ 3693 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs); 3694 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n", 3695 num_stream_ctxs, num_streams); 3696 3697 for (i = 0; i < num_eps; i++) { 3698 ep_index = xhci_get_endpoint_index(&eps[i]->desc); 3699 max_packet = usb_endpoint_maxp(&eps[i]->desc); 3700 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci, 3701 num_stream_ctxs, 3702 num_streams, 3703 max_packet, mem_flags); 3704 if (!vdev->eps[ep_index].stream_info) 3705 goto cleanup; 3706 /* Set maxPstreams in endpoint context and update deq ptr to 3707 * point to stream context array. FIXME 3708 */ 3709 } 3710 3711 /* Set up the input context for a configure endpoint command. 
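	 * The input context starts out as a copy of the device's current
	 * output context; xhci_setup_streams_ep_input_ctx() then overlays
	 * MaxPStreams and the stream context array dequeue pointer for each
	 * endpoint being changed.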
 */
3712	for (i = 0; i < num_eps; i++) {
3713		struct xhci_ep_ctx *ep_ctx;
3714
3715		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3716		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3717
3718		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3719				vdev->out_ctx, ep_index);
3720		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3721				vdev->eps[ep_index].stream_info);
3722	}
3723	/* Tell the HW to drop its old copy of the endpoint context info
3724	 * and add the updated copy from the input context.
3725	 */
3726	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3727			vdev->out_ctx, ctrl_ctx,
3728			changed_ep_bitmask, changed_ep_bitmask);
3729
3730	/* Issue and wait for the configure endpoint command */
3731	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3732			false, false);
3733
3734	/* xHC rejected the configure endpoint command for some reason, so we
3735	 * leave the old ring intact and free our internal streams data
3736	 * structure.
3737	 */
3738	if (ret < 0)
3739		goto cleanup;
3740
3741	spin_lock_irqsave(&xhci->lock, flags);
3742	for (i = 0; i < num_eps; i++) {
3743		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3744		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3745		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3746			udev->slot_id, ep_index);
3747		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3748	}
3749	xhci_free_command(xhci, config_cmd);
3750	spin_unlock_irqrestore(&xhci->lock, flags);
3751
3752	for (i = 0; i < num_eps; i++) {
3753		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3754		xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3755	}
3756	/* Subtract 1 for stream 0, which drivers can't use */
3757	return num_streams - 1;
3758
3759 cleanup:
3760	/* If it didn't work, free the streams! */
3761	for (i = 0; i < num_eps; i++) {
3762		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3763		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3764		vdev->eps[ep_index].stream_info = NULL;
3765		/* FIXME Unset maxPstreams in endpoint context and
3766		 * update deq ptr to point to normal endpoint ring.
3767		 */
3768		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3769		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3770		xhci_endpoint_zero(xhci, vdev, eps[i]);
3771	}
3772	xhci_free_command(xhci, config_cmd);
3773	return -ENOMEM;
3774 }
3775
3776 /* Transition the endpoint from using streams to being a "normal" endpoint
3777  * without streams.
3778  *
3779  * Modify the endpoint context state, submit a configure endpoint command,
3780  * and free all endpoint rings for streams if that completes successfully.
3781  */
3782 static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3783		struct usb_host_endpoint **eps, unsigned int num_eps,
3784		gfp_t mem_flags)
3785 {
3786	int i, ret;
3787	struct xhci_hcd *xhci;
3788	struct xhci_virt_device *vdev;
3789	struct xhci_command *command;
3790	struct xhci_input_control_ctx *ctrl_ctx;
3791	unsigned int ep_index;
3792	unsigned long flags;
3793	u32 changed_ep_bitmask;
3794
3795	xhci = hcd_to_xhci(hcd);
3796	vdev = xhci->devs[udev->slot_id];
3797
3798	/* Set up a configure endpoint command to remove the streams rings */
3799	spin_lock_irqsave(&xhci->lock, flags);
3800	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3801			udev, eps, num_eps);
3802	if (changed_ep_bitmask == 0) {
3803		spin_unlock_irqrestore(&xhci->lock, flags);
3804		return -EINVAL;
3805	}
3806
3807	/* Use the xhci_command structure from the first endpoint.
 We may have
3808	 * allocated too many, but the driver may call xhci_free_streams() for
3809	 * each endpoint it grouped into one call to xhci_alloc_streams().
3810	 */
3811	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3812	command = vdev->eps[ep_index].stream_info->free_streams_command;
3813	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3814	if (!ctrl_ctx) {
3815		spin_unlock_irqrestore(&xhci->lock, flags);
3816		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3817				__func__);
3818		return -EINVAL;
3819	}
3820
3821	for (i = 0; i < num_eps; i++) {
3822		struct xhci_ep_ctx *ep_ctx;
3823
3824		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3825		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3826		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3827			EP_GETTING_NO_STREAMS;
3828
3829		xhci_endpoint_copy(xhci, command->in_ctx,
3830				vdev->out_ctx, ep_index);
3831		xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3832				&vdev->eps[ep_index]);
3833	}
3834	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3835			vdev->out_ctx, ctrl_ctx,
3836			changed_ep_bitmask, changed_ep_bitmask);
3837	spin_unlock_irqrestore(&xhci->lock, flags);
3838
3839	/* Issue and wait for the configure endpoint command,
3840	 * which must succeed.
3841	 */
3842	ret = xhci_configure_endpoint(xhci, udev, command,
3843			false, true);
3844
3845	/* xHC rejected the configure endpoint command for some reason, so we
3846	 * leave the streams rings intact.
3847	 */
3848	if (ret < 0)
3849		return ret;
3850
3851	spin_lock_irqsave(&xhci->lock, flags);
3852	for (i = 0; i < num_eps; i++) {
3853		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3854		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3855		vdev->eps[ep_index].stream_info = NULL;
3856		/* FIXME Unset maxPstreams in endpoint context and
3857		 * update deq ptr to point to normal endpoint ring.
3858		 */
3859		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3860		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3861	}
3862	spin_unlock_irqrestore(&xhci->lock, flags);
3863
3864	return 0;
3865 }
3866
3867 /*
3868  * Deletes endpoint resources for endpoints that were active before a Reset
3869  * Device command, or a Disable Slot command.  The Reset Device command leaves
3870  * the control endpoint intact, whereas the Disable Slot command deletes it.
3871  *
3872  * Must be called with xhci->lock held.
3873  */
3874 void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3875		struct xhci_virt_device *virt_dev, bool drop_control_ep)
3876 {
3877	int i;
3878	unsigned int num_dropped_eps = 0;
3879	unsigned int drop_flags = 0;
3880
3881	for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3882		if (virt_dev->eps[i].ring) {
3883			drop_flags |= 1 << i;
3884			num_dropped_eps++;
3885		}
3886	}
3887	xhci->num_active_eps -= num_dropped_eps;
3888	if (num_dropped_eps)
3889		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3890				"Dropped %u ep ctxs, flags = 0x%x, "
3891				"%u now active.",
3892				num_dropped_eps, drop_flags,
3893				xhci->num_active_eps);
3894 }
3895
3896 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
3897
3898 /*
3899  * This submits a Reset Device Command, which will set the device state to 0,
3900  * set the device address to 0, and disable all the endpoints except the default
3901  * control endpoint.  The USB core should come back and call
3902  * xhci_address_device(), and then re-set up the configuration.
 If this is
3903  * called because of a usb_reset_and_verify_device(), then the old alternate
3904  * settings will be re-installed through the normal bandwidth allocation
3905  * functions.
3906  *
3907  * Wait for the Reset Device command to finish.  Remove all structures
3908  * associated with the endpoints that were disabled.  Clear the input device
3909  * structure? Reset the control endpoint 0 max packet size?
3910  *
3911  * If the virt_dev to be reset does not exist or does not match the udev,
3912  * it means the device is lost, possibly due to the xHC restore error and
3913  * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3914  * re-allocate the device.
3915  */
3916 static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3917		struct usb_device *udev)
3918 {
3919	int ret, i;
3920	unsigned long flags;
3921	struct xhci_hcd *xhci;
3922	unsigned int slot_id;
3923	struct xhci_virt_device *virt_dev;
3924	struct xhci_command *reset_device_cmd;
3925	struct xhci_slot_ctx *slot_ctx;
3926	int old_active_eps = 0;
3927
3928	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3929	if (ret <= 0)
3930		return ret;
3931	xhci = hcd_to_xhci(hcd);
3932	slot_id = udev->slot_id;
3933	virt_dev = xhci->devs[slot_id];
3934	if (!virt_dev) {
3935		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3936				"not exist. Re-allocate the device\n", slot_id);
3937		ret = xhci_alloc_dev(hcd, udev);
3938		if (ret == 1)
3939			return 0;
3940		else
3941			return -EINVAL;
3942	}
3943
3944	if (virt_dev->tt_info)
3945		old_active_eps = virt_dev->tt_info->active_eps;
3946
3947	if (virt_dev->udev != udev) {
3948		/* If the virt_dev and the udev do not match, this virt_dev
3949		 * may belong to another udev.
3950		 * Re-allocate the device.
3951		 */
3952		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3953				"not match the udev. Re-allocate the device\n",
3954				slot_id);
3955		ret = xhci_alloc_dev(hcd, udev);
3956		if (ret == 1)
3957			return 0;
3958		else
3959			return -EINVAL;
3960	}
3961
3962	/* If the device is not set up, there is no point in resetting it */
3963	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3964	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3965			SLOT_STATE_DISABLED)
3966		return 0;
3967
3968	if (xhci->quirks & XHCI_ETRON_HOST) {
3969		/*
3970		 * Obtain a new device slot to inform the xHCI host that
3971		 * the USB device has been reset.
3972		 */
3973		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
3974		if (!ret) {
3975			ret = xhci_alloc_dev(hcd, udev);
3976			if (ret == 1)
3977				ret = 0;
3978			else
3979				ret = -EINVAL;
3980		}
3981		return ret;
3982	}
3983
3984	trace_xhci_discover_or_reset_device(slot_ctx);
3985
3986	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3987	/* Allocate the command structure that holds the struct completion.
3988	 * Assume we're in process context, since the normal device reset
3989	 * process has to wait for the device anyway.  Storage devices are
3990	 * reset as part of error handling, so use GFP_NOIO instead of
3991	 * GFP_KERNEL.
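	 * (GFP_NOIO matters here: the block layer's error handler may be
	 * blocked waiting on this very reset, so an allocation that could
	 * recurse into block I/O might deadlock.)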
3992 */ 3993 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO); 3994 if (!reset_device_cmd) { 3995 xhci_dbg(xhci, "Couldn't allocate command structure.\n"); 3996 return -ENOMEM; 3997 } 3998 3999 /* Attempt to submit the Reset Device command to the command ring */ 4000 spin_lock_irqsave(&xhci->lock, flags); 4001 4002 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id); 4003 if (ret) { 4004 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 4005 spin_unlock_irqrestore(&xhci->lock, flags); 4006 goto command_cleanup; 4007 } 4008 xhci_ring_cmd_db(xhci); 4009 spin_unlock_irqrestore(&xhci->lock, flags); 4010 4011 /* Wait for the Reset Device command to finish */ 4012 wait_for_completion(reset_device_cmd->completion); 4013 4014 /* The Reset Device command can't fail, according to the 0.95/0.96 spec, 4015 * unless we tried to reset a slot ID that wasn't enabled, 4016 * or the device wasn't in the addressed or configured state. 4017 */ 4018 ret = reset_device_cmd->status; 4019 switch (ret) { 4020 case COMP_COMMAND_ABORTED: 4021 case COMP_COMMAND_RING_STOPPED: 4022 xhci_warn(xhci, "Timeout waiting for reset device command\n"); 4023 ret = -ETIME; 4024 goto command_cleanup; 4025 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */ 4026 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */ 4027 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n", 4028 slot_id, 4029 xhci_get_slot_state(xhci, virt_dev->out_ctx)); 4030 xhci_dbg(xhci, "Not freeing device rings.\n"); 4031 /* Don't treat this as an error. May change my mind later. */ 4032 virt_dev->flags = 0; 4033 ret = 0; 4034 goto command_cleanup; 4035 case COMP_SUCCESS: 4036 xhci_dbg(xhci, "Successful reset device command.\n"); 4037 break; 4038 default: 4039 if (xhci_is_vendor_info_code(xhci, ret)) 4040 break; 4041 xhci_warn(xhci, "Unknown completion code %u for " 4042 "reset device command.\n", ret); 4043 ret = -EINVAL; 4044 goto command_cleanup; 4045 } 4046 4047 /* Free up host controller endpoint resources */ 4048 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) { 4049 spin_lock_irqsave(&xhci->lock, flags); 4050 /* Don't delete the default control endpoint resources */ 4051 xhci_free_device_endpoint_resources(xhci, virt_dev, false); 4052 spin_unlock_irqrestore(&xhci->lock, flags); 4053 } 4054 4055 /* Everything but endpoint 0 is disabled, so free the rings. 
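	 * (Any leftover stream contexts are released first; note that
	 * xhci_get_endpoint_address(i) converts the xHCI endpoint context
	 * index back to a USB endpoint address for the warning below.)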
*/ 4056 for (i = 1; i < 31; i++) { 4057 struct xhci_virt_ep *ep = &virt_dev->eps[i]; 4058 4059 if (ep->ep_state & EP_HAS_STREAMS) { 4060 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n", 4061 xhci_get_endpoint_address(i)); 4062 xhci_free_stream_info(xhci, ep->stream_info); 4063 ep->stream_info = NULL; 4064 ep->ep_state &= ~EP_HAS_STREAMS; 4065 } 4066 4067 if (ep->ring) { 4068 if (ep->sideband) 4069 xhci_sideband_notify_ep_ring_free(ep->sideband, i); 4070 xhci_debugfs_remove_endpoint(xhci, virt_dev, i); 4071 xhci_free_endpoint_ring(xhci, virt_dev, i); 4072 } 4073 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list)) 4074 xhci_drop_ep_from_interval_table(xhci, 4075 &virt_dev->eps[i].bw_info, 4076 virt_dev->bw_table, 4077 udev, 4078 &virt_dev->eps[i], 4079 virt_dev->tt_info); 4080 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info); 4081 } 4082 /* If necessary, update the number of active TTs on this root port */ 4083 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps); 4084 virt_dev->flags = 0; 4085 ret = 0; 4086 4087 command_cleanup: 4088 xhci_free_command(xhci, reset_device_cmd); 4089 return ret; 4090 } 4091 4092 /* 4093 * At this point, the struct usb_device is about to go away, the device has 4094 * disconnected, and all traffic has been stopped and the endpoints have been 4095 * disabled. Free any HC data structures associated with that device. 4096 */ 4097 static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 4098 { 4099 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4100 struct xhci_virt_device *virt_dev; 4101 struct xhci_slot_ctx *slot_ctx; 4102 unsigned long flags; 4103 int i, ret; 4104 4105 /* 4106 * We called pm_runtime_get_noresume when the device was attached. 4107 * Decrement the counter here to allow controller to runtime suspend 4108 * if no devices remain. 4109 */ 4110 if (xhci->quirks & XHCI_RESET_ON_RESUME) 4111 pm_runtime_put_noidle(hcd->self.controller); 4112 4113 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); 4114 /* If the host is halted due to driver unload, we still need to free the 4115 * device. 4116 */ 4117 if (ret <= 0 && ret != -ENODEV) 4118 return; 4119 4120 virt_dev = xhci->devs[udev->slot_id]; 4121 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 4122 trace_xhci_free_dev(slot_ctx); 4123 4124 /* Stop any wayward timer functions (which may grab the lock) */ 4125 for (i = 0; i < 31; i++) 4126 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING; 4127 virt_dev->udev = NULL; 4128 xhci_disable_slot(xhci, udev->slot_id); 4129 4130 spin_lock_irqsave(&xhci->lock, flags); 4131 xhci_free_virt_device(xhci, virt_dev, udev->slot_id); 4132 spin_unlock_irqrestore(&xhci->lock, flags); 4133 4134 } 4135 4136 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) 4137 { 4138 struct xhci_command *command; 4139 unsigned long flags; 4140 u32 state; 4141 int ret; 4142 4143 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 4144 if (!command) 4145 return -ENOMEM; 4146 4147 xhci_debugfs_remove_slot(xhci, slot_id); 4148 4149 spin_lock_irqsave(&xhci->lock, flags); 4150 /* Don't disable the slot if the host controller is dead. 
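	 * (A status register reading as all ones means the controller was
	 * removed; queueing a Disable Slot command then would wait forever
	 * for a completion that cannot arrive.)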
*/ 4151 state = readl(&xhci->op_regs->status); 4152 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) || 4153 (xhci->xhc_state & XHCI_STATE_HALTED)) { 4154 spin_unlock_irqrestore(&xhci->lock, flags); 4155 xhci_free_command(xhci, command); 4156 return -ENODEV; 4157 } 4158 4159 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT, 4160 slot_id); 4161 if (ret) { 4162 spin_unlock_irqrestore(&xhci->lock, flags); 4163 xhci_free_command(xhci, command); 4164 return ret; 4165 } 4166 xhci_ring_cmd_db(xhci); 4167 spin_unlock_irqrestore(&xhci->lock, flags); 4168 4169 wait_for_completion(command->completion); 4170 4171 if (command->status != COMP_SUCCESS) 4172 xhci_warn(xhci, "Unsuccessful disable slot %u command, status %d\n", 4173 slot_id, command->status); 4174 4175 xhci_free_command(xhci, command); 4176 4177 return 0; 4178 } 4179 4180 int xhci_disable_and_free_slot(struct xhci_hcd *xhci, u32 slot_id) 4181 { 4182 struct xhci_virt_device *vdev = xhci->devs[slot_id]; 4183 int ret; 4184 4185 ret = xhci_disable_slot(xhci, slot_id); 4186 xhci_free_virt_device(xhci, vdev, slot_id); 4187 return ret; 4188 } 4189 4190 /* 4191 * Checks if we have enough host controller resources for the default control 4192 * endpoint. 4193 * 4194 * Must be called with xhci->lock held. 4195 */ 4196 static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) 4197 { 4198 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) { 4199 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 4200 "Not enough ep ctxs: " 4201 "%u active, need to add 1, limit is %u.", 4202 xhci->num_active_eps, xhci->limit_active_eps); 4203 return -ENOMEM; 4204 } 4205 xhci->num_active_eps += 1; 4206 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 4207 "Adding 1 ep ctx, %u now active.", 4208 xhci->num_active_eps); 4209 return 0; 4210 } 4211 4212 4213 /* 4214 * Returns 0 if the xHC ran out of device slots, the Enable Slot command 4215 * timed out, or allocating memory failed. Returns 1 on success. 
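 *
 * (The 0/1 convention, rather than a negative errno, matches what the USB
 * core expects from the alloc_dev host controller callback: a return of 0
 * is treated as failure.)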
4216  */
4217 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4218 {
4219	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4220	struct xhci_virt_device *vdev;
4221	struct xhci_slot_ctx *slot_ctx;
4222	unsigned long flags;
4223	int ret, slot_id;
4224	struct xhci_command *command;
4225
4226	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4227	if (!command)
4228		return 0;
4229
4230	spin_lock_irqsave(&xhci->lock, flags);
4231	ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4232	if (ret) {
4233		spin_unlock_irqrestore(&xhci->lock, flags);
4234		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4235		xhci_free_command(xhci, command);
4236		return 0;
4237	}
4238	xhci_ring_cmd_db(xhci);
4239	spin_unlock_irqrestore(&xhci->lock, flags);
4240
4241	wait_for_completion(command->completion);
4242	slot_id = command->slot_id;
4243
4244	if (!slot_id || command->status != COMP_SUCCESS) {
4245		xhci_err(xhci, "Error while assigning device slot ID: %s\n",
4246				xhci_trb_comp_code_string(command->status));
4247		xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4248				xhci->max_slots);
4249		xhci_free_command(xhci, command);
4250		return 0;
4251	}
4252
4253	xhci_free_command(xhci, command);
4254
4255	if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4256		spin_lock_irqsave(&xhci->lock, flags);
4257		ret = xhci_reserve_host_control_ep_resources(xhci);
4258		if (ret) {
4259			spin_unlock_irqrestore(&xhci->lock, flags);
4260			xhci_warn(xhci, "Not enough host resources, "
4261					"active endpoint contexts = %u\n",
4262					xhci->num_active_eps);
4263			goto disable_slot;
4264		}
4265		spin_unlock_irqrestore(&xhci->lock, flags);
4266	}
4267	/* Use GFP_NOIO, since this function can be called from
4268	 * xhci_discover_or_reset_device(), which may be called as part of
4269	 * mass storage driver error handling.
4270	 */
4271	if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4272		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4273		goto disable_slot;
4274	}
4275	vdev = xhci->devs[slot_id];
4276	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4277	trace_xhci_alloc_dev(slot_ctx);
4278
4279	udev->slot_id = slot_id;
4280
4281	xhci_debugfs_create_slot(xhci, slot_id);
4282
4283	/*
4284	 * If resetting upon resume, we can't put the controller into runtime
4285	 * suspend if there is a device attached.
4286	 */
4287	if (xhci->quirks & XHCI_RESET_ON_RESUME)
4288		pm_runtime_get_noresume(hcd->self.controller);
4289
4290	/* Is this a LS or FS device under a HS hub? */
4291	/* Hub or peripheral? */
4292	return 1;
4293
4294 disable_slot:
4295	xhci_disable_and_free_slot(xhci, udev->slot_id);
4296
4297	return 0;
4298 }
4299
4300 /**
4301  * xhci_setup_device - issues an Address Device command to assign a unique
4302  *			USB bus address.
4303  * @hcd: USB host controller data structure.
4304  * @udev: USB dev structure representing the connected device.
4305  * @setup: Enum specifying setup mode: address only or with context.
4306  * @timeout_ms: Max wait time (ms) for the command operation to complete.
4307  *
4308  * Return: 0 if successful; otherwise, negative error code.
4309  */
4310 static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4311		enum xhci_setup_dev setup, unsigned int timeout_ms)
4312 {
4313	const char *act = setup == SETUP_CONTEXT_ONLY ?
"context" : "address"; 4314 unsigned long flags; 4315 struct xhci_virt_device *virt_dev; 4316 int ret = 0; 4317 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4318 struct xhci_slot_ctx *slot_ctx; 4319 struct xhci_input_control_ctx *ctrl_ctx; 4320 u64 temp_64; 4321 struct xhci_command *command = NULL; 4322 4323 mutex_lock(&xhci->mutex); 4324 4325 if (xhci->xhc_state) { /* dying, removing or halted */ 4326 ret = -ESHUTDOWN; 4327 goto out; 4328 } 4329 4330 if (!udev->slot_id) { 4331 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4332 "Bad Slot ID %d", udev->slot_id); 4333 ret = -EINVAL; 4334 goto out; 4335 } 4336 4337 virt_dev = xhci->devs[udev->slot_id]; 4338 4339 if (WARN_ON(!virt_dev)) { 4340 /* 4341 * In plug/unplug torture test with an NEC controller, 4342 * a zero-dereference was observed once due to virt_dev = 0. 4343 * Print useful debug rather than crash if it is observed again! 4344 */ 4345 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n", 4346 udev->slot_id); 4347 ret = -EINVAL; 4348 goto out; 4349 } 4350 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 4351 trace_xhci_setup_device_slot(slot_ctx); 4352 4353 if (setup == SETUP_CONTEXT_ONLY) { 4354 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == 4355 SLOT_STATE_DEFAULT) { 4356 xhci_dbg(xhci, "Slot already in default state\n"); 4357 goto out; 4358 } 4359 } 4360 4361 command = xhci_alloc_command(xhci, true, GFP_KERNEL); 4362 if (!command) { 4363 ret = -ENOMEM; 4364 goto out; 4365 } 4366 4367 command->in_ctx = virt_dev->in_ctx; 4368 command->timeout_ms = timeout_ms; 4369 4370 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 4371 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx); 4372 if (!ctrl_ctx) { 4373 xhci_warn(xhci, "%s: Could not get input context, bad type.\n", 4374 __func__); 4375 ret = -EINVAL; 4376 goto out; 4377 } 4378 /* 4379 * If this is the first Set Address since device plug-in or 4380 * virt_device realloaction after a resume with an xHCI power loss, 4381 * then set up the slot context. 4382 */ 4383 if (!slot_ctx->dev_info) 4384 xhci_setup_addressable_virt_dev(xhci, udev); 4385 /* Otherwise, update the control endpoint ring enqueue pointer. */ 4386 else 4387 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); 4388 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); 4389 ctrl_ctx->drop_flags = 0; 4390 4391 trace_xhci_address_ctx(xhci, virt_dev->in_ctx); 4392 4393 trace_xhci_address_ctrl_ctx(ctrl_ctx); 4394 spin_lock_irqsave(&xhci->lock, flags); 4395 trace_xhci_setup_device(virt_dev); 4396 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, 4397 udev->slot_id, setup); 4398 if (ret) { 4399 spin_unlock_irqrestore(&xhci->lock, flags); 4400 xhci_dbg_trace(xhci, trace_xhci_dbg_address, 4401 "FIXME: allocate a command ring segment"); 4402 goto out; 4403 } 4404 xhci_ring_cmd_db(xhci); 4405 spin_unlock_irqrestore(&xhci->lock, flags); 4406 4407 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 4408 wait_for_completion(command->completion); 4409 4410 /* FIXME: From section 4.3.4: "Software shall be responsible for timing 4411 * the SetAddress() "recovery interval" required by USB and aborting the 4412 * command on a timeout. 
4413	 */
4414	switch (command->status) {
4415	case COMP_COMMAND_ABORTED:
4416	case COMP_COMMAND_RING_STOPPED:
4417		xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4418		ret = -ETIME;
4419		break;
4420	case COMP_CONTEXT_STATE_ERROR:
4421	case COMP_SLOT_NOT_ENABLED_ERROR:
4422		xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4423			 act, udev->slot_id);
4424		ret = -EINVAL;
4425		break;
4426	case COMP_USB_TRANSACTION_ERROR:
4427		dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4428
4429		mutex_unlock(&xhci->mutex);
4430		ret = xhci_disable_and_free_slot(xhci, udev->slot_id);
4431		if (!ret) {
4432			if (xhci_alloc_dev(hcd, udev) == 1)
4433				xhci_setup_addressable_virt_dev(xhci, udev);
4434		}
4435		kfree(command->completion);
4436		kfree(command);
4437		return -EPROTO;
4438	case COMP_INCOMPATIBLE_DEVICE_ERROR:
4439		dev_warn(&udev->dev,
4440			 "ERROR: Incompatible device for setup %s command\n", act);
4441		ret = -ENODEV;
4442		break;
4443	case COMP_SUCCESS:
4444		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4445			       "Successful setup %s command", act);
4446		break;
4447	default:
4448		xhci_err(xhci,
4449			 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4450			 act, command->status);
4451		trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
4452		ret = -EINVAL;
4453		break;
4454	}
4455	if (ret)
4456		goto out;
4457	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4458	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4459			"Op regs DCBAA ptr = %#016llx", temp_64);
4460	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4461		"Slot ID %d dcbaa entry @%p = %#016llx",
4462		udev->slot_id,
4463		&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4464		(unsigned long long)
4465		le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4466	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4467			"Output Context DMA address = %#08llx",
4468			(unsigned long long)virt_dev->out_ctx->dma);
4469	trace_xhci_address_ctx(xhci, virt_dev->in_ctx);
4470	/*
4471	 * USB core uses address 1 for the roothubs, so we add one to the
4472	 * address given back to us by the HC.
4473	 */
4474	trace_xhci_address_ctx(xhci, virt_dev->out_ctx);
4475	/* Zero the input context control for later use */
4476	ctrl_ctx->add_flags = 0;
4477	ctrl_ctx->drop_flags = 0;
4478	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4479	udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4480
4481	xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4482		       "Internal device address = %d",
4483		       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4484 out:
4485	mutex_unlock(&xhci->mutex);
4486	if (command) {
4487		kfree(command->completion);
4488		kfree(command);
4489	}
4490	return ret;
4491 }
4492
4493 static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
4494		unsigned int timeout_ms)
4495 {
4496	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
4497 }
4498
4499 static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4500 {
4501	return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
4502				 XHCI_CMD_DEFAULT_TIMEOUT);
4503 }
4504
4505 /*
4506  * Translate the hub port number into the raw port number used by the HW
4507  * port status registers.  The mapping was computed from each port's
4508  * register offset when the root hub ports were parsed, and is stored in
4509  * hw_portnum.  Raw port numbers are 1-based.
4510  */
4511 int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4512 {
4513	struct xhci_hub *rhub;
4514
4515	rhub = xhci_get_rhub(hcd);
4516	return rhub->ports[port1 - 1]->hw_portnum + 1;
4517 }
4518
4519 /*
4520  * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4521  * slot context.  If that succeeds, store the new MEL in the xhci_virt_device.
4522  */
4523 static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4524			struct usb_device *udev, u16 max_exit_latency)
4525 {
4526	struct xhci_virt_device *virt_dev;
4527	struct xhci_command *command;
4528	struct xhci_input_control_ctx *ctrl_ctx;
4529	struct xhci_slot_ctx *slot_ctx;
4530	unsigned long flags;
4531	int ret;
4532
4533	command = xhci_alloc_command_with_ctx(xhci, true, GFP_KERNEL);
4534	if (!command)
4535		return -ENOMEM;
4536
4537	spin_lock_irqsave(&xhci->lock, flags);
4538
4539	virt_dev = xhci->devs[udev->slot_id];
4540
4541	/*
4542	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4543	 * xHC was re-initialized. Exit latency will be set later after
4544	 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4545	 */
4546
4547	if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4548		spin_unlock_irqrestore(&xhci->lock, flags);
4549		xhci_free_command(xhci, command);
4550		return 0;
4551	}
4552
4553	/* Attempt to issue an Evaluate Context command to change the MEL. */
4554	ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4555	if (!ctrl_ctx) {
4556		spin_unlock_irqrestore(&xhci->lock, flags);
4557		xhci_free_command(xhci, command);
4558		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4559				__func__);
4560		return -ENOMEM;
4561	}
4562
4563	xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4564	spin_unlock_irqrestore(&xhci->lock, flags);
4565
4566	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4567	slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4568	slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4569	slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4570	slot_ctx->dev_state = 0;
4571
4572	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4573			"Set up evaluate context for LPM MEL change.");
4574
4575	/* Issue and wait for the evaluate context command.
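	 * Passing ctx_change = true selects an Evaluate Context command,
	 * which makes the xHC re-read only the contexts flagged in
	 * add_flags - here just the slot context - instead of
	 * reconfiguring endpoints.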
*/ 4576 ret = xhci_configure_endpoint(xhci, udev, command, 4577 true, true); 4578 4579 if (!ret) { 4580 spin_lock_irqsave(&xhci->lock, flags); 4581 virt_dev->current_mel = max_exit_latency; 4582 spin_unlock_irqrestore(&xhci->lock, flags); 4583 } 4584 4585 xhci_free_command(xhci, command); 4586 4587 return ret; 4588 } 4589 4590 #ifdef CONFIG_PM 4591 4592 /* BESL to HIRD Encoding array for USB2 LPM */ 4593 static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000, 4594 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000}; 4595 4596 /* Calculate HIRD/BESL for USB2 PORTPMSC*/ 4597 static int xhci_calculate_hird_besl(struct xhci_hcd *xhci, 4598 struct usb_device *udev) 4599 { 4600 int u2del, besl, besl_host; 4601 int besl_device = 0; 4602 u32 field; 4603 4604 u2del = HCS_U2_LATENCY(xhci->hcs_params3); 4605 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4606 4607 if (field & USB_BESL_SUPPORT) { 4608 for (besl_host = 0; besl_host < 16; besl_host++) { 4609 if (xhci_besl_encoding[besl_host] >= u2del) 4610 break; 4611 } 4612 /* Use baseline BESL value as default */ 4613 if (field & USB_BESL_BASELINE_VALID) 4614 besl_device = USB_GET_BESL_BASELINE(field); 4615 else if (field & USB_BESL_DEEP_VALID) 4616 besl_device = USB_GET_BESL_DEEP(field); 4617 } else { 4618 if (u2del <= 50) 4619 besl_host = 0; 4620 else 4621 besl_host = (u2del - 51) / 75 + 1; 4622 } 4623 4624 besl = besl_host + besl_device; 4625 if (besl > 15) 4626 besl = 15; 4627 4628 return besl; 4629 } 4630 4631 /* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */ 4632 static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev) 4633 { 4634 u32 field; 4635 int l1; 4636 int besld = 0; 4637 int hirdm = 0; 4638 4639 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes); 4640 4641 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */ 4642 l1 = udev->l1_params.timeout / 256; 4643 4644 /* device has preferred BESLD */ 4645 if (field & USB_BESL_DEEP_VALID) { 4646 besld = USB_GET_BESL_DEEP(field); 4647 hirdm = 1; 4648 } 4649 4650 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm); 4651 } 4652 4653 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 4654 struct usb_device *udev, int enable) 4655 { 4656 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 4657 struct xhci_port **ports; 4658 struct xhci_port_regs __iomem *port_reg; 4659 u32 pm_val, hlpm_val, field; 4660 unsigned int port_num; 4661 unsigned long flags; 4662 int hird, exit_latency; 4663 int ret; 4664 4665 if (xhci->quirks & XHCI_HW_LPM_DISABLE) 4666 return -EPERM; 4667 4668 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support || 4669 !udev->lpm_capable) 4670 return -EPERM; 4671 4672 if (!udev->parent || udev->parent->parent || 4673 udev->descriptor.bDeviceClass == USB_CLASS_HUB) 4674 return -EPERM; 4675 4676 if (udev->usb2_hw_lpm_capable != 1) 4677 return -EPERM; 4678 4679 spin_lock_irqsave(&xhci->lock, flags); 4680 4681 ports = xhci->usb2_rhub.ports; 4682 port_num = udev->portnum - 1; 4683 port_reg = ports[port_num]->port_reg; 4684 pm_val = readl(&port_reg->portpmsc); 4685 4686 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n", 4687 str_enable_disable(enable), port_num + 1); 4688 4689 if (enable) { 4690 /* Host supports BESL timeout instead of HIRD */ 4691 if (udev->usb2_hw_lpm_besl_capable) { 4692 /* if device doesn't have a preferred BESL value use a 4693 * default one which works with mixed HIRD and BESL 4694 * systems. 
 See XHCI_DEFAULT_BESL definition in xhci.h
4695			 */
4696			field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4697			if ((field & USB_BESL_SUPPORT) &&
4698			    (field & USB_BESL_BASELINE_VALID))
4699				hird = USB_GET_BESL_BASELINE(field);
4700			else
4701				hird = udev->l1_params.besl;
4702
4703			exit_latency = xhci_besl_encoding[hird];
4704			spin_unlock_irqrestore(&xhci->lock, flags);
4705
4706			ret = xhci_change_max_exit_latency(xhci, udev,
4707							   exit_latency);
4708			if (ret < 0)
4709				return ret;
4710			spin_lock_irqsave(&xhci->lock, flags);
4711
4712			hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4713			writel(hlpm_val, &port_reg->porthlmpc);
4714			/* flush write */
4715			readl(&port_reg->porthlmpc);
4716		} else {
4717			hird = xhci_calculate_hird_besl(xhci, udev);
4718		}
4719
4720		pm_val &= ~PORT_HIRD_MASK;
4721		pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4722		writel(pm_val, &port_reg->portpmsc);
4723		pm_val = readl(&port_reg->portpmsc);
4724		pm_val |= PORT_HLE;
4725		writel(pm_val, &port_reg->portpmsc);
4726		/* flush write */
4727		readl(&port_reg->portpmsc);
4728	} else {
4729		pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4730		writel(pm_val, &port_reg->portpmsc);
4731		/* flush write */
4732		readl(&port_reg->portpmsc);
4733		if (udev->usb2_hw_lpm_besl_capable) {
4734			spin_unlock_irqrestore(&xhci->lock, flags);
4735			xhci_change_max_exit_latency(xhci, udev, 0);
4736			readl_poll_timeout(&ports[port_num]->port_reg->portsc, pm_val,
4737					   (pm_val & PORT_PLS_MASK) == XDEV_U0,
4738					   100, 10000);
4739			return 0;
4740		}
4741	}
4742
4743	spin_unlock_irqrestore(&xhci->lock, flags);
4744	return 0;
4745 }
4746
4747 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4748 {
4749	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4750	struct xhci_port *port;
4751	u32 capability;
4752
4753	/* Check if USB3 device at root port is tunneled over USB4 */
4754	if (hcd->speed >= HCD_USB3 && !udev->parent->parent) {
4755		port = xhci->usb3_rhub.ports[udev->portnum - 1];
4756
4757		udev->tunnel_mode = xhci_port_is_tunneled(xhci, port);
4758		if (udev->tunnel_mode == USB_LINK_UNKNOWN)
4759			dev_dbg(&udev->dev, "link tunnel state unknown\n");
4760		else if (udev->tunnel_mode == USB_LINK_TUNNELED)
4761			dev_dbg(&udev->dev, "tunneled over USB4 link\n");
4762		else if (udev->tunnel_mode == USB_LINK_NATIVE)
4763			dev_dbg(&udev->dev, "native USB 3.x link\n");
4764		return 0;
4765	}
4766
4767	if (hcd->speed >= HCD_USB3 || !udev->lpm_capable || !xhci->hw_lpm_support)
4768		return 0;
4769
4770	/* So far we only support LPM for non-hub devices directly below the root hub */
4771	if (!udev->parent || udev->parent->parent ||
4772			udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4773		return 0;
4774
4775	port = xhci->usb2_rhub.ports[udev->portnum - 1];
4776	capability = port->port_cap->protocol_caps;
4777
4778	if (capability & XHCI_HLC) {
4779		udev->usb2_hw_lpm_capable = 1;
4780		udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4781		udev->l1_params.besl = XHCI_DEFAULT_BESL;
4782		if (capability & XHCI_BLC)
4783			udev->usb2_hw_lpm_besl_capable = 1;
4784	}
4785
4786	return 0;
4787 }
4788
4789 /*---------------------- USB 3.0 Link PM functions ------------------------*/
4790
4791 /* Service interval in nanoseconds = 2^(bInterval - 1) * 125us, converted to ns */
4792 static unsigned long long xhci_service_interval_to_ns(
4793		struct usb_endpoint_descriptor *desc)
4794 {
4795	return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4796 }
4797
4798 static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4799		enum
 usb3_link_state state)
4800 {
4801	unsigned long long sel;
4802	unsigned long long pel;
4803	unsigned int max_sel_pel;
4804	char *state_name;
4805
4806	switch (state) {
4807	case USB3_LPM_U1:
4808		/* Convert SEL and PEL stored in nanoseconds to microseconds */
4809		sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4810		pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4811		max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4812		state_name = "U1";
4813		break;
4814	case USB3_LPM_U2:
4815		sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4816		pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4817		max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4818		state_name = "U2";
4819		break;
4820	default:
4821		dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4822			 __func__);
4823		return USB3_LPM_DISABLED;
4824	}
4825
4826	if (sel <= max_sel_pel && pel <= max_sel_pel)
4827		return USB3_LPM_DEVICE_INITIATED;
4828
4829	if (sel > max_sel_pel)
4830		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4831				"due to long SEL %llu us\n",
4832				state_name, sel);
4833	else
4834		dev_dbg(&udev->dev, "Device-initiated %s disabled "
4835				"due to long PEL %llu us\n",
4836				state_name, pel);
4837	return USB3_LPM_DISABLED;
4838 }
4839
4840 /* The U1 timeout should be the maximum of the following values:
4841  *  - For control endpoints, U1 system exit latency (SEL) * 3
4842  *  - For bulk endpoints, U1 SEL * 5
4843  *  - For interrupt endpoints:
4844  *    - Notification EPs, U1 SEL * 3
4845  *    - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4846  *  - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4847  */
4848 static unsigned long long xhci_calculate_intel_u1_timeout(
4849		struct usb_device *udev,
4850		struct usb_endpoint_descriptor *desc)
4851 {
4852	unsigned long long timeout_ns;
4853	int ep_type;
4854	int intr_type;
4855
4856	ep_type = usb_endpoint_type(desc);
4857	switch (ep_type) {
4858	case USB_ENDPOINT_XFER_CONTROL:
4859		timeout_ns = udev->u1_params.sel * 3;
4860		break;
4861	case USB_ENDPOINT_XFER_BULK:
4862		timeout_ns = udev->u1_params.sel * 5;
4863		break;
4864	case USB_ENDPOINT_XFER_INT:
4865		intr_type = usb_endpoint_interrupt_type(desc);
4866		if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4867			timeout_ns = udev->u1_params.sel * 3;
4868			break;
4869		}
4870		/* Otherwise the calculation is the same as isoc eps */
4871		fallthrough;
4872	case USB_ENDPOINT_XFER_ISOC:
4873		timeout_ns = xhci_service_interval_to_ns(desc);
4874		timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4875		if (timeout_ns < udev->u1_params.sel * 2)
4876			timeout_ns = udev->u1_params.sel * 2;
4877		break;
4878	default:
4879		return 0;
4880	}
4881
4882	return timeout_ns;
4883 }
4884
4885 /* Returns the hub-encoded U1 timeout value. */
4886 static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4887		struct usb_device *udev,
4888		struct usb_endpoint_descriptor *desc)
4889 {
4890	unsigned long long timeout_ns;
4891
4892	/* Prevent U1 if service interval is shorter than U1 exit latency */
4893	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4894		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4895			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4896			return USB3_LPM_DISABLED;
4897		}
4898	}
4899
4900	if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST))
4901		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4902	else
4903		timeout_ns = udev->u1_params.sel;
4904
4905	/* The U1 timeout is encoded in 1us intervals.
4906	 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
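	 * For example (hypothetical numbers): an Intel host with a bulk
	 * endpoint and SEL = 20us computes 5 * 20000ns above, which encodes
	 * to DIV_ROUND_UP(100000, 1000) = 100, comfortably under the 127us
	 * maximum a USB 3.0 hub can be programmed with.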
4907 */ 4908 if (timeout_ns == USB3_LPM_DISABLED) 4909 timeout_ns = 1; 4910 else 4911 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000); 4912 4913 /* If the necessary timeout value is bigger than what we can set in the 4914 * USB 3.0 hub, we have to disable hub-initiated U1. 4915 */ 4916 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT) 4917 return timeout_ns; 4918 dev_dbg(&udev->dev, "Hub-initiated U1 disabled due to long timeout %lluus\n", 4919 timeout_ns); 4920 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1); 4921 } 4922 4923 /* The U2 timeout should be the maximum of: 4924 * - 10 ms (to avoid the bandwidth impact on the scheduler) 4925 * - largest bInterval of any active periodic endpoint (to avoid going 4926 * into lower power link states between intervals). 4927 * - the U2 Exit Latency of the device 4928 */ 4929 static unsigned long long xhci_calculate_intel_u2_timeout( 4930 struct usb_device *udev, 4931 struct usb_endpoint_descriptor *desc) 4932 { 4933 unsigned long long timeout_ns; 4934 unsigned long long u2_del_ns; 4935 4936 timeout_ns = 10 * 1000 * 1000; 4937 4938 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) && 4939 (xhci_service_interval_to_ns(desc) > timeout_ns)) 4940 timeout_ns = xhci_service_interval_to_ns(desc); 4941 4942 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL; 4943 if (u2_del_ns > timeout_ns) 4944 timeout_ns = u2_del_ns; 4945 4946 return timeout_ns; 4947 } 4948 4949 /* Returns the hub-encoded U2 timeout value. */ 4950 static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci, 4951 struct usb_device *udev, 4952 struct usb_endpoint_descriptor *desc) 4953 { 4954 unsigned long long timeout_ns; 4955 4956 /* Prevent U2 if service interval is shorter than U2 exit latency */ 4957 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) { 4958 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) { 4959 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n"); 4960 return USB3_LPM_DISABLED; 4961 } 4962 } 4963 4964 if (xhci->quirks & (XHCI_INTEL_HOST | XHCI_ZHAOXIN_HOST)) 4965 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc); 4966 else 4967 timeout_ns = udev->u2_params.sel; 4968 4969 /* The U2 timeout is encoded in 256us intervals */ 4970 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000); 4971 /* If the necessary timeout value is bigger than what we can set in the 4972 * USB 3.0 hub, we have to disable hub-initiated U2. 
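	 * For scale: the 10ms Intel floor from above encodes as
	 * DIV_ROUND_UP(10000000, 256000) = 40 units of 256us, i.e. 10.24ms,
	 * well within the encodable range.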
4973 */ 4974 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT) 4975 return timeout_ns; 4976 dev_dbg(&udev->dev, "Hub-initiated U2 disabled due to long timeout %lluus\n", 4977 timeout_ns * 256); 4978 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2); 4979 } 4980 4981 static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4982 struct usb_device *udev, 4983 struct usb_endpoint_descriptor *desc, 4984 enum usb3_link_state state, 4985 u16 *timeout) 4986 { 4987 if (state == USB3_LPM_U1) 4988 return xhci_calculate_u1_timeout(xhci, udev, desc); 4989 else if (state == USB3_LPM_U2) 4990 return xhci_calculate_u2_timeout(xhci, udev, desc); 4991 4992 return USB3_LPM_DISABLED; 4993 } 4994 4995 static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci, 4996 struct usb_device *udev, 4997 struct usb_endpoint_descriptor *desc, 4998 enum usb3_link_state state, 4999 u16 *timeout) 5000 { 5001 u16 alt_timeout; 5002 5003 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev, 5004 desc, state, timeout); 5005 5006 /* If we found we can't enable hub-initiated LPM, and 5007 * the U1 or U2 exit latency was too high to allow 5008 * device-initiated LPM as well, then we will disable LPM 5009 * for this device, so stop searching any further. 5010 */ 5011 if (alt_timeout == USB3_LPM_DISABLED) { 5012 *timeout = alt_timeout; 5013 return -E2BIG; 5014 } 5015 if (alt_timeout > *timeout) 5016 *timeout = alt_timeout; 5017 return 0; 5018 } 5019 5020 static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci, 5021 struct usb_device *udev, 5022 struct usb_host_interface *alt, 5023 enum usb3_link_state state, 5024 u16 *timeout) 5025 { 5026 int j; 5027 5028 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 5029 if (xhci_update_timeout_for_endpoint(xhci, udev, 5030 &alt->endpoint[j].desc, state, timeout)) 5031 return -E2BIG; 5032 } 5033 return 0; 5034 } 5035 5036 static int xhci_check_tier_policy(struct xhci_hcd *xhci, 5037 struct usb_device *udev, 5038 enum usb3_link_state state) 5039 { 5040 struct usb_device *parent = udev->parent; 5041 int tier = 1; /* roothub is tier1 */ 5042 5043 while (parent) { 5044 parent = parent->parent; 5045 tier++; 5046 } 5047 5048 if (xhci->quirks & XHCI_INTEL_HOST && tier > 3) 5049 goto fail; 5050 if (xhci->quirks & XHCI_ZHAOXIN_HOST && tier > 2) 5051 goto fail; 5052 5053 return 0; 5054 fail: 5055 dev_dbg(&udev->dev, "Tier policy prevents U1/U2 LPM states for devices at tier %d\n", 5056 tier); 5057 return -E2BIG; 5058 } 5059 5060 /* Returns the U1 or U2 timeout that should be enabled. 5061 * If the tier check or timeout setting functions return with a non-zero exit 5062 * code, that means the timeout value has been finalized and we shouldn't look 5063 * at any more endpoints. 5064 */ 5065 static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd, 5066 struct usb_device *udev, enum usb3_link_state state) 5067 { 5068 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 5069 struct usb_host_config *config; 5070 char *state_name; 5071 int i; 5072 u16 timeout = USB3_LPM_DISABLED; 5073 5074 if (state == USB3_LPM_U1) 5075 state_name = "U1"; 5076 else if (state == USB3_LPM_U2) 5077 state_name = "U2"; 5078 else { 5079 dev_warn(&udev->dev, "Can't enable unknown link state %i\n", 5080 state); 5081 return timeout; 5082 } 5083 5084 /* Gather some information about the currently installed configuration 5085 * and alternate interface settings. 
5086 */ 5087 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc, 5088 state, &timeout)) 5089 return timeout; 5090 5091 config = udev->actconfig; 5092 if (!config) 5093 return timeout; 5094 5095 for (i = 0; i < config->desc.bNumInterfaces; i++) { 5096 struct usb_driver *driver; 5097 struct usb_interface *intf = config->interface[i]; 5098 5099 if (!intf) 5100 continue; 5101 5102 /* Check if any currently bound drivers want hub-initiated LPM 5103 * disabled. 5104 */ 5105 if (intf->dev.driver) { 5106 driver = to_usb_driver(intf->dev.driver); 5107 if (driver && driver->disable_hub_initiated_lpm) { 5108 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n", 5109 state_name, driver->name); 5110 timeout = xhci_get_timeout_no_hub_lpm(udev, 5111 state); 5112 if (timeout == USB3_LPM_DISABLED) 5113 return timeout; 5114 } 5115 } 5116 5117 /* Not sure how this could happen... */ 5118 if (!intf->cur_altsetting) 5119 continue; 5120 5121 if (xhci_update_timeout_for_interface(xhci, udev, 5122 intf->cur_altsetting, 5123 state, &timeout)) 5124 return timeout; 5125 } 5126 return timeout; 5127 } 5128 5129 static int calculate_max_exit_latency(struct usb_device *udev, 5130 enum usb3_link_state state_changed, 5131 u16 hub_encoded_timeout) 5132 { 5133 unsigned long long u1_mel_us = 0; 5134 unsigned long long u2_mel_us = 0; 5135 unsigned long long mel_us = 0; 5136 bool disabling_u1; 5137 bool disabling_u2; 5138 bool enabling_u1; 5139 bool enabling_u2; 5140 5141 disabling_u1 = (state_changed == USB3_LPM_U1 && 5142 hub_encoded_timeout == USB3_LPM_DISABLED); 5143 disabling_u2 = (state_changed == USB3_LPM_U2 && 5144 hub_encoded_timeout == USB3_LPM_DISABLED); 5145 5146 enabling_u1 = (state_changed == USB3_LPM_U1 && 5147 hub_encoded_timeout != USB3_LPM_DISABLED); 5148 enabling_u2 = (state_changed == USB3_LPM_U2 && 5149 hub_encoded_timeout != USB3_LPM_DISABLED); 5150 5151 /* If U1 was already enabled and we're not disabling it, 5152 * or we're going to enable U1, account for the U1 max exit latency. 5153 */ 5154 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) || 5155 enabling_u1) 5156 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000); 5157 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) || 5158 enabling_u2) 5159 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000); 5160 5161 mel_us = max(u1_mel_us, u2_mel_us); 5162 5163 /* xHCI host controller max exit latency field is only 16 bits wide. */ 5164 if (mel_us > MAX_EXIT) { 5165 dev_warn(&udev->dev, "Link PM max exit latency of %lluus " 5166 "is too big.\n", mel_us); 5167 return -E2BIG; 5168 } 5169 return mel_us; 5170 } 5171 5172 /* Returns the USB3 hub-encoded value for the U1/U2 timeout. */ 5173 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 5174 struct usb_device *udev, enum usb3_link_state state) 5175 { 5176 struct xhci_hcd *xhci; 5177 struct xhci_port *port; 5178 u16 hub_encoded_timeout; 5179 int mel; 5180 int ret; 5181 5182 xhci = hcd_to_xhci(hcd); 5183 /* The LPM timeout values are pretty host-controller specific, so don't 5184 * enable hub-initiated timeouts unless the vendor has provided 5185 * information about their timeout algorithm. 
5186 */ 5187 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 5188 !xhci->devs[udev->slot_id]) 5189 return USB3_LPM_DISABLED; 5190 5191 if (xhci_check_tier_policy(xhci, udev, state) < 0) 5192 return USB3_LPM_DISABLED; 5193 5194 /* If connected to root port then check port can handle lpm */ 5195 if (udev->parent && !udev->parent->parent) { 5196 port = xhci->usb3_rhub.ports[udev->portnum - 1]; 5197 if (port->lpm_incapable) 5198 return USB3_LPM_DISABLED; 5199 } 5200 5201 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state); 5202 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout); 5203 if (mel < 0) { 5204 /* Max Exit Latency is too big, disable LPM. */ 5205 hub_encoded_timeout = USB3_LPM_DISABLED; 5206 mel = 0; 5207 } 5208 5209 ret = xhci_change_max_exit_latency(xhci, udev, mel); 5210 if (ret) 5211 return ret; 5212 return hub_encoded_timeout; 5213 } 5214 5215 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 5216 struct usb_device *udev, enum usb3_link_state state) 5217 { 5218 struct xhci_hcd *xhci; 5219 u16 mel; 5220 5221 xhci = hcd_to_xhci(hcd); 5222 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) || 5223 !xhci->devs[udev->slot_id]) 5224 return 0; 5225 5226 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED); 5227 return xhci_change_max_exit_latency(xhci, udev, mel); 5228 } 5229 #else /* CONFIG_PM */ 5230 5231 static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd, 5232 struct usb_device *udev, int enable) 5233 { 5234 return 0; 5235 } 5236 5237 static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev) 5238 { 5239 return 0; 5240 } 5241 5242 static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd, 5243 struct usb_device *udev, enum usb3_link_state state) 5244 { 5245 return USB3_LPM_DISABLED; 5246 } 5247 5248 static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd, 5249 struct usb_device *udev, enum usb3_link_state state) 5250 { 5251 return 0; 5252 } 5253 #endif /* CONFIG_PM */ 5254 5255 /*-------------------------------------------------------------------------*/ 5256 5257 /* Once a hub descriptor is fetched for a device, we need to update the xHC's 5258 * internal data structures for the device. 
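 * The hub-specific slot context fields are filled in below: the HUB flag,
 * the number of downstream ports, and, for high speed hubs with a TT, the
 * MTT flag and TT think time.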
static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	struct xhci_port *port;
	u16 hub_encoded_timeout;
	int mel;
	int ret;

	xhci = hcd_to_xhci(hcd);
	/* The LPM timeout values are pretty host-controller specific, so don't
	 * enable hub-initiated timeouts unless the vendor has provided
	 * information about their timeout algorithm.
	 */
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return USB3_LPM_DISABLED;

	if (xhci_check_tier_policy(xhci, udev, state) < 0)
		return USB3_LPM_DISABLED;

	/* If connected to a root port, check that the port can handle LPM */
	if (udev->parent && !udev->parent->parent) {
		port = xhci->usb3_rhub.ports[udev->portnum - 1];
		if (port->lpm_incapable)
			return USB3_LPM_DISABLED;
	}

	hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
	mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
	if (mel < 0) {
		/* Max Exit Latency is too big, disable LPM. */
		hub_encoded_timeout = USB3_LPM_DISABLED;
		mel = 0;
	}

	ret = xhci_change_max_exit_latency(xhci, udev, mel);
	if (ret)
		return ret;
	return hub_encoded_timeout;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	struct xhci_hcd *xhci;
	u16 mel;

	xhci = hcd_to_xhci(hcd);
	if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
			!xhci->devs[udev->slot_id])
		return 0;

	mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
	return xhci_change_max_exit_latency(xhci, udev, mel);
}
#else /* CONFIG_PM */

static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable)
{
	return 0;
}

static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	return 0;
}

static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return USB3_LPM_DISABLED;
}

static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
			struct usb_device *udev, enum usb3_link_state state)
{
	return 0;
}
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}

	config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
	if (!config_cmd)
		return -ENOMEM;

	ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				__func__);
		xhci_free_command(xhci, config_cmd);
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (hdev->speed == USB_SPEED_HIGH &&
			xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
		xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}

	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
	/*
	 * Refer to section 6.2.2: MTT should be 0 for a full-speed hub,
	 * but it may already have been set to 1 when the xHCI virtual
	 * device was set up, so clear it anyway.
	 */
	if (tt->multi)
		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
	else if (hdev->speed == USB_SPEED_FULL)
		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);

	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 *
		 * xHCI 1.0: this field shall be 0 if the device is not a
		 * High-speed hub.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
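		/*
		 * For example (illustrative, assuming usb_tt.think_time is
		 * in nanoseconds as hub.c sets it up): a hub descriptor
		 * advertising 16 FS bit times yields think_time = 1332 ns,
		 * so (1332 / 666) - 1 = 1, the xHCI encoding for 16 FS bit
		 * times. One FS bit time is ~83.3 ns, so 8 bit times is
		 * ~666 ns, which is where the divisor comes from.
		 */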
		if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
			slot_ctx->tt_info |=
				cpu_to_le32(TT_THINK_TIME(think_time));
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_free_command(xhci, config_cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);

static int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* MFINDEX counts 125 us microframes; eight of them make up one
	 * 1 ms frame, hence the shift by 3.
	 * (EHCI also mods by the periodic size. Why?)
	 */
	return readl(&xhci->run_regs->microframe_index) >> 3;
}

static void xhci_hcd_init_usb2_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	xhci->usb2_rhub.hcd = hcd;
	hcd->speed = HCD_USB2;
	hcd->self.root_hub->speed = USB_SPEED_HIGH;
	/*
	 * A USB 2.0 roothub under xHCI has an integrated TT
	 * (rate matching hub), as opposed to having an OHCI/UHCI
	 * companion controller.
	 */
	hcd->has_tt = 1;
}

static void xhci_hcd_init_usb3_data(struct xhci_hcd *xhci, struct usb_hcd *hcd)
{
	unsigned int minor_rev;

	/*
	 * Early xHCI 1.1 spec did not mention that USB 3.1 capable hosts
	 * should return 0x31 for sbrn, or that the minor revision
	 * is a two digit BCD containing minor and sub-minor numbers.
	 * This was later clarified in xHCI 1.2.
	 *
	 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
	 * minor revision set to 0x1 instead of 0x10.
	 */
	if (xhci->usb3_rhub.min_rev == 0x1)
		minor_rev = 1;
	else
		minor_rev = xhci->usb3_rhub.min_rev / 0x10;
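	/*
	 * For example: a compliant host with a USB 3.1 protocol capability
	 * reports min_rev 0x10 (BCD), so minor_rev becomes 0x10 / 0x10 = 1
	 * (USB 3.1); a USB 3.2 host reports 0x20, giving minor_rev 2. The
	 * quirky hosts described above report 0x1 and are mapped to 1
	 * directly.
	 */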
"Enhanced " : ""); 5416 5417 xhci->usb3_rhub.hcd = hcd; 5418 } 5419 5420 int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) 5421 { 5422 struct xhci_hcd *xhci; 5423 /* 5424 * TODO: Check with DWC3 clients for sysdev according to 5425 * quirks 5426 */ 5427 struct device *dev = hcd->self.sysdev; 5428 int retval; 5429 u32 hcs_params1; 5430 5431 /* Accept arbitrarily long scatter-gather lists */ 5432 hcd->self.sg_tablesize = ~0; 5433 5434 /* support to build packet from discontinuous buffers */ 5435 hcd->self.no_sg_constraint = 1; 5436 5437 /* XHCI controllers don't stop the ep queue on short packets :| */ 5438 hcd->self.no_stop_on_short = 1; 5439 5440 xhci = hcd_to_xhci(hcd); 5441 5442 if (!usb_hcd_is_primary_hcd(hcd)) { 5443 xhci_hcd_init_usb3_data(xhci, hcd); 5444 return 0; 5445 } 5446 5447 mutex_init(&xhci->mutex); 5448 xhci->main_hcd = hcd; 5449 xhci->cap_regs = hcd->regs; 5450 xhci->op_regs = hcd->regs + 5451 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase)); 5452 xhci->run_regs = hcd->regs + 5453 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK); 5454 /* Cache read-only capability registers */ 5455 hcs_params1 = readl(&xhci->cap_regs->hcs_params1); 5456 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2); 5457 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3); 5458 xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase)); 5459 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params); 5460 if (xhci->hci_version > 0x100) 5461 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); 5462 5463 xhci->max_slots = HCS_MAX_SLOTS(hcs_params1); 5464 xhci->max_ports = min(HCS_MAX_PORTS(hcs_params1), MAX_HC_PORTS); 5465 /* xhci-plat or xhci-pci might have set max_interrupters already */ 5466 if (!xhci->max_interrupters) 5467 xhci->max_interrupters = min(HCS_MAX_INTRS(hcs_params1), MAX_HC_INTRS); 5468 else if (xhci->max_interrupters > HCS_MAX_INTRS(hcs_params1)) 5469 xhci->max_interrupters = HCS_MAX_INTRS(hcs_params1); 5470 5471 xhci->quirks |= quirks; 5472 5473 if (get_quirks) 5474 get_quirks(dev, xhci); 5475 5476 /* In xhci controllers which follow xhci 1.0 spec gives a spurious 5477 * success event after a short transfer. This quirk will ignore such 5478 * spurious event. 5479 */ 5480 if (xhci->hci_version > 0x96) 5481 xhci->quirks |= XHCI_SPURIOUS_SUCCESS; 5482 5483 if (xhci->hci_version == 0x95 && link_quirk) { 5484 xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits"); 5485 xhci->quirks |= XHCI_LINK_TRB_QUIRK; 5486 } 5487 5488 /* Make sure the HC is halted. */ 5489 retval = xhci_halt(xhci); 5490 if (retval) 5491 return retval; 5492 5493 xhci_zero_64b_regs(xhci); 5494 5495 xhci_dbg(xhci, "Resetting HCD\n"); 5496 /* Reset the internal HC memory state and registers. */ 5497 retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC); 5498 if (retval) 5499 return retval; 5500 xhci_dbg(xhci, "Reset complete\n"); 5501 5502 /* 5503 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) 5504 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit 5505 * address memory pointers actually. So, this driver clears the AC64 5506 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, 5507 * DMA_BIT_MASK(32)) in this xhci_gen_setup(). 
	/* Cache read-only capability registers */
	hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
	xhci->hci_version = HC_VERSION(readl(&xhci->cap_regs->hc_capbase));
	xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
	if (xhci->hci_version > 0x100)
		xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);

	xhci->max_slots = HCS_MAX_SLOTS(hcs_params1);
	xhci->max_ports = min(HCS_MAX_PORTS(hcs_params1), MAX_HC_PORTS);
	/* xhci-plat or xhci-pci might have set max_interrupters already */
	if (!xhci->max_interrupters)
		xhci->max_interrupters = min(HCS_MAX_INTRS(hcs_params1), MAX_HC_INTRS);
	else if (xhci->max_interrupters > HCS_MAX_INTRS(hcs_params1))
		xhci->max_interrupters = HCS_MAX_INTRS(hcs_params1);

	xhci->quirks |= quirks;

	if (get_quirks)
		get_quirks(dev, xhci);

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	}

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		return retval;

	xhci_zero_64b_regs(xhci);

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
	if (retval)
		return retval;
	xhci_dbg(xhci, "Reset complete\n");

	/*
	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
	 * of HCCPARAMS1 is set to 1. However, these xHCs don't actually
	 * support 64-bit address memory pointers. So, this driver clears
	 * the AC64 bit of xhci->hcc_params so that
	 * dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) is called below.
	 */
	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
		xhci->hcc_params &= ~BIT(0);

	/* Set dma_mask and coherent_dma_mask to 64 bits,
	 * if the xHC supports 64-bit addressing */
	if ((xhci->hcc_params & HCC_64BIT_ADDR) &&
	    !dma_set_mask(dev, DMA_BIT_MASK(64))) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	} else {
		/*
		 * This is to avoid error in cases where a 32-bit USB
		 * controller is used on a 64-bit capable system.
		 */
		retval = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (retval)
			return retval;
		xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
		dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	}

	spin_lock_init(&xhci->lock);
	INIT_LIST_HEAD(&xhci->cmd_list);
	INIT_DELAYED_WORK(&xhci->cmd_timer, xhci_handle_command_timeout);
	init_completion(&xhci->cmd_ring_stop_completion);
	xhci_hcd_page_size(xhci);

	memset(xhci->devs, 0, MAX_HC_SLOTS * sizeof(*xhci->devs));

	/* Allocate xHCI data structures */
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	if (retval)
		return retval;

	/* Initialize HCD and host controller data structures */
	xhci_init(hcd);

	if (xhci_hcd_is_usb3(hcd))
		xhci_hcd_init_usb3_data(xhci, hcd);
	else
		xhci_hcd_init_usb2_data(xhci, hcd);

	xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
		  xhci->hcc_params, xhci->hci_version, xhci->quirks);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned long flags;

	xhci = hcd_to_xhci(hcd);

	spin_lock_irqsave(&xhci->lock, flags);
	udev = (struct usb_device *)ep->hcpriv;
	slot_id = udev->slot_id;
	ep_index = xhci_get_endpoint_index(&ep->desc);

	xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
	xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
				HCD_BH,

	/*
	 * basic lifecycle operations
	 */
	.reset =		NULL, /* set in xhci_init_driver() */
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.map_urb_for_dma =	xhci_map_urb_for_dma,
	.unmap_urb_for_dma =	xhci_unmap_urb_for_dma,
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_disable =	xhci_endpoint_disable,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.enable_device =	xhci_enable_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/*
	 * root hub support
	 */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
	.get_resuming_ports =	xhci_get_resuming_ports,

	/*
	 * call back when device connected and addressed
	 */
	.update_device =	xhci_update_device,
	.set_usb2_hw_lpm =	xhci_set_usb2_hardware_lpm,
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.clear_tt_buffer_complete =	xhci_clear_tt_buffer_complete,
};
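
/*
 * xhci_init_driver() below lets platform glue supply overrides for a few
 * of the ops above. A minimal sketch of a caller (hypothetical names; the
 * real users are xhci-plat.c, xhci-pci.c and friends):
 *
 *	static struct hc_driver my_xhci_hc_driver;
 *
 *	static const struct xhci_driver_overrides my_overrides __initconst = {
 *		.extra_priv_size = sizeof(struct my_priv),
 *		.reset = my_xhci_setup,		// becomes drv->reset
 *	};
 *
 *	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
 */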

void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over)
{
	BUG_ON(!over);

	/* Copy the generic table to drv then apply the overrides */
	*drv = xhci_hc_driver;

	if (over) {
		drv->hcd_priv_size += over->extra_priv_size;
		if (over->reset)
			drv->reset = over->reset;
		if (over->start)
			drv->start = over->start;
		if (over->add_endpoint)
			drv->add_endpoint = over->add_endpoint;
		if (over->drop_endpoint)
			drv->drop_endpoint = over->drop_endpoint;
		if (over->check_bandwidth)
			drv->check_bandwidth = over->check_bandwidth;
		if (over->reset_bandwidth)
			drv->reset_bandwidth = over->reset_bandwidth;
		if (over->update_hub_device)
			drv->update_hub_device = over->update_hub_device;
		if (over->hub_control)
			drv->hub_control = over->hub_control;
	}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access. (Each entry is a 32-bit
	 * word, so e.g. 8*32/8 means eight 32-bit fields, i.e. 32 bytes.)
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 1024 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*1024)*32/8);

	if (usb_disabled())
		return -ENODEV;

	xhci_debugfs_create_root();
	xhci_dbc_init();

	return 0;
}

/*
 * If an init function is provided, an exit function must also be provided
 * to allow module unload.
 */
static void __exit xhci_hcd_fini(void)
{
	xhci_debugfs_remove_root();
	xhci_dbc_exit();
}

module_init(xhci_hcd_init);
module_exit(xhci_hcd_fini);