/*
 * This file contains code to reset and initialize USB host controllers.
 * Some of it includes work-arounds for PCI hardware and BIOS quirks.
 * It may need to run early during booting -- before USB would normally
 * initialize -- to ensure that Linux doesn't use any legacy modes.
 *
 *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
 *  (and others)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include "pci-quirks.h"
#include "xhci-ext-caps.h"


#define UHCI_USBLEGSUP		0xc0		/* legacy support */
#define UHCI_USBCMD		0		/* command register */
#define UHCI_USBINTR		4		/* interrupt register */
#define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
#define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
#define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
#define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
#define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
#define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
#define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */

#define OHCI_CONTROL		0x04
#define OHCI_CMDSTATUS		0x08
#define OHCI_INTRSTATUS		0x0c
#define OHCI_INTRENABLE		0x10
#define OHCI_INTRDISABLE	0x14
#define OHCI_FMINTERVAL		0x34
#define OHCI_HCR		(1 << 0)	/* host controller reset */
#define OHCI_OCR		(1 << 3)	/* ownership change request */
#define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
#define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
#define OHCI_INTR_OC		(1 << 30)	/* ownership change */

#define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
#define EHCI_USBCMD		0		/* command register */
#define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
#define EHCI_USBSTS		4		/* status register */
#define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
#define EHCI_USBINTR		8		/* interrupt register */
#define EHCI_CONFIGFLAG		0x40		/* configured flag register */
#define EHCI_USBLEGSUP		0		/* legacy support register */
#define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
#define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
#define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
#define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */

/* AMD quirk use */
#define AB_REG_BAR_LOW		0xe0
#define AB_REG_BAR_HIGH		0xe1
#define AB_REG_BAR_SB700	0xf0
#define AB_INDX(addr)		((addr) + 0x00)
#define AB_DATA(addr)		((addr) + 0x04)
#define AX_INDXC		0x30
#define AX_DATAC		0x34

#define NB_PCIE_INDX_ADDR	0xe0
#define NB_PCIE_INDX_DATA	0xe4
#define PCIE_P_CNTL		0x10040
#define BIF_NB			0x10002
#define NB_PIF0_PWRDOWN_0	0x01100012
#define NB_PIF0_PWRDOWN_1	0x01100013

#define USB_INTEL_XUSB2PR	0xD0
#define USB_INTEL_USB3_PSSEN	0xD8

static struct amd_chipset_info {
	struct pci_dev	*nb_dev;
	struct pci_dev	*smbus_dev;
	int nb_type;
	int sb_type;
	int isoc_reqs;
	int probe_count;
	int probe_result;
} amd_chipset;

static DEFINE_SPINLOCK(amd_lock);

int usb_amd_find_chipset_info(void)
{
	u8 rev = 0;
	unsigned long flags;
	struct amd_chipset_info info;
	int ret;

	spin_lock_irqsave(&amd_lock, flags);

	/* probe only once */
	if (amd_chipset.probe_count > 0) {
		amd_chipset.probe_count++;
		spin_unlock_irqrestore(&amd_lock, flags);
		return amd_chipset.probe_result;
	}
	memset(&info, 0, sizeof(info));
	spin_unlock_irqrestore(&amd_lock, flags);

	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
	if (info.smbus_dev) {
		rev = info.smbus_dev->revision;
		if (rev >= 0x40)
			info.sb_type = 1;
		else if (rev >= 0x30 && rev <= 0x3b)
			info.sb_type = 3;
	} else {
		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						0x780b, NULL);
		if (!info.smbus_dev) {
			ret = 0;
			goto commit;
		}

		rev = info.smbus_dev->revision;
		if (rev >= 0x11 && rev <= 0x18)
			info.sb_type = 2;
	}

	if (info.sb_type == 0) {
		if (info.smbus_dev) {
			pci_dev_put(info.smbus_dev);
			info.smbus_dev = NULL;
		}
		ret = 0;
		goto commit;
	}

	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
	if (info.nb_dev) {
		info.nb_type = 1;
	} else {
		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
		if (info.nb_dev) {
			info.nb_type = 2;
		} else {
			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
						     0x9600, NULL);
			if (info.nb_dev)
				info.nb_type = 3;
		}
	}

	ret = info.probe_result = 1;
	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");

commit:

	spin_lock_irqsave(&amd_lock, flags);
	if (amd_chipset.probe_count > 0) {
		/* race - someone else was faster - drop devices */

		/* Mark that we were here */
		amd_chipset.probe_count++;
		ret = amd_chipset.probe_result;

		spin_unlock_irqrestore(&amd_lock, flags);

		if (info.nb_dev)
			pci_dev_put(info.nb_dev);
		if (info.smbus_dev)
			pci_dev_put(info.smbus_dev);

	} else {
		/* no race - commit the result */
		info.probe_count++;
		amd_chipset = info;
		spin_unlock_irqrestore(&amd_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);

/*
 * The hardware normally enables the A-link power management feature, which
 * lets the system lower the power consumption in idle states.
 *
 * This USB quirk prevents the link from going into that lower power state
 * during isochronous transfers.
 *
 * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of
 * some AMD platforms may stutter or have breaks occasionally.
 */
static void usb_amd_quirk_pll(int disable)
{
	u32 addr, addr_low, addr_high, val;
	u32 bit = disable ? 0 : 1;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	if (disable) {
		amd_chipset.isoc_reqs++;
		if (amd_chipset.isoc_reqs > 1) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	} else {
		amd_chipset.isoc_reqs--;
		if (amd_chipset.isoc_reqs > 0) {
			spin_unlock_irqrestore(&amd_lock, flags);
			return;
		}
	}

	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
		outb_p(AB_REG_BAR_LOW, 0xcd6);
		addr_low = inb_p(0xcd7);
		outb_p(AB_REG_BAR_HIGH, 0xcd6);
		addr_high = inb_p(0xcd7);
		addr = addr_high << 8 | addr_low;

		outl_p(0x30, AB_INDX(addr));
		outl_p(0x40, AB_DATA(addr));
		outl_p(0x34, AB_INDX(addr));
		val = inl_p(AB_DATA(addr));
	} else if (amd_chipset.sb_type == 3) {
		pci_read_config_dword(amd_chipset.smbus_dev,
					AB_REG_BAR_SB700, &addr);
		outl(AX_INDXC, AB_INDX(addr));
		outl(0x40, AB_DATA(addr));
		outl(AX_DATAC, AB_INDX(addr));
		val = inl(AB_DATA(addr));
	} else {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (disable) {
		val &= ~0x08;
		val |= (1 << 4) | (1 << 9);
	} else {
		val |= 0x08;
		val &= ~((1 << 4) | (1 << 9));
	}
	outl_p(val, AB_DATA(addr));

	if (!amd_chipset.nb_dev) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
		addr = PCIE_P_CNTL;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);

		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
		val |= bit | (bit << 3) | (bit << 12);
		val |= ((!bit) << 4) | ((!bit) << 9);
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = BIF_NB;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		val &= ~(1 << 8);
		val |= bit << 8;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	} else if (amd_chipset.nb_type == 2) {
		addr = NB_PIF0_PWRDOWN_0;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);

		addr = NB_PIF0_PWRDOWN_1;
		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_ADDR, addr);
		pci_read_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, &val);
		if (disable)
			val &= ~(0x3f << 7);
		else
			val |= 0x3f << 7;

		pci_write_config_dword(amd_chipset.nb_dev,
					NB_PCIE_INDX_DATA, val);
	}

	spin_unlock_irqrestore(&amd_lock, flags);
	return;
}

void usb_amd_quirk_pll_disable(void)
{
	usb_amd_quirk_pll(1);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);

void usb_amd_quirk_pll_enable(void)
{
	usb_amd_quirk_pll(0);
}
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
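
/*
 * Usage note (a sketch, not taken from this file; the real call sites live
 * in the individual host controller drivers): an HCD is expected to call
 * usb_amd_find_chipset_info() once at probe time and, if it returns nonzero,
 * bracket its isochronous streaming with the pair above, roughly:
 *
 *	if (usb_amd_find_chipset_info())
 *		usb_amd_quirk_pll_disable();	// first isoc stream starts
 *	...
 *	usb_amd_quirk_pll_enable();		// last isoc stream stops
 *
 * The isoc_reqs counting in usb_amd_quirk_pll() makes nested calls safe:
 * only the first disable and the last enable touch the hardware.
 */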

void usb_amd_dev_put(void)
{
	struct pci_dev *nb, *smbus;
	unsigned long flags;

	spin_lock_irqsave(&amd_lock, flags);

	amd_chipset.probe_count--;
	if (amd_chipset.probe_count > 0) {
		spin_unlock_irqrestore(&amd_lock, flags);
		return;
	}

	/* save them to pci_dev_put outside of spinlock */
	nb    = amd_chipset.nb_dev;
	smbus = amd_chipset.smbus_dev;

	amd_chipset.nb_dev = NULL;
	amd_chipset.smbus_dev = NULL;
	amd_chipset.nb_type = 0;
	amd_chipset.sb_type = 0;
	amd_chipset.isoc_reqs = 0;
	amd_chipset.probe_result = 0;

	spin_unlock_irqrestore(&amd_lock, flags);

	if (nb)
		pci_dev_put(nb);
	if (smbus)
		pci_dev_put(smbus);
}
EXPORT_SYMBOL_GPL(usb_amd_dev_put);

/*
 * Make sure the controller is completely inactive, unable to
 * generate interrupts or do DMA.
 */
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
	 */
	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);

	/* Reset the HC - this will force us to get a
	 * new notification of any already connected
	 * ports due to the virtual disconnect that it
	 * implies.
	 */
	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
	mb();
	udelay(5);
	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");

	/* Just to be safe, disable interrupt requests and
	 * make sure the controller is stopped.
	 */
	outw(0, base + UHCI_USBINTR);
	outw(0, base + UHCI_USBCMD);
}
EXPORT_SYMBOL_GPL(uhci_reset_hc);
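
/*
 * Usage note (a sketch; the real call sites live in the UHCI host controller
 * driver, not in this file): uhci_check_and_reset_hc() below is meant to be
 * called when the driver starts or resumes a controller whose previous state
 * is unknown, while uhci_reset_hc() can also be called on its own, e.g. at
 * shutdown, to quiesce the hardware unconditionally.
 */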

/*
 * Initialize a controller that was newly discovered or has just been
 * resumed.  In either case we can't be sure of its previous state.
 *
 * Returns: 1 if the controller was reset, 0 otherwise.
 */
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
{
	u16 legsup;
	unsigned int cmd, intr;

	/*
	 * When restarting a suspended controller, we expect all the
	 * settings to be the same as we left them:
	 *
	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
	 *	Controller is stopped and configured with EGSM set;
	 *	No interrupts enabled except possibly Resume Detect.
	 *
	 * If any of these conditions are violated we do a complete reset.
	 */
	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
				__func__, legsup);
		goto reset_needed;
	}

	cmd = inw(base + UHCI_USBCMD);
	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
			!(cmd & UHCI_USBCMD_EGSM)) {
		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
				__func__, cmd);
		goto reset_needed;
	}

	intr = inw(base + UHCI_USBINTR);
	if (intr & (~UHCI_USBINTR_RESUME)) {
		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
				__func__, intr);
		goto reset_needed;
	}
	return 0;

reset_needed:
	dev_dbg(&pdev->dev, "Performing full reset\n");
	uhci_reset_hc(pdev, base);
	return 1;
}
EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);

static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
	u16 cmd;
	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
}

#define pio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_IO)
#define mmio_enabled(dev)	io_type_enabled(dev, PCI_COMMAND_MEMORY)

static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
{
	unsigned long base = 0;
	int i;

	if (!pio_enabled(pdev))
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			base = pci_resource_start(pdev, i);
			break;
		}

	if (base)
		uhci_check_and_reset_hc(pdev, base);
}

static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
{
	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
}

static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
	void __iomem *base;
	u32 control;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	control = readl(base + OHCI_CONTROL);

/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
#ifdef __hppa__
#define OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
#else
#define OHCI_CTRL_MASK		OHCI_CTRL_RWC

	if (control & OHCI_CTRL_IR) {
		int wait_time = 500; /* arbitrary; 5 seconds */
		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
		while (wait_time > 0 &&
				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
			wait_time -= 10;
			msleep(10);
		}
		if (wait_time <= 0)
			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n",
					readl(base + OHCI_CONTROL));
	}
#endif

	/* reset controller, preserving RWC (and possibly IR) */
	writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
	readl(base + OHCI_CONTROL);

	/* Some NVIDIA controllers stop working if kept in RESET for too long */
	if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) {
		u32 fminterval;
		int cnt;

		/* drive reset for at least 50 ms (7.1.7.5) */
		msleep(50);

		/* software reset of the controller, preserving HcFmInterval */
		fminterval = readl(base + OHCI_FMINTERVAL);
		writel(OHCI_HCR, base + OHCI_CMDSTATUS);

		/* reset requires max 10 us delay */
		for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
			if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
				break;
			udelay(1);
		}
		writel(fminterval, base + OHCI_FMINTERVAL);

		/* Now we're in the SUSPEND state with all devices reset
		 * and wakeups and interrupts disabled
		 */
	}

	/*
	 * disable interrupts
	 */
	writel(~(u32)0, base + OHCI_INTRDISABLE);
	writel(~(u32)0, base + OHCI_INTRSTATUS);

	iounmap(base);
}

static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
	{
		/* Pegatron Lucid (ExoPC) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
		},
	},
	{
		/* Pegatron Lucid (Ordissimo AIRIS) */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
		},
	},
	{ }
};

static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
					void __iomem *op_reg_base,
					u32 cap, u8 offset)
{
	int try_handoff = 1, tried_handoff = 0;

	/* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
	 * the handoff on its unused controller.  Skip it. */
	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
		if (dmi_check_system(ehci_dmi_nohandoff_table))
			try_handoff = 0;
	}

	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");

#if 0
/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
 * but that seems dubious in general (the BIOS left it off intentionally)
 * and is known to prevent some systems from booting.  so we won't do this
 * unless maybe we can determine when we're on a system that needs SMI forced.
 */
		/* BIOS workaround (?): be sure the pre-Linux code
		 * receives the SMI
		 */
		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
				       val | EHCI_USBLEGCTLSTS_SOOE);
#endif

		/* some systems get upset if this semaphore is
		 * set for any other reason than forcing a BIOS
		 * handoff..
		 */
		pci_write_config_byte(pdev, offset + 3, 1);
	}

	/* if boot firmware now owns EHCI, spin till it hands it over. */
	if (try_handoff) {
		int msec = 1000;
		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
			tried_handoff = 1;
			msleep(10);
			msec -= 10;
			pci_read_config_dword(pdev, offset, &cap);
		}
	}

	if (cap & EHCI_USBLEGSUP_BIOS) {
		/* well, possibly buggy BIOS... try to shut it down,
		 * and hope nothing goes too wrong
		 */
		if (try_handoff)
			dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
					" (BIOS bug?) %08x\n", cap);
		pci_write_config_byte(pdev, offset + 2, 0);
	}

	/* just in case, always disable EHCI SMIs */
	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);

	/* If the BIOS ever owned the controller then we can't expect
	 * any power sessions to remain intact.
	 */
	if (tried_handoff)
		writel(0, op_reg_base + EHCI_CONFIGFLAG);
}

static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
{
	void __iomem *base, *op_reg_base;
	u32	hcc_params, cap, val;
	u8	offset, cap_length;
	int	wait_time, delta, count = 256/4;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (base == NULL)
		return;

	cap_length = readb(base);
	op_reg_base = base + cap_length;

	/* EHCI 0.96 and later may have "extended capabilities"
	 * spec section 5.1 explains the bios handoff, e.g. for
	 * booting from USB disk or using a usb keyboard
	 */
	hcc_params = readl(base + EHCI_HCC_PARAMS);
	offset = (hcc_params >> 8) & 0xff;
	while (offset && --count) {
		pci_read_config_dword(pdev, offset, &cap);

		switch (cap & 0xff) {
		case 1:
			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
			break;
		case 0: /* Illegal reserved cap, set cap=0 so we exit */
			cap = 0; /* then fallthrough... */
		default:
			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
					"%02x\n", cap & 0xff);
		}
		offset = (cap >> 8) & 0xff;
	}
	if (!count)
		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");

	/*
	 * halt EHCI & disable its interrupts in any case
	 */
	val = readl(op_reg_base + EHCI_USBSTS);
	if ((val & EHCI_USBSTS_HALTED) == 0) {
		val = readl(op_reg_base + EHCI_USBCMD);
		val &= ~EHCI_USBCMD_RUN;
		writel(val, op_reg_base + EHCI_USBCMD);

		wait_time = 2000;
		delta = 100;
		do {
			writel(0x3f, op_reg_base + EHCI_USBSTS);
			udelay(delta);
			wait_time -= delta;
			val = readl(op_reg_base + EHCI_USBSTS);
			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
				break;
			}
		} while (wait_time > 0);
	}
	writel(0, op_reg_base + EHCI_USBINTR);
	writel(0x3f, op_reg_base + EHCI_USBSTS);

	iounmap(base);
}

/*
 * handshake - spin reading a register until handshake completes
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @wait_usec: timeout in microseconds
 * @delay_usec: delay in microseconds to wait between polling
 *
 * Polls a register every delay_usec microseconds.
 * Returns 0 when the mask bits have the value done.
 * Returns -ETIMEDOUT if this condition is not true after
 * wait_usec microseconds have passed.
 */
static int handshake(void __iomem *ptr, u32 mask, u32 done,
		int wait_usec, int delay_usec)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay_usec);
		wait_usec -= delay_usec;
	} while (wait_usec > 0);
	return -ETIMEDOUT;
}

bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
{
	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
		pdev->vendor == PCI_VENDOR_ID_INTEL &&
		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
}
EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);

/*
 * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
 * share some number of ports.  These ports can be switched between either
 * controller.  Not all of the ports under the EHCI host controller may be
 * switchable.
 *
 * The ports should be switched over to xHCI before PCI probes for any device
 * start.  This avoids active devices under EHCI being disconnected during the
 * port switchover, which could cause loss of data on USB storage devices, or
 * failed boot when the root file system is on a USB mass storage device and is
 * enumerated under EHCI first.
 *
 * We write into the xHC's PCI configuration space in some Intel-specific
 * registers to switch the ports over.  The USB 3.0 terminations and the USB
 * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
 * terminations before switching the USB 2.0 wires over, so that USB 3.0
 * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
 */
void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
{
	u32	ports_available;

	ports_available = 0xffffffff;
	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
	 * Register, to turn on SuperSpeed terminations for all
	 * available ports.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			cpu_to_le32(ports_available));

	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
			&ports_available);
	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
			"under xHCI: 0x%x\n", ports_available);

	ports_available = 0xffffffff;
	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
	 * switch the USB 2.0 power and data lines over to the xHCI
	 * host.
	 */
	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			cpu_to_le32(ports_available));

	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
			&ports_available);
	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
			"to xHCI: 0x%x\n", ports_available);
}
EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);

/**
 * PCI Quirks for xHCI.
 *
 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
 * It signals to the BIOS that the OS wants control of the host controller,
 * and then waits 5 seconds for the BIOS to hand over control.
 * If we time out, assume the BIOS is broken and take control anyway.
 */
static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
{
	void __iomem *base;
	int ext_cap_offset;
	void __iomem *op_reg_base;
	u32 val;
	int timeout;

	if (!mmio_resource_enabled(pdev, 0))
		return;

	base = ioremap_nocache(pci_resource_start(pdev, 0),
				pci_resource_len(pdev, 0));
	if (base == NULL)
		return;

	/*
	 * Find the Legacy Support Capability register -
	 * this is optional for xHCI host controllers.
	 */
	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
	do {
		if (!ext_cap_offset)
			/* We've reached the end of the extended capabilities */
			goto hc_init;
		val = readl(base + ext_cap_offset);
		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
			break;
		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
	} while (1);

	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);

		/* Wait for 5 seconds with 10 microsecond polling interval */
		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
				0, 5000 * 1000, 10);

		/* Assume a buggy BIOS and take HC ownership anyway */
		if (timeout) {
			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
					" (BIOS bug?) %08x\n", val);
%08x\n", val); 830 writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); 831 } 832 } 833 834 /* Disable any BIOS SMIs */ 835 writel(XHCI_LEGACY_DISABLE_SMI, 836 base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); 837 838 if (usb_is_intel_switchable_xhci(pdev)) 839 usb_enable_xhci_ports(pdev); 840 hc_init: 841 op_reg_base = base + XHCI_HC_LENGTH(readl(base)); 842 843 /* Wait for the host controller to be ready before writing any 844 * operational or runtime registers. Wait 5 seconds and no more. 845 */ 846 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, 847 5000, 10); 848 /* Assume a buggy HC and start HC initialization anyway */ 849 if (timeout) { 850 val = readl(op_reg_base + XHCI_STS_OFFSET); 851 dev_warn(&pdev->dev, 852 "xHCI HW not ready after 5 sec (HC bug?) " 853 "status = 0x%x\n", val); 854 } 855 856 /* Send the halt and disable interrupts command */ 857 val = readl(op_reg_base + XHCI_CMD_OFFSET); 858 val &= ~(XHCI_CMD_RUN | XHCI_IRQS); 859 writel(val, op_reg_base + XHCI_CMD_OFFSET); 860 861 /* Wait for the HC to halt - poll every 125 usec (one microframe). */ 862 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, 863 XHCI_MAX_HALT_USEC, 125); 864 if (timeout) { 865 val = readl(op_reg_base + XHCI_STS_OFFSET); 866 dev_warn(&pdev->dev, 867 "xHCI HW did not halt within %d usec " 868 "status = 0x%x\n", XHCI_MAX_HALT_USEC, val); 869 } 870 871 iounmap(base); 872 } 873 874 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) 875 { 876 if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) 877 quirk_usb_handoff_uhci(pdev); 878 else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) 879 quirk_usb_handoff_ohci(pdev); 880 else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI) 881 quirk_usb_disable_ehci(pdev); 882 else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) 883 quirk_usb_handoff_xhci(pdev); 884 } 885 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); 886