xref: /linux/drivers/usb/host/pci-quirks.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
1 /*
2  * This file contains code to reset and initialize USB host controllers.
3  * Some of it includes work-arounds for PCI hardware and BIOS quirks.
4  * It may need to run early during booting -- before USB would normally
5  * initialize -- to ensure that Linux doesn't use any legacy modes.
6  *
7  *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
8  *  (and others)
9  */
10 
11 #include <linux/types.h>
12 #include <linux/kconfig.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/init.h>
16 #include <linux/delay.h>
17 #include <linux/export.h>
18 #include <linux/acpi.h>
19 #include <linux/dmi.h>
20 #include "pci-quirks.h"
21 #include "xhci-ext-caps.h"
22 
23 
24 #define UHCI_USBLEGSUP		0xc0		/* legacy support */
25 #define UHCI_USBCMD		0		/* command register */
26 #define UHCI_USBINTR		4		/* interrupt register */
27 #define UHCI_USBLEGSUP_RWC	0x8f00		/* the R/WC bits */
28 #define UHCI_USBLEGSUP_RO	0x5040		/* R/O and reserved bits */
29 #define UHCI_USBCMD_RUN		0x0001		/* RUN/STOP bit */
30 #define UHCI_USBCMD_HCRESET	0x0002		/* Host Controller reset */
31 #define UHCI_USBCMD_EGSM	0x0008		/* Global Suspend Mode */
32 #define UHCI_USBCMD_CONFIGURE	0x0040		/* Config Flag */
33 #define UHCI_USBINTR_RESUME	0x0002		/* Resume interrupt enable */
34 
35 #define OHCI_CONTROL		0x04
36 #define OHCI_CMDSTATUS		0x08
37 #define OHCI_INTRSTATUS		0x0c
38 #define OHCI_INTRENABLE		0x10
39 #define OHCI_INTRDISABLE	0x14
40 #define OHCI_FMINTERVAL		0x34
41 #define OHCI_HCFS		(3 << 6)	/* hc functional state */
42 #define OHCI_HCR		(1 << 0)	/* host controller reset */
43 #define OHCI_OCR		(1 << 3)	/* ownership change request */
44 #define OHCI_CTRL_RWC		(1 << 9)	/* remote wakeup connected */
45 #define OHCI_CTRL_IR		(1 << 8)	/* interrupt routing */
46 #define OHCI_INTR_OC		(1 << 30)	/* ownership change */
47 
48 #define EHCI_HCC_PARAMS		0x08		/* extended capabilities */
49 #define EHCI_USBCMD		0		/* command register */
50 #define EHCI_USBCMD_RUN		(1 << 0)	/* RUN/STOP bit */
51 #define EHCI_USBSTS		4		/* status register */
52 #define EHCI_USBSTS_HALTED	(1 << 12)	/* HCHalted bit */
53 #define EHCI_USBINTR		8		/* interrupt register */
54 #define EHCI_CONFIGFLAG		0x40		/* configured flag register */
55 #define EHCI_USBLEGSUP		0		/* legacy support register */
56 #define EHCI_USBLEGSUP_BIOS	(1 << 16)	/* BIOS semaphore */
57 #define EHCI_USBLEGSUP_OS	(1 << 24)	/* OS semaphore */
58 #define EHCI_USBLEGCTLSTS	4		/* legacy control/status */
59 #define EHCI_USBLEGCTLSTS_SOOE	(1 << 13)	/* SMI on ownership change */
60 
61 /* Registers used by the AMD PLL quirk below */
62 #define	AB_REG_BAR_LOW		0xe0
63 #define	AB_REG_BAR_HIGH		0xe1
64 #define	AB_REG_BAR_SB700	0xf0
65 #define	AB_INDX(addr)		((addr) + 0x00)
66 #define	AB_DATA(addr)		((addr) + 0x04)
67 #define	AX_INDXC		0x30
68 #define	AX_DATAC		0x34
69 
70 #define	NB_PCIE_INDX_ADDR	0xe0
71 #define	NB_PCIE_INDX_DATA	0xe4
72 #define	PCIE_P_CNTL		0x10040
73 #define	BIF_NB			0x10002
74 #define	NB_PIF0_PWRDOWN_0	0x01100012
75 #define	NB_PIF0_PWRDOWN_1	0x01100013
76 
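/*
 * Intel vendor-specific config registers on switchable-port xHCI hosts:
 * XUSB2PR routes the shared USB 2.0 ports between EHCI and xHCI, and
 * USB3_PSSEN enables the USB 3.0 SuperSpeed terminations.  See
 * usb_enable_xhci_ports() below.
 */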
77 #define USB_INTEL_XUSB2PR      0xD0
78 #define USB_INTEL_USB3_PSSEN   0xD8
79 
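/*
 * State shared by the AMD PLL quirk below.  probe_count reference-counts
 * users of the cached nb_dev/smbus_dev pointers, isoc_reqs counts
 * outstanding requests to keep A-link power management disabled, and
 * everything is protected by amd_lock.
 */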
80 static struct amd_chipset_info {
81 	struct pci_dev	*nb_dev;
82 	struct pci_dev	*smbus_dev;
83 	int nb_type;
84 	int sb_type;
85 	int isoc_reqs;
86 	int probe_count;
87 	int probe_result;
88 } amd_chipset;
89 
90 static DEFINE_SPINLOCK(amd_lock);
91 
92 int usb_amd_find_chipset_info(void)
93 {
94 	u8 rev = 0;
95 	unsigned long flags;
96 	struct amd_chipset_info info;
97 	int ret;
98 
99 	spin_lock_irqsave(&amd_lock, flags);
100 
101 	/* probe only once */
102 	if (amd_chipset.probe_count > 0) {
103 		amd_chipset.probe_count++;
104 		spin_unlock_irqrestore(&amd_lock, flags);
105 		return amd_chipset.probe_result;
106 	}
107 	memset(&info, 0, sizeof(info));
108 	spin_unlock_irqrestore(&amd_lock, flags);
109 
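	/*
	 * Identify the southbridge from its SMBus controller's device ID and
	 * revision.  Only the numeric sb_type matters below; the revision
	 * ranges appear to map types 1, 2 and 3 to SB800-or-newer, Hudson-2
	 * and SB700 class parts respectively.
	 */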
110 	info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
111 	if (info.smbus_dev) {
112 		rev = info.smbus_dev->revision;
113 		if (rev >= 0x40)
114 			info.sb_type = 1;
115 		else if (rev >= 0x30 && rev <= 0x3b)
116 			info.sb_type = 3;
117 	} else {
118 		info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
119 						0x780b, NULL);
120 		if (!info.smbus_dev) {
121 			ret = 0;
122 			goto commit;
123 		}
124 
125 		rev = info.smbus_dev->revision;
126 		if (rev >= 0x11 && rev <= 0x18)
127 			info.sb_type = 2;
128 	}
129 
130 	if (info.sb_type == 0) {
131 		if (info.smbus_dev) {
132 			pci_dev_put(info.smbus_dev);
133 			info.smbus_dev = NULL;
134 		}
135 		ret = 0;
136 		goto commit;
137 	}
138 
139 	info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL);
140 	if (info.nb_dev) {
141 		info.nb_type = 1;
142 	} else {
143 		info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
144 		if (info.nb_dev) {
145 			info.nb_type = 2;
146 		} else {
147 			info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
148 						     0x9600, NULL);
149 			if (info.nb_dev)
150 				info.nb_type = 3;
151 		}
152 	}
153 
154 	ret = info.probe_result = 1;
155 	printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
156 
157 commit:
158 
159 	spin_lock_irqsave(&amd_lock, flags);
160 	if (amd_chipset.probe_count > 0) {
161 		/* race - someone else was faster - drop devices */
162 
163 		/* Mark that we were here */
164 		amd_chipset.probe_count++;
165 		ret = amd_chipset.probe_result;
166 
167 		spin_unlock_irqrestore(&amd_lock, flags);
168 
169 		if (info.nb_dev)
170 			pci_dev_put(info.nb_dev);
171 		if (info.smbus_dev)
172 			pci_dev_put(info.smbus_dev);
173 
174 	} else {
175 		/* no race - commit the result */
176 		info.probe_count++;
177 		amd_chipset = info;
178 		spin_unlock_irqrestore(&amd_lock, flags);
179 	}
180 
181 	return ret;
182 }
183 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
184 
185 /*
186  * The hardware normally enables the A-link power management feature, which
187  * lets the system lower the power consumption in idle states.
188  *
189  * This USB quirk prevents the link from entering that lower power state
190  * during isochronous transfers.
191  *
192  * Without this quirk, isochronous streams on the OHCI/EHCI/xHCI controllers
193  * of some AMD platforms may stutter or have breaks occasionally.
194  */
195 static void usb_amd_quirk_pll(int disable)
196 {
197 	u32 addr, addr_low, addr_high, val;
198 	u32 bit = disable ? 0 : 1;
199 	unsigned long flags;
200 
201 	spin_lock_irqsave(&amd_lock, flags);
202 
203 	if (disable) {
204 		amd_chipset.isoc_reqs++;
205 		if (amd_chipset.isoc_reqs > 1) {
206 			spin_unlock_irqrestore(&amd_lock, flags);
207 			return;
208 		}
209 	} else {
210 		amd_chipset.isoc_reqs--;
211 		if (amd_chipset.isoc_reqs > 0) {
212 			spin_unlock_irqrestore(&amd_lock, flags);
213 			return;
214 		}
215 	}
216 
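	/*
	 * Read the current A-link (AB) power-management setting.  The AB
	 * index/data pair is located either through the PM I/O ports
	 * 0xcd6/0xcd7 (sb_type 1/2) or from the SMBus device's config space
	 * (sb_type 3); a second index/data level (AX_INDXC/AX_DATAC) then
	 * selects inner register 0x40.
	 */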
217 	if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) {
218 		outb_p(AB_REG_BAR_LOW, 0xcd6);
219 		addr_low = inb_p(0xcd7);
220 		outb_p(AB_REG_BAR_HIGH, 0xcd6);
221 		addr_high = inb_p(0xcd7);
222 		addr = addr_high << 8 | addr_low;
223 
224 		outl_p(0x30, AB_INDX(addr));
225 		outl_p(0x40, AB_DATA(addr));
226 		outl_p(0x34, AB_INDX(addr));
227 		val = inl_p(AB_DATA(addr));
228 	} else if (amd_chipset.sb_type == 3) {
229 		pci_read_config_dword(amd_chipset.smbus_dev,
230 					AB_REG_BAR_SB700, &addr);
231 		outl(AX_INDXC, AB_INDX(addr));
232 		outl(0x40, AB_DATA(addr));
233 		outl(AX_DATAC, AB_INDX(addr));
234 		val = inl(AB_DATA(addr));
235 	} else {
236 		spin_unlock_irqrestore(&amd_lock, flags);
237 		return;
238 	}
239 
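	/*
	 * Adjust the power-management bits in the value just read: on the
	 * first "disable" request clear bit 3 and set bits 4 and 9; on the
	 * last "enable" request do the opposite.  The exact bit semantics
	 * are chipset-specific and not documented here.
	 */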
240 	if (disable) {
241 		val &= ~0x08;
242 		val |= (1 << 4) | (1 << 9);
243 	} else {
244 		val |= 0x08;
245 		val &= ~((1 << 4) | (1 << 9));
246 	}
247 	outl_p(val, AB_DATA(addr));
248 
249 	if (!amd_chipset.nb_dev) {
250 		spin_unlock_irqrestore(&amd_lock, flags);
251 		return;
252 	}
253 
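	/*
	 * The northbridge PCIe registers are reached through another
	 * index/data pair in PCI config space (NB_PCIE_INDX_ADDR/DATA).
	 * For nb_type 1 and 3 the PCIE_P_CNTL and BIF_NB registers are
	 * updated; for nb_type 2 the PIF0 power-down fields are toggled.
	 */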
254 	if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) {
255 		addr = PCIE_P_CNTL;
256 		pci_write_config_dword(amd_chipset.nb_dev,
257 					NB_PCIE_INDX_ADDR, addr);
258 		pci_read_config_dword(amd_chipset.nb_dev,
259 					NB_PCIE_INDX_DATA, &val);
260 
261 		val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12));
262 		val |= bit | (bit << 3) | (bit << 12);
263 		val |= ((!bit) << 4) | ((!bit) << 9);
264 		pci_write_config_dword(amd_chipset.nb_dev,
265 					NB_PCIE_INDX_DATA, val);
266 
267 		addr = BIF_NB;
268 		pci_write_config_dword(amd_chipset.nb_dev,
269 					NB_PCIE_INDX_ADDR, addr);
270 		pci_read_config_dword(amd_chipset.nb_dev,
271 					NB_PCIE_INDX_DATA, &val);
272 		val &= ~(1 << 8);
273 		val |= bit << 8;
274 
275 		pci_write_config_dword(amd_chipset.nb_dev,
276 					NB_PCIE_INDX_DATA, val);
277 	} else if (amd_chipset.nb_type == 2) {
278 		addr = NB_PIF0_PWRDOWN_0;
279 		pci_write_config_dword(amd_chipset.nb_dev,
280 					NB_PCIE_INDX_ADDR, addr);
281 		pci_read_config_dword(amd_chipset.nb_dev,
282 					NB_PCIE_INDX_DATA, &val);
283 		if (disable)
284 			val &= ~(0x3f << 7);
285 		else
286 			val |= 0x3f << 7;
287 
288 		pci_write_config_dword(amd_chipset.nb_dev,
289 					NB_PCIE_INDX_DATA, val);
290 
291 		addr = NB_PIF0_PWRDOWN_1;
292 		pci_write_config_dword(amd_chipset.nb_dev,
293 					NB_PCIE_INDX_ADDR, addr);
294 		pci_read_config_dword(amd_chipset.nb_dev,
295 					NB_PCIE_INDX_DATA, &val);
296 		if (disable)
297 			val &= ~(0x3f << 7);
298 		else
299 			val |= 0x3f << 7;
300 
301 		pci_write_config_dword(amd_chipset.nb_dev,
302 					NB_PCIE_INDX_DATA, val);
303 	}
304 
305 	spin_unlock_irqrestore(&amd_lock, flags);
306 	return;
307 }
308 
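/*
 * Expected usage (a sketch, not enforced here): a host controller driver
 * calls usb_amd_quirk_pll_disable() when isochronous streaming starts and
 * usb_amd_quirk_pll_enable() when it stops.  The isoc_reqs counter lets
 * the calls nest across controllers.
 */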
309 void usb_amd_quirk_pll_disable(void)
310 {
311 	usb_amd_quirk_pll(1);
312 }
313 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable);
314 
315 void usb_amd_quirk_pll_enable(void)
316 {
317 	usb_amd_quirk_pll(0);
318 }
319 EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable);
320 
321 void usb_amd_dev_put(void)
322 {
323 	struct pci_dev *nb, *smbus;
324 	unsigned long flags;
325 
326 	spin_lock_irqsave(&amd_lock, flags);
327 
328 	amd_chipset.probe_count--;
329 	if (amd_chipset.probe_count > 0) {
330 		spin_unlock_irqrestore(&amd_lock, flags);
331 		return;
332 	}
333 
334 	/* Save them so we can pci_dev_put() them outside of the spinlock */
335 	nb    = amd_chipset.nb_dev;
336 	smbus = amd_chipset.smbus_dev;
337 
338 	amd_chipset.nb_dev = NULL;
339 	amd_chipset.smbus_dev = NULL;
340 	amd_chipset.nb_type = 0;
341 	amd_chipset.sb_type = 0;
342 	amd_chipset.isoc_reqs = 0;
343 	amd_chipset.probe_result = 0;
344 
345 	spin_unlock_irqrestore(&amd_lock, flags);
346 
347 	if (nb)
348 		pci_dev_put(nb);
349 	if (smbus)
350 		pci_dev_put(smbus);
351 }
352 EXPORT_SYMBOL_GPL(usb_amd_dev_put);
353 
354 /*
355  * Make sure the controller is completely inactive, unable to
356  * generate interrupts or do DMA.
357  */
358 void uhci_reset_hc(struct pci_dev *pdev, unsigned long base)
359 {
360 	/* Turn off PIRQ enable and SMI enable.  (This also turns off the
361 	 * BIOS's USB Legacy Support.)  Turn off all the R/WC bits too.
362 	 */
363 	pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC);
364 
365 	/* Reset the HC - this will force us to get a
366 	 * new notification of any already connected
367 	 * ports due to the virtual disconnect that it
368 	 * implies.
369 	 */
370 	outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD);
371 	mb();
372 	udelay(5);
373 	if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET)
374 		dev_warn(&pdev->dev, "HCRESET not completed yet!\n");
375 
376 	/* Just to be safe, disable interrupt requests and
377 	 * make sure the controller is stopped.
378 	 */
379 	outw(0, base + UHCI_USBINTR);
380 	outw(0, base + UHCI_USBCMD);
381 }
382 EXPORT_SYMBOL_GPL(uhci_reset_hc);
383 
384 /*
385  * Initialize a controller that was newly discovered or has just been
386  * resumed.  In either case we can't be sure of its previous state.
387  *
388  * Returns: 1 if the controller was reset, 0 otherwise.
389  */
390 int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base)
391 {
392 	u16 legsup;
393 	unsigned int cmd, intr;
394 
395 	/*
396 	 * When restarting a suspended controller, we expect all the
397 	 * settings to be the same as we left them:
398 	 *
399 	 *	PIRQ and SMI disabled, no R/W bits set in USBLEGSUP;
400 	 *	Controller is stopped and configured with EGSM set;
401 	 *	No interrupts enabled except possibly Resume Detect.
402 	 *
403 	 * If any of these conditions are violated we do a complete reset.
404 	 */
405 	pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup);
406 	if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) {
407 		dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n",
408 				__func__, legsup);
409 		goto reset_needed;
410 	}
411 
412 	cmd = inw(base + UHCI_USBCMD);
413 	if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) ||
414 			!(cmd & UHCI_USBCMD_EGSM)) {
415 		dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n",
416 				__func__, cmd);
417 		goto reset_needed;
418 	}
419 
420 	intr = inw(base + UHCI_USBINTR);
421 	if (intr & (~UHCI_USBINTR_RESUME)) {
422 		dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n",
423 				__func__, intr);
424 		goto reset_needed;
425 	}
426 	return 0;
427 
428 reset_needed:
429 	dev_dbg(&pdev->dev, "Performing full reset\n");
430 	uhci_reset_hc(pdev, base);
431 	return 1;
432 }
433 EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc);
434 
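/* Check whether I/O-port or memory decoding is enabled in PCI_COMMAND. */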
435 static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
436 {
437 	u16 cmd;
438 	return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask);
439 }
440 
441 #define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO)
442 #define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY)
443 
444 static void __devinit quirk_usb_handoff_uhci(struct pci_dev *pdev)
445 {
446 	unsigned long base = 0;
447 	int i;
448 
449 	if (!pio_enabled(pdev))
450 		return;
451 
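	/* The UHCI registers sit behind an I/O BAR; use the first one found. */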
452 	for (i = 0; i < PCI_ROM_RESOURCE; i++)
453 		if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
454 			base = pci_resource_start(pdev, i);
455 			break;
456 		}
457 
458 	if (base)
459 		uhci_check_and_reset_hc(pdev, base);
460 }
461 
462 static int __devinit mmio_resource_enabled(struct pci_dev *pdev, int idx)
463 {
464 	return pci_resource_start(pdev, idx) && mmio_enabled(pdev);
465 }
466 
467 static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev)
468 {
469 	void __iomem *base;
470 	u32 control;
471 	u32 fminterval;
472 	int cnt;
473 
474 	if (!mmio_resource_enabled(pdev, 0))
475 		return;
476 
477 	base = pci_ioremap_bar(pdev, 0);
478 	if (base == NULL)
479 		return;
480 
481 	control = readl(base + OHCI_CONTROL);
482 
483 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
484 #ifdef __hppa__
485 #define	OHCI_CTRL_MASK		(OHCI_CTRL_RWC | OHCI_CTRL_IR)
486 #else
487 #define	OHCI_CTRL_MASK		OHCI_CTRL_RWC
488 
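	/*
	 * If the SMM firmware currently owns the controller (IR is set),
	 * request an ownership change and wait for it to hand the
	 * controller over.
	 */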
489 	if (control & OHCI_CTRL_IR) {
490 		int wait_time = 500; /* arbitrary; 500 ms total */
491 		writel(OHCI_INTR_OC, base + OHCI_INTRENABLE);
492 		writel(OHCI_OCR, base + OHCI_CMDSTATUS);
493 		while (wait_time > 0 &&
494 				readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) {
495 			wait_time -= 10;
496 			msleep(10);
497 		}
498 		if (wait_time <= 0)
499 			dev_warn(&pdev->dev, "OHCI: BIOS handoff failed"
500 					" (BIOS bug?) %08x\n",
501 					readl(base + OHCI_CONTROL));
502 	}
503 #endif
504 
505 	/* disable interrupts */
506 	writel((u32) ~0, base + OHCI_INTRDISABLE);
507 
508 	/* Reset the USB bus, if the controller isn't already in RESET */
509 	if (control & OHCI_HCFS) {
510 		/* Go into RESET, preserving RWC (and possibly IR) */
511 		writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
512 		readl(base + OHCI_CONTROL);
513 
514 		/* drive bus reset for at least 50 ms (7.1.7.5) */
515 		msleep(50);
516 	}
517 
518 	/* software reset of the controller, preserving HcFmInterval */
519 	fminterval = readl(base + OHCI_FMINTERVAL);
520 	writel(OHCI_HCR, base + OHCI_CMDSTATUS);
521 
522 	/* reset requires max 10 us delay */
523 	for (cnt = 30; cnt > 0; --cnt) {	/* ... allow extra time */
524 		if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0)
525 			break;
526 		udelay(1);
527 	}
528 	writel(fminterval, base + OHCI_FMINTERVAL);
529 
530 	/* Now the controller is safely in SUSPEND and nothing can wake it up */
531 	iounmap(base);
532 }
533 
534 static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = {
535 	{
536 		/*  Pegatron Lucid (ExoPC) */
537 		.matches = {
538 			DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"),
539 			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"),
540 		},
541 	},
542 	{
543 		/*  Pegatron Lucid (Ordissimo AIRIS) */
544 		.matches = {
545 			DMI_MATCH(DMI_BOARD_NAME, "M11JB"),
546 			DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"),
547 		},
548 	},
549 	{ }
550 };
551 
552 static void __devinit ehci_bios_handoff(struct pci_dev *pdev,
553 					void __iomem *op_reg_base,
554 					u32 cap, u8 offset)
555 {
556 	int try_handoff = 1, tried_handoff = 0;
557 
558 	/* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
559 	 * the handoff on its unused controller.  Skip it. */
560 	if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
561 		if (dmi_check_system(ehci_dmi_nohandoff_table))
562 			try_handoff = 0;
563 	}
564 
565 	if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) {
566 		dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n");
567 
568 #if 0
569 /* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on,
570  * but that seems dubious in general (the BIOS left it off intentionally)
571  * and is known to prevent some systems from booting.  so we won't do this
572  * unless maybe we can determine when we're on a system that needs SMI forced.
573  */
574 		/* BIOS workaround (?): be sure the pre-Linux code
575 		 * receives the SMI
576 		 */
577 		pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val);
578 		pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS,
579 				       val | EHCI_USBLEGCTLSTS_SOOE);
580 #endif
581 
582 		/* some systems get upset if this semaphore is
583 		 * set for any other reason than forcing a BIOS
584 		 * handoff..
585 		 */
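		/* offset + 3 is the byte that holds the OS-owned semaphore
		 * (EHCI_USBLEGSUP_OS, bit 24 of USBLEGSUP).
		 */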
586 		pci_write_config_byte(pdev, offset + 3, 1);
587 	}
588 
589 	/* if boot firmware now owns EHCI, spin till it hands it over. */
590 	if (try_handoff) {
591 		int msec = 1000;
592 		while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
593 			tried_handoff = 1;
594 			msleep(10);
595 			msec -= 10;
596 			pci_read_config_dword(pdev, offset, &cap);
597 		}
598 	}
599 
600 	if (cap & EHCI_USBLEGSUP_BIOS) {
601 		/* well, possibly buggy BIOS... try to shut it down,
602 		 * and hope nothing goes too wrong
603 		 */
604 		if (try_handoff)
605 			dev_warn(&pdev->dev, "EHCI: BIOS handoff failed"
606 				 " (BIOS bug?) %08x\n", cap);
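		/* Forcibly clear the BIOS-owned semaphore (bit 16 of
		 * USBLEGSUP, i.e. the byte at offset + 2).
		 */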
607 		pci_write_config_byte(pdev, offset + 2, 0);
608 	}
609 
610 	/* just in case, always disable EHCI SMIs */
611 	pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0);
612 
613 	/* If the BIOS ever owned the controller then we can't expect
614 	 * any power sessions to remain intact.
615 	 */
616 	if (tried_handoff)
617 		writel(0, op_reg_base + EHCI_CONFIGFLAG);
618 }
619 
620 static void __devinit quirk_usb_disable_ehci(struct pci_dev *pdev)
621 {
622 	void __iomem *base, *op_reg_base;
623 	u32	hcc_params, cap, val;
624 	u8	offset, cap_length;
625 	int	wait_time, count = 256/4;
626 
627 	if (!mmio_resource_enabled(pdev, 0))
628 		return;
629 
630 	base = pci_ioremap_bar(pdev, 0);
631 	if (base == NULL)
632 		return;
633 
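	/* CAPLENGTH, the first byte of the capability registers, gives the
	 * offset of the operational register set.
	 */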
634 	cap_length = readb(base);
635 	op_reg_base = base + cap_length;
636 
637 	/* EHCI 0.96 and later may have "extended capabilities";
638 	 * spec section 5.1 explains the BIOS handoff, e.g. for
639 	 * booting from a USB disk or using a USB keyboard.
640 	 */
641 	hcc_params = readl(base + EHCI_HCC_PARAMS);
642 	offset = (hcc_params >> 8) & 0xff;
643 	while (offset && --count) {
644 		pci_read_config_dword(pdev, offset, &cap);
645 
646 		switch (cap & 0xff) {
647 		case 1:
648 			ehci_bios_handoff(pdev, op_reg_base, cap, offset);
649 			break;
650 		case 0: /* Illegal reserved cap, set cap=0 so we exit */
651 			cap = 0; /* then fallthrough... */
652 		default:
653 			dev_warn(&pdev->dev, "EHCI: unrecognized capability "
654 				 "%02x\n", cap & 0xff);
655 		}
656 		offset = (cap >> 8) & 0xff;
657 	}
658 	if (!count)
659 		dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n");
660 
661 	/*
662 	 * halt EHCI & disable its interrupts in any case
663 	 */
664 	val = readl(op_reg_base + EHCI_USBSTS);
665 	if ((val & EHCI_USBSTS_HALTED) == 0) {
666 		val = readl(op_reg_base + EHCI_USBCMD);
667 		val &= ~EHCI_USBCMD_RUN;
668 		writel(val, op_reg_base + EHCI_USBCMD);
669 
670 		wait_time = 2000;
671 		do {
672 			writel(0x3f, op_reg_base + EHCI_USBSTS);
673 			udelay(100);
674 			wait_time -= 100;
675 			val = readl(op_reg_base + EHCI_USBSTS);
676 			if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) {
677 				break;
678 			}
679 		} while (wait_time > 0);
680 	}
681 	writel(0, op_reg_base + EHCI_USBINTR);
682 	writel(0x3f, op_reg_base + EHCI_USBSTS);
683 
684 	iounmap(base);
685 }
686 
687 /*
688  * handshake - spin reading a register until handshake completes
689  * @ptr: address of hc register to be read
690  * @mask: bits to look at in result of read
691  * @done: value of those bits when handshake succeeds
692  * @wait_usec: timeout in microseconds
693  * @delay_usec: delay in microseconds to wait between polling
694  *
695  * Polls a register every delay_usec microseconds.
696  * Returns 0 when the mask bits have the value done.
697  * Returns -ETIMEDOUT if this condition is not true after
698  * wait_usec microseconds have passed.
699  */
700 static int handshake(void __iomem *ptr, u32 mask, u32 done,
701 		int wait_usec, int delay_usec)
702 {
703 	u32	result;
704 
705 	do {
706 		result = readl(ptr);
707 		result &= mask;
708 		if (result == done)
709 			return 0;
710 		udelay(delay_usec);
711 		wait_usec -= delay_usec;
712 	} while (wait_usec > 0);
713 	return -ETIMEDOUT;
714 }
715 
716 #define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI	0x8C31
717 
718 bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
719 {
720 	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
721 		pdev->vendor == PCI_VENDOR_ID_INTEL &&
722 		pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
723 }
724 
725 /* The Intel Lynx Point chipset also has switchable ports. */
726 bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
727 {
728 	return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
729 		pdev->vendor == PCI_VENDOR_ID_INTEL &&
730 		pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI;
731 }
732 
733 bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
734 {
735 	return usb_is_intel_ppt_switchable_xhci(pdev) ||
736 		usb_is_intel_lpt_switchable_xhci(pdev);
737 }
738 EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
739 
740 /*
741  * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
742  * share some number of ports.  These ports can be switched between either
743  * controller.  Not all of the ports under the EHCI host controller may be
744  * switchable.
745  *
746  * The ports should be switched over to xHCI before PCI probes for any device
747  * start.  This avoids active devices under EHCI being disconnected during the
748  * port switchover, which could cause loss of data on USB storage devices, or
749  * failed boot when the root file system is on a USB mass storage device and is
750  * enumerated under EHCI first.
751  *
752  * We write into the xHC's PCI configuration space in some Intel-specific
753  * registers to switch the ports over.  The USB 3.0 terminations and the USB
754  * 2.0 data wires are switched separately.  We want to enable the SuperSpeed
755  * terminations before switching the USB 2.0 wires over, so that USB 3.0
756  * devices connect at SuperSpeed, rather than at USB 2.0 speeds.
757  */
758 void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
759 {
760 	u32		ports_available;
761 
762 	/* Don't switch the ports over if the user hasn't compiled the xHCI
763 	 * driver.  Otherwise they will see "dead" USB ports that don't power
764 	 * the devices.
765 	 */
766 	if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) {
767 		dev_warn(&xhci_pdev->dev,
768 				"CONFIG_USB_XHCI_HCD is turned off, "
769 				"defaulting to EHCI.\n");
770 		dev_warn(&xhci_pdev->dev,
771 				"USB 3.0 devices will work at USB 2.0 speeds.\n");
772 		return;
773 	}
774 
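	/*
	 * Request every port; bits for ports that are not actually
	 * switchable are presumably ignored by the chipset, and the
	 * read-back below reports what really took effect.
	 */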
775 	ports_available = 0xffffffff;
776 	/* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable
777 	 * Register, to turn on SuperSpeed terminations for all
778 	 * available ports.
779 	 */
780 	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
781 			ports_available);
782 
783 	pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
784 			&ports_available);
785 	dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled "
786 			"under xHCI: 0x%x\n", ports_available);
787 
788 	ports_available = 0xffffffff;
789 	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
790 	 * switch the USB 2.0 power and data lines over to the xHCI
791 	 * host.
792 	 */
793 	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
794 			ports_available);
795 
796 	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
797 			&ports_available);
798 	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
799 			"to xHCI: 0x%x\n", ports_available);
800 }
801 EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
802 
803 void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
804 {
805 	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
806 	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
807 }
808 EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
809 
810 /**
811  * PCI Quirks for xHCI.
812  *
813  * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
814  * It signals to the BIOS that the OS wants control of the host controller,
815  * and then waits 5 seconds for the BIOS to hand over control.
816  * If we time out, assume the BIOS is broken and take control anyway.
817  */
818 static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
819 {
820 	void __iomem *base;
821 	int ext_cap_offset;
822 	void __iomem *op_reg_base;
823 	u32 val;
824 	int timeout;
825 
826 	if (!mmio_resource_enabled(pdev, 0))
827 		return;
828 
829 	base = ioremap_nocache(pci_resource_start(pdev, 0),
830 				pci_resource_len(pdev, 0));
831 	if (base == NULL)
832 		return;
833 
834 	/*
835 	 * Find the Legacy Support Capability register -
836 	 * this is optional for xHCI host controllers.
837 	 */
838 	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
839 	do {
840 		if (!ext_cap_offset)
841 			/* We've reached the end of the extended capabilities */
842 			goto hc_init;
843 		val = readl(base + ext_cap_offset);
844 		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
845 			break;
846 		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
847 	} while (1);
848 
849 	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
850 	if (val & XHCI_HC_BIOS_OWNED) {
851 		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
852 
853 		/* Wait for 5 seconds with 10 microsecond polling interval */
854 		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
855 				0, 5 * 1000 * 1000, 10);
856 
857 		/* Assume a buggy BIOS and take HC ownership anyway */
858 		if (timeout) {
859 			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
860 					" (BIOS bug ?) %08x\n", val);
861 			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
862 		}
863 	}
864 
865 	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
866 	/* Mask off (turn off) any enabled SMIs */
867 	val &= XHCI_LEGACY_DISABLE_SMI;
868 	/* The SMI event bits are RW1C; set them so the write below clears them */
869 	val |= XHCI_LEGACY_SMI_EVENTS;
870 	/* Disable any BIOS SMIs and clear all SMI events */
871 	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
872 
873 	if (usb_is_intel_switchable_xhci(pdev))
874 		usb_enable_xhci_ports(pdev);
875 hc_init:
876 	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
877 
878 	/* Wait for the host controller to be ready before writing any
879 	 * operational or runtime registers.  Wait 5 seconds and no more.
880 	 */
881 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
882 			5 * 1000 * 1000, 10);
883 	/* Assume a buggy HC and start HC initialization anyway */
884 	if (timeout) {
885 		val = readl(op_reg_base + XHCI_STS_OFFSET);
886 		dev_warn(&pdev->dev,
887 				"xHCI HW not ready after 5 sec (HC bug?) "
888 				"status = 0x%x\n", val);
889 	}
890 
891 	/* Send the halt and disable interrupts command */
892 	val = readl(op_reg_base + XHCI_CMD_OFFSET);
893 	val &= ~(XHCI_CMD_RUN | XHCI_IRQS);
894 	writel(val, op_reg_base + XHCI_CMD_OFFSET);
895 
896 	/* Wait for the HC to halt - poll every 125 usec (one microframe). */
897 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1,
898 			XHCI_MAX_HALT_USEC, 125);
899 	if (timeout) {
900 		val = readl(op_reg_base + XHCI_STS_OFFSET);
901 		dev_warn(&pdev->dev,
902 				"xHCI HW did not halt within %d usec "
903 				"status = 0x%x\n", XHCI_MAX_HALT_USEC, val);
904 	}
905 
906 	iounmap(base);
907 }
908 
909 static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
910 {
911 	/* Skip the Netlogic MIPS SoC's internal PCI USB controller.
912 	 * This device does not need/support the EHCI/OHCI handoff.
913 	 */
914 	if (pdev->vendor == 0x184e)	/* vendor Netlogic */
915 		return;
916 	if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
917 			pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
918 			pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
919 			pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
920 		return;
921 
922 	if (pci_enable_device(pdev) < 0) {
923 		dev_warn(&pdev->dev, "Can't enable PCI device, "
924 				"BIOS handoff failed.\n");
925 		return;
926 	}
927 	if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
928 		quirk_usb_handoff_uhci(pdev);
929 	else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
930 		quirk_usb_handoff_ohci(pdev);
931 	else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI)
932 		quirk_usb_disable_ehci(pdev);
933 	else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
934 		quirk_usb_handoff_xhci(pdev);
935 	pci_disable_device(pdev);
936 }
937 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
938 			PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
939