xref: /linux/drivers/pci/quirks.c (revision 04eeb606a8383b306f4bc6991da8231b5f3924b0)
1 /*
2  *  This file contains work-arounds for many known PCI hardware
3  *  bugs.  Devices present only on certain architectures (host
4  *  bridges et cetera) should be handled in arch-specific code.
5  *
6  *  Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
7  *
8  *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
9  *
10  *  Init/reset quirks for USB host controllers should be in the
11  *  USB quirks file, where their drivers can access and reuse them.
12  */
13 
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/export.h>
17 #include <linux/pci.h>
18 #include <linux/init.h>
19 #include <linux/delay.h>
20 #include <linux/acpi.h>
21 #include <linux/kallsyms.h>
22 #include <linux/dmi.h>
23 #include <linux/pci-aspm.h>
24 #include <linux/ioport.h>
25 #include <linux/sched.h>
26 #include <linux/ktime.h>
27 #include <linux/mm.h>
28 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
29 #include "pci.h"
30 
31 /*
32  * Decoding should be disabled for a PCI device during BAR sizing to avoid
33  * conflicts. But doing so may cause problems for host bridges and perhaps other
34  * key system devices. For devices that need to have MMIO decoding always on,
35  * we need to set the dev->mmio_always_on bit.
36  */
37 static void quirk_mmio_always_on(struct pci_dev *dev)
38 {
39 	dev->mmio_always_on = 1;
40 }
41 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
42 				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
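/*
 * Note: the DECLARE_PCI_FIXUP_* macros used throughout this file register a
 * quirk against a vendor/device ID (and, for the _CLASS_ variants, a class
 * code) in a dedicated section; the PCI core then runs matching quirks at the
 * named phase: EARLY/HEADER/FINAL during enumeration, ENABLE when a driver
 * enables the device, and SUSPEND/RESUME/RESUME_EARLY around power
 * transitions.
 */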
43 
44 /* The Mellanox Tavor device gives false positive parity errors.
45  * Mark this device with a broken_parity_status to allow the
46  * PCI scanning code to "skip" this now blacklisted device.
47  */
48 static void quirk_mellanox_tavor(struct pci_dev *dev)
49 {
50 	dev->broken_parity_status = 1;	/* This device gives false positives */
51 }
52 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
53 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
54 
55 /* Deal with broken BIOSes that neglect to enable passive release,
56    which can cause problems in combination with the 82441FX/PPro MTRRs */
57 static void quirk_passive_release(struct pci_dev *dev)
58 {
59 	struct pci_dev *d = NULL;
60 	unsigned char dlc;
61 
62 	/* We have to make sure a particular bit is set in the PIIX3
63 	   ISA bridge, so we have to go out and find it. */
64 	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
65 		pci_read_config_byte(d, 0x82, &dlc);
66 		if (!(dlc & 1<<1)) {
67 			dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
68 			dlc |= 1<<1;
69 			pci_write_config_byte(d, 0x82, dlc);
70 		}
71 	}
72 }
73 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
74 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release);
75 
76 /*  The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
77     but VIA don't answer queries. If you happen to have good contacts at VIA
78     ask them for me please -- Alan
79 
80     This appears to be BIOS-dependent rather than version-dependent, so
81     presumably there is a chipset-level fix */
82 
83 static void quirk_isa_dma_hangs(struct pci_dev *dev)
84 {
85 	if (!isa_dma_bridge_buggy) {
86 		isa_dma_bridge_buggy = 1;
87 		dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
88 	}
89 }
90 	/*
91 	 * It's not totally clear which chipsets are the problematic ones.
92 	 * We know 82C586 and 82C596 variants are affected.
93 	 */
94 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_0,	quirk_isa_dma_hangs);
95 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C596,	quirk_isa_dma_hangs);
96 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82371SB_0,  quirk_isa_dma_hangs);
97 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1533,		quirk_isa_dma_hangs);
98 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_1,	quirk_isa_dma_hangs);
99 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_2,	quirk_isa_dma_hangs);
100 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_3,	quirk_isa_dma_hangs);
101 
102 /*
103  * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
104  * for some HT machines to use C4 w/o hanging.
105  */
106 static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
107 {
108 	u32 pmbase;
109 	u16 pm1a;
110 
111 	pci_read_config_dword(dev, 0x40, &pmbase);
112 	pmbase = pmbase & 0xff80;
113 	pm1a = inw(pmbase);
114 
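	/* PM1a_STS bits are write-one-to-clear, so writing BM_STS (bit 4) back clears it */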
115 	if (pm1a & 0x10) {
116 		dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
117 		outw(0x10, pmbase);
118 	}
119 }
120 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
121 
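/*
 * pci_pci_problems is a global bitmask; drivers that do device-to-device
 * (PCI-to-PCI) DMA, such as framegrabbers, consult it to avoid transfer
 * modes known to be broken on the chipsets flagged below.
 */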
122 /*
123  *	Chipsets where PCI->PCI transfers vanish or hang
124  */
125 static void quirk_nopcipci(struct pci_dev *dev)
126 {
127 	if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
128 		dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
129 		pci_pci_problems |= PCIPCI_FAIL;
130 	}
131 }
132 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5597,		quirk_nopcipci);
133 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_496,		quirk_nopcipci);
134 
135 static void quirk_nopciamd(struct pci_dev *dev)
136 {
137 	u8 rev;
138 	pci_read_config_byte(dev, 0x08, &rev);
139 	if (rev == 0x13) {
140 		/* Erratum 24 */
141 		dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
142 		pci_pci_problems |= PCIAGP_FAIL;
143 	}
144 }
145 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8151_0,	quirk_nopciamd);
146 
147 /*
148  *	Triton requires workarounds to be used by the drivers
149  */
150 static void quirk_triton(struct pci_dev *dev)
151 {
152 	if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
153 		dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
154 		pci_pci_problems |= PCIPCI_TRITON;
155 	}
156 }
157 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82437,	quirk_triton);
158 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82437VX,	quirk_triton);
159 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439,	quirk_triton);
160 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82439TX,	quirk_triton);
161 
162 /*
163  *	VIA Apollo KT133 needs PCI latency patch
164  *	Made according to a Windows driver-based patch by George E. Breese
165  *	see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
166  *	and http://www.georgebreese.com/net/software/#PCI
167  *	Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
168  *	the info on which Mr Breese based his work.
169  *
170  *	Updated based on further information from the site and also on
171  *	information provided by VIA
172  */
173 static void quirk_vialatency(struct pci_dev *dev)
174 {
175 	struct pci_dev *p;
176 	u8 busarb;
177 	/* Ok we have a potential problem chipset here. Now see if we have
178 	   a buggy southbridge */
179 
180 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
181 	if (p != NULL) {
182 		/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
183 		/* Check for buggy part revisions */
184 		if (p->revision < 0x40 || p->revision > 0x42)
185 			goto exit;
186 	} else {
187 		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
188 		if (p == NULL)	/* No problem parts */
189 			goto exit;
190 		/* Check for buggy part revisions */
191 		if (p->revision < 0x10 || p->revision > 0x12)
192 			goto exit;
193 	}
194 
195 	/*
196 	 *	Ok we have the problem. Now set the PCI master grant to
197 	 *	occur every master grant. The apparent bug is that under high
198 	 *	PCI load (quite common in Linux of course) you can get data
199  *	loss when the CPU is held off the bus for 3 bus master requests.
200 	 *	This happens to include the IDE controllers....
201 	 *
202 	 *	VIA only apply this fix when an SB Live! is present but under
203 	 *	both Linux and Windows this isn't enough, and we have seen
204 	 *	corruption without SB Live! but with things like 3 UDMA IDE
205 	 *	controllers. So we ignore that bit of the VIA recommendation..
206 	 */
207 
208 	pci_read_config_byte(dev, 0x76, &busarb);
209 	/* Set bits 5 and 4 of byte 0x76 to 01b:
210 	   "Master priority rotation on every PCI master grant" */
211 	busarb &= ~(1<<5);
212 	busarb |= (1<<4);
213 	pci_write_config_byte(dev, 0x76, busarb);
214 	dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
215 exit:
216 	pci_dev_put(p);
217 }
218 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vialatency);
219 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency);
220 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency);
221 /* Must restore this on a resume from RAM */
222 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vialatency);
223 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency);
224 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency);
225 
226 /*
227  *	VIA Apollo VP3 needs ETBF on BT848/878
228  */
229 static void quirk_viaetbf(struct pci_dev *dev)
230 {
231 	if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
232 		dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
233 		pci_pci_problems |= PCIPCI_VIAETBF;
234 	}
235 }
236 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_viaetbf);
237 
238 static void quirk_vsfx(struct pci_dev *dev)
239 {
240 	if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
241 		dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
242 		pci_pci_problems |= PCIPCI_VSFX;
243 	}
244 }
245 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C576,	quirk_vsfx);
246 
247 /*
248  *	Ali Magik requires workarounds to be used by the drivers
249  *	that DMA to AGP space. Latency must be set to 0xA and the Triton
250  *	workaround applied too.
251  *	[Info kindly provided by ALi]
252  */
253 static void quirk_alimagik(struct pci_dev *dev)
254 {
255 	if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
256 		dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
257 		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
258 	}
259 }
260 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1647,		quirk_alimagik);
261 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1651,		quirk_alimagik);
262 
263 /*
264  *	Natoma has some interesting boundary conditions with Zoran stuff
265  *	at least
266  */
267 static void quirk_natoma(struct pci_dev *dev)
268 {
269 	if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
270 		dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
271 		pci_pci_problems |= PCIPCI_NATOMA;
272 	}
273 }
274 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_natoma);
275 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443LX_0,	quirk_natoma);
276 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443LX_1,	quirk_natoma);
277 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_0,	quirk_natoma);
278 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_1,	quirk_natoma);
279 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443BX_2,	quirk_natoma);
280 
281 /*
282  *  This chip can cause PCI parity errors if config register 0xA0 is read
283  *  while DMAs are occurring.
284  */
285 static void quirk_citrine(struct pci_dev *dev)
286 {
287 	dev->cfg_size = 0xA0;
288 }
289 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
290 
291 /*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
292 static void quirk_extend_bar_to_page(struct pci_dev *dev)
293 {
294 	int i;
295 
296 	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {	/* cover all six standard BARs */
297 		struct resource *r = &dev->resource[i];
298 
299 		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
300 			r->end = PAGE_SIZE - 1;
301 			r->start = 0;
302 			r->flags |= IORESOURCE_UNSET;
303 			dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
304 				 i, r);
305 		}
306 	}
307 }
308 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
309 
310 /*
311  *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
312  *  If it's needed, re-allocate the region.
313  */
314 static void quirk_s3_64M(struct pci_dev *dev)
315 {
316 	struct resource *r = &dev->resource[0];
317 
318 	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
319 		r->flags |= IORESOURCE_UNSET;
320 		r->start = 0;
321 		r->end = 0x3ffffff;
322 	}
323 }
324 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_868,		quirk_s3_64M);
325 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_968,		quirk_s3_64M);
326 
327 /*
328  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
329  * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
330  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
331  * (which conflicts w/ BAR1's memory range).
332  */
333 static void quirk_cs5536_vsa(struct pci_dev *dev)
334 {
335 	if (pci_resource_len(dev, 0) != 8) {
336 		struct resource *res = &dev->resource[0];
337 		res->end = res->start + 8 - 1;
338 		dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
339 	}
340 }
341 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
342 
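/*
 * Helper for the chipset quirks below: read a word-sized I/O base register
 * at config offset 'port', align it down to 'size', convert the bus address
 * to a resource, and try to claim it as resource 'nr' so the firmware-assigned
 * range shows up in the resource tree and is not reassigned to anything else.
 */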
343 static void quirk_io_region(struct pci_dev *dev, int port,
344 				unsigned size, int nr, const char *name)
345 {
346 	u16 region;
347 	struct pci_bus_region bus_region;
348 	struct resource *res = dev->resource + nr;
349 
350 	pci_read_config_word(dev, port, &region);
351 	region &= ~(size - 1);
352 
353 	if (!region)
354 		return;
355 
356 	res->name = pci_name(dev);
357 	res->flags = IORESOURCE_IO;
358 
359 	/* Convert from PCI bus to resource space */
360 	bus_region.start = region;
361 	bus_region.end = region + size - 1;
362 	pcibios_bus_to_resource(dev->bus, res, &bus_region);
363 
364 	if (!pci_claim_resource(dev, nr))
365 		dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
366 }
367 
368 /*
369  *	The ATI Northbridge triggers a machine check exception (MCE) on the
370  *	processor if you even read anywhere between 0x3b0 and 0x3bb, or read 0x3d3
371  */
372 static void quirk_ati_exploding_mce(struct pci_dev *dev)
373 {
374 	dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
375 	/* We must not look at these I/O locations */
376 	request_region(0x3b0, 0x0C, "RadeonIGP");
377 	request_region(0x3d3, 0x01, "RadeonIGP");
378 }
379 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_exploding_mce);
380 
381 /*
382  * Let's make the southbridge information explicit instead
383  * of having to worry about people probing the ACPI areas,
384  * for example.. (Yes, it happens, and if you read the wrong
385  * ACPI register it will put the machine to sleep with no
386  * way of waking it up again. Bummer).
387  *
388  * ALI M7101: Two IO regions pointed to by words at
389  *	0xE0 (64 bytes of ACPI registers)
390  *	0xE2 (32 bytes of SMB registers)
391  */
392 static void quirk_ali7101_acpi(struct pci_dev *dev)
393 {
394 	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
395 	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
396 }
397 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M7101,		quirk_ali7101_acpi);
398 
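/*
 * Decode one of the programmable PIIX4 I/O ranges: the low word of the
 * config dword holds the base, and the mask nibble above it appears to mark
 * address bits ignored during decoding, from which a power-of-two size is
 * derived.  For now the range is only reported, not reserved (see below).
 */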
399 static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
400 {
401 	u32 devres;
402 	u32 mask, size, base;
403 
404 	pci_read_config_dword(dev, port, &devres);
405 	if ((devres & enable) != enable)
406 		return;
407 	mask = (devres >> 16) & 15;
408 	base = devres & 0xffff;
409 	size = 16;
410 	for (;;) {
411 		unsigned bit = size >> 1;
412 		if ((bit & mask) == bit)
413 			break;
414 		size = bit;
415 	}
416 	/*
417 	 * For now we only print it out. Eventually we'll want to
418 	 * reserve it (at least if it's in the 0x1000+ range), but
419 	 * let's get enough confirmation reports first.
420 	 */
421 	base &= -size;
422 	dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base,
423 		 base + size - 1);
424 }
425 
426 static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
427 {
428 	u32 devres;
429 	u32 mask, size, base;
430 
431 	pci_read_config_dword(dev, port, &devres);
432 	if ((devres & enable) != enable)
433 		return;
434 	base = devres & 0xffff0000;
435 	mask = (devres & 0x3f) << 16;
436 	size = 128 << 16;
437 	for (;;) {
438 		unsigned bit = size >> 1;
439 		if ((bit & mask) == bit)
440 			break;
441 		size = bit;
442 	}
443 	/*
444 	 * For now we only print it out. Eventually we'll want to
445 	 * reserve it, but let's get enough confirmation reports first.
446 	 */
447 	base &= -size;
448 	dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base,
449 		 base + size - 1);
450 }
451 
452 /*
453  * PIIX4 ACPI: Two IO regions pointed to by longwords at
454  *	0x40 (64 bytes of ACPI registers)
455  *	0x90 (16 bytes of SMB registers)
456  * and a few strange programmable PIIX4 device resources.
457  */
458 static void quirk_piix4_acpi(struct pci_dev *dev)
459 {
460 	u32 res_a;
461 
462 	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
463 	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
464 
465 	/* Device resource A has enables for some of the other ones */
466 	pci_read_config_dword(dev, 0x5c, &res_a);
467 
468 	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
469 	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
470 
471 	/* Device resource D is just bitfields for static resources */
472 
473 	/* Device 12 enabled? */
474 	if (res_a & (1 << 29)) {
475 		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
476 		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
477 	}
478 	/* Device 13 enabled? */
479 	if (res_a & (1 << 30)) {
480 		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
481 		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
482 	}
483 	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
484 	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
485 }
486 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	quirk_piix4_acpi);
487 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82443MX_3,	quirk_piix4_acpi);
488 
489 #define ICH_PMBASE	0x40
490 #define ICH_ACPI_CNTL	0x44
491 #define  ICH4_ACPI_EN	0x10
492 #define  ICH6_ACPI_EN	0x80
493 #define ICH4_GPIOBASE	0x58
494 #define ICH4_GPIO_CNTL	0x5c
495 #define  ICH4_GPIO_EN	0x10
496 #define ICH6_GPIOBASE	0x48
497 #define ICH6_GPIO_CNTL	0x4c
498 #define  ICH6_GPIO_EN	0x10
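/*
 * Config-space offsets of the ACPI (PMBASE) and GPIO base address registers
 * in Intel ICH LPC bridges, plus the control-register bits that enable
 * decoding of each region; used by the ICH4 and ICH6+ quirks below.
 */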
499 
500 /*
501  * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Two IO regions pointed to by longwords at
502  *	0x40 (128 bytes of ACPI, GPIO & TCO registers)
503  *	0x58 (64 bytes of GPIO I/O space)
504  */
505 static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
506 {
507 	u8 enable;
508 
509 	/*
510 	 * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
511 	 * with low legacy (and fixed) ports. We don't know the decoding
512 	 * priority and can't tell whether the legacy device or the one created
513 	 * here is really at that address.  This happens on boards with broken
514 	 * BIOSes.
515 	 */
516 
517 	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
518 	if (enable & ICH4_ACPI_EN)
519 		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
520 				 "ICH4 ACPI/GPIO/TCO");
521 
522 	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
523 	if (enable & ICH4_GPIO_EN)
524 		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
525 				"ICH4 GPIO");
526 }
527 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AA_0,		quirk_ich4_lpc_acpi);
528 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AB_0,		quirk_ich4_lpc_acpi);
529 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_0,		quirk_ich4_lpc_acpi);
530 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_10,	quirk_ich4_lpc_acpi);
531 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_0,		quirk_ich4_lpc_acpi);
532 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_12,	quirk_ich4_lpc_acpi);
533 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_0,		quirk_ich4_lpc_acpi);
534 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_12,	quirk_ich4_lpc_acpi);
535 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801EB_0,		quirk_ich4_lpc_acpi);
536 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_ESB_1,		quirk_ich4_lpc_acpi);
537 
538 static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
539 {
540 	u8 enable;
541 
542 	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
543 	if (enable & ICH6_ACPI_EN)
544 		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
545 				 "ICH6 ACPI/GPIO/TCO");
546 
547 	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
548 	if (enable & ICH6_GPIO_EN)
549 		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
550 				"ICH6 GPIO");
551 }
552 
553 static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
554 {
555 	u32 val;
556 	u32 size, base;
557 
558 	pci_read_config_dword(dev, reg, &val);
559 
560 	/* Enabled? */
561 	if (!(val & 1))
562 		return;
563 	base = val & 0xfffc;
564 	if (dynsize) {
565 		/*
566 		 * This is not correct. It is 16, 32 or 64 bytes depending on
567 		 * register D31:F0:ADh bits 5:4.
568 		 *
569 		 * But this gets us at least _part_ of it.
570 		 */
571 		size = 16;
572 	} else {
573 		size = 128;
574 	}
575 	base &= ~(size-1);
576 
577 	/* Just print it out for now. We should reserve it after more debugging */
578 	dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
579 }
580 
581 static void quirk_ich6_lpc(struct pci_dev *dev)
582 {
583 	/* Shared ACPI/GPIO decode with all ICH6+ */
584 	ich6_lpc_acpi_gpio(dev);
585 
586 	/* ICH6-specific generic IO decode */
587 	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
588 	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
589 }
590 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
591 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
592 
593 static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
594 {
595 	u32 val;
596 	u32 mask, base;
597 
598 	pci_read_config_dword(dev, reg, &val);
599 
600 	/* Enabled? */
601 	if (!(val & 1))
602 		return;
603 
604 	/*
605 	 * IO base in bits 15:2, mask in bits 23:18, both
606 	 * are dword-based
607 	 */
608 	base = val & 0xfffc;
609 	mask = (val >> 16) & 0xfc;
610 	mask |= 3;
611 
612 	/* Just print it out for now. We should reserve it after more debugging */
613 	dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
614 }
615 
616 /* ICH7-10 has the same common LPC generic IO decode registers */
617 static void quirk_ich7_lpc(struct pci_dev *dev)
618 {
619 	/* We share the common ACPI/GPIO decode with ICH6 */
620 	ich6_lpc_acpi_gpio(dev);
621 
622 	/* And have 4 ICH7+ generic decodes */
623 	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
624 	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
625 	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
626 	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
627 }
628 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
629 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
630 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
631 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
632 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
633 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
634 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
635 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
636 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
637 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
638 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
639 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
640 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
641 
642 /*
643  * VIA ACPI: One IO region pointed to by longword at
644  *	0x48 or 0x20 (256 bytes of ACPI registers)
645  */
646 static void quirk_vt82c586_acpi(struct pci_dev *dev)
647 {
648 	if (dev->revision & 0x10)
649 		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
650 				"vt82c586 ACPI");
651 }
652 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_vt82c586_acpi);
653 
654 /*
655  * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at
656  *	0x48 (256 bytes of ACPI registers)
657  *	0x70 (128 bytes of hardware monitoring registers)
658  *	0x90 (16 bytes of SMB registers)
659  */
660 static void quirk_vt82c686_acpi(struct pci_dev *dev)
661 {
662 	quirk_vt82c586_acpi(dev);
663 
664 	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
665 				 "vt82c686 HW-mon");
666 
667 	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
668 }
669 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_vt82c686_acpi);
670 
671 /*
672  * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
673  *	0x88 (128 bytes of power management registers)
674  *	0xd0 (16 bytes of SMB registers)
675  */
676 static void quirk_vt8235_acpi(struct pci_dev *dev)
677 {
678 	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
679 	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
680 }
681 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,	quirk_vt8235_acpi);
682 
683 /*
684  * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast back-to-back:
685  *	Disable fast back-to-back on the secondary bus segment
686  */
687 static void quirk_xio2000a(struct pci_dev *dev)
688 {
689 	struct pci_dev *pdev;
690 	u16 command;
691 
692 	dev_warn(&dev->dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
693 	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
694 		pci_read_config_word(pdev, PCI_COMMAND, &command);
695 		if (command & PCI_COMMAND_FAST_BACK)
696 			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
697 	}
698 }
699 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
700 			quirk_xio2000a);
701 
702 #ifdef CONFIG_X86_IO_APIC
703 
704 #include <asm/io_apic.h>
705 
706 /*
707  * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
708  * devices to the external APIC.
709  *
710  * TODO: When we have device-specific interrupt routers,
711  * this code will go away from quirks.
712  */
713 static void quirk_via_ioapic(struct pci_dev *dev)
714 {
715 	u8 tmp;
716 
717 	if (nr_ioapics < 1)
718 		tmp = 0;    /* nothing routed to external APIC */
719 	else
720 		tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
721 
722 	dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
723 	       tmp == 0 ? "Disa" : "Ena");
724 
725 	/* Offset 0x58: External APIC IRQ output control */
726 	pci_write_config_byte(dev, 0x58, tmp);
727 }
728 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_ioapic);
729 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_ioapic);
730 
731 /*
732  * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit.
733  * This leads to doubled level interrupt rates.
734  * Set this bit to get rid of cycle wastage.
735  * Otherwise harmless.
736  */
737 static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
738 {
739 	u8 misc_control2;
740 #define BYPASS_APIC_DEASSERT 8
741 
742 	pci_read_config_byte(dev, 0x5B, &misc_control2);
743 	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
744 		dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
745 		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
746 	}
747 }
748 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
749 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
750 
751 /*
752  * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
753  * We check all revs >= B0 (but not pre-production parts!) as the bug
754  * is currently marked NoFix.
755  *
756  * We have multiple reports of hangs with this chipset that went away with
757  * noapic specified. For the moment we assume it's the erratum. We may be wrong
758  * of course. However the advice is demonstrably good even if so..
759  */
760 static void quirk_amd_ioapic(struct pci_dev *dev)
761 {
762 	if (dev->revision >= 0x02) {
763 		dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
764 		dev_warn(&dev->dev, "        : booting with the \"noapic\" option\n");
765 	}
766 }
767 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic);
768 
769 static void quirk_ioapic_rmw(struct pci_dev *dev)
770 {
771 	if (dev->devfn == 0 && dev->bus->number == 0)
772 		sis_apic_bug = 1;
773 }
774 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_ANY_ID,			quirk_ioapic_rmw);
775 #endif /* CONFIG_X86_IO_APIC */
776 
777 /*
778  * Some settings of MMRBC can lead to data corruption so block changes.
779  * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
780  */
781 static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
782 {
783 	if (dev->subordinate && dev->revision <= 0x12) {
784 		dev_info(&dev->dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
785 			 dev->revision);
786 		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
787 	}
788 }
789 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
790 
791 /*
792  * FIXME: it is questionable that quirk_via_acpi
793  * is needed.  It shows up as an ISA bridge, and does not
794  * support the PCI_INTERRUPT_LINE register at all.  Therefore
795  * it seems like setting the pci_dev's 'irq' to the
796  * value of the ACPI SCI interrupt is only done for convenience.
797  *	-jgarzik
798  */
799 static void quirk_via_acpi(struct pci_dev *d)
800 {
801 	/*
802 	 * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
803 	 */
804 	u8 irq;
805 	pci_read_config_byte(d, 0x42, &irq);
806 	irq &= 0xf;
807 	if (irq && (irq != 2))
808 		d->irq = irq;
809 }
810 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_via_acpi);
811 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_via_acpi);
812 
813 
814 /*
815  *	VIA bridges which have VLink
816  */
817 
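/*
 * Device-number window on bus 0 occupied by the detected VIA southbridge's
 * internal (VLink-attached) functions; filled in by quirk_via_bridge() and
 * consulted by quirk_via_vlink().  A value of -1 means no VLink bridge was
 * detected.
 */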
818 static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
819 
820 static void quirk_via_bridge(struct pci_dev *dev)
821 {
822 	/* See what bridge we have and find the device ranges */
823 	switch (dev->device) {
824 	case PCI_DEVICE_ID_VIA_82C686:
825 		/* The VT82C686 is special; it attaches to PCI and can have
826 		   any device number. All its subdevices are functions of
827 		   that single device. */
828 		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
829 		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
830 		break;
831 	case PCI_DEVICE_ID_VIA_8237:
832 	case PCI_DEVICE_ID_VIA_8237A:
833 		via_vlink_dev_lo = 15;
834 		break;
835 	case PCI_DEVICE_ID_VIA_8235:
836 		via_vlink_dev_lo = 16;
837 		break;
838 	case PCI_DEVICE_ID_VIA_8231:
839 	case PCI_DEVICE_ID_VIA_8233_0:
840 	case PCI_DEVICE_ID_VIA_8233A:
841 	case PCI_DEVICE_ID_VIA_8233C_0:
842 		via_vlink_dev_lo = 17;
843 		break;
844 	}
845 }
846 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_bridge);
847 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8231,		quirk_via_bridge);
848 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233_0,	quirk_via_bridge);
849 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233A,	quirk_via_bridge);
850 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8233C_0,	quirk_via_bridge);
851 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,		quirk_via_bridge);
852 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_bridge);
853 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237A,	quirk_via_bridge);
854 
855 /**
856  *	quirk_via_vlink		-	VIA VLink IRQ number update
857  *	@dev: PCI device
858  *
859  *	If the device we are dealing with is on a PIC IRQ we need to
860  *	ensure that the IRQ line register, which usually is not relevant
861  *	for PCI cards, is actually written so that interrupts get sent
862  *	to the right place.
863  *	We only do this on systems where a VIA south bridge was detected,
864  *	and only for VIA devices on the motherboard (see quirk_via_bridge
865  *	above).
866  */
867 
868 static void quirk_via_vlink(struct pci_dev *dev)
869 {
870 	u8 irq, new_irq;
871 
872 	/* Check if we have VLink at all */
873 	if (via_vlink_dev_lo == -1)
874 		return;
875 
876 	new_irq = dev->irq;
877 
878 	/* Don't quirk interrupts outside the legacy IRQ range */
879 	if (!new_irq || new_irq > 15)
880 		return;
881 
882 	/* Internal device ? */
883 	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
884 	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
885 		return;
886 
887 	/* This is an internal VLink device on a PIC interrupt. The BIOS
888 	   ought to have set this but may not have, so we redo it */
889 
890 	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
891 	if (new_irq != irq) {
892 		dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
893 			irq, new_irq);
894 		udelay(15);	/* unknown if delay really needed */
895 		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
896 	}
897 }
898 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
899 
900 /*
901  * VIA VT82C598 has its device ID settable and many BIOSes
902  * set it to the ID of VT82C597 for backward compatibility.
903  * We need to switch it off to be able to recognize the real
904  * type of the chip.
905  */
906 static void quirk_vt82c598_id(struct pci_dev *dev)
907 {
908 	pci_write_config_byte(dev, 0xfc, 0);
909 	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
910 }
911 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_vt82c598_id);
912 
913 /*
914  * CardBus controllers have a legacy base address that enables them
915  * to respond as i82365 pcmcia controllers.  We don't want them to
916  * do this even if the Linux CardBus driver is not loaded, because
917  * the Linux i82365 driver does not (and should not) handle CardBus.
918  */
919 static void quirk_cardbus_legacy(struct pci_dev *dev)
920 {
921 	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
922 }
923 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
924 			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
925 DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
926 			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
927 
928 /*
929  * Following the PCI ordering rules is optional on the AMD762. I'm not
930  * sure what the designers were smoking but let's not inhale...
931  *
932  * To be fair to AMD, it follows the spec by default; it's BIOS people
933  * who turn it off!
934  */
935 static void quirk_amd_ordering(struct pci_dev *dev)
936 {
937 	u32 pcic;
938 	pci_read_config_dword(dev, 0x4C, &pcic);
939 	if ((pcic & 6) != 6) {
940 		pcic |= 6;
941 		dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
942 		pci_write_config_dword(dev, 0x4C, pcic);
943 		pci_read_config_dword(dev, 0x84, &pcic);
944 		pcic |= (1 << 23);	/* Required in this mode */
945 		pci_write_config_dword(dev, 0x84, pcic);
946 	}
947 }
948 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
949 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
950 
951 /*
952  *	DreamWorks-provided workaround for the Dunord I-3000 problem
953  *
954  *	This card decodes and responds to addresses not apparently
955  *	assigned to it. We force a larger allocation to ensure that
956  *	nothing gets put too close to it.
957  */
958 static void quirk_dunord(struct pci_dev *dev)
959 {
960 	struct resource *r = &dev->resource[1];
961 
962 	r->flags |= IORESOURCE_UNSET;
963 	r->start = 0;
964 	r->end = 0xffffff;
965 }
966 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD,	PCI_DEVICE_ID_DUNORD_I3000,	quirk_dunord);
967 
968 /*
969  * i82380FB mobile docking controller: its PCI-to-PCI bridge
970  * uses subtractive decoding (transparent), and does indicate this
971  * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
972  * instead of 0x01.
973  */
974 static void quirk_transparent_bridge(struct pci_dev *dev)
975 {
976 	dev->transparent = 1;
977 }
978 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82380FB,	quirk_transparent_bridge);
979 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA,	0x605,	quirk_transparent_bridge);
980 
981 /*
982  * Common misconfiguration of the MediaGX/Geode PCI master that will
983  * reduce PCI bandwidth from 70MB/s to 25MB/s.  See the GXM/GXLV/GX1
984  * datasheets found at http://www.national.com/analog for info on what
985  * these bits do.  <christer@weinigel.se>
986  */
987 static void quirk_mediagx_master(struct pci_dev *dev)
988 {
989 	u8 reg;
990 
991 	pci_read_config_byte(dev, 0x41, &reg);
992 	if (reg & 2) {
993 		reg &= ~2;
994 		dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
995 			 reg);
996 		pci_write_config_byte(dev, 0x41, reg);
997 	}
998 }
999 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1000 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1001 
1002 /*
1003  *	Ensure C0 rev restreaming is off. This is normally done by
1004  *	the BIOS, but in the odd case it is not, the result is corruption,
1005  *	hence the presence of a Linux check.
1006  */
1007 static void quirk_disable_pxb(struct pci_dev *pdev)
1008 {
1009 	u16 config;
1010 
1011 	if (pdev->revision != 0x04)		/* Only C0 requires this */
1012 		return;
1013 	pci_read_config_word(pdev, 0x40, &config);
1014 	if (config & (1<<6)) {
1015 		config &= ~(1<<6);
1016 		pci_write_config_word(pdev, 0x40, config);
1017 		dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
1018 	}
1019 }
1020 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454NX,	quirk_disable_pxb);
1021 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454NX,	quirk_disable_pxb);
1022 
1023 static void quirk_amd_ide_mode(struct pci_dev *pdev)
1024 {
1025 	/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
1026 	u8 tmp;
1027 
1028 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
1029 	if (tmp == 0x01) {
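		/*
		 * Bit 0 of register 0x40 appears to unlock the otherwise
		 * read-only class code registers: write subclass 0x06 (SATA)
		 * and prog-if 0x01 (AHCI), then restore 0x40 to re-lock them.
		 */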
1030 		pci_read_config_byte(pdev, 0x40, &tmp);
1031 		pci_write_config_byte(pdev, 0x40, tmp|1);
1032 		pci_write_config_byte(pdev, 0x9, 1);
1033 		pci_write_config_byte(pdev, 0xa, 6);
1034 		pci_write_config_byte(pdev, 0x40, tmp);
1035 
1036 		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
1037 		dev_info(&pdev->dev, "set SATA to AHCI mode\n");
1038 	}
1039 }
1040 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1041 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
1042 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1043 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
1044 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1045 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
1046 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1047 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1048 
1049 /*
1050  *	Serverworks CSB5 IDE does not fully support native mode
1051  */
1052 static void quirk_svwks_csb5ide(struct pci_dev *pdev)
1053 {
1054 	u8 prog;
1055 	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1056 	if (prog & 5) {
1057 		prog &= ~5;
1058 		pdev->class &= ~5;
1059 		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1060 		/* PCI layer will sort out resources */
1061 	}
1062 }
1063 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1064 
1065 /*
1066  *	Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
1067  */
1068 static void quirk_ide_samemode(struct pci_dev *pdev)
1069 {
1070 	u8 prog;
1071 
1072 	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1073 
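	/*
	 * Prog-if bit 0 selects native mode for the primary IDE channel and
	 * bit 2 for the secondary; if only one channel is native, drop both
	 * back to legacy mode, as the datasheet requires matching modes.
	 */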
1074 	if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1075 		dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
1076 		prog &= ~5;
1077 		pdev->class &= ~5;
1078 		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1079 	}
1080 }
1081 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1082 
1083 /*
1084  * Some ATA devices break if put into D3
1085  */
1086 
1087 static void quirk_no_ata_d3(struct pci_dev *pdev)
1088 {
1089 	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
1090 }
1091 /* Quirk the legacy ATA devices only. The AHCI ones are ok */
1092 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
1093 				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1094 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
1095 				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1096 /* ALi loses some register settings that we cannot then restore */
1097 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
1098 				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1099 /* VIA comes back fine but we need to keep it alive or ACPI GTM failures
1100    occur when mode detecting */
1101 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
1102 				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1103 
1104 /* This was originally an Alpha specific thing, but it really fits here.
1105  * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
1106  */
1107 static void quirk_eisa_bridge(struct pci_dev *dev)
1108 {
1109 	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
1110 }
1111 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82375,	quirk_eisa_bridge);
1112 
1113 
1114 /*
1115  * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
1116  * is not activated. The myth is that Asus said that they do not want the
1117  * users to be irritated by just another PCI Device in the Win98 device
1118  * manager. (see the file prog/hotplug/README.p4b in the lm_sensors
1119  * package 2.7.0 for details)
1120  *
1121  * The SMBus PCI Device can be activated by setting a bit in the ICH LPC
1122  * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
1123  * becomes necessary to do this tweak in two steps -- the chosen trigger
1124  * is either the Host bridge (preferred) or on-board VGA controller.
1125  *
1126  * Note that we used to unhide the SMBus that way on Toshiba laptops
1127  * (Satellite A40 and Tecra M2) but then found that the thermal management
1128  * was done by SMM code, which could cause unsynchronized concurrent
1129  * accesses to the SMBus registers, with potentially bad effects. Thus you
1130  * should be very careful when adding new entries: if SMM is accessing the
1131  * Intel SMBus, this is a very good reason to leave it hidden.
1132  *
1133  * Likewise, many recent laptops use ACPI for thermal management. If the
1134  * ACPI DSDT code accesses the SMBus, then Linux should not access it
1135  * natively, and keeping the SMBus hidden is the right thing to do. If you
1136  * are about to add an entry in the table below, please first disassemble
1137  * the DSDT and double-check that there is no code accessing the SMBus.
1138  */
1139 static int asus_hides_smbus;
1140 
1141 static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
1142 {
1143 	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1144 		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
1145 			switch (dev->subsystem_device) {
1146 			case 0x8025: /* P4B-LX */
1147 			case 0x8070: /* P4B */
1148 			case 0x8088: /* P4B533 */
1149 			case 0x1626: /* L3C notebook */
1150 				asus_hides_smbus = 1;
1151 			}
1152 		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
1153 			switch (dev->subsystem_device) {
1154 			case 0x80b1: /* P4GE-V */
1155 			case 0x80b2: /* P4PE */
1156 			case 0x8093: /* P4B533-V */
1157 				asus_hides_smbus = 1;
1158 			}
1159 		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
1160 			switch (dev->subsystem_device) {
1161 			case 0x8030: /* P4T533 */
1162 				asus_hides_smbus = 1;
1163 			}
1164 		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
1165 			switch (dev->subsystem_device) {
1166 			case 0x8070: /* P4G8X Deluxe */
1167 				asus_hides_smbus = 1;
1168 			}
1169 		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
1170 			switch (dev->subsystem_device) {
1171 			case 0x80c9: /* PU-DLS */
1172 				asus_hides_smbus = 1;
1173 			}
1174 		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
1175 			switch (dev->subsystem_device) {
1176 			case 0x1751: /* M2N notebook */
1177 			case 0x1821: /* M5N notebook */
1178 			case 0x1897: /* A6L notebook */
1179 				asus_hides_smbus = 1;
1180 			}
1181 		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1182 			switch (dev->subsystem_device) {
1183 			case 0x184b: /* W1N notebook */
1184 			case 0x186a: /* M6Ne notebook */
1185 				asus_hides_smbus = 1;
1186 			}
1187 		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1188 			switch (dev->subsystem_device) {
1189 			case 0x80f2: /* P4P800-X */
1190 				asus_hides_smbus = 1;
1191 			}
1192 		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
1193 			switch (dev->subsystem_device) {
1194 			case 0x1882: /* M6V notebook */
1195 			case 0x1977: /* A6VA notebook */
1196 				asus_hides_smbus = 1;
1197 			}
1198 	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
1199 		if (dev->device ==  PCI_DEVICE_ID_INTEL_82855PM_HB)
1200 			switch (dev->subsystem_device) {
1201 			case 0x088C: /* HP Compaq nc8000 */
1202 			case 0x0890: /* HP Compaq nc6000 */
1203 				asus_hides_smbus = 1;
1204 			}
1205 		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
1206 			switch (dev->subsystem_device) {
1207 			case 0x12bc: /* HP D330L */
1208 			case 0x12bd: /* HP D530 */
1209 			case 0x006a: /* HP Compaq nx9500 */
1210 				asus_hides_smbus = 1;
1211 			}
1212 		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
1213 			switch (dev->subsystem_device) {
1214 			case 0x12bf: /* HP xw4100 */
1215 				asus_hides_smbus = 1;
1216 			}
1217 	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
1218 		if (dev->device ==  PCI_DEVICE_ID_INTEL_82855PM_HB)
1219 			switch (dev->subsystem_device) {
1220 			case 0xC00C: /* Samsung P35 notebook */
1221 				asus_hides_smbus = 1;
1222 		}
1223 	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
1224 		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
1225 			switch (dev->subsystem_device) {
1226 			case 0x0058: /* Compaq Evo N620c */
1227 				asus_hides_smbus = 1;
1228 			}
1229 		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
1230 			switch (dev->subsystem_device) {
1231 			case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
1232 				/* Motherboard doesn't have Host bridge
1233 				 * subvendor/subdevice IDs, therefore checking
1234 				 * its on-board VGA controller */
1235 				asus_hides_smbus = 1;
1236 			}
1237 		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
1238 			switch (dev->subsystem_device) {
1239 			case 0x00b8: /* Compaq Evo D510 CMT */
1240 			case 0x00b9: /* Compaq Evo D510 SFF */
1241 			case 0x00ba: /* Compaq Evo D510 USDT */
1242 				/* Motherboard doesn't have Host bridge
1243 				 * subvendor/subdevice IDs and on-board VGA
1244 				 * controller is disabled if an AGP card is
1245 				 * inserted, therefore checking USB UHCI
1246 				 * Controller #1 */
1247 				asus_hides_smbus = 1;
1248 			}
1249 		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
1250 			switch (dev->subsystem_device) {
1251 			case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
1252 				/* Motherboard doesn't have host bridge
1253 				 * subvendor/subdevice IDs, therefore checking
1254 				 * its on-board VGA controller */
1255 				asus_hides_smbus = 1;
1256 			}
1257 	}
1258 }
1259 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845_HB,	asus_hides_smbus_hostbridge);
1260 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845G_HB,	asus_hides_smbus_hostbridge);
1261 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82850_HB,	asus_hides_smbus_hostbridge);
1262 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82865_HB,	asus_hides_smbus_hostbridge);
1263 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82875_HB,	asus_hides_smbus_hostbridge);
1264 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_7205_0,	asus_hides_smbus_hostbridge);
1265 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7501_MCH,	asus_hides_smbus_hostbridge);
1266 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855PM_HB,	asus_hides_smbus_hostbridge);
1267 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855GM_HB,	asus_hides_smbus_hostbridge);
1268 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
1269 
1270 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82810_IG3,	asus_hides_smbus_hostbridge);
1271 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_2,	asus_hides_smbus_hostbridge);
1272 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82815_CGC,	asus_hides_smbus_hostbridge);
1273 
1274 static void asus_hides_smbus_lpc(struct pci_dev *dev)
1275 {
1276 	u16 val;
1277 
1278 	if (likely(!asus_hides_smbus))
1279 		return;
1280 
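	/*
	 * Register 0xF2 in the ICH LPC bridge acts as a function-hide control:
	 * bit 3 (0x8) hides the i801 SMBus controller.  Clear it and read back
	 * to check that the change stuck.
	 */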
1281 	pci_read_config_word(dev, 0xF2, &val);
1282 	if (val & 0x8) {
1283 		pci_write_config_word(dev, 0xF2, val & (~0x8));
1284 		pci_read_config_word(dev, 0xF2, &val);
1285 		if (val & 0x8)
1286 			dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
1287 				 val);
1288 		else
1289 			dev_info(&dev->dev, "Enabled i801 SMBus device\n");
1290 	}
1291 }
1292 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801AA_0,	asus_hides_smbus_lpc);
1293 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_0,	asus_hides_smbus_lpc);
1294 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asus_hides_smbus_lpc);
1295 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_0,	asus_hides_smbus_lpc);
1296 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc);
1297 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc);
1298 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc);
1299 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801AA_0,	asus_hides_smbus_lpc);
1300 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_0,	asus_hides_smbus_lpc);
1301 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asus_hides_smbus_lpc);
1302 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_0,	asus_hides_smbus_lpc);
1303 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc);
1304 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc);
1305 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc);
1306 
1307 /* It appears we just have one such device. If not, we have a warning */
1308 static void __iomem *asus_rcba_base;
1309 static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
1310 {
1311 	u32 rcba;
1312 
1313 	if (likely(!asus_hides_smbus))
1314 		return;
1315 	WARN_ON(asus_rcba_base);
1316 
1317 	pci_read_config_dword(dev, 0xF0, &rcba);
1318 	/* use bits 31:14, 16 kB aligned */
1319 	asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
1320 	if (asus_rcba_base == NULL)
1321 		return;
1322 }
1323 
1324 static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
1325 {
1326 	u32 val;
1327 
1328 	if (likely(!asus_hides_smbus || !asus_rcba_base))
1329 		return;
1330 	/* read the Function Disable register, dword mode only */
1331 	val = readl(asus_rcba_base + 0x3418);
1332 	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
1333 }
1334 
1335 static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
1336 {
1337 	if (likely(!asus_hides_smbus || !asus_rcba_base))
1338 		return;
1339 	iounmap(asus_rcba_base);
1340 	asus_rcba_base = NULL;
1341 	dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
1342 }
1343 
1344 static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
1345 {
1346 	asus_hides_smbus_lpc_ich6_suspend(dev);
1347 	asus_hides_smbus_lpc_ich6_resume_early(dev);
1348 	asus_hides_smbus_lpc_ich6_resume(dev);
1349 }
1350 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6);
1351 DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_suspend);
1352 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume);
1353 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6_resume_early);
1354 
1355 /*
1356  * SiS 96x south bridge: BIOS typically hides SMBus device...
1357  */
1358 static void quirk_sis_96x_smbus(struct pci_dev *dev)
1359 {
1360 	u8 val = 0;
1361 	pci_read_config_byte(dev, 0x77, &val);
1362 	if (val & 0x10) {
1363 		dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
1364 		pci_write_config_byte(dev, 0x77, val & ~0x10);
1365 	}
1366 }
1367 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_961,		quirk_sis_96x_smbus);
1368 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_962,		quirk_sis_96x_smbus);
1369 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_963,		quirk_sis_96x_smbus);
1370 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_sis_96x_smbus);
1371 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_961,		quirk_sis_96x_smbus);
1372 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_962,		quirk_sis_96x_smbus);
1373 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_963,		quirk_sis_96x_smbus);
1374 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_sis_96x_smbus);
1375 
1376 /*
1377  * ... This is further complicated by the fact that some SiS96x south
1378  * bridges pretend to be 85C503/5513 instead.  In that case see if we
1379  * spotted a compatible north bridge to make sure.
1380  * (pci_find_device doesn't work yet)
1381  *
1382  * We can also enable the sis96x bit in the discovery register..
1383  */
1384 #define SIS_DETECT_REGISTER 0x40
1385 
1386 static void quirk_sis_503(struct pci_dev *dev)
1387 {
1388 	u8 reg;
1389 	u16 devid;
1390 
1391 	pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
1392 	pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1393 	pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1394 	if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1395 		pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1396 		return;
1397 	}
1398 
1399 	/*
1400 	 * OK, it now shows up as a 96x.  Run the 96x quirk by
1401 	 * hand, in case that fixup has already been processed
1402 	 * (this depends on link order, which is apparently not guaranteed).
1403 	 */
1404 	dev->device = devid;
1405 	quirk_sis_96x_smbus(dev);
1406 }
1407 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
1408 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503);
1409 
1410 
1411 /*
1412  * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
1413  * and MC97 modem controller are disabled when a second PCI soundcard is
1414  * present. This patch, tweaking the VT8237 ISA bridge, enables them.
1415  * -- bjd
1416  */
1417 static void asus_hides_ac97_lpc(struct pci_dev *dev)
1418 {
1419 	u8 val;
1420 	int asus_hides_ac97 = 0;
1421 
1422 	if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1423 		if (dev->device == PCI_DEVICE_ID_VIA_8237)
1424 			asus_hides_ac97 = 1;
1425 	}
1426 
1427 	if (!asus_hides_ac97)
1428 		return;
1429 
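	/* Bits 6-7 of register 0x50 hide the onboard AC97/MC97 functions;
	 * clear them and verify that the write took effect. */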
1430 	pci_read_config_byte(dev, 0x50, &val);
1431 	if (val & 0xc0) {
1432 		pci_write_config_byte(dev, 0x50, val & (~0xc0));
1433 		pci_read_config_byte(dev, 0x50, &val);
1434 		if (val & 0xc0)
1435 			dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1436 				 val);
1437 		else
1438 			dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
1439 	}
1440 }
1441 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1442 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1443 
1444 #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1445 
1446 /*
1447  *	If we are using libata we can drive this chip properly but must
1448  *	do this early on to make the additional device appear during
1449  *	the PCI scanning.
1450  */
1451 static void quirk_jmicron_ata(struct pci_dev *pdev)
1452 {
1453 	u32 conf1, conf5, class;
1454 	u8 hdr;
1455 
1456 	/* Only poke fn 0 */
1457 	if (PCI_FUNC(pdev->devfn))
1458 		return;
1459 
1460 	pci_read_config_dword(pdev, 0x40, &conf1);
1461 	pci_read_config_dword(pdev, 0x80, &conf5);
1462 
1463 	conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
1464 	conf5 &= ~(1 << 24);  /* Clear bit 24 */
1465 
1466 	switch (pdev->device) {
1467 	case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
1468 	case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
1469 	case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
1470 		/* The controller should be in single function ahci mode */
1471 		conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
1472 		break;
1473 
1474 	case PCI_DEVICE_ID_JMICRON_JMB365:
1475 	case PCI_DEVICE_ID_JMICRON_JMB366:
1476 		/* Redirect IDE second PATA port to the right spot */
1477 		conf5 |= (1 << 24);
1478 		/* Fall through */
1479 	case PCI_DEVICE_ID_JMICRON_JMB361:
1480 	case PCI_DEVICE_ID_JMICRON_JMB363:
1481 	case PCI_DEVICE_ID_JMICRON_JMB369:
1482 		/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
1483 		/* Set the class codes correctly and then direct IDE 0 */
1484 		conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
1485 		break;
1486 
1487 	case PCI_DEVICE_ID_JMICRON_JMB368:
1488 		/* The controller should be in single function IDE mode */
1489 		conf1 |= 0x00C00000; /* Set 22, 23 */
1490 		break;
1491 	}
1492 
1493 	pci_write_config_dword(pdev, 0x40, conf1);
1494 	pci_write_config_dword(pdev, 0x80, conf5);
1495 
1496 	/* Update pdev accordingly */
1497 	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
1498 	pdev->hdr_type = hdr & 0x7f;
1499 	pdev->multifunction = !!(hdr & 0x80);
1500 
1501 	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
1502 	pdev->class = class >> 8;
1503 }
1504 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1505 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1506 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
1507 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1508 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
1509 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1510 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1511 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1512 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1513 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
1514 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
1515 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
1516 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
1517 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
1518 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
1519 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
1520 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
1521 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1522 
1523 #endif
1524 
1525 #ifdef CONFIG_X86_IO_APIC
1526 static void quirk_alder_ioapic(struct pci_dev *pdev)
1527 {
1528 	int i;
1529 
1530 	if ((pdev->class >> 8) != 0xff00)
1531 		return;
1532 
1533 	/* the first BAR is the location of the IO APIC...we must
1534 	 * not touch this (and it's already covered by the fixmap), so
1535 	 * forcibly insert it into the resource tree */
1536 	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
1537 		insert_resource(&iomem_resource, &pdev->resource[0]);
1538 
1539 	/* The next five BARs all seem to be rubbish, so just clean
1540 	 * them out */
1541 	for (i = 1; i < 6; i++)
1542 		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
1543 }
1544 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_EESSC,	quirk_alder_ioapic);
1545 #endif
1546 
1547 static void quirk_pcie_mch(struct pci_dev *pdev)
1548 {
1549 	pci_msi_off(pdev);
1550 	pdev->no_msi = 1;
1551 }
1552 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_pcie_mch);
1553 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_pcie_mch);
1554 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_pcie_mch);
1555 
1556 
1557 /*
1558  * It's possible for MSI to get corrupted if SHPC and ACPI
1559  * are used together on certain PXH-based systems.
1560  */
1561 static void quirk_pcie_pxh(struct pci_dev *dev)
1562 {
1563 	pci_msi_off(dev);
1564 	dev->no_msi = 1;
1565 	dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
1566 }
1567 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_0,	quirk_pcie_pxh);
1568 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_1,	quirk_pcie_pxh);
1569 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_pcie_pxh);
1570 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_pcie_pxh);
1571 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_pcie_pxh);
1572 
1573 /*
1574  * Some Intel PCI Express chipsets have trouble with downstream
1575  * device power management.
1576  */
1577 static void quirk_intel_pcie_pm(struct pci_dev *dev)
1578 {
1579 	pci_pm_d3_delay = 120;
1580 	dev->no_d1d2 = 1;
1581 }
1582 
1583 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e2, quirk_intel_pcie_pm);
1584 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e3, quirk_intel_pcie_pm);
1585 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e4, quirk_intel_pcie_pm);
1586 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e5, quirk_intel_pcie_pm);
1587 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e6, quirk_intel_pcie_pm);
1588 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e7, quirk_intel_pcie_pm);
1589 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f7, quirk_intel_pcie_pm);
1590 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f8, quirk_intel_pcie_pm);
1591 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f9, quirk_intel_pcie_pm);
1592 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25fa, quirk_intel_pcie_pm);
1593 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2601, quirk_intel_pcie_pm);
1594 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2602, quirk_intel_pcie_pm);
1595 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2603, quirk_intel_pcie_pm);
1596 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2604, quirk_intel_pcie_pm);
1597 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2605, quirk_intel_pcie_pm);
1598 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2606, quirk_intel_pcie_pm);
1599 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2607, quirk_intel_pcie_pm);
1600 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2608, quirk_intel_pcie_pm);
1601 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
1602 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
1603 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
1604 
1605 #ifdef CONFIG_X86_IO_APIC
1606 /*
1607  * Boot interrupts on some chipsets cannot be turned off. For these chipsets,
1608  * remap the original interrupt in the linux kernel to the boot interrupt, so
1609  * that a PCI device's interrupt handler is installed on the boot interrupt
1610  * line instead.
1611  */
1612 static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
1613 {
1614 	if (noioapicquirk || noioapicreroute)
1615 		return;
1616 
1617 	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
1618 	dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
1619 		 dev->vendor, dev->device);
1620 }
1621 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_0,	quirk_reroute_to_boot_interrupts_intel);
1622 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_1,	quirk_reroute_to_boot_interrupts_intel);
1623 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB2_0,	quirk_reroute_to_boot_interrupts_intel);
1624 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_reroute_to_boot_interrupts_intel);
1625 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_reroute_to_boot_interrupts_intel);
1626 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_reroute_to_boot_interrupts_intel);
1627 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_0,	quirk_reroute_to_boot_interrupts_intel);
1628 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_1,	quirk_reroute_to_boot_interrupts_intel);
1629 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_0,	quirk_reroute_to_boot_interrupts_intel);
1630 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80333_1,	quirk_reroute_to_boot_interrupts_intel);
1631 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ESB2_0,	quirk_reroute_to_boot_interrupts_intel);
1632 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_reroute_to_boot_interrupts_intel);
1633 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_reroute_to_boot_interrupts_intel);
1634 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_reroute_to_boot_interrupts_intel);
1635 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_0,	quirk_reroute_to_boot_interrupts_intel);
1636 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_80332_1,	quirk_reroute_to_boot_interrupts_intel);
1637 
1638 /*
1639  * On some chipsets we can disable the generation of legacy INTx boot
1640  * interrupts.
1641  */
1642 
1643 /*
1644  * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no.
1645  * 300641-004US, section 5.7.3.
1646  */
1647 #define INTEL_6300_IOAPIC_ABAR		0x40
1648 #define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)
1649 
1650 static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
1651 {
1652 	u16 pci_config_word;
1653 
1654 	if (noioapicquirk)
1655 		return;
1656 
1657 	pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
1658 	pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
1659 	pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
1660 
1661 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1662 		 dev->vendor, dev->device);
1663 }
1664 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,	quirk_disable_intel_boot_interrupt);
1665 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_ESB_10,	quirk_disable_intel_boot_interrupt);
1666 
1667 /*
1668  * disable boot interrupts on HT-1000
1669  */
1670 #define BC_HT1000_FEATURE_REG		0x64
1671 #define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
1672 #define BC_HT1000_MAP_IDX		0xC00
1673 #define BC_HT1000_MAP_DATA		0xC01
1674 
1675 static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
1676 {
1677 	u32 pci_config_dword;
1678 	u8 irq;
1679 
1680 	if (noioapicquirk)
1681 		return;
1682 
1683 	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
1684 	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
1685 			BC_HT1000_PIC_REGS_ENABLE);
1686 
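	/* Clear PIC map entries 0x10-0x2f through the index/data register
	 * pair at 0xC00/0xC01 (exposed by BC_HT1000_PIC_REGS_ENABLE above). */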
1687 	for (irq = 0x10; irq < 0x10 + 32; irq++) {
1688 		outb(irq, BC_HT1000_MAP_IDX);
1689 		outb(0x00, BC_HT1000_MAP_DATA);
1690 	}
1691 
1692 	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
1693 
1694 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1695 		 dev->vendor, dev->device);
1696 }
1697 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
1698 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS,   PCI_DEVICE_ID_SERVERWORKS_HT1000SB,	quirk_disable_broadcom_boot_interrupt);
1699 
1700 /*
1701  * disable boot interrupts on AMD and ATI chipsets
1702  */
1703 /*
1704  * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
1705  * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
1706  * (due to an erratum).
1707  */
1708 #define AMD_813X_MISC			0x40
1709 #define AMD_813X_NOIOAMODE		(1<<0)
1710 #define AMD_813X_REV_B1			0x12
1711 #define AMD_813X_REV_B2			0x13
1712 
1713 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1714 {
1715 	u32 pci_config_dword;
1716 
1717 	if (noioapicquirk)
1718 		return;
1719 	if ((dev->revision == AMD_813X_REV_B1) ||
1720 	    (dev->revision == AMD_813X_REV_B2))
1721 		return;
1722 
1723 	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
1724 	pci_config_dword &= ~AMD_813X_NOIOAMODE;
1725 	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
1726 
1727 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1728 		 dev->vendor, dev->device);
1729 }
1730 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
1731 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
1732 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
1733 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8132_BRIDGE,	quirk_disable_amd_813x_boot_interrupt);
1734 
1735 #define AMD_8111_PCI_IRQ_ROUTING	0x56
1736 
1737 static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
1738 {
1739 	u16 pci_config_word;
1740 
1741 	if (noioapicquirk)
1742 		return;
1743 
1744 	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
1745 	if (!pci_config_word) {
1746 		dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] already disabled\n",
1747 			 dev->vendor, dev->device);
1748 		return;
1749 	}
1750 	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
1751 	dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1752 		 dev->vendor, dev->device);
1753 }
1754 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,	quirk_disable_amd_8111_boot_interrupt);
1755 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD,   PCI_DEVICE_ID_AMD_8111_SMBUS,	quirk_disable_amd_8111_boot_interrupt);
1756 #endif /* CONFIG_X86_IO_APIC */
1757 
1758 /*
1759  * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size
1760  * but PIO transfers won't work if BAR0 starts at an odd multiple of 8
1761  * bytes (i.e. is not 16-byte aligned).  Re-allocate the region if needed.
1762  */
1763 static void quirk_tc86c001_ide(struct pci_dev *dev)
1764 {
1765 	struct resource *r = &dev->resource[0];
1766 
1767 	if (r->start & 0x8) {
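		/* Mark BAR0 unset with a 16-byte size so the core reallocates
		 * it on a 16-byte boundary. */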
1768 		r->flags |= IORESOURCE_UNSET;
1769 		r->start = 0;
1770 		r->end = 0xf;
1771 	}
1772 }
1773 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
1774 			 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
1775 			 quirk_tc86c001_ide);
1776 
1777 /*
1778  * The PLX PCI 9050 PCI Target bridge controller has an erratum that prevents
1779  * the local configuration registers accessible via BAR0 (memory) or BAR1
1780  * (I/O) from being read correctly if bit 7 of the base address is set.
1781  * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128).
1782  * Re-allocate the regions to a 256-byte boundary if necessary.
1783  */
1784 static void quirk_plx_pci9050(struct pci_dev *dev)
1785 {
1786 	unsigned int bar;
1787 
1788 	/* Fixed in revision 2 (PCI 9052). */
1789 	if (dev->revision >= 2)
1790 		return;
1791 	for (bar = 0; bar <= 1; bar++)
1792 		if (pci_resource_len(dev, bar) == 0x80 &&
1793 		    (pci_resource_start(dev, bar) & 0x80)) {
1794 			struct resource *r = &dev->resource[bar];
1795 			dev_info(&dev->dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
1796 				 bar);
1797 			r->flags |= IORESOURCE_UNSET;
1798 			r->start = 0;
1799 			r->end = 0xff;
1800 		}
1801 }
1802 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
1803 			 quirk_plx_pci9050);
1804 /*
1805  * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
1806  * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
1807  * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
1808  * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
1809  *
1810  * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
1811  * driver.
1812  */
1813 DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
1814 DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
1815 
1816 static void quirk_netmos(struct pci_dev *dev)
1817 {
1818 	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
1819 	unsigned int num_serial = dev->subsystem_device & 0xf;
1820 
1821 	/*
1822 	 * These Netmos parts are multiport serial devices with optional
1823 	 * parallel ports.  Even when parallel ports are present, they
1824 	 * are identified as class SERIAL, which means the serial driver
1825 	 * will claim them.  To prevent this, mark them as class OTHER.
1826 	 * These combo devices should be claimed by parport_serial.
1827 	 *
1828 	 * The subdevice ID is of the form 0x00PS, where <P> is the number
1829 	 * of parallel ports and <S> is the number of serial ports.
1830 	 */
1831 	switch (dev->device) {
1832 	case PCI_DEVICE_ID_NETMOS_9835:
1833 		/* Well, this rule doesn't hold for the following 9835 device */
1834 		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
1835 				dev->subsystem_device == 0x0299)
1836 			return;
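		/* else fall through to the generic parallel-port handling */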
1837 	case PCI_DEVICE_ID_NETMOS_9735:
1838 	case PCI_DEVICE_ID_NETMOS_9745:
1839 	case PCI_DEVICE_ID_NETMOS_9845:
1840 	case PCI_DEVICE_ID_NETMOS_9855:
1841 		if (num_parallel) {
1842 			dev_info(&dev->dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
1843 				dev->device, num_parallel, num_serial);
1844 			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
1845 			    (dev->class & 0xff);
1846 		}
1847 	}
1848 }
1849 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
1850 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
1851 
1852 static void quirk_e100_interrupt(struct pci_dev *dev)
1853 {
1854 	u16 command, pmcsr;
1855 	u8 __iomem *csr;
1856 	u8 cmd_hi;
1857 
1858 	switch (dev->device) {
1859 	/* PCI IDs taken from drivers/net/e100.c */
1860 	case 0x1029:
1861 	case 0x1030 ... 0x1034:
1862 	case 0x1038 ... 0x103E:
1863 	case 0x1050 ... 0x1057:
1864 	case 0x1059:
1865 	case 0x1064 ... 0x106B:
1866 	case 0x1091 ... 0x1095:
1867 	case 0x1209:
1868 	case 0x1229:
1869 	case 0x2449:
1870 	case 0x2459:
1871 	case 0x245D:
1872 	case 0x27DC:
1873 		break;
1874 	default:
1875 		return;
1876 	}
1877 
1878 	/*
1879 	 * Some firmware hands off the e100 with interrupts enabled,
1880 	 * which can cause a flood of interrupts if packets are
1881 	 * received before the driver attaches to the device.  So
1882 	 * disable all e100 interrupts here.  The driver will
1883 	 * re-enable them when it's ready.
1884 	 */
1885 	pci_read_config_word(dev, PCI_COMMAND, &command);
1886 
1887 	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
1888 		return;
1889 
1890 	/*
1891 	 * Check that the device is in the D0 power state. If it's not,
1892 	 * there is no point in looking any further.
1893 	 */
1894 	if (dev->pm_cap) {
1895 		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1896 		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
1897 			return;
1898 	}
1899 
1900 	/* Convert from PCI bus to resource space.  */
1901 	csr = ioremap(pci_resource_start(dev, 0), 8);
1902 	if (!csr) {
1903 		dev_warn(&dev->dev, "Can't map e100 registers\n");
1904 		return;
1905 	}
1906 
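	/* csr + 3 is the upper byte of the SCB command word, which holds the
	 * interrupt mask; a non-zero value masks all interrupts. */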
1907 	cmd_hi = readb(csr + 3);
1908 	if (cmd_hi == 0) {
1909 		dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; disabling\n");
1910 		writeb(1, csr + 3);
1911 	}
1912 
1913 	iounmap(csr);
1914 }
1915 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
1916 			PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
1917 
1918 /*
1919  * The 82575 and 82598 may experience data corruption issues when transitioning
1920  * out of L0s.  To prevent this we need to disable L0s on the PCIe link.
1921  */
1922 static void quirk_disable_aspm_l0s(struct pci_dev *dev)
1923 {
1924 	dev_info(&dev->dev, "Disabling L0s\n");
1925 	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
1926 }
1927 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
1928 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
1929 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
1930 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
1931 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
1932 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
1933 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
1934 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
1935 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
1936 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
1937 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
1938 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
1939 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
1940 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
1941 
1942 static void fixup_rev1_53c810(struct pci_dev *dev)
1943 {
1944 	/* rev 1 ncr53c810 chips don't set the class at all which means
1945 	 * they don't get their resources remapped. Fix that here.
1946 	 */
1947 
1948 	if (dev->class == PCI_CLASS_NOT_DEFINED) {
1949 		dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
1950 		dev->class = PCI_CLASS_STORAGE_SCSI;
1951 	}
1952 }
1953 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
1954 
1955 /* Enable 1k I/O space granularity on the Intel P64H2 */
1956 static void quirk_p64h2_1k_io(struct pci_dev *dev)
1957 {
1958 	u16 en1k;
1959 
1960 	pci_read_config_word(dev, 0x40, &en1k);
1961 
1962 	if (en1k & 0x200) {
1963 		dev_info(&dev->dev, "Enabling 1K I/O space granularity\n");
1964 		dev->io_window_1k = 1;
1965 	}
1966 }
1967 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	0x1460,		quirk_p64h2_1k_io);
1968 
1969 /* Under some circumstances, AER is not linked with extended capabilities.
1970  * Force it to be linked by setting the corresponding control bit in the
1971  * config space.
1972  */
1973 static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
1974 {
1975 	uint8_t b;
1976 	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
1977 		if (!(b & 0x20)) {
1978 			pci_write_config_byte(dev, 0xf41, b | 0x20);
1979 			dev_info(&dev->dev, "Linking AER extended capability\n");
1980 		}
1981 	}
1982 }
1983 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA,  PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1984 			quirk_nvidia_ck804_pcie_aer_ext_cap);
1985 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA,  PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
1986 			quirk_nvidia_ck804_pcie_aer_ext_cap);
1987 
1988 static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
1989 {
1990 	/*
1991 	 * Disable PCI Bus Parking and PCI Master read caching on CX700
1992 	 * which causes unspecified timing errors with a VT6212L on the PCI
1993 	 * bus leading to USB2.0 packet loss.
1994 	 *
1995 	 * This quirk is only enabled if a second (on the external PCI bus)
1996 	 * VT6212L is found -- the CX700 core itself also contains a USB
1997 	 * host controller with the same PCI ID as the VT6212L.
1998 	 */
1999 
2000 	/* Count VT6212L instances */
2001 	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
2002 		PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
2003 	uint8_t b;
2004 
2005 	/* p should contain the first (internal) VT6212L -- see if we have
2006 	   an external one by searching again */
2007 	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
2008 	if (!p)
2009 		return;
2010 	pci_dev_put(p);
2011 
2012 	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
2013 		if (b & 0x40) {
2014 			/* Turn off PCI Bus Parking */
2015 			pci_write_config_byte(dev, 0x76, b ^ 0x40);
2016 
2017 			dev_info(&dev->dev, "Disabling VIA CX700 PCI parking\n");
2018 		}
2019 	}
2020 
2021 	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
2022 		if (b != 0) {
2023 			/* Turn off PCI Master read caching */
2024 			pci_write_config_byte(dev, 0x72, 0x0);
2025 
2026 			/* Set PCI Master Bus time-out to "1x16 PCLK" */
2027 			pci_write_config_byte(dev, 0x75, 0x1);
2028 
2029 			/* Disable "Read FIFO Timer" */
2030 			pci_write_config_byte(dev, 0x77, 0x0);
2031 
2032 			dev_info(&dev->dev, "Disabling VIA CX700 PCI caching\n");
2033 		}
2034 	}
2035 }
2036 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2037 
2038 /*
2039  * For Broadcom 5706, 5708 and 5709 rev. A NICs, any read beyond the
2040  * VPD end tag will hang the device.  This problem was initially
2041  * observed when a vpd entry was created in sysfs
2042  * ('/sys/bus/pci/devices/<id>/vpd').  A read of this sysfs entry
2043  * will dump 32k of data.  Reading the full 32k will access data
2044  * beyond the VPD end tag, causing the device to hang.  Once the device
2045  * is hung, the bnx2 driver will not be able to reset the device.
2046  * We believe that it is legal to read beyond the end tag and
2047  * therefore the solution is to limit the read/write length.
2048  */
2049 static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
2050 {
2051 	/*
2052 	 * Only disable the VPD capability for 5706, 5706S, 5708,
2053 	 * 5708S and 5709 rev. A
2054 	 */
2055 	if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
2056 	    (dev->device == PCI_DEVICE_ID_NX2_5706S) ||
2057 	    (dev->device == PCI_DEVICE_ID_NX2_5708) ||
2058 	    (dev->device == PCI_DEVICE_ID_NX2_5708S) ||
2059 	    ((dev->device == PCI_DEVICE_ID_NX2_5709) &&
2060 	     (dev->revision & 0xf0) == 0x0)) {
2061 		if (dev->vpd)
2062 			dev->vpd->len = 0x80;
2063 	}
2064 }
2065 
2066 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2067 			PCI_DEVICE_ID_NX2_5706,
2068 			quirk_brcm_570x_limit_vpd);
2069 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2070 			PCI_DEVICE_ID_NX2_5706S,
2071 			quirk_brcm_570x_limit_vpd);
2072 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2073 			PCI_DEVICE_ID_NX2_5708,
2074 			quirk_brcm_570x_limit_vpd);
2075 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2076 			PCI_DEVICE_ID_NX2_5708S,
2077 			quirk_brcm_570x_limit_vpd);
2078 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2079 			PCI_DEVICE_ID_NX2_5709,
2080 			quirk_brcm_570x_limit_vpd);
2081 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2082 			PCI_DEVICE_ID_NX2_5709S,
2083 			quirk_brcm_570x_limit_vpd);
2084 
2085 static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
2086 {
2087 	u32 rev;
2088 
2089 	pci_read_config_dword(dev, 0xf4, &rev);
2090 
2091 	/* Only CAP the MRRS if the device is a 5719 A0 */
2092 	if (rev == 0x05719000) {
2093 		int readrq = pcie_get_readrq(dev);
2094 		if (readrq > 2048)
2095 			pcie_set_readrq(dev, 2048);
2096 	}
2097 }
2098 
2099 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
2100 			 PCI_DEVICE_ID_TIGON3_5719,
2101 			 quirk_brcm_5719_limit_mrrs);
2102 
2103 /* Originally in EDAC sources for i82875P:
2104  * Intel tells BIOS developers to hide device 6, which
2105  * configures access to the overflow device containing
2106  * the DRBs - this is where we expose device 6.
2107  * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
2108  */
2109 static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2110 {
2111 	u8 reg;
2112 
2113 	if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
2114 		dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
2115 		pci_write_config_byte(dev, 0xF4, reg | 0x02);
2116 	}
2117 }
2118 
2119 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2120 			quirk_unhide_mch_dev6);
2121 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2122 			quirk_unhide_mch_dev6);
2123 
2124 #ifdef CONFIG_TILEPRO
2125 /*
2126  * The Tilera TILEmpower tilepro platform needs to set the link speed
2127  * to 2.5 GT/s (gigatransfers per second, i.e. Gen 1).  The default link
2128  * speed setting is 5 GT/s (Gen 2).  0x98 is the Link Control 2 register
2129  * in the PCIe capability of the PEX8624 PCIe switch.  The switch
2130  * supports link speed auto-negotiation, but falsely sets
2131  * the link speed to 5 GT/s.
2132  */
2133 static void quirk_tile_plx_gen1(struct pci_dev *dev)
2134 {
2135 	if (tile_plx_gen1) {
2136 		pci_write_config_dword(dev, 0x98, 0x1);
2137 		mdelay(50);
2138 	}
2139 }
2140 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
2141 #endif /* CONFIG_TILEPRO */
2142 
2143 #ifdef CONFIG_PCI_MSI
2144 /* Some chipsets do not support MSI.  We cannot easily rely on setting
2145  * PCI_BUS_FLAGS_NO_MSI on the device's bus, because the chipset may
2146  * also control other buses that Linux is not aware of.  Instead of
2147  * setting the flag on all buses in the machine, simply disable MSI
2148  * globally.
2149  */
2150 static void quirk_disable_all_msi(struct pci_dev *dev)
2151 {
2152 	pci_no_msi();
2153 	dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
2154 }
2155 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
2156 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
2157 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
2158 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
2159 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
2160 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
2161 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
2162 
2163 /* Disable MSI on chipsets that are known to not support it */
2164 static void quirk_disable_msi(struct pci_dev *dev)
2165 {
2166 	if (dev->subordinate) {
2167 		dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
2168 		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2169 	}
2170 }
2171 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
2172 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
2173 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2174 
2175 /*
2176  * The APC bridge device in AMD 780 family northbridges has some random
2177  * OEM subsystem ID in its vendor ID register (erratum 18), so instead
2178  * we use the possible vendor/device IDs of the host bridge for the
2179  * declared quirk, and search for the APC bridge by slot number.
2180  */
2181 static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
2182 {
2183 	struct pci_dev *apc_bridge;
2184 
2185 	apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
2186 	if (apc_bridge) {
2187 		if (apc_bridge->device == 0x9602)
2188 			quirk_disable_msi(apc_bridge);
2189 		pci_dev_put(apc_bridge);
2190 	}
2191 }
2192 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
2193 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2194 
2195 /* Go through the list of HyperTransport capabilities and
2196  * return 1 if an HT MSI capability is found and enabled */
2197 static int msi_ht_cap_enabled(struct pci_dev *dev)
2198 {
2199 	int pos, ttl = 48;
2200 
2201 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2202 	while (pos && ttl--) {
2203 		u8 flags;
2204 
2205 		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2206 					 &flags) == 0) {
2207 			dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
2208 				flags & HT_MSI_FLAGS_ENABLE ?
2209 				"enabled" : "disabled");
2210 			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
2211 		}
2212 
2213 		pos = pci_find_next_ht_capability(dev, pos,
2214 						  HT_CAPTYPE_MSI_MAPPING);
2215 	}
2216 	return 0;
2217 }
2218 
2219 /* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
2220 static void quirk_msi_ht_cap(struct pci_dev *dev)
2221 {
2222 	if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
2223 		dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
2224 		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2225 	}
2226 }
2227 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
2228 			quirk_msi_ht_cap);
2229 
2230 /* The nVidia CK804 chipset may have 2 HT MSI mappings.
2231  * MSI is supported if the MSI capability is enabled in any of these mappings.
2232  */
2233 static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
2234 {
2235 	struct pci_dev *pdev;
2236 
2237 	if (!dev->subordinate)
2238 		return;
2239 
2240 	/* Check the HT MSI cap on this chipset and the root one.
2241 	 * A single one having MSI enabled is enough to be sure that MSI is supported.
2242 	 */
2243 	pdev = pci_get_slot(dev->bus, 0);
2244 	if (!pdev)
2245 		return;
2246 	if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
2247 		dev_warn(&dev->dev, "MSI quirk detected; subordinate MSI disabled\n");
2248 		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
2249 	}
2250 	pci_dev_put(pdev);
2251 }
2252 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
2253 			quirk_nvidia_ck804_msi_ht_cap);
2254 
2255 /* Force enable MSI mapping capability on HT bridges */
2256 static void ht_enable_msi_mapping(struct pci_dev *dev)
2257 {
2258 	int pos, ttl = 48;
2259 
2260 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2261 	while (pos && ttl--) {
2262 		u8 flags;
2263 
2264 		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2265 					 &flags) == 0) {
2266 			dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
2267 
2268 			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2269 					      flags | HT_MSI_FLAGS_ENABLE);
2270 		}
2271 		pos = pci_find_next_ht_capability(dev, pos,
2272 						  HT_CAPTYPE_MSI_MAPPING);
2273 	}
2274 }
2275 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
2276 			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
2277 			 ht_enable_msi_mapping);
2278 
2279 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
2280 			 ht_enable_msi_mapping);
2281 
2282 /* The P5N32-SLI motherboards from Asus have a problem with msi
2283  * for the MCP55 NIC. It is not yet determined whether the msi problem
2284  * also affects other devices. As for now, turn off msi for this device.
2285  */
2286 static void nvenet_msi_disable(struct pci_dev *dev)
2287 {
2288 	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
2289 
2290 	if (board_name &&
2291 	    (strstr(board_name, "P5N32-SLI PREMIUM") ||
2292 	     strstr(board_name, "P5N32-E SLI"))) {
2293 		dev_info(&dev->dev, "Disabling msi for MCP55 NIC on P5N32-SLI\n");
2294 		dev->no_msi = 1;
2295 	}
2296 }
2297 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2298 			PCI_DEVICE_ID_NVIDIA_NVENET_15,
2299 			nvenet_msi_disable);
2300 
2301 /*
2302  * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing
2303  * config register.  This register controls the routing of legacy
2304  * interrupts from devices that route through the MCP55.  If this register
2305  * is misprogrammed, interrupts are only sent to the BSP, unlike
2306  * conventional systems where the IRQ is broadcast to all online CPUs.  Not
2307  * having this register set properly prevents kdump from booting up
2308  * properly, so let's make sure that we have it set correctly.
2309  * Note that this is an undocumented register.
2310  */
2311 static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
2312 {
2313 	u32 cfg;
2314 
2315 	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
2316 		return;
2317 
2318 	pci_read_config_dword(dev, 0x74, &cfg);
2319 
2320 	if (cfg & ((1 << 2) | (1 << 15))) {
2321 		printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
2322 		cfg &= ~((1 << 2) | (1 << 15));
2323 		pci_write_config_dword(dev, 0x74, cfg);
2324 	}
2325 }
2326 
2327 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2328 			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
2329 			nvbridge_check_legacy_irq_routing);
2330 
2331 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
2332 			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
2333 			nvbridge_check_legacy_irq_routing);
2334 
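/*
 * Return 0 if the device has no HT MSI mapping capability, 1 if it has
 * one but it is disabled, and 2 if it has one that is enabled.
 */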
2335 static int ht_check_msi_mapping(struct pci_dev *dev)
2336 {
2337 	int pos, ttl = 48;
2338 	int found = 0;
2339 
2340 	/* check whether this device has an HT MSI cap, and whether it is enabled */
2341 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2342 	while (pos && ttl--) {
2343 		u8 flags;
2344 
2345 		if (found < 1)
2346 			found = 1;
2347 		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2348 					 &flags) == 0) {
2349 			if (flags & HT_MSI_FLAGS_ENABLE) {
2350 				if (found < 2) {
2351 					found = 2;
2352 					break;
2353 				}
2354 			}
2355 		}
2356 		pos = pci_find_next_ht_capability(dev, pos,
2357 						  HT_CAPTYPE_MSI_MAPPING);
2358 	}
2359 
2360 	return found;
2361 }
2362 
2363 static int host_bridge_with_leaf(struct pci_dev *host_bridge)
2364 {
2365 	struct pci_dev *dev;
2366 	int pos;
2367 	int i, dev_no;
2368 	int found = 0;
2369 
2370 	dev_no = host_bridge->devfn >> 3;
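	/* Scan the slots after this host bridge, stopping at the next host
	 * bridge, for a leaf device with an HT MSI mapping capability. */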
2371 	for (i = dev_no + 1; i < 0x20; i++) {
2372 		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
2373 		if (!dev)
2374 			continue;
2375 
2376 		/* found the next host bridge? */
2377 		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2378 		if (pos != 0) {
2379 			pci_dev_put(dev);
2380 			break;
2381 		}
2382 
2383 		if (ht_check_msi_mapping(dev)) {
2384 			found = 1;
2385 			pci_dev_put(dev);
2386 			break;
2387 		}
2388 		pci_dev_put(dev);
2389 	}
2390 
2391 	return found;
2392 }
2393 
2394 #define PCI_HT_CAP_SLAVE_CTRL0     4    /* link control 0 */
2395 #define PCI_HT_CAP_SLAVE_CTRL1     8    /* link control 1 */
2396 
2397 static int is_end_of_ht_chain(struct pci_dev *dev)
2398 {
2399 	int pos, ctrl_off;
2400 	int end = 0;
2401 	u16 flags, ctrl;
2402 
2403 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
2404 
2405 	if (!pos)
2406 		goto out;
2407 
2408 	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
2409 
2410 	ctrl_off = ((flags >> 10) & 1) ?
2411 			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
2412 	pci_read_config_word(dev, pos + ctrl_off, &ctrl);
2413 
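	/* Link Control bit 6 is the End of Chain bit */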
2414 	if (ctrl & (1 << 6))
2415 		end = 1;
2416 
2417 out:
2418 	return end;
2419 }
2420 
2421 static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
2422 {
2423 	struct pci_dev *host_bridge;
2424 	int pos;
2425 	int i, dev_no;
2426 	int found = 0;
2427 
2428 	dev_no = dev->devfn >> 3;
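	/* Walk back toward slot 0 looking for the nearest device with an HT
	 * slave capability, i.e. the HT host bridge for this chain. */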
2429 	for (i = dev_no; i >= 0; i--) {
2430 		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
2431 		if (!host_bridge)
2432 			continue;
2433 
2434 		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2435 		if (pos != 0) {
2436 			found = 1;
2437 			break;
2438 		}
2439 		pci_dev_put(host_bridge);
2440 	}
2441 
2442 	if (!found)
2443 		return;
2444 
2445 	/* don't enable end_device/host_bridge with leaf directly here */
2446 	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
2447 	    host_bridge_with_leaf(host_bridge))
2448 		goto out;
2449 
2450 	/* the root bridge already did that */
2451 	if (msi_ht_cap_enabled(host_bridge))
2452 		goto out;
2453 
2454 	ht_enable_msi_mapping(dev);
2455 
2456 out:
2457 	pci_dev_put(host_bridge);
2458 }
2459 
2460 static void ht_disable_msi_mapping(struct pci_dev *dev)
2461 {
2462 	int pos, ttl = 48;
2463 
2464 	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
2465 	while (pos && ttl--) {
2466 		u8 flags;
2467 
2468 		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
2469 					 &flags) == 0) {
2470 			dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
2471 
2472 			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
2473 					      flags & ~HT_MSI_FLAGS_ENABLE);
2474 		}
2475 		pos = pci_find_next_ht_capability(dev, pos,
2476 						  HT_CAPTYPE_MSI_MAPPING);
2477 	}
2478 }
2479 
2480 static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
2481 {
2482 	struct pci_dev *host_bridge;
2483 	int pos;
2484 	int found;
2485 
2486 	if (!pci_msi_enabled())
2487 		return;
2488 
2489 	/* check whether this device has an HT MSI cap, and whether it is enabled */
2490 	found = ht_check_msi_mapping(dev);
2491 
2492 	/* no HT MSI CAP */
2493 	if (found == 0)
2494 		return;
2495 
2496 	/*
2497 	 * HT MSI mapping should be disabled on devices that are below
2498 	 * a non-Hypertransport host bridge. Locate the host bridge...
2499 	 */
2500 	host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
2501 	if (host_bridge == NULL) {
2502 		dev_warn(&dev->dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
2503 		return;
2504 	}
2505 
2506 	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
2507 	if (pos != 0) {
2508 		/* The host bridge is an HT bridge */
2509 		if (found == 1) {
2510 			/* it is not enabled, try to enable it */
2511 			if (all)
2512 				ht_enable_msi_mapping(dev);
2513 			else
2514 				nv_ht_enable_msi_mapping(dev);
2515 		}
2516 		goto out;
2517 	}
2518 
2519 	/* HT MSI is not enabled */
2520 	if (found == 1)
2521 		goto out;
2522 
2523 	/* The host bridge is not an HT bridge; disable HT MSI mapping on this device */
2524 	ht_disable_msi_mapping(dev);
2525 
2526 out:
2527 	pci_dev_put(host_bridge);
2528 }
2529 
2530 static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
2531 {
2532 	return __nv_msi_ht_cap_quirk(dev, 1);
2533 }
2534 
2535 static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
2536 {
2537 	return __nv_msi_ht_cap_quirk(dev, 0);
2538 }
2539 
2540 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2541 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2542 
2543 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2544 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2545 
2546 static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
2547 {
2548 	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2549 }
2550 static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
2551 {
2552 	struct pci_dev *p;
2553 
2554 	/* The SB700 MSI issue is fixed at the HW level from revision A21 on;
2555 	 * we need to check the PCI revision ID of the SMBus controller to get
2556 	 * the SB700 revision.
2557 	 */
2558 	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
2559 			   NULL);
2560 	if (!p)
2561 		return;
2562 
2563 	if ((p->revision < 0x3B) && (p->revision >= 0x30))
2564 		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2565 	pci_dev_put(p);
2566 }
2567 static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
2568 {
2569 	/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
2570 	if (dev->revision < 0x18) {
2571 		dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
2572 		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
2573 	}
2574 }
2575 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2576 			PCI_DEVICE_ID_TIGON3_5780,
2577 			quirk_msi_intx_disable_bug);
2578 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2579 			PCI_DEVICE_ID_TIGON3_5780S,
2580 			quirk_msi_intx_disable_bug);
2581 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2582 			PCI_DEVICE_ID_TIGON3_5714,
2583 			quirk_msi_intx_disable_bug);
2584 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2585 			PCI_DEVICE_ID_TIGON3_5714S,
2586 			quirk_msi_intx_disable_bug);
2587 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2588 			PCI_DEVICE_ID_TIGON3_5715,
2589 			quirk_msi_intx_disable_bug);
2590 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
2591 			PCI_DEVICE_ID_TIGON3_5715S,
2592 			quirk_msi_intx_disable_bug);
2593 
2594 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
2595 			quirk_msi_intx_disable_ati_bug);
2596 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
2597 			quirk_msi_intx_disable_ati_bug);
2598 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
2599 			quirk_msi_intx_disable_ati_bug);
2600 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
2601 			quirk_msi_intx_disable_ati_bug);
2602 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
2603 			quirk_msi_intx_disable_ati_bug);
2604 
2605 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
2606 			quirk_msi_intx_disable_bug);
2607 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
2608 			quirk_msi_intx_disable_bug);
2609 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2610 			quirk_msi_intx_disable_bug);
2611 
2612 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
2613 			quirk_msi_intx_disable_bug);
2614 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
2615 			quirk_msi_intx_disable_bug);
2616 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
2617 			quirk_msi_intx_disable_bug);
2618 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
2619 			quirk_msi_intx_disable_bug);
2620 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
2621 			quirk_msi_intx_disable_bug);
2622 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
2623 			quirk_msi_intx_disable_bug);
2624 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
2625 			quirk_msi_intx_disable_qca_bug);
2626 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
2627 			quirk_msi_intx_disable_qca_bug);
2628 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
2629 			quirk_msi_intx_disable_qca_bug);
2630 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
2631 			quirk_msi_intx_disable_qca_bug);
2632 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
2633 			quirk_msi_intx_disable_qca_bug);
2634 #endif /* CONFIG_PCI_MSI */
2635 
2636 /* Allow manual resource allocation for PCI hotplug bridges
2637  * via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
2638  * some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
2639  * kernel fails to allocate resources when hotplug device is
2640  * inserted and PCI bus is rescanned.
2641  */
2642 static void quirk_hotplug_bridge(struct pci_dev *dev)
2643 {
2644 	dev->is_hotplug_bridge = 1;
2645 }
2646 
2647 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
2648 
2649 /*
2650  * This is a quirk for the Ricoh MMC controller found as a part of
2651  * some multifunction chips.
2652  *
2653  * This is very similar to, and based on, the ricoh_mmc driver written by
2654  * Philip Langdale.  Thank you for these magic sequences.
2655  *
2656  * These chips implement the four main memory card controllers (SD, MMC, MS, xD)
2657  * and one or both of CardBus or FireWire.
2658  *
2659  * It happens that they implement SD and MMC
2660  * support as separate controllers (and PCI functions).  The Linux SDHCI
2661  * driver supports MMC cards, but the chip detects MMC cards in hardware
2662  * and directs them to the MMC controller - so the SDHCI driver never sees
2663  * them.
2664  *
2665  * To get around this, we must disable the useless MMC controller.
2666  * At that point, the SDHCI controller will start seeing them.
2667  * It seems to be the case that the relevant PCI registers to deactivate the
2668  * MMC controller live on PCI function 0, which might be the CardBus controller
2669  * or the FireWire controller, depending on the particular chip in question.
2670  *
2671  * This has to be done early, because as soon as we disable the MMC controller,
2672  * other PCI functions shift up one level, e.g. function #2 becomes function
2673  * #1, and this will confuse the PCI core.
2674  */
2675 
2676 #ifdef CONFIG_MMC_RICOH_MMC
2677 static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
2678 {
2679 	/* disable via cardbus interface */
2680 	u8 write_enable;
2681 	u8 write_target;
2682 	u8 disable;
2683 
2684 	/* disable must be done via function #0 */
2685 	if (PCI_FUNC(dev->devfn))
2686 		return;
2687 
2688 	pci_read_config_byte(dev, 0xB7, &disable);
2689 	if (disable & 0x02)
2690 		return;
2691 
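	/* Magic sequence from ricoh_mmc: 0x8E appears to be a write-enable key
	 * and 0x8D the target-register select; point them at 0xB7, set the
	 * disable bit, then restore both key registers. */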
2692 	pci_read_config_byte(dev, 0x8E, &write_enable);
2693 	pci_write_config_byte(dev, 0x8E, 0xAA);
2694 	pci_read_config_byte(dev, 0x8D, &write_target);
2695 	pci_write_config_byte(dev, 0x8D, 0xB7);
2696 	pci_write_config_byte(dev, 0xB7, disable | 0x02);
2697 	pci_write_config_byte(dev, 0x8E, write_enable);
2698 	pci_write_config_byte(dev, 0x8D, write_target);
2699 
2700 	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
2701 	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
2702 }
2703 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
2704 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
2705 
2706 static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
2707 {
2708 	/* disable via firewire interface */
2709 	u8 write_enable;
2710 	u8 disable;
2711 
2712 	/* disable must be done via function #0 */
2713 	if (PCI_FUNC(dev->devfn))
2714 		return;
2715 	/*
2716 	 * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
2717 	 * certain types of SD/MMC cards. Lowering the SD base
2718 	 * clock frequency from 200 MHz to 50 MHz fixes this issue.
2719 	 *
2720 	 * 0x150 - SD2.0 mode enable for changing base clock
2721 	 *	   frequency to 50 MHz
2722 	 * 0xe1  - Base clock frequency
2723 	 * 0x32  - 50 MHz new clock frequency
2724 	 * 0xf9  - Key register for 0x150
2725 	 * 0xfc  - Key register for 0xe1
2726 	 */
2727 	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
2728 	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
2729 		pci_write_config_byte(dev, 0xf9, 0xfc);
2730 		pci_write_config_byte(dev, 0x150, 0x10);
2731 		pci_write_config_byte(dev, 0xf9, 0x00);
2732 		pci_write_config_byte(dev, 0xfc, 0x01);
2733 		pci_write_config_byte(dev, 0xe1, 0x32);
2734 		pci_write_config_byte(dev, 0xfc, 0x00);
2735 
2736 		dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
2737 	}
2738 
2739 	pci_read_config_byte(dev, 0xCB, &disable);
2740 
2741 	if (disable & 0x02)
2742 		return;
2743 
2744 	pci_read_config_byte(dev, 0xCA, &write_enable);
2745 	pci_write_config_byte(dev, 0xCA, 0x57);
2746 	pci_write_config_byte(dev, 0xCB, disable | 0x02);
2747 	pci_write_config_byte(dev, 0xCA, write_enable);
2748 
2749 	dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
2750 	dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
2751 
2752 }
2753 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2754 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
2755 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
2756 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
2757 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2758 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
2759 #endif /*CONFIG_MMC_RICOH_MMC*/
2760 
2761 #ifdef CONFIG_DMAR_TABLE
2762 #define VTUNCERRMSK_REG	0x1ac
2763 #define VTD_MSK_SPEC_ERRORS	(1 << 31)
2764 /*
2765  * This is a quirk for masking VT-d spec-defined errors to platform error
2766  * handling logic.  Without this, platforms using Intel 7500, 5500 chipsets
2767  * (and derivative chipsets like X58 etc.) seem to generate NMI/SMI (based
2768  * on the RAS config settings of the platform) when a VT-d fault happens.
2769  * The resulting SMI caused the system to hang.
2770  *
2771  * VT-d spec related errors are already handled by the VT-d OS code, so no
2772  * need to report the same error through other channels.
2773  */
2774 static void vtd_mask_spec_errors(struct pci_dev *dev)
2775 {
2776 	u32 word;
2777 
2778 	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
2779 	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
2780 }
2781 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
2782 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
2783 #endif
2784 
2785 static void fixup_ti816x_class(struct pci_dev *dev)
2786 {
2787 	/* TI 816x devices do not have class code set when in PCIe boot mode */
2788 	dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
2789 	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
2790 }
2791 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
2792 				 PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
2793 
2794 /* Some PCIe devices do not work reliably with the maximum payload
2795  * size they claim to support.
2796  */
2797 static void fixup_mpss_256(struct pci_dev *dev)
2798 {
2799 	dev->pcie_mpss = 1; /* 256 bytes */
2800 }
2801 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
2802 			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
2803 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
2804 			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
2805 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
2806 			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
2807 
2808 /* Intel 5000 and 5100 memory controllers have an erratum with read completion
2809  * coalescing (which is enabled by default on some BIOSes) and an MPS of 256B.
2810  * Since there is no way of knowing what the PCIe MPS on each fabric will be
2811  * until all of the devices are discovered and buses walked, read completion
2812  * coalescing must be disabled.  Unfortunately, it cannot be re-enabled because
2813  * it is possible to hotplug a device with MPS of 256B.
2814  */
2815 static void quirk_intel_mc_errata(struct pci_dev *dev)
2816 {
2817 	int err;
2818 	u16 rcc;
2819 
2820 	if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
2821 		return;
2822 
2823 	/* The Intel erratum specifies bits to change but does not say what they
2824 	 * are.  Keep them magical until such time as the registers and values
2825 	 * can be explained.
2826 	 */
2827 	err = pci_read_config_word(dev, 0x48, &rcc);
2828 	if (err) {
2829 		dev_err(&dev->dev, "Error attempting to read the read completion coalescing register\n");
2830 		return;
2831 	}
2832 
2833 	if (!(rcc & (1 << 10)))
2834 		return;
2835 
2836 	rcc &= ~(1 << 10);
2837 
2838 	err = pci_write_config_word(dev, 0x48, rcc);
2839 	if (err) {
2840 		dev_err(&dev->dev, "Error attempting to write the read completion coalescing register\n");
2841 		return;
2842 	}
2843 
2844 	pr_info_once("Read completion coalescing disabled due to hardware errata relating to 256B MPS\n");
2845 }
2846 /* Intel 5000 series memory controllers and ports 2-7 */
2847 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
2848 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
2849 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
2850 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
2851 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
2852 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
2853 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
2854 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
2855 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
2856 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
2857 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
2858 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
2859 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
2860 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
2861 /* Intel 5100 series memory controllers and ports 2-7 */
2862 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
2863 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
2864 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
2865 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
2866 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
2867 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
2868 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
2869 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
2870 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
2871 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
2872 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
2873 
2874 
2875 /*
2876  * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum.  To
2877  * work around this, query the device for the size the BAR should be configured
2878  * to and modify the resource end to correspond to this new size.
2879  */
2880 static void quirk_intel_ntb(struct pci_dev *dev)
2881 {
2882 	int rc;
2883 	u8 val;
2884 
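	/* Registers 0xD0/0xD1 hold log2 of the intended BAR 2/BAR 4 sizes */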
2885 	rc = pci_read_config_byte(dev, 0x00D0, &val);
2886 	if (rc)
2887 		return;
2888 
2889 	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;
2890 
2891 	rc = pci_read_config_byte(dev, 0x00D1, &val);
2892 	if (rc)
2893 		return;
2894 
2895 	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
2896 }
2897 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
2898 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
2899 
2900 static ktime_t fixup_debug_start(struct pci_dev *dev,
2901 				 void (*fn)(struct pci_dev *dev))
2902 {
2903 	ktime_t calltime = ktime_set(0, 0);
2904 
2905 	dev_dbg(&dev->dev, "calling %pF\n", fn);
2906 	if (initcall_debug) {
2907 		pr_debug("calling  %pF @ %i for %s\n",
2908 			 fn, task_pid_nr(current), dev_name(&dev->dev));
2909 		calltime = ktime_get();
2910 	}
2911 
2912 	return calltime;
2913 }
2914 
2915 static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
2916 			       void (*fn)(struct pci_dev *dev))
2917 {
2918 	ktime_t delta, rettime;
2919 	unsigned long long duration;
2920 
2921 	if (initcall_debug) {
2922 		rettime = ktime_get();
2923 		delta = ktime_sub(rettime, calltime);
2924 		duration = (unsigned long long) ktime_to_ns(delta) >> 10;
2925 		pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
2926 			 fn, duration, dev_name(&dev->dev));
2927 	}
2928 }
2929 
2930 /*
2931  * Some BIOS implementations leave the Intel GPU interrupts enabled, even
2932  * though no one is handling them (e.g. the i915 driver is never loaded).
2933  * Additionally the interrupt destination is not set up properly
2934  * and the interrupt ends up -somewhere-.
2935  *
2936  * These spurious interrupts are "sticky" and the kernel disables
2937  * the (shared) interrupt line after 100,000+ generated interrupts.
2938  *
2939  * Fix it by disabling the still-enabled interrupts.
2940  * This resolves crashes often seen on monitor unplug.
2941  */
2942 #define I915_DEIER_REG 0x4400c
2943 static void disable_igfx_irq(struct pci_dev *dev)
2944 {
2945 	void __iomem *regs = pci_iomap(dev, 0, 0);
2946 	if (regs == NULL) {
2947 		dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
2948 		return;
2949 	}
2950 
2951 	/* Check if any interrupt line is still enabled */
2952 	if (readl(regs + I915_DEIER_REG) != 0) {
2953 		dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; disabling\n");
2954 
2955 		writel(0, regs + I915_DEIER_REG);
2956 	}
2957 
2958 	pci_iounmap(dev, regs);
2959 }
2960 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
2961 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
2962 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
2963 
2964 /*
2965  * These Intel devices can skip the default 10 ms delay
2966  * before entering D3 mode.
2967  */
2968 static void quirk_remove_d3_delay(struct pci_dev *dev)
2969 {
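	/* d3_delay is in milliseconds; the core default is PCI_PM_D3_WAIT (10 ms) */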
2970 	dev->d3_delay = 0;
2971 }
2972 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
2973 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
2974 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
2975 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
2976 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
2977 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
2978 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
2979 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
2980 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
2981 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
2982 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
2983 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
2984 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
2985 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
2986 
2987 /*
2988  * Some devices may pass our check in pci_intx_mask_supported() because
2989  * PCI_COMMAND_INTX_DISABLE appears to work, even though they do not
2990  * actually support INTx masking.
2991  */
2992 static void quirk_broken_intx_masking(struct pci_dev *dev)
2993 {
2994 	dev->broken_intx_masking = 1;
2995 }
2996 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
2997 			 quirk_broken_intx_masking);
2998 DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
2999 			 quirk_broken_intx_masking);
3000 /*
3001  * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10)
3002  * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC
3003  *
3004  * RTL8110SC - Fails under PCI device assignment using DisINTx masking.
3005  */
3006 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
3007 			 quirk_broken_intx_masking);
3008 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
3009 			 quirk_broken_intx_masking);
3010 
3011 #ifdef CONFIG_ACPI
3012 /*
3013  * Apple: Shutdown Cactus Ridge Thunderbolt controller.
3014  *
3015  * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be
3016  * shut down before suspend. Otherwise the native host interface (NHI) will not
3017  * be present after resume if a device was plugged in before suspend.
3018  *
3019  * The Thunderbolt controller consists of a PCIe switch with downstream
3020  * bridges leading to the NHI and to the tunnel PCI bridges.
3021  *
3022  * This quirk cuts power to the whole chip. Therefore we have to apply it
3023  * during suspend_noirq of the upstream bridge.
3024  *
3025  * Power is automagically restored before resume. No action is needed.
3026  */
3027 static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
3028 {
3029 	acpi_handle bridge, SXIO, SXFP, SXLV;
3030 
3031 	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
3032 		return;
3033 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
3034 		return;
3035 	bridge = ACPI_HANDLE(&dev->dev);
3036 	if (!bridge)
3037 		return;
3038 	/*
3039 	 * SXIO and SXLV are present only on machines requiring this quirk.
3040 	 * TB bridges in external devices might have the same device id as those
3041 	 * on the host, but they will not have the associated ACPI methods. This
3042 	 * implicitly checks that we are at the right bridge.
3043 	 */
3044 	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
3045 	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
3046 	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
3047 		return;
3048 	dev_info(&dev->dev, "quirk: cutting power to thunderbolt controller...\n");
3049 
3050 	/* magic sequence */
3051 	acpi_execute_simple_method(SXIO, NULL, 1);
3052 	acpi_execute_simple_method(SXFP, NULL, 0);
3053 	msleep(300);
3054 	acpi_execute_simple_method(SXLV, NULL, 0);
3055 	acpi_execute_simple_method(SXIO, NULL, 0);
3056 	acpi_execute_simple_method(SXLV, NULL, 0);
3057 }
3058 DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, 0x1547,
3059 			       quirk_apple_poweroff_thunderbolt);
3060 
3061 /*
3062  * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels.
3063  *
3064  * During suspend the Thunderbolt controller is reset and all PCI
3065  * tunnels are lost. The NHI driver will try to reestablish all tunnels
3066  * during resume. We have to manually wait for the NHI since there is
3067  * no parent-child relationship between the NHI and the tunneled
3068  * bridges.
3069  */
3070 static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
3071 {
3072 	struct pci_dev *sibling = NULL;
3073 	struct pci_dev *nhi = NULL;
3074 
3075 	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
3076 		return;
3077 	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
3078 		return;
3079 	/*
3080 	 * Find the NHI and confirm that we are a bridge on the tb host
3081 	 * controller and not on a tb endpoint.
3082 	 */
3083 	sibling = pci_get_slot(dev->bus, 0x0);
3084 	if (sibling == dev)
3085 		goto out; /* we are the downstream bridge to the NHI */
3086 	if (!sibling || !sibling->subordinate)
3087 		goto out;
3088 	nhi = pci_get_slot(sibling->subordinate, 0x0);
3089 	if (!nhi)
3090 		goto out;
3091 	if (nhi->vendor != PCI_VENDOR_ID_INTEL
3092 			|| (nhi->device != 0x1547 && nhi->device != 0x156c)
3093 			|| nhi->subsystem_vendor != 0x2222
3094 			|| nhi->subsystem_device != 0x1111)
3095 		goto out;
3096 	dev_info(&dev->dev, "quirk: waiting for thunderbolt to reestablish PCI tunnels...\n");
3097 	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
3098 out:
3099 	pci_dev_put(nhi);
3100 	pci_dev_put(sibling);
3101 }
3102 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1547,
3103 			       quirk_apple_wait_for_thunderbolt);
3104 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x156d,
3105 			       quirk_apple_wait_for_thunderbolt);
3106 #endif
3107 
3108 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
3109 			  struct pci_fixup *end)
3110 {
3111 	ktime_t calltime;
3112 
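	/*
	 * A fixup matches when its class (compared against dev->class shifted
	 * right by class_shift), vendor and device all match, with PCI_ANY_ID
	 * acting as a wildcard for each field.
	 */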
3113 	for (; f < end; f++)
3114 		if ((f->class == (u32) (dev->class >> f->class_shift) ||
3115 		     f->class == (u32) PCI_ANY_ID) &&
3116 		    (f->vendor == dev->vendor ||
3117 		     f->vendor == (u16) PCI_ANY_ID) &&
3118 		    (f->device == dev->device ||
3119 		     f->device == (u16) PCI_ANY_ID)) {
3120 			calltime = fixup_debug_start(dev, f->hook);
3121 			f->hook(dev);
3122 			fixup_debug_report(dev, calltime, f->hook);
3123 		}
3124 }
3125 
3126 extern struct pci_fixup __start_pci_fixups_early[];
3127 extern struct pci_fixup __end_pci_fixups_early[];
3128 extern struct pci_fixup __start_pci_fixups_header[];
3129 extern struct pci_fixup __end_pci_fixups_header[];
3130 extern struct pci_fixup __start_pci_fixups_final[];
3131 extern struct pci_fixup __end_pci_fixups_final[];
3132 extern struct pci_fixup __start_pci_fixups_enable[];
3133 extern struct pci_fixup __end_pci_fixups_enable[];
3134 extern struct pci_fixup __start_pci_fixups_resume[];
3135 extern struct pci_fixup __end_pci_fixups_resume[];
3136 extern struct pci_fixup __start_pci_fixups_resume_early[];
3137 extern struct pci_fixup __end_pci_fixups_resume_early[];
3138 extern struct pci_fixup __start_pci_fixups_suspend[];
3139 extern struct pci_fixup __end_pci_fixups_suspend[];
3140 extern struct pci_fixup __start_pci_fixups_suspend_late[];
3141 extern struct pci_fixup __end_pci_fixups_suspend_late[];
3142 
3143 static bool pci_apply_fixup_final_quirks;
3144 
3145 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
3146 {
3147 	struct pci_fixup *start, *end;
3148 
3149 	switch (pass) {
3150 	case pci_fixup_early:
3151 		start = __start_pci_fixups_early;
3152 		end = __end_pci_fixups_early;
3153 		break;
3154 
3155 	case pci_fixup_header:
3156 		start = __start_pci_fixups_header;
3157 		end = __end_pci_fixups_header;
3158 		break;
3159 
3160 	case pci_fixup_final:
3161 		if (!pci_apply_fixup_final_quirks)
3162 			return;
3163 		start = __start_pci_fixups_final;
3164 		end = __end_pci_fixups_final;
3165 		break;
3166 
3167 	case pci_fixup_enable:
3168 		start = __start_pci_fixups_enable;
3169 		end = __end_pci_fixups_enable;
3170 		break;
3171 
3172 	case pci_fixup_resume:
3173 		start = __start_pci_fixups_resume;
3174 		end = __end_pci_fixups_resume;
3175 		break;
3176 
3177 	case pci_fixup_resume_early:
3178 		start = __start_pci_fixups_resume_early;
3179 		end = __end_pci_fixups_resume_early;
3180 		break;
3181 
3182 	case pci_fixup_suspend:
3183 		start = __start_pci_fixups_suspend;
3184 		end = __end_pci_fixups_suspend;
3185 		break;
3186 
3187 	case pci_fixup_suspend_late:
3188 		start = __start_pci_fixups_suspend_late;
3189 		end = __end_pci_fixups_suspend_late;
3190 		break;
3191 
3192 	default:
3193 		/* stupid compiler warning, you would think with an enum... */
3194 		return;
3195 	}
3196 	pci_do_fixups(dev, start, end);
3197 }
3198 EXPORT_SYMBOL(pci_fixup_device);
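
/*
 * Roughly how this is used: the PCI core invokes pci_fixup_device() with the
 * appropriate pass at each enumeration/PM phase, e.g. something like
 *
 *	pci_fixup_device(pci_fixup_early, dev);
 *
 * early during device setup, so that quirks declared with the
 * DECLARE_PCI_FIXUP_* macros run at the intended time.
 */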
3199 
3200 
3201 static int __init pci_apply_final_quirks(void)
3202 {
3203 	struct pci_dev *dev = NULL;
3204 	u8 cls = 0;
3205 	u8 tmp;
3206 
3207 	if (pci_cache_line_size)
3208 		printk(KERN_DEBUG "PCI: CLS %u bytes\n",
3209 		       pci_cache_line_size << 2);
3210 
3211 	pci_apply_fixup_final_quirks = true;
3212 	for_each_pci_dev(dev) {
3213 		pci_fixup_device(pci_fixup_final, dev);
3214 		/*
3215 		 * If arch hasn't set it explicitly yet, use the CLS
3216 		 * value shared by all PCI devices.  If there's a
3217 		 * mismatch, fall back to the default value.
3218 		 */
3219 		if (!pci_cache_line_size) {
3220 			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
3221 			if (!cls)
3222 				cls = tmp;
3223 			if (!tmp || cls == tmp)
3224 				continue;
3225 
3226 			printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n",
3227 			       cls << 2, tmp << 2,
3228 			       pci_dfl_cache_line_size << 2);
3229 			pci_cache_line_size = pci_dfl_cache_line_size;
3230 		}
3231 	}
3232 
3233 	if (!pci_cache_line_size) {
3234 		printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
3235 		       cls << 2, pci_dfl_cache_line_size << 2);
3236 		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
3237 	}
3238 
3239 	return 0;
3240 }
3241 
3242 fs_initcall_sync(pci_apply_final_quirks);
3243 
3244 /*
3245  * The following are device-specific reset methods which can be used to
3246  * reset a single function if other methods (e.g. FLR, PM D0->D3) are
3247  * not available.
3248  */
3249 static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
3250 {
3251 	int pos;
3252 
3253 	/* only implement PCI_CLASS_SERIAL_USB at present */
3254 	if (dev->class == PCI_CLASS_SERIAL_USB) {
3255 		pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
3256 		if (!pos)
3257 			return -ENOTTY;
3258 
3259 		if (probe)
3260 			return 0;
3261 
3262 		pci_write_config_byte(dev, pos + 0x4, 1);
3263 		msleep(100);
3264 
3265 		return 0;
3266 	} else {
3267 		return -ENOTTY;
3268 	}
3269 }
3270 
3271 static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
3272 {
3273 	/*
3274 	 * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
3275 	 *
3276 	 * The 82599 supports FLR on VFs, but FLR support is reported only
3277 	 * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
3278 	 * Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
3279 	 */
3280 
3281 	if (probe)
3282 		return 0;
3283 
3284 	if (!pci_wait_for_pending_transaction(dev))
3285 		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3286 
3287 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3288 
3289 	msleep(100);
3290 
3291 	return 0;
3292 }
3293 
3294 #include "../gpu/drm/i915/i915_reg.h"
3295 #define MSG_CTL			0x45010
3296 #define NSDE_PWR_STATE		0xd0100
3297 #define IGD_OPERATION_TIMEOUT	10000     /* set timeout 10 seconds */
3298 
3299 static int reset_ivb_igd(struct pci_dev *dev, int probe)
3300 {
3301 	void __iomem *mmio_base;
3302 	unsigned long timeout;
3303 	u32 val;
3304 
3305 	if (probe)
3306 		return 0;
3307 
3308 	mmio_base = pci_iomap(dev, 0, 0);
3309 	if (!mmio_base)
3310 		return -ENOMEM;
3311 
3312 	iowrite32(0x00000002, mmio_base + MSG_CTL);
3313 
3314 	/*
3315 	 * Clobbering the SOUTH_CHICKEN2 register is fine only if the next
3316 	 * driver loaded sets the right bits. However, this is a reset and
3317 	 * the bits have been set by i915 previously, so we clobber the
3318 	 * SOUTH_CHICKEN2 register directly here.
3319 	 */
3320 	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
3321 
3322 	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
3323 	iowrite32(val, mmio_base + PCH_PP_CONTROL);
3324 
3325 	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
3326 	do {
3327 		val = ioread32(mmio_base + PCH_PP_STATUS);
3328 		if ((val & 0xb0000000) == 0)
3329 			goto reset_complete;
3330 		msleep(10);
3331 	} while (time_before(jiffies, timeout));
3332 	dev_warn(&dev->dev, "timeout during reset\n");
3333 
3334 reset_complete:
3335 	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
3336 
3337 	pci_iounmap(dev, mmio_base);
3338 	return 0;
3339 }
3340 
3341 /*
3342  * Device-specific reset method for Chelsio T4-based adapters.
3343  */
3344 static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
3345 {
3346 	u16 old_command;
3347 	u16 msix_flags;
3348 
3349 	/*
3350 	 * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
3351 	 * that we have no device-specific reset method.
3352 	 */
3353 	if ((dev->device & 0xf000) != 0x4000)
3354 		return -ENOTTY;
3355 
3356 	/*
3357 	 * If this is the "probe" phase, return 0 indicating that we can
3358 	 * reset this device.
3359 	 */
3360 	if (probe)
3361 		return 0;
3362 
3363 	/*
3364 	 * T4 can wedge if there are DMAs in flight within the chip and Bus
3365 	 * Master has been disabled.  We need to have it on till the Function
3366 	 * Level Reset completes.  (BUS_MASTER is disabled in
3367 	 * pci_reset_function()).
3368 	 */
3369 	pci_read_config_word(dev, PCI_COMMAND, &old_command);
3370 	pci_write_config_word(dev, PCI_COMMAND,
3371 			      old_command | PCI_COMMAND_MASTER);
3372 
3373 	/*
3374 	 * Perform the actual device function reset, saving and restoring
3375 	 * configuration information around the reset.
3376 	 */
3377 	pci_save_state(dev);
3378 
3379 	/*
3380 	 * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
3381 	 * are disabled when an MSI-X interrupt message needs to be delivered.
3382 	 * So we briefly re-enable MSI-X interrupts for the duration of the
3383 	 * FLR.  The pci_restore_state() below will restore the original
3384 	 * MSI-X state.
3385 	 */
3386 	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
3387 	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
3388 		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
3389 				      msix_flags |
3390 				      PCI_MSIX_FLAGS_ENABLE |
3391 				      PCI_MSIX_FLAGS_MASKALL);
3392 
3393 	/*
3394 	 * Start of pcie_flr() code sequence.  This reset code is a copy of
3395 	 * the guts of pcie_flr() because that's not an exported function.
3396 	 */
3397 
3398 	if (!pci_wait_for_pending_transaction(dev))
3399 		dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
3400 
3401 	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3402 	msleep(100);
3403 
3404 	/*
3405 	 * End of pcie_flr() code sequence.
3406 	 */
3407 
3408 	/*
3409 	 * Restore the configuration information (BAR values, etc.) including
3410 	 * the original PCI Configuration Space Command word, and return
3411 	 * success.
3412 	 */
3413 	pci_restore_state(dev);
3414 	pci_write_config_word(dev, PCI_COMMAND, old_command);
3415 	return 0;
3416 }
3417 
3418 #define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
3419 #define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
3420 #define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
3421 
3422 static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
3423 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
3424 		 reset_intel_82599_sfp_virtfn },
3425 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
3426 		reset_ivb_igd },
3427 	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
3428 		reset_ivb_igd },
3429 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
3430 		reset_intel_generic_dev },
3431 	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
3432 		reset_chelsio_generic_dev },
3433 	{ 0 }
3434 };
3435 
3436 /*
3437  * These device-specific reset methods are here rather than in a driver
3438  * because when a host assigns a device to a guest VM, the host may need
3439  * to reset the device but probably doesn't have a driver for it.
3440  */
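/*
 * The generic reset path in pci.c tries pci_dev_specific_reset() first;
 * returning -ENOTTY here means "no device-specific method", so the caller
 * falls back to the standard resets (FLR, PM, etc.).
 */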
3441 int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3442 {
3443 	const struct pci_dev_reset_methods *i;
3444 
3445 	for (i = pci_dev_reset_methods; i->reset; i++) {
3446 		if ((i->vendor == dev->vendor ||
3447 		     i->vendor == (u16)PCI_ANY_ID) &&
3448 		    (i->device == dev->device ||
3449 		     i->device == (u16)PCI_ANY_ID))
3450 			return i->reset(dev, probe);
3451 	}
3452 
3453 	return -ENOTTY;
3454 }
3455 
3456 static void quirk_dma_func0_alias(struct pci_dev *dev)
3457 {
3458 	if (PCI_FUNC(dev->devfn) != 0) {
3459 		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
3460 		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3461 	}
3462 }
3463 
3464 /*
3465  * https://bugzilla.redhat.com/show_bug.cgi?id=605888
3466  *
3467  * Some Ricoh devices use function 0 as the PCIe requester ID for DMA.
3468  */
3469 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
3470 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
3471 
3472 static void quirk_dma_func1_alias(struct pci_dev *dev)
3473 {
3474 	if (PCI_FUNC(dev->devfn) != 1) {
3475 		dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1);
3476 		dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
3477 	}
3478 }
3479 
3480 /*
3481  * Marvell 88SE9123 uses function 1 as the requester ID for DMA.  In some
3482  * SKUs function 1 is present and is a legacy IDE controller, in other
3483  * SKUs this function is not present, making this a ghost requester.
3484  * https://bugzilla.kernel.org/show_bug.cgi?id=42679
3485  */
3486 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
3487 			 quirk_dma_func1_alias);
3488 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
3489 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
3490 			 quirk_dma_func1_alias);
3491 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
3492 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
3493 			 quirk_dma_func1_alias);
3494 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */
3495 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
3496 			 quirk_dma_func1_alias);
3497 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */
3498 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
3499 			 quirk_dma_func1_alias);
3500 /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */
3501 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
3502 			 quirk_dma_func1_alias);
3503 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
3504 			 quirk_dma_func1_alias);
3505 /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
3506 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3507 			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
3508 			 quirk_dma_func1_alias);
3509 
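/*
 * The alias recorded by these quirks is walked via pci_for_each_dma_alias(),
 * so the IOMMU code also maps the ghost requester ID, not just the real one.
 */
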
3510 /*
3511  * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in
3512  * the wrong DMA alias being used for the device.  Some of these devices can
3513  * be used as either forward or reverse bridges, so we need to test whether
3514  * the device is operating in the correct mode.  We could probably apply this
3515  * quirk to PCI_ANY_ID, but for now we'll just use known offenders.  The test:
3516  * if @pdev is a non-root, non-PCIe bridge whose upstream device is PCIe but
3517  * not itself a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI bridge.
3518  */
3519 static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
3520 {
3521 	if (!pci_is_root_bus(pdev->bus) &&
3522 	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3523 	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
3524 	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
3525 		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
3526 }
3527 /* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */
3528 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
3529 			 quirk_use_pcie_bridge_dma_alias);
3530 /* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */
3531 DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
3532 /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
3533 DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
3534 /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
3535 DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
3536 
3537 /*
3538  * AMD has indicated that the devices below do not support peer-to-peer
3539  * when they sit in the southbridge of a system with an AMD IOMMU.
3540  * Multifunction devices that do not support peer-to-peer between
3541  * functions can claim to support a subset of ACS.
3542  * Such devices effectively enable request redirect (RR) and completion
3543  * redirect (CR) since all transactions are redirected to the upstream
3544  * root complex.
3545  *
3546  * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
3547  * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
3548  * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
3549  *
3550  * 1002:4385 SBx00 SMBus Controller
3551  * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
3552  * 1002:4383 SBx00 Azalia (Intel HDA)
3553  * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
3554  * 1002:4384 SBx00 PCI to PCI Bridge
3555  * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
3556  *
3557  * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15
3558  *
3559  * 1022:780f [AMD] FCH PCI Bridge
3560  * 1022:7809 [AMD] FCH USB OHCI Controller
3561  */
3562 static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
3563 {
3564 #ifdef CONFIG_ACPI
3565 	struct acpi_table_header *header = NULL;
3566 	acpi_status status;
3567 
3568 	/* Targeting multifunction devices on the SB (appears on root bus) */
3569 	if (!dev->multifunction || !pci_is_root_bus(dev->bus))
3570 		return -ENODEV;
3571 
3572 	/* The IVRS table describes the AMD IOMMU */
3573 	status = acpi_get_table("IVRS", 0, &header);
3574 	if (ACPI_FAILURE(status))
3575 		return -ENODEV;
3576 
3577 	/* Filter out flags not applicable to multifunction */
3578 	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
3579 
3580 	return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
3581 #else
3582 	return -ENODEV;
3583 #endif
3584 }
3585 
3586 /*
3587  * Many Intel PCH root ports do provide ACS-like features to disable peer
3588  * transactions and validate bus numbers in requests, but do not provide an
3589  * actual PCIe ACS capability.  This is the list of device IDs known to fall
3590  * into that category as provided by Intel in Red Hat bugzilla 1037684.
3591  */
3592 static const u16 pci_quirk_intel_pch_acs_ids[] = {
3593 	/* Ibexpeak PCH */
3594 	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
3595 	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
3596 	/* Cougarpoint PCH */
3597 	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
3598 	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
3599 	/* Pantherpoint PCH */
3600 	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
3601 	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
3602 	/* Lynxpoint-H PCH */
3603 	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
3604 	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
3605 	/* Lynxpoint-LP PCH */
3606 	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
3607 	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
3608 	/* Wildcat PCH */
3609 	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
3610 	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
3611 	/* Patsburg (X79) PCH */
3612 	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
3613 };
3614 
3615 static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
3616 {
3617 	int i;
3618 
3619 	/* Filter out a few obvious non-matches first */
3620 	if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
3621 		return false;
3622 
3623 	for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
3624 		if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
3625 			return true;
3626 
3627 	return false;
3628 }
3629 
3630 #define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
3631 
3632 static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
3633 {
3634 	u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
3635 		    INTEL_PCH_ACS_FLAGS : 0;
3636 
3637 	if (!pci_quirk_intel_pch_acs_match(dev))
3638 		return -ENOTTY;
3639 
3640 	return acs_flags & ~flags ? 0 : 1;
3641 }
3642 
3643 static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
3644 {
3645 	/*
3646 	 * SV, TB, and UF are not relevant to multifunction endpoints.
3647 	 *
3648 	 * Multifunction devices are only required to implement RR, CR, and DT
3649 	 * in their ACS capability if they support peer-to-peer transactions.
3650 	 * Devices matching this quirk have been verified by the vendor to not
3651 	 * perform peer-to-peer with other functions, allowing us to mask out
3652 	 * these bits as if they were unimplemented in the ACS capability.
3653 	 */
3654 	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
3655 		       PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
3656 
3657 	return acs_flags ? 0 : 1;
3658 }
3659 
3660 static const struct pci_dev_acs_enabled {
3661 	u16 vendor;
3662 	u16 device;
3663 	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
3664 } pci_dev_acs_enabled[] = {
3665 	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
3666 	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
3667 	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
3668 	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
3669 	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
3670 	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
3671 	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
3672 	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
3673 	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
3674 	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
3675 	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
3676 	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
3677 	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
3678 	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
3679 	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
3680 	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
3681 	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
3682 	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
3683 	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
3684 	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
3685 	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
3686 	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
3687 	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
3688 	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
3689 	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
3690 	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
3691 	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
3692 	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
3693 	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
3694 	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
3695 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
3696 	{ 0 }
3697 };
3698 
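/*
 * pci_acs_enabled() in pci.c consults this before reading the device's own
 * ACS capability, so a quirk here stands in for (or overrides) the standard
 * ACS reporting.
 */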
3699 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
3700 {
3701 	const struct pci_dev_acs_enabled *i;
3702 	int ret;
3703 
3704 	/*
3705 	 * Allow devices that do not expose standard PCIe ACS capabilities
3706 	 * or control to indicate their support here.  Multifunction Express
3707 	 * devices which do not allow internal peer-to-peer between functions,
3708 	 * but do not implement PCIe ACS, may wish to return true here.
3709 	 */
3710 	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
3711 		if ((i->vendor == dev->vendor ||
3712 		     i->vendor == (u16)PCI_ANY_ID) &&
3713 		    (i->device == dev->device ||
3714 		     i->device == (u16)PCI_ANY_ID)) {
3715 			ret = i->acs_enabled(dev, acs_flags);
3716 			if (ret >= 0)
3717 				return ret;
3718 		}
3719 	}
3720 
3721 	return -ENOTTY;
3722 }
3723 
3724 /* Config space offset of Root Complex Base Address register */
3725 #define INTEL_LPC_RCBA_REG 0xf0
3726 /* 31:14 RCBA address */
3727 #define INTEL_LPC_RCBA_MASK 0xffffc000
3728 /* RCBA Enable */
3729 #define INTEL_LPC_RCBA_ENABLE (1 << 0)
3730 
3731 /* Backbone Scratch Pad Register */
3732 #define INTEL_BSPR_REG 0x1104
3733 /* Backbone Peer Non-Posted Disable */
3734 #define INTEL_BSPR_REG_BPNPD (1 << 8)
3735 /* Backbone Peer Posted Disable */
3736 #define INTEL_BSPR_REG_BPPD  (1 << 9)
3737 
3738 /* Upstream Peer Decode Configuration Register */
3739 #define INTEL_UPDCR_REG 0x1114
3740 /* 5:0 Peer Decode Enable bits */
3741 #define INTEL_UPDCR_REG_MASK 0x3f
3742 
3743 static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
3744 {
3745 	u32 rcba, bspr, updcr;
3746 	void __iomem *rcba_mem;
3747 
3748 	/*
3749 	 * Read the RCBA register from the LPC (D31:F0).  PCH root ports
3750 	 * are D28:F* and therefore get probed before LPC, thus we can't
3751 	 * use pci_get_slot/pci_read_config_dword here.
3752 	 */
3753 	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
3754 				  INTEL_LPC_RCBA_REG, &rcba);
3755 	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
3756 		return -EINVAL;
3757 
3758 	rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
3759 				   PAGE_ALIGN(INTEL_UPDCR_REG));
3760 	if (!rcba_mem)
3761 		return -ENOMEM;
3762 
3763 	/*
3764 	 * The BSPR can disallow peer cycles, but it's set by soft strap and
3765 	 * therefore read-only.  If both posted and non-posted peer cycles are
3766 	 * disallowed, we're ok.  If either is allowed, then we need to use
3767 	 * the UPDCR to disable peer decodes for each port.  This provides the
3768 	 * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
3769 	 */
3770 	bspr = readl(rcba_mem + INTEL_BSPR_REG);
3771 	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
3772 	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
3773 		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
3774 		if (updcr & INTEL_UPDCR_REG_MASK) {
3775 			dev_info(&dev->dev, "Disabling UPDCR peer decodes\n");
3776 			updcr &= ~INTEL_UPDCR_REG_MASK;
3777 			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
3778 		}
3779 	}
3780 
3781 	iounmap(rcba_mem);
3782 	return 0;
3783 }
3784 
3785 /* Miscellaneous Port Configuration register */
3786 #define INTEL_MPC_REG 0xd8
3787 /* MPC: Invalid Receive Bus Number Check Enable */
3788 #define INTEL_MPC_REG_IRBNCE (1 << 26)
3789 
3790 static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
3791 {
3792 	u32 mpc;
3793 
3794 	/*
3795 	 * When enabled, the IRBNCE bit of the MPC register enables the
3796 	 * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which
3797 	 * ensures that requester IDs fall within the bus number range
3798 	 * of the bridge.  Enable if not already.
3799 	 */
3800 	pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
3801 	if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
3802 		dev_info(&dev->dev, "Enabling MPC IRBNCE\n");
3803 		mpc |= INTEL_MPC_REG_IRBNCE;
3804 		pci_write_config_dword(dev, INTEL_MPC_REG, mpc);
3805 	}
3806 }
3807 
3808 static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
3809 {
3810 	if (!pci_quirk_intel_pch_acs_match(dev))
3811 		return -ENOTTY;
3812 
3813 	if (pci_quirk_enable_intel_lpc_acs(dev)) {
3814 		dev_warn(&dev->dev, "Failed to enable Intel PCH ACS quirk\n");
3815 		return 0;
3816 	}
3817 
3818 	pci_quirk_enable_intel_rp_mpc_acs(dev);
3819 
3820 	dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;
3821 
3822 	dev_info(&dev->dev, "Intel PCH root port ACS workaround enabled\n");
3823 
3824 	return 0;
3825 }
3826 
3827 static const struct pci_dev_enable_acs {
3828 	u16 vendor;
3829 	u16 device;
3830 	int (*enable_acs)(struct pci_dev *dev);
3831 } pci_dev_enable_acs[] = {
3832 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_enable_intel_pch_acs },
3833 	{ 0 }
3834 };
3835 
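/*
 * Hook used by the PCI core when enabling ACS: the first matching entry in
 * pci_dev_enable_acs[] whose handler returns >= 0 handles the device; a
 * negative return means "not applicable, keep looking".
 */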
3836 void pci_dev_specific_enable_acs(struct pci_dev *dev)
3837 {
3838 	const struct pci_dev_enable_acs *i;
3839 	int ret;
3840 
3841 	for (i = pci_dev_enable_acs; i->enable_acs; i++) {
3842 		if ((i->vendor == dev->vendor ||
3843 		     i->vendor == (u16)PCI_ANY_ID) &&
3844 		    (i->device == dev->device ||
3845 		     i->device == (u16)PCI_ANY_ID)) {
3846 			ret = i->enable_acs(dev);
3847 			if (ret >= 0)
3848 				return;
3849 		}
3850 	}
3851 }
3852