// SPDX-License-Identifier: GPL-2.0
/*
 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/vgaarb.h>
#include <asm/amd_nb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>

static void pci_fixup_i450nx(struct pci_dev *d)
{
	/*
	 * i450NX -- Find and scan all secondary buses on all PXB's.
	 */
	int pxb, reg;
	u8 busno, suba, subb;

	dev_warn(&d->dev, "Searching for i450NX host bridges\n");
	reg = 0xd0;
	for (pxb = 0; pxb < 2; pxb++) {
		pci_read_config_byte(d, reg++, &busno);
		pci_read_config_byte(d, reg++, &suba);
		pci_read_config_byte(d, reg++, &subb);
		dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
			suba, subb);
		if (busno)
			pcibios_scan_root(busno);	/* Bus A */
		if (suba < subb)
			pcibios_scan_root(suba + 1);	/* Bus B */
	}
	pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);

static void pci_fixup_i450gx(struct pci_dev *d)
{
	/*
	 * i450GX and i450KX -- Find and scan all secondary buses.
	 * (called separately for each PCI bridge found)
	 */
	u8 busno;

	pci_read_config_byte(d, 0x4a, &busno);
	dev_info(&d->dev, "i450KX/GX host bridge; secondary bus %02x\n", busno);
	pcibios_scan_root(busno);
	pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);

static void pci_fixup_umc_ide(struct pci_dev *d)
{
	/*
	 * UM8886BF IDE controller sets region type bits incorrectly,
	 * therefore they look like memory despite them being I/O.
	 */
	int i;

	dev_warn(&d->dev, "Fixing base address flags\n");
	for (i = 0; i < 4; i++)
		d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);

static void pci_fixup_latency(struct pci_dev *d)
{
	/*
	 * SiS 5597 and 5598 chipsets require the latency timer to be set
	 * to at most 32 to avoid lockups.
	 */
	dev_dbg(&d->dev, "Setting max latency to 32\n");
	pcibios_max_latency = 32;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);

static void pci_fixup_piix4_acpi(struct pci_dev *d)
{
	/*
	 * PIIX4 ACPI device: hardwired IRQ9
	 */
	d->irq = 9;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);

/*
 * Addresses issues with problems in the memory write queue timer in
 * certain VIA Northbridges. This bugfix is per VIA's specifications,
 * except for the KL133/KM133: clearing bit 5 on those Northbridges seems
 * to trigger a bug in its integrated ProSavage video card, which
 * causes screen corruption. We only clear bits 6 and 7 for that chipset,
 * until VIA can provide us with definitive information on why screen
 * corruption occurs, and what exactly those bits do.
 *
 * VIA 8363, 8622, 8361 Northbridges:
 *  - bits 5, 6, 7 at offset 0x55 need to be turned off
 * VIA 8367 (KT266x) Northbridges:
 *  - bits 5, 6, 7 at offset 0x95 need to be turned off
 * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges:
 *  - bits 6, 7 at offset 0x55 need to be turned off
 */

#define VIA_8363_KL133_REVISION_ID	0x81
#define VIA_8363_KM133_REVISION_ID	0x84

static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
{
	u8 v;
	int where = 0x55;
	int mask = 0x1f; /* clear bits 5, 6, 7 by default */

	if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
		/*
		 * Fix PCI bus latency issues caused by a northbridge BIOS
		 * error: the buggy KT266x BIOS forces the NB latency to zero.
		 */
		pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);

		/*
		 * The memory write queue timer register is at a different
		 * offset on the KT266x: 0x95 rather than 0x55.
		 */
		where = 0x95;
	} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
		   (d->revision == VIA_8363_KL133_REVISION_ID ||
		    d->revision == VIA_8363_KM133_REVISION_ID)) {
		/*
		 * Clear only bits 6 and 7; clearing bit 5 causes screen
		 * corruption on the KL133/KM133.
		 */
		mask = 0x3f;
	}

	pci_read_config_byte(d, where, &v);
	if (v & ~mask) {
		dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n",
			 d->device, d->revision, where, v, mask, v & mask);
		v &= mask;
		pci_write_config_byte(d, where, v);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);

/*
 * For some reason Intel decided that certain parts of their 815, 845 and
 * some other chipsets must look like PCI-to-PCI bridges while they are
 * obviously not. The 82801 family (AA, AB, BAM/CAM, BA/CA/DB and E) PCI
 * bridges are actually HUB-to-PCI ones, according to Intel terminology.
 * These devices forward all addresses from the system to the PCI bus no
 * matter what their window settings are, so they are "transparent" (or
 * subtractive decoding) from the programmer's point of view.
 */
static void pci_fixup_transparent_bridge(struct pci_dev *dev)
{
	if ((dev->device & 0xff00) == 0x2400)
		dev->transparent = 1;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			       PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);

/*
 * Fixup for C1 Halt Disconnect problem on nForce2 systems.
 *
 * From information provided by "Allen Martin" <AMartin@nvidia.com>:
 *
 * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
 * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
 * This allows the state-machine and timer to return to a proper state within
 * 80 ns of the CONNECT and probe appearing together. Since the CPU will not
 * issue another HALT within 80 ns of the initial HALT, the failure condition
 * is avoided.
 */
static void pci_fixup_nforce2(struct pci_dev *dev)
{
	u32 val;

	/*
	 * Chip	 Old value   New value
	 * C17	 0x1F0FFF01  0x1F01FF01
	 * C18D	 0x9F0FFF01  0x9F01FF01
	 *
	 * Northbridge chip version may be determined by
	 * reading the PCI revision ID (0xC1 or greater is C18D).
	 */
	pci_read_config_dword(dev, 0x6c, &val);

	/*
	 * Apply fixup if needed, but don't touch the disconnect state.
	 */
	if ((val & 0x00FF0000) != 0x00010000) {
		dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
		pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);

/* Max PCI Express root ports */
#define MAX_PCIEROOT	6
static int quirk_aspm_offset[MAX_PCIEROOT << 3];

#define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))

static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}

/*
 * Replace the original PCI bus ops for write with a new one that will filter
 * the request to ensure ASPM cannot be enabled.
 */
static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	u8 offset;

	offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];

	if ((offset) && (where == offset))
		value = value & ~PCI_EXP_LNKCTL_ASPMC;

	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}

static struct pci_ops quirk_pcie_aspm_ops = {
	.read = quirk_pcie_aspm_read,
	.write = quirk_pcie_aspm_write,
};

/*
 * Prevents PCI Express ASPM (Active State Power Management) from being
 * enabled.
 *
 * Save the register offset, where the ASPM control bits are located,
 * for each PCI Express device that is in the device list of the root
 * port, in an array for fast indexing. Replace the bus ops with the
 * modified one.
 */
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
	int i;
	struct pci_bus *pbus;
	struct pci_dev *dev;

	if ((pbus = pdev->subordinate) == NULL)
		return;

	/*
	 * Check if the DID of pdev matches one of the six root ports. This
	 * check is needed in the case this function is called directly by the
	 * hot-plug driver.
	 */
	if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
	    (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
		return;

	if (list_empty(&pbus->devices)) {
		/*
		 * If no device is attached to the root port at power-up or
		 * after hot-remove, pbus->devices is empty and this code
		 * sets the offsets to zero and the bus ops back to the
		 * parent's unmodified ops.
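		 *
		 * GET_INDEX() maps each of the six root ports to a block of
		 * eight consecutive quirk_aspm_offset[] slots, one per
		 * possible function number; the loop below simply clears the
		 * whole block for this port.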
		 */
		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
			quirk_aspm_offset[i] = 0;

		pci_bus_set_ops(pbus, pbus->parent->ops);
	} else {
		/*
		 * If devices are attached to the root port at power-up or
		 * after hot-add, the code loops through the device list of
		 * each root port to save the register offsets and replace the
		 * bus ops.
		 */
		list_for_each_entry(dev, &pbus->devices, bus_list)
			/* There are 0 to 8 devices attached to this bus */
			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
				dev->pcie_cap + PCI_EXP_LNKCTL;

		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);

/*
 * Fixup to mark the boot BIOS video device selected by the BIOS before it
 * changes.
 *
 * From information provided by "Jon Smirl" <jonsmirl@gmail.com>
 *
 * The standard boot ROM sequence for an x86 machine uses the BIOS
 * to select an initial video card for boot display. This boot video
 * card will have its BIOS copied to 0xC0000 in system RAM.
 * IORESOURCE_ROM_SHADOW is used to associate the boot video
 * card with this copy. On laptops this copy has to be used since
 * the main ROM may be compressed or combined with another image.
 * See pci_map_rom() for use of this flag. Before marking the device
 * with IORESOURCE_ROM_SHADOW, check if a vga_default_device is already
 * set by either arch code or vga-arbitration; if so, only apply the fixup
 * to this already-determined primary video card.
 */
static void pci_fixup_video(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	struct pci_bus *bus;
	u16 config;
	struct resource *res;

	/* Is VGA routed to us? */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;

		/*
		 * From information provided by
		 * "David Miller" <davem@davemloft.net>
		 * The bridge control register is valid for PCI header
		 * type BRIDGE, or CARDBUS. Host to PCI controllers use
		 * PCI header type NORMAL.
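		 *
		 * For a device that sits directly on a root bus, bus->self is
		 * NULL and the check is skipped; the walk ends once
		 * bus->parent becomes NULL at the root.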
		 */
		if (bridge && (pci_is_bridge(bridge))) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &config);
			if (!(config & PCI_BRIDGE_CTL_VGA))
				return;
		}
		bus = bus->parent;
	}
	if (!vga_default_device() || pdev == vga_default_device()) {
		pci_read_config_word(pdev, PCI_COMMAND, &config);
		if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			res = &pdev->resource[PCI_ROM_RESOURCE];

			pci_disable_rom(pdev);
			if (res->parent)
				release_resource(res);

			res->start = 0xC0000;
			res->end = res->start + 0x20000 - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
				     IORESOURCE_PCI_FIXED;
			dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
				 res);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
			       PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);

static const struct dmi_system_id msi_k8t_dmi_table[] = {
	{
		.ident = "MSI-K8T-Neo2Fir",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
		},
	},
	{}
};

/*
 * The AMD Athlon 64 board MSI "K8T Neo2-FIR" disables the onboard sound
 * card if a PCI sound card is added.
 *
 * The BIOS only gives the options "DISABLED" and "AUTO". This code sets
 * the corresponding register value to enable the sound card.
 *
 * The sound card is only enabled if the mainboard is identified via the
 * DMI tables and the sound card is detected to be off.
 */
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
{
	unsigned char val;

	if (!dmi_check_system(msi_k8t_dmi_table))
		return; /* only applies to MSI K8T Neo2-FIR */

	pci_read_config_byte(dev, 0x50, &val);
	if (val & 0x40) {
		pci_write_config_byte(dev, 0x50, val & ~0x40);

		/* verify the change for status output */
		pci_read_config_byte(dev, 0x50, &val);
		if (val & 0x40)
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; can't enable onboard soundcard!\n");
		else
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; enabled onboard soundcard\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 pci_fixup_msi_k8t_onboard_sound);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 pci_fixup_msi_k8t_onboard_sound);

/*
 * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
 *
 * We pretend to bring them out of full D3 state, and restore the proper
 * IRQ, PCI cache line size, and BARs, otherwise the device won't function
 * properly. In some cases, the device will generate an interrupt on
 * the wrong IRQ line, causing any devices sharing the line it's
 * *supposed* to use to be disabled by the kernel's IRQ debug code.
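 *
 * The header-phase fixup below only records the BIOS-programmed cache line
 * size and marks the device as if it were in D3cold; the enable-phase fixup
 * then restores the cache line size, interrupt line, and the first two BARs.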
 */
static u16 toshiba_line_size;

static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
	{
		.ident = "Toshiba PS5 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
		},
	},
	{
		.ident = "Toshiba PSM4 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
		},
	},
	{
		.ident = "Toshiba A40 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
		},
	},
	{ }
};

static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return; /* only applies to certain Toshibas (so far) */

	dev->current_state = PCI_D3cold;
	pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
			 pci_pre_fixup_toshiba_ohci1394);

static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return; /* only applies to certain Toshibas (so far) */

	/* Restore config space on Toshiba laptops */
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
			       pci_resource_start(dev, 0));
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
			       pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
			 pci_post_fixup_toshiba_ohci1394);

/*
 * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
 * configuration space.
 */
static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
{
	u8 r;

	/* clear 'F4 Video Configuration Trap' bit */
	pci_read_config_byte(dev, 0x42, &r);
	r &= 0xfd;
	pci_write_config_byte(dev, 0x42, r);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
			pci_early_fixup_cyrix_5530);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
			 pci_early_fixup_cyrix_5530);

/*
 * Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
 * prevent update of the BAR0, which doesn't look like a normal BAR.
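 *
 * Marking the resource IORESOURCE_PCI_FIXED below makes the PCI resource
 * code treat it as immovable, so it is never reassigned.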
 */
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
{
	dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
			 pci_siemens_interrupt_controller);

/*
 * SB600: Disable BAR1 on device 14.0 to keep HPET resources from
 * confusing the PCI engine:
 */
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
	u8 val;

	/*
	 * The SB600 and SB700 both share the same device
	 * ID, but the PM register 0x55 does something different
	 * for the SB700, so make sure we are dealing with the
	 * SB600 before touching the bit:
	 */
	pci_read_config_byte(dev, 0x08, &val);

	if (val < 0x2F) {
		outb(0x55, 0xCD6);
		val = inb(0xCD7);

		/* Set bit 7 in PM register 0x55 */
		outb(0x55, 0xCD6);
		outb(val | 0x80, 0xCD7);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);

#ifdef CONFIG_HPET_TIMER
static void sb600_hpet_quirk(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
		r->flags |= IORESOURCE_PCI_FIXED;
		dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
#endif

/*
 * Twinhead H12Y needs us to block out a region, otherwise we map devices
 * there and any access kills the box.
 *
 * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
 *
 * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
 */
static void twinhead_reserve_killing_zone(struct pci_dev *dev)
{
	if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
		pr_info("Reserving memory on Twinhead H12Y\n");
		request_mem_region(0xFFB00000, 0x100000, "twinhead");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);

/*
 * Device [8086:2fc0]
 * Erratum HSE43
 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
 *
 * Devices [8086:6f60,6fa0,6fc0]
 * Erratum BDF2
 * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
 */
static void pci_invalid_bar(struct pci_dev *dev)
{
	dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);

/*
 * Device [1022:7808]
 * 23. USB Wake on Connect/Disconnect with Low Speed Devices
 * https://support.amd.com/TechDocs/46837.pdf
 * Appendix A2
 * https://support.amd.com/TechDocs/42413.pdf
 */
static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
{
	dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
	dev->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold)
		>> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);

/*
 * Device [1022:7914]
 * When in D0, PME# doesn't get asserted when plugging in a USB 2.0 device.
 */
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
{
	dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);

/*
 * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
 *
 * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
 * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
 * for soft poweroff and suspend-to-RAM.
 *
 * As far as we know, this is related to the address space, not to the Root
 * Port itself. Attaching the quirk to the Root Port is a convenience, but
 * it could probably also be a standalone DMI quirk.
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=103211
 */
static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;

	if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
	     !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
	    pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
		return;

	res = request_mem_region(0x7fa00000, 0x200000,
				 "MacBook Pro poweroff workaround");
	if (res)
		dev_info(dev, "claimed %s %pR\n", res->name, res);
	else
		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);

/*
 * VMD-enabled root ports will change the source ID for all messages
 * to the VMD device. Rather than doing device matching with the source
 * ID, the AER driver should traverse the child device tree, reading
 * AER registers to find the faulting device.
 */
static void quirk_no_aersid(struct pci_dev *pdev)
{
	/* VMD Domain */
	if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
		pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);

static void quirk_intel_th_dnv(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[4];

	/*
	 * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
	 * appears to be 4 MB in reality.
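	 *
	 * Widening the resource and flagging it IORESOURCE_UNSET below lets
	 * the PCI core reassign it, so the full 4 MB window is allocated
	 * instead of the undersized firmware assignment.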
	 */
	if (r->end == r->start + 0x7ff) {
		r->start = 0;
		r->end = 0x3fffff;
		r->flags |= IORESOURCE_UNSET;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);

#ifdef CONFIG_PHYS_ADDR_T_64BIT

#define AMD_141b_MMIO_BASE(x)			(0x80 + (x) * 0x8)
#define AMD_141b_MMIO_BASE_RE_MASK		BIT(0)
#define AMD_141b_MMIO_BASE_WE_MASK		BIT(1)
#define AMD_141b_MMIO_BASE_MMIOBASE_MASK	GENMASK(31, 8)

#define AMD_141b_MMIO_LIMIT(x)			(0x84 + (x) * 0x8)
#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK	GENMASK(31, 8)

#define AMD_141b_MMIO_HIGH(x)			(0x180 + (x) * 0x4)
#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK	GENMASK(7, 0)
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT	16
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK	GENMASK(23, 16)

/*
 * The PCI Firmware Spec, rev 3.2, notes that ACPI should optionally allow
 * configuring host bridge windows using the _PRS and _SRS methods.
 *
 * But this is rarely implemented, so we manually enable a large 64-bit BAR
 * for PCIe devices on AMD Family 15h (Models 00h-1fh, 30h-3fh, 60h-7fh)
 * processors here.
 */
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
	static const char *name = "PCI Bus 0000:00";
	struct resource *res, *conflict;
	u32 base, limit, high;
	struct pci_dev *other;
	unsigned int i;

	if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
		return;

	/* Check that we are the only device of that type */
	other = pci_get_device(dev->vendor, dev->device, NULL);
	if (other != dev ||
	    (other = pci_get_device(dev->vendor, dev->device, other))) {
		/* This is a multi-socket system, don't touch it for now */
		pci_dev_put(other);
		return;
	}

	for (i = 0; i < 8; i++) {
		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);

		/* Is this slot free? */
		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
			      AMD_141b_MMIO_BASE_WE_MASK)))
			break;

		base >>= 8;
		base |= high << 24;

		/* Abort if a slot already configures a 64-bit BAR. */
		if (base > 0x10000)
			return;
	}
	if (i == 8)
		return;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;

	/*
	 * Allocate a 256GB window directly below the 0xfd00000000 hardware
	 * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6).
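	 *
	 * For this window, the register math at the end of the function
	 * works out to roughly: base = (0xbd00000000 >> 8) | RE | WE =
	 * 0xbd000003, limit = (0xfd00000000 >> 8) & GENMASK(31, 8) =
	 * 0xfd000000, and high = 0 since both addresses sit below bit 40
	 * (illustrative values derived from the code below).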
	 */
	res->name = name;
	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
		     IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
	res->start = 0xbd00000000ull;
	res->end = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		kfree(res);
		if (conflict->name != name)
			return;

		/* We are resuming from suspend; just reenable the window */
		res = conflict;
	} else {
		dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
			 res);
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pci_bus_add_resource(dev->bus, res, 0);
	}

	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);

	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);

#define RS690_LOWER_TOP_OF_DRAM2	0x30
#define RS690_LOWER_TOP_OF_DRAM2_VALID	0x1
#define RS690_UPPER_TOP_OF_DRAM2	0x31
#define RS690_HTIU_NB_INDEX		0xA8
#define RS690_HTIU_NB_INDEX_WR_ENABLE	0x100
#define RS690_HTIU_NB_DATA		0xAC

/*
 * Some BIOS implementations support RAM above 4GB, but do not configure the
 * PCI host to respond to bus master accesses for these addresses. These
 * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
 * works as expected for addresses below 4GB.
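 *
 * The TOP_OF_DRAM2 registers sit behind the HTIU index/data pair: the
 * register index is written to config offset 0xA8 (with bit 8 set to enable
 * writes) and the value is read or written back through offset 0xAC, which
 * is how the config accesses below are done.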
 *
 * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
 * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
 */
static void rs690_fix_64bit_dma(struct pci_dev *pdev)
{
	u32 val = 0;
	phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;

	if (top_of_dram <= (1ULL << 32))
		return;

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
			       RS690_LOWER_TOP_OF_DRAM2);
	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);

	if (val)
		return;

	pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
			       RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
			       RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
	pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
			       top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);

#endif

#ifdef CONFIG_AMD_NB

#define AMD_15B8_RCC_DEV2_EPF0_STRAP2					0x10136008
#define AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK	0x00000080L

static void quirk_clear_strap_no_soft_reset_dev2_f0(struct pci_dev *dev)
{
	u32 data;

	if (!amd_smn_read(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, &data)) {
		data &= ~AMD_15B8_RCC_DEV2_EPF0_STRAP2_NO_SOFT_RESET_DEV2_F0_MASK;
		if (amd_smn_write(0, AMD_15B8_RCC_DEV2_EPF0_STRAP2, data))
			pci_err(dev, "Failed to write data 0x%x\n", data);
	} else {
		pci_err(dev, "Failed to read data\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b8, quirk_clear_strap_no_soft_reset_dev2_f0);
#endif

/*
 * When returning from D3cold to D0, firmware on some Google Coral and Reef
 * family Chromebooks with Intel Apollo Lake SoC clobbers the headers of
 * both the L1 PM Substates capability and the previous capability for the
 * "Celeron N3350/Pentium N4200/Atom E3900 Series PCI Express Port B #1".
 *
 * Save those values at enumeration time and restore them at resume.
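 *
 * The save path below walks the standard PCI Express extended capability
 * list, which starts at config offset 0x100 (PCI_CFG_SPACE_SIZE) and chains
 * capabilities through the next-capability field of each header.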
 */
static u16 prev_cap, l1ss_cap;
static u32 prev_header, l1ss_header;

static void chromeos_save_apl_pci_l1ss_capability(struct pci_dev *dev)
{
	int pos = PCI_CFG_SPACE_SIZE, prev = 0;
	u32 header, pheader = 0;

	while (pos) {
		pci_read_config_dword(dev, pos, &header);
		if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_L1SS) {
			prev_cap = prev;
			prev_header = pheader;
			l1ss_cap = pos;
			l1ss_header = header;
			return;
		}

		prev = pos;
		pheader = header;
		pos = PCI_EXT_CAP_NEXT(header);
	}
}

static void chromeos_fixup_apl_pci_l1ss_capability(struct pci_dev *dev)
{
	u32 header;

	if (!prev_cap || !prev_header || !l1ss_cap || !l1ss_header)
		return;

	/* Fixup the header of the L1SS Capability if it is missing */
	pci_read_config_dword(dev, l1ss_cap, &header);
	if (header != l1ss_header) {
		pci_write_config_dword(dev, l1ss_cap, l1ss_header);
		pci_info(dev, "restore L1SS Capability header (was %#010x now %#010x)\n",
			 header, l1ss_header);
	}

	/* Fixup the link to the L1SS Capability if it is missing */
	pci_read_config_dword(dev, prev_cap, &header);
	if (header != prev_header) {
		pci_write_config_dword(dev, prev_cap, prev_header);
		pci_info(dev, "restore previous Capability header (was %#010x now %#010x)\n",
			 header, prev_header);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_save_apl_pci_l1ss_capability);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x5ad6, chromeos_fixup_apl_pci_l1ss_capability);

/*
 * Disable D3cold on the Asus B1400 PCI-NVMe bridge.
 *
 * On this platform with VMD off, the NVMe device cannot successfully power
 * back on from D3cold. This appears to be an untested transition by the
 * vendor: Windows leaves the NVMe device and its parent bridge in D0 during
 * suspend.
 *
 * We disable D3cold on the parent bridge for simplicity, and because the
 * parent bridge and the NVMe device share the same power resource.
 *
 * This is only needed on BIOS versions before 308; the newer versions flip
 * StorageD3Enable from 1 to 0.
 */
static const struct dmi_system_id asus_nvme_broken_d3cold_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.304"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.305"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.306"),
		},
	},
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BIOS_VERSION, "B1400CEAE.307"),
		},
	},
	{}
};

static void asus_disable_nvme_d3cold(struct pci_dev *pdev)
{
	if (dmi_check_system(asus_nvme_broken_d3cold_table) > 0)
		pci_d3cold_disable(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x9a09, asus_disable_nvme_d3cold);

#ifdef CONFIG_SUSPEND
/*
 * Root Ports on some AMD SoCs advertise PME_Support for D3hot and D3cold, but
 * if the SoC is put into a hardware sleep state by the amd-pmc driver, the
 * Root Ports don't generate wakeup interrupts for USB devices.
 *
 * When suspending, remove D3hot and D3cold from the PME_Support advertised
 * by the Root Port so we don't use those states if we're expecting wakeup
 * interrupts.
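 *
 * pme_support is the kernel's cached copy of the PME_Support field from the
 * PMC register, so clearing bits here only changes which states the PCI core
 * treats as PME-capable; no hardware register is written.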
 *
 * Restore the advertised PME_Support when resuming.
 */
static void amd_rp_pme_suspend(struct pci_dev *dev)
{
	struct pci_dev *rp;

	/*
	 * PM_SUSPEND_ON means we're doing runtime suspend, which means
	 * amd-pmc will not be involved so PMEs during D3 work as advertised.
	 *
	 * The PMEs *do* work if amd-pmc doesn't put the SoC in the hardware
	 * sleep state, but we assume amd-pmc is always present.
	 */
	if (pm_suspend_target_state == PM_SUSPEND_ON)
		return;

	rp = pcie_find_root_port(dev);
	if (!rp || !rp->pm_cap)
		return;

	rp->pme_support &= ~((PCI_PM_CAP_PME_D3hot | PCI_PM_CAP_PME_D3cold) >>
			     PCI_PM_CAP_PME_SHIFT);
	dev_info_once(&rp->dev, "quirk: disabling D3cold for suspend\n");
}

static void amd_rp_pme_resume(struct pci_dev *dev)
{
	struct pci_dev *rp;
	u16 pmc;

	rp = pcie_find_root_port(dev);
	if (!rp || !rp->pm_cap)
		return;

	pci_read_config_word(rp, rp->pm_cap + PCI_PM_PMC, &pmc);
	rp->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc);
}
/* Rembrandt (yellow_carp) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162e, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x162f, amd_rp_pme_resume);
/* Phoenix (pink_sardine) */
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1668, amd_rp_pme_resume);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1669, amd_rp_pme_resume);
#endif /* CONFIG_SUSPEND */