/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
 * Copyright (c) 2000, BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/fcntl.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>

#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
#include <machine/intr_machdep.h>
#endif

#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <dev/usb/controller/xhcireg.h>
#include <dev/usb/controller/ehcireg.h>
#include <dev/usb/controller/ohcireg.h>
#include <dev/usb/controller/uhcireg.h>

#include "pcib_if.h"
#include "pci_if.h"

#define	PCIR_IS_BIOS(cfg, reg)						\
	(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) ||	\
	 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))

static int		pci_has_quirk(uint32_t devid, int quirk);
static pci_addr_t	pci_mapbase(uint64_t mapreg);
static const char	*pci_maptype(uint64_t mapreg);
static int		pci_mapsize(uint64_t testval);
static int		pci_maprange(uint64_t mapreg);
static pci_addr_t	pci_rombase(uint64_t mapreg);
static int		pci_romsize(uint64_t testval);
static void		pci_fixancient(pcicfgregs *cfg);
static int		pci_printf(pcicfgregs *cfg, const char *fmt, ...);

static int		pci_porten(device_t dev);
static int		pci_memen(device_t dev);
static void		pci_assign_interrupt(device_t bus, device_t dev,
			    int force_route);
static int		pci_add_map(device_t bus, device_t dev, int reg,
			    struct resource_list *rl, int force, int prefetch);
static int		pci_probe(device_t dev);
static int		pci_attach(device_t dev);
#ifdef PCI_RES_BUS
static int		pci_detach(device_t dev);
#endif
static void		pci_load_vendor_data(void);
static int		pci_describe_parse_line(char **ptr, int *vendor,
			    int *device, char **desc);
static char		*pci_describe_device(device_t dev);
static int		pci_modevent(module_t mod, int what, void *arg);
static void		pci_hdrtypedata(device_t pcib, int b, int s, int f,
			    pcicfgregs *cfg);
static void		pci_read_cap(device_t pcib, pcicfgregs *cfg);
static int		pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
			    int reg, uint32_t *data);
#if 0
static int		pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
			    int reg, uint32_t data);
#endif
static void		pci_read_vpd(device_t pcib, pcicfgregs *cfg);
static void		pci_mask_msix(device_t dev, u_int index);
static void		pci_unmask_msix(device_t dev, u_int index);
static int		pci_msi_blacklisted(void);
static int		pci_msix_blacklisted(void);
static void		pci_resume_msi(device_t dev);
static void		pci_resume_msix(device_t dev);
static int		pci_remap_intr_method(device_t bus, device_t dev,
			    u_int irq);

static uint16_t		pci_get_rid_method(device_t dev, device_t child);

static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
#ifdef PCI_RES_BUS
	DEVMETHOD(device_detach,	pci_detach),
#else
	DEVMETHOD(device_detach,	bus_generic_detach),
#endif
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,	pci_get_dma_tag),
	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_adjust_resource,	bus_generic_adjust_resource),
	DEVMETHOD(bus_release_resource,	pci_release_resource),
	DEVMETHOD(bus_activate_resource, pci_activate_resource),
	DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
	DEVMETHOD(bus_child_detached,	pci_child_detached),
	DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
	DEVMETHOD(bus_child_location_str, pci_child_location_str_method),
	DEVMETHOD(bus_remap_intr,	pci_remap_intr_method),
	DEVMETHOD(bus_suspend_child,	pci_suspend_child),
	DEVMETHOD(bus_resume_child,	pci_resume_child),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_vpd_ident,	pci_get_vpd_ident_method),
	DEVMETHOD(pci_get_vpd_readonly,	pci_get_vpd_readonly_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_cap,		pci_find_cap_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_find_htcap,	pci_find_htcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_enable_msi,	pci_enable_msi_method),
	DEVMETHOD(pci_enable_msix,	pci_enable_msix_method),
	DEVMETHOD(pci_disable_msi,	pci_disable_msi_method),
	DEVMETHOD(pci_remap_msix,	pci_remap_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),
	DEVMETHOD(pci_get_rid,		pci_get_rid_method),
	DEVMETHOD(pci_child_added,	pci_child_added_method),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));

static devclass_t pci_devclass;
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL);
MODULE_VERSION(pci, 1);

static char	*pci_vendordata;
static size_t	pci_vendordata_size;

struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* Neither MSI nor MSI-X work */
#define	PCI_QUIRK_ENABLE_MSI_VM	3 /* Older chipset in VM where MSI works */
#define	PCI_QUIRK_UNMAP_REG	4 /* Ignore PCI map register */
#define	PCI_QUIRK_DISABLE_MSIX	5 /* MSI-X doesn't work */
#define	PCI_QUIRK_MSI_INTX_BUG	6 /* PCIM_CMD_INTxDIS disables MSI */
	int	arg1;
	int	arg2;
};

static const struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90,	0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90,	0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90,	0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI-X allocation doesn't work properly for devices passed through
	 * by VMware up to at least ESXi 5.1.
	 */
	{ 0x079015ad, PCI_QUIRK_DISABLE_MSIX,	0,	0 }, /* PCI/PCI-X */
	{ 0x07a015ad, PCI_QUIRK_DISABLE_MSIX,	0,	0 }, /* PCIe */

	/*
	 * Some virtualization environments emulate an older chipset
	 * but support MSI just fine.  QEMU uses the Intel 82440.
	 */
	{ 0x12378086, PCI_QUIRK_ENABLE_MSI_VM,	0,	0 },

	/*
	 * HPET MMIO base address may appear in BAR1 for AMD SB600 SMBus
	 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
	 * It prevents us from attaching hpet(4) when the bit is unset.
	 * Note this quirk only affects SB600 revision A13 and earlier.
	 * For SB600 A21 and later, firmware must set the bit to hide it.
	 * For SB700 and later, it is unused and hardcoded to zero.
	 */
	{ 0x43851002, PCI_QUIRK_UNMAP_REG,	0x14,	0 },

	/*
	 * Atheros AR8161/AR8162/E2200 Ethernet controllers have a bug
	 * where the MSI interrupt does not assert if the PCIM_CMD_INTxDIS
	 * bit of the command register is set.
	 */
	{ 0x10911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0xE0911969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },
	{ 0x10901969, PCI_QUIRK_MSI_INTX_BUG,	0,	0 },

	/*
	 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
	 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
	 */
	{ 0x166814e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5714 */
	{ 0x166914e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5714S */
	{ 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5780 */
	{ 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5780S */
	{ 0x167814e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5715 */
	{ 0x167914e4, PCI_QUIRK_MSI_INTX_BUG,	0,	0 }, /* BCM5715S */

	{ 0 }
};
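/*
 * Note on the table above (summary, not in the original comments): the
 * devid field packs the 16-bit device ID into the high word and the
 * 16-bit vendor ID into the low word, matching what pci_get_devid()
 * returns; e.g. 0x71138086 is vendor 0x8086 (Intel), device 0x7113
 * (82371AB).
 */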
/* map register information */
#define	PCI_MAPMEM	0x01	/* memory map */
#define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
#define	PCI_MAPPORT	0x04	/* port map */

struct devlist pci_devq;
uint32_t pci_generation;
uint32_t pci_numdevs = 0;
static int pcie_chipset, pcix_chipset;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");

static int pci_enable_io_modes = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
    &pci_enable_io_modes, 1,
    "Enable I/O and memory bits in the config register.  Some BIOSes do not\n\
enable these bits correctly.  We'd like to do this all the time, but there\n\
are some peripherals that this causes problems with.");

static int pci_do_realloc_bars = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
    &pci_do_realloc_bars, 0,
    "Attempt to allocate a new range for any BARs whose original "
    "firmware-assigned ranges fail to allocate during the initial device scan.");

static int pci_do_power_nodriver = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
    &pci_do_power_nodriver, 0,
    "Place a function into D3 state when no driver attaches to it.  0 means\n\
disable.  1 means conservatively place devices into D3 state.  2 means\n\
aggressively place devices into D3 state.  3 means put absolutely everything\n\
in D3 state.");

int pci_do_power_resume = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
    &pci_do_power_resume, 1,
    "Transition from D3 -> D0 on resume.");

int pci_do_power_suspend = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
    &pci_do_power_suspend, 1,
    "Transition from D0 -> D3 on suspend.");

static int pci_do_msi = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
    "Enable support for MSI interrupts");

static int pci_do_msix = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
    "Enable support for MSI-X interrupts");

static int pci_honor_msi_blacklist = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
    &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");

#if defined(__i386__) || defined(__amd64__)
static int pci_usb_takeover = 1;
#else
static int pci_usb_takeover = 0;
#endif
SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
    &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\
Disable this if you depend on BIOS emulation of USB devices, that is,\n\
you use USB devices (like keyboard or mouse) but do not load USB drivers.");

static int pci_clear_bars;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
    "Ignore firmware-assigned resources for BARs.");

#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static int pci_clear_buses;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
    "Ignore firmware-assigned bus numbers.");
#endif

static int pci_enable_ari = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
    0, "Enable support for PCIe Alternative RID Interpretation");

static int
pci_has_quirk(uint32_t devid, int quirk)
{
	const struct pci_quirk *q;

	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == devid && q->type == quirk)
			return (1);
	}
	return (0);
}

/* Find a device_t by bus/slot/function in domain 0 */

device_t
pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
{

	return (pci_find_dbsf(0, bus, slot, func));
}

/* Find a device_t by domain/bus/slot/function */

device_t
pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
{
	struct pci_devinfo *dinfo;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if ((dinfo->cfg.domain == domain) &&
		    (dinfo->cfg.bus == bus) &&
		    (dinfo->cfg.slot == slot) &&
		    (dinfo->cfg.func == func)) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}

/* Find a device_t by vendor/device ID */

device_t
pci_find_device(uint16_t vendor, uint16_t device)
{
	struct pci_devinfo *dinfo;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if ((dinfo->cfg.vendor == vendor) &&
		    (dinfo->cfg.device == device)) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}

device_t
pci_find_class(uint8_t class, uint8_t subclass)
{
	struct pci_devinfo *dinfo;

	STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
		if (dinfo->cfg.baseclass == class &&
		    dinfo->cfg.subclass == subclass) {
			return (dinfo->cfg.dev);
		}
	}

	return (NULL);
}
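/*
 * Example of the lookups above (the IDs are illustrative):
 * pci_find_device(0x8086, 0x10d3) returns the first attached device
 * matching that Intel vendor/device ID pair, and
 * pci_find_class(PCIC_DISPLAY, PCIS_DISPLAY_VGA) returns the first
 * VGA-class display device; all of these return NULL when no attached
 * device matches.
 */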
static int
pci_printf(pcicfgregs *cfg, const char *fmt, ...)
{
	va_list ap;
	int retval;

	retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
	    cfg->func);
	va_start(ap, fmt);
	retval += vprintf(fmt, ap);
	va_end(ap);
	return (retval);
}

/* return base address of memory or port map */

static pci_addr_t
pci_mapbase(uint64_t mapreg)
{

	if (PCI_BAR_MEM(mapreg))
		return (mapreg & PCIM_BAR_MEM_BASE);
	else
		return (mapreg & PCIM_BAR_IO_BASE);
}

/* return map type of memory or port map */

static const char *
pci_maptype(uint64_t mapreg)
{

	if (PCI_BAR_IO(mapreg))
		return ("I/O Port");
	if (mapreg & PCIM_BAR_MEM_PREFETCH)
		return ("Prefetchable Memory");
	return ("Memory");
}

/* return log2 of map size decoded for memory or port map */

static int
pci_mapsize(uint64_t testval)
{
	int ln2size;

	testval = pci_mapbase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}

/* return base address of device ROM */

static pci_addr_t
pci_rombase(uint64_t mapreg)
{

	return (mapreg & PCIM_BIOS_ADDR_MASK);
}

/* return log2 of map size decoded for device ROM */

static int
pci_romsize(uint64_t testval)
{
	int ln2size;

	testval = pci_rombase(testval);
	ln2size = 0;
	if (testval != 0) {
		while ((testval & 1) == 0)
		{
			ln2size++;
			testval >>= 1;
		}
	}
	return (ln2size);
}

/* return log2 of address range supported by map register */

static int
pci_maprange(uint64_t mapreg)
{
	int ln2range = 0;

	if (PCI_BAR_IO(mapreg))
		ln2range = 32;
	else
		switch (mapreg & PCIM_BAR_MEM_TYPE) {
		case PCIM_BAR_MEM_32:
			ln2range = 32;
			break;
		case PCIM_BAR_MEM_1MB:
			ln2range = 20;
			break;
		case PCIM_BAR_MEM_64:
			ln2range = 64;
			break;
		}
	return (ln2range);
}
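/*
 * Worked example for the helpers above (illustrative values): sizing a
 * BAR by writing all 1s and reading back 0xfffff000 gives
 * pci_mapbase() == 0xfffff000, so pci_mapsize() counts 12 low zero
 * bits and the BAR decodes 1 << 12 == 4KB.  pci_maprange() reports
 * whether the BAR can be placed in a 20-, 32- or 64-bit address range;
 * a 64-bit memory BAR consumes the following map register as well.
 */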
/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */

static void
pci_fixancient(pcicfgregs *cfg)
{
	if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return;

	/* PCI to PCI bridges use header type 1 */
	if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
		cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
}

/* extract header type specific config data */

static void
pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		cfg->subvendor	= REG(PCIR_SUBVEND_0, 2);
		cfg->subdevice	= REG(PCIR_SUBDEV_0, 2);
		cfg->nummaps	= PCI_MAXMAPS_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		cfg->nummaps	= PCI_MAXMAPS_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		cfg->subvendor	= REG(PCIR_SUBVEND_2, 2);
		cfg->subdevice	= REG(PCIR_SUBDEV_2, 2);
		cfg->nummaps	= PCI_MAXMAPS_2;
		break;
	}
#undef REG
}

/* read configuration header into pcicfgregs structure */
struct pci_devinfo *
pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) {
		devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
		if (devlist_entry == NULL)
			return (NULL);

		cfg = &devlist_entry->cfg;

		cfg->domain		= d;
		cfg->bus		= b;
		cfg->slot		= s;
		cfg->func		= f;
		cfg->vendor		= REG(PCIR_VENDOR, 2);
		cfg->device		= REG(PCIR_DEVICE, 2);
		cfg->cmdreg		= REG(PCIR_COMMAND, 2);
		cfg->statreg		= REG(PCIR_STATUS, 2);
		cfg->baseclass		= REG(PCIR_CLASS, 1);
		cfg->subclass		= REG(PCIR_SUBCLASS, 1);
		cfg->progif		= REG(PCIR_PROGIF, 1);
		cfg->revid		= REG(PCIR_REVID, 1);
		cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer		= REG(PCIR_LATTIMER, 1);
		cfg->intpin		= REG(PCIR_INTPIN, 1);
		cfg->intline		= REG(PCIR_INTLINE, 1);

		cfg->mingnt		= REG(PCIR_MINGNT, 1);
		cfg->maxlat		= REG(PCIR_MAXLAT, 1);

		cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype		&= ~PCIM_MFDEV;
		STAILQ_INIT(&cfg->maps);

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
			pci_read_cap(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
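/*
 * Background for the walker below (standard PCI capability layout, not
 * FreeBSD-specific): when PCIM_STATUS_CAPPRESENT is set in the status
 * register, a singly linked list of capabilities hangs off the
 * capability pointer register.  Each entry starts with an 8-bit
 * capability ID followed by an 8-bit offset to the next entry, and the
 * list is terminated by a next pointer of 0.
 */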
static void
pci_read_cap(device_t pcib, pcicfgregs *cfg)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
#define	WREG(n, v, w)	PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
	uint64_t addr;
#endif
	uint32_t val;
	int ptr, nextptr, ptrptr;

	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
		ptrptr = PCIR_CAP_PTR;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		ptrptr = PCIR_CAP_PTR_2;	/* cardbus capabilities ptr */
		break;
	default:
		return;		/* no extended capabilities support */
	}
	nextptr = REG(ptrptr, 1);	/* sanity check? */

	/*
	 * Read capability entries.
	 */
	while (nextptr != 0) {
		/* Sanity check */
		if (nextptr > 255) {
			printf("illegal PCI extended capability offset %d\n",
			    nextptr);
			return;
		}
		/* Find the next entry */
		ptr = nextptr;
		nextptr = REG(ptr + PCICAP_NEXTPTR, 1);

		/* Process this entry */
		switch (REG(ptr + PCICAP_ID, 1)) {
		case PCIY_PMG:		/* PCI power management */
			if (cfg->pp.pp_cap == 0) {
				cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
				cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
				cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
				if ((nextptr - ptr) > PCIR_POWER_DATA)
					cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
			}
			break;
		case PCIY_HT:		/* HyperTransport */
			/* Determine HT-specific capability type. */
			val = REG(ptr + PCIR_HT_COMMAND, 2);

			if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
				cfg->ht.ht_slave = ptr;

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
			switch (val & PCIM_HTCMD_CAP_MASK) {
			case PCIM_HTCAP_MSI_MAPPING:
				if (!(val & PCIM_HTCMD_MSI_FIXED)) {
					/* Sanity check the mapping window. */
					addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
					    4);
					addr <<= 32;
					addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
					    4);
					if (addr != MSI_INTEL_ADDR_BASE)
						device_printf(pcib,
	    "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    (long long)addr);
				} else
					addr = MSI_INTEL_ADDR_BASE;

				cfg->ht.ht_msimap = ptr;
				cfg->ht.ht_msictrl = val;
				cfg->ht.ht_msiaddr = addr;
				break;
			}
#endif
			break;
		case PCIY_MSI:		/* PCI MSI */
			cfg->msi.msi_location = ptr;
			cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
			cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
			    PCIM_MSICTRL_MMC_MASK)>>1);
			break;
		case PCIY_MSIX:		/* PCI MSI-X */
			cfg->msix.msix_location = ptr;
			cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
			cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
			    PCIM_MSIXCTRL_TABLE_SIZE) + 1;
			val = REG(ptr + PCIR_MSIX_TABLE, 4);
			cfg->msix.msix_table_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
			val = REG(ptr + PCIR_MSIX_PBA, 4);
			cfg->msix.msix_pba_bar = PCIR_BAR(val &
			    PCIM_MSIX_BIR_MASK);
			cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
			break;
		case PCIY_VPD:		/* PCI Vital Product Data */
			cfg->vpd.vpd_reg = ptr;
			break;
		case PCIY_SUBVENDOR:
			/* Should always be true. */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE) {
				val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
				cfg->subvendor = val & 0xffff;
				cfg->subdevice = val >> 16;
			}
			break;
		case PCIY_PCIX:		/* PCI-X */
			/*
			 * Assume we have a PCI-X chipset if we have
			 * at least one PCI-PCI bridge with a PCI-X
			 * capability.  Note that some systems with
			 * PCI-express or HT chipsets might match on
			 * this check as well.
			 */
			if ((cfg->hdrtype & PCIM_HDRTYPE) ==
			    PCIM_HDRTYPE_BRIDGE)
				pcix_chipset = 1;
			cfg->pcix.pcix_location = ptr;
			break;
		case PCIY_EXPRESS:	/* PCI-express */
			/*
			 * Assume we have a PCI-express chipset if we have
			 * at least one PCI-express device.
			 */
			pcie_chipset = 1;
			cfg->pcie.pcie_location = ptr;
			val = REG(ptr + PCIER_FLAGS, 2);
			cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
			break;
		default:
			break;
		}
	}

#if defined(__powerpc__)
	/*
	 * Enable the MSI mapping window for all HyperTransport
	 * slaves.  PCI-PCI bridges have their windows enabled via
	 * PCIB_MAP_MSI().
	 */
	if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
	    !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
		device_printf(pcib,
	    "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
		    2);
	}
#endif
	/* REG and WREG carry through for use by the next functions */
}

/*
 * PCI Vital Product Data
 */

#define	PCI_VPD_TIMEOUT	1000000

static int
pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);

	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}
	*data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));

	return (0);
}

#if 0
static int
pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
{
	int count = PCI_VPD_TIMEOUT;

	KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));

	WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
	WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
	while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
		if (--count < 0)
			return (ENXIO);
		DELAY(1);	/* limit looping */
	}

	return (0);
}
#endif

#undef PCI_VPD_TIMEOUT

struct vpd_readstate {
	device_t	pcib;
	pcicfgregs	*cfg;
	uint32_t	val;
	int		bytesinval;
	int		off;
	uint8_t		cksum;
};

static int
vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
{
	uint32_t reg;
	uint8_t byte;

	if (vrs->bytesinval == 0) {
		if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
			return (ENXIO);
		vrs->val = le32toh(reg);
		vrs->off += 4;
		byte = vrs->val & 0xff;
		vrs->bytesinval = 3;
	} else {
		vrs->val = vrs->val >> 8;
		byte = vrs->val & 0xff;
		vrs->bytesinval--;
	}

	vrs->cksum += byte;
	*data = byte;
	return (0);
}
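/*
 * Summary of the state machine in pci_read_vpd() below (derived from
 * the code, for orientation): state 0 parses a resource item header
 * and dispatches on its name; state 1 copies the Identifier String;
 * states 2 and 3 parse VPD-R keyword headers and values; states 5 and
 * 6 do the same for VPD-W; state 4 skips over an item.  A state of -1
 * means a normal end, -2 an I/O error, and the "RV" keyword's data
 * makes the running checksum of all bytes read so far sum to zero when
 * the VPD is valid.
 */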
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					pci_printf(cfg,
					    "invalid VPD data, remain %#x\n",
					    remain);
				}
				name = byte & 0x7f;
			} else {
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = malloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].len = dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				pci_printf(cfg, "bad keyword length: %d\n",
				    dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				cfg->vpd.vpd_ros[off].value = malloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = malloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						pci_printf(cfg,
					    "bad VPD cksum, remain %hhu\n",
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			pci_printf(cfg, "invalid state: %d\n", state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		pci_printf(cfg, "failed to read VPD data.\n");
		if (cfg->vpd.vpd_ident != NULL) {
			free(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}

int
pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;

	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
		pci_read_vpd(device_get_parent(dev), cfg);

	*identptr = cfg->vpd.vpd_ident;

	if (*identptr == NULL)
		return (ENXIO);

	return (0);
}

int
pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
    const char **vptr)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	int i;

	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
		pci_read_vpd(device_get_parent(dev), cfg);

	for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
		if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
		    sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
			*vptr = cfg->vpd.vpd_ros[i].value;
			return (0);
		}

	*vptr = NULL;
	return (ENXIO);
}

struct pcicfg_vpd *
pci_fetch_vpd_list(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;

	if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
		pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
	return (&cfg->vpd);
}
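/*
 * Illustrative consumer of the VPD accessors above (hypothetical
 * driver code; "PN" is the standard VPD part-number keyword):
 *
 *	const char *pn;
 *
 *	if (pci_get_vpd_readonly(dev, "PN", &pn) == 0)
 *		device_printf(dev, "part number: %s\n", pn);
 */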
/*
 * Find the requested HyperTransport capability and return the offset
 * in configuration space via the pointer provided.  The function
 * returns 0 on success and an error code otherwise.
 */
int
pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
{
	int ptr, error;
	uint16_t val;

	error = pci_find_cap(child, PCIY_HT, &ptr);
	if (error)
		return (error);

	/*
	 * Traverse the capabilities list checking each HT capability
	 * to see if it matches the requested HT capability.
	 */
	while (ptr != 0) {
		val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
		if (capability == PCIM_HTCAP_SLAVE ||
		    capability == PCIM_HTCAP_HOST)
			val &= 0xe000;
		else
			val &= PCIM_HTCMD_CAP_MASK;
		if (val == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}

		/* Skip to the next HT capability. */
		while (ptr != 0) {
			ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
			if (pci_read_config(child, ptr + PCICAP_ID, 1) ==
			    PCIY_HT)
				break;
		}
	}
	return (ENOENT);
}

/*
 * Find the requested capability and return the offset in
 * configuration space via the pointer provided.  The function returns
 * 0 on success and an error code otherwise.
 */
int
pci_find_cap_method(device_t dev, device_t child, int capability,
    int *capreg)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	u_int32_t status;
	u_int8_t ptr;

	/*
	 * Check the CAP_LIST bit of the PCI status register first.
	 */
	status = pci_read_config(child, PCIR_STATUS, 2);
	if (!(status & PCIM_STATUS_CAPPRESENT))
		return (ENXIO);

	/*
	 * Determine the start pointer of the capabilities list.
	 */
	switch (cfg->hdrtype & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
	case PCIM_HDRTYPE_BRIDGE:
		ptr = PCIR_CAP_PTR;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		ptr = PCIR_CAP_PTR_2;
		break;
	default:
		/* XXX: panic? */
		return (ENXIO);		/* no extended capabilities support */
	}
	ptr = pci_read_config(child, ptr, 1);

	/*
	 * Traverse the capabilities list.
	 */
	while (ptr != 0) {
		if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}
		ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
	}

	return (ENOENT);
}

/*
 * Find the requested extended capability and return the offset in
 * configuration space via the pointer provided.  The function returns
 * 0 on success and an error code otherwise.
 */
int
pci_find_extcap_method(device_t dev, device_t child, int capability,
    int *capreg)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	uint32_t ecap;
	uint16_t ptr;

	/* Only supported for PCI-express devices. */
	if (cfg->pcie.pcie_location == 0)
		return (ENXIO);

	ptr = PCIR_EXTCAP;
	ecap = pci_read_config(child, ptr, 4);
	if (ecap == 0xffffffff || ecap == 0)
		return (ENOENT);
	for (;;) {
		if (PCI_EXTCAP_ID(ecap) == capability) {
			if (capreg != NULL)
				*capreg = ptr;
			return (0);
		}
		ptr = PCI_EXTCAP_NEXTPTR(ecap);
		if (ptr == 0)
			break;
		ecap = pci_read_config(child, ptr, 4);
	}

	return (ENOENT);
}

/*
 * Support for MSI-X message interrupts.
 */
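/*
 * Layout reminder for the routines below (per the PCI spec, not
 * FreeBSD-specific): each MSI-X table entry is 16 bytes -- message
 * address low at offset 0, address high at 4, message data at 8, and
 * vector control at 12 -- which is why the table BAR is indexed with
 * "index * 16" and the mask/unmask helpers touch offset 12 via
 * PCIM_MSIX_VCTRL_MASK.
 */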
void
pci_enable_msix_method(device_t dev, device_t child, u_int index,
    uint64_t address, uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16;
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(child, address);
}

void
pci_mask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_msgnum > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (!(val & PCIM_MSIX_VCTRL_MASK)) {
		val |= PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}

void
pci_unmask_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, val;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_table_offset + index * 16 + 12;
	val = bus_read_4(msix->msix_table_res, offset);
	if (val & PCIM_MSIX_VCTRL_MASK) {
		val &= ~PCIM_MSIX_VCTRL_MASK;
		bus_write_4(msix->msix_table_res, offset, val);
	}
}

int
pci_pending_msix(device_t dev, u_int index)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset, bit;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	offset = msix->msix_pba_offset + (index / 32) * 4;
	bit = 1 << index % 32;
	return (bus_read_4(msix->msix_pba_res, offset) & bit);
}

/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}

/*
 * Attempt to allocate *count MSI-X messages.  The actual number allocated is
 * returned in *count.  After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
 */
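/*
 * Typical driver-side usage of this method via the pci(9) wrappers
 * (sketch only; the surrounding driver code is hypothetical):
 *
 *	int count;
 *
 *	count = pci_msix_count(dev);
 *	if (count != 0 && pci_alloc_msix(dev, &count) == 0) {
 *		... "count" now holds the number of vectors granted;
 *		... the IRQs arrive as SYS_RES_IRQ rids 1..count.
 *	}
 */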
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI-X is blacklisted for this system, fail. */
	if (pci_msix_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error) {
			if (i == 0)
				return (error);
			break;
		}
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	actual = i;

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				printf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irq);
			printf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}

/*
 * By default, pci_alloc_msix() will assign the allocated IRQ
 * resources consecutively to the first N messages in the MSI-X table.
 * However, device drivers may want to use different layouts if they
 * either receive fewer messages than they asked for, or they wish to
 * populate the MSI-X table sparsely.  This method allows the driver
 * to specify what layout it wants.  It must be called after a
 * successful pci_alloc_msix() but before any of the associated
 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
 *
 * The 'vectors' array contains 'count' message vectors.  The array
 * maps directly to the MSI-X table in that index 0 in the array
 * specifies the vector for the first message in the MSI-X table, etc.
 * The vector value in each array index can either be 0 to indicate
 * that no vector should be assigned to a message slot, or it can be a
 * number from 1 to N (where N is the count returned from a
 * successful call to pci_alloc_msix()) to indicate which message
 * vector (IRQ) to be used for the corresponding message.
 *
 * On successful return, each message with a non-zero vector will have
 * an associated SYS_RES_IRQ whose rid is equal to the array index +
 * 1.  Additionally, if any of the IRQs allocated via the previous
 * call to pci_alloc_msix() are not used in the mapping, those IRQs
 * will be freed back to the system automatically.
 *
 * For example, suppose a driver has an MSI-X table with 6 messages and
 * asks for 6 messages, but pci_alloc_msix() only returns a count of
 * 3.  Call the three vectors allocated by pci_alloc_msix() A, B, and
 * C.  After the call to pci_alloc_msix(), the device will be set up to
 * have an MSI-X table of ABC--- (where - means no vector assigned).
 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
 * be freed back to the system.  This device will also have valid
 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
 *
 * In any case, the SYS_RES_IRQ rid X will always map to the message
 * at MSI-X table index X - 1 and will only be valid if a vector is
 * assigned to that table entry.
 */
int
pci_remap_msix_method(device_t dev, device_t child, int count,
    const u_int *vectors)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i, irq, j, *used;

	/*
	 * Have to have at least one message in the table but the
	 * table can't be bigger than the actual MSI-X table in the
	 * device.
	 */
	if (count == 0 || count > msix->msix_msgnum)
		return (EINVAL);

	/* Sanity check the vectors. */
	for (i = 0; i < count; i++)
		if (vectors[i] > msix->msix_alloc)
			return (EINVAL);

	/*
	 * Make sure there aren't any holes in the vectors to be used.
	 * It's a big pain to support it, and it doesn't really make
	 * sense anyway.  Also, at least one vector must be used.
	 */
	used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
	    M_ZERO);
	for (i = 0; i < count; i++)
		if (vectors[i] != 0)
			used[vectors[i] - 1] = 1;
	for (i = 0; i < msix->msix_alloc - 1; i++)
		if (used[i] == 0 && used[i + 1] == 1) {
			free(used, M_DEVBUF);
			return (EINVAL);
		}
	if (used[0] != 1) {
		free(used, M_DEVBUF);
		return (EINVAL);
	}

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0) {
			free(used, M_DEVBUF);
			return (EBUSY);
		}
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL) {
			free(used, M_DEVBUF);
			return (EBUSY);
		}
	}

	/* Free the existing resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}

	/*
	 * Build the new virtual table keeping track of which vectors are
	 * used.
	 */
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < count; i++)
		msix->msix_table[i].mte_vector = vectors[i];
	msix->msix_table_len = count;

	/* Free any unused IRQs and resize the vectors array if necessary. */
	j = msix->msix_alloc - 1;
	if (used[j] == 0) {
		struct msix_vector *vec;

		while (used[j] == 0) {
			PCIB_RELEASE_MSIX(device_get_parent(dev), child,
			    msix->msix_vectors[j].mv_irq);
			j--;
		}
		vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
		    M_WAITOK);
		bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
		    (j + 1));
		free(msix->msix_vectors, M_DEVBUF);
		msix->msix_vectors = vec;
		msix->msix_alloc = j + 1;
	}
	free(used, M_DEVBUF);

	/* Map the IRQs onto the rids. */
	for (i = 0; i < count; i++) {
		if (vectors[i] == 0)
			continue;
		irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}

	if (bootverbose) {
		device_printf(child, "Remapped MSI-X IRQs as: ");
		for (i = 0; i < count; i++) {
			if (i != 0)
				printf(", ");
			if (vectors[i] == 0)
				printf("---");
			else
				printf("%d",
				    msix->msix_vectors[vectors[i] - 1].mv_irq);
		}
		printf("\n");
	}

	return (0);
}

static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}

/*
 * Return the maximum number of MSI-X messages this device supports.
 * Basically, assuming the MD code can alloc messages, this function
 * should return the maximum value that pci_alloc_msix() can return.
 * Thus, it is subject to the tunables, etc.
 */
int
pci_msix_count_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;

	if (pci_do_msix && msix->msix_location != 0)
		return (msix->msix_msgnum);
	return (0);
}

/*
 * HyperTransport MSI mapping control
 */
void
pci_ht_map_msi(device_t dev, uint64_t addr)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_ht *ht = &dinfo->cfg.ht;

	if (!ht->ht_msimap)
		return;

	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
	    ht->ht_msiaddr >> 20 == addr >> 20) {
		/* Enable MSI -> HT mapping. */
		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}

	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
		/* Disable MSI -> HT mapping. */
		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}
}

int
pci_get_max_read_req(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	int cap;
	uint16_t val;

	cap = dinfo->cfg.pcie.pcie_location;
	if (cap == 0)
		return (0);
	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
	val &= PCIEM_CTL_MAX_READ_REQUEST;
	val >>= 12;
	return (1 << (val + 7));
}

int
pci_set_max_read_req(device_t dev, int size)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	int cap;
	uint16_t val;

	cap = dinfo->cfg.pcie.pcie_location;
	if (cap == 0)
		return (0);
	if (size < 128)
		size = 128;
	if (size > 4096)
		size = 4096;
	size = (1 << (fls(size) - 1));
	val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
	val &= ~PCIEM_CTL_MAX_READ_REQUEST;
	val |= (fls(size) - 8) << 12;
	pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
	return (size);
}
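/*
 * Worked example for the encoding above: the PCIe Max_Read_Request_Size
 * field stores log2(size) - 7, so a field value of 0 means 128 bytes
 * and 2 means 512 bytes, which pci_get_max_read_req() recovers as
 * 1 << (val + 7).  Conversely, pci_set_max_read_req(dev, 4096) rounds
 * the size down to a power of 2 and stores fls(4096) - 8 == 5 in bits
 * 12-14 of the device control register.
 */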
*/ 1919 pci_ht_map_msi(child, address); 1920 } 1921 1922 void 1923 pci_disable_msi_method(device_t dev, device_t child) 1924 { 1925 struct pci_devinfo *dinfo = device_get_ivars(child); 1926 struct pcicfg_msi *msi = &dinfo->cfg.msi; 1927 1928 /* Disable MSI -> HT mapping. */ 1929 pci_ht_map_msi(child, 0); 1930 1931 /* Disable MSI in the control register. */ 1932 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; 1933 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, 1934 msi->msi_ctrl, 2); 1935 } 1936 1937 /* 1938 * Restore MSI registers during resume. If MSI is enabled then 1939 * restore the data and address registers in addition to the control 1940 * register. 1941 */ 1942 static void 1943 pci_resume_msi(device_t dev) 1944 { 1945 struct pci_devinfo *dinfo = device_get_ivars(dev); 1946 struct pcicfg_msi *msi = &dinfo->cfg.msi; 1947 uint64_t address; 1948 uint16_t data; 1949 1950 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { 1951 address = msi->msi_addr; 1952 data = msi->msi_data; 1953 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, 1954 address & 0xffffffff, 4); 1955 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { 1956 pci_write_config(dev, msi->msi_location + 1957 PCIR_MSI_ADDR_HIGH, address >> 32, 4); 1958 pci_write_config(dev, msi->msi_location + 1959 PCIR_MSI_DATA_64BIT, data, 2); 1960 } else 1961 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, 1962 data, 2); 1963 } 1964 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, 1965 2); 1966 } 1967 1968 static int 1969 pci_remap_intr_method(device_t bus, device_t dev, u_int irq) 1970 { 1971 struct pci_devinfo *dinfo = device_get_ivars(dev); 1972 pcicfgregs *cfg = &dinfo->cfg; 1973 struct resource_list_entry *rle; 1974 struct msix_table_entry *mte; 1975 struct msix_vector *mv; 1976 uint64_t addr; 1977 uint32_t data; 1978 int error, i, j; 1979 1980 /* 1981 * Handle MSI first. We try to find this IRQ among our list 1982 * of MSI IRQs. If we find it, we request updated address and 1983 * data registers and apply the results. 1984 */ 1985 if (cfg->msi.msi_alloc > 0) { 1986 1987 /* If we don't have any active handlers, nothing to do. */ 1988 if (cfg->msi.msi_handlers == 0) 1989 return (0); 1990 for (i = 0; i < cfg->msi.msi_alloc; i++) { 1991 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1992 i + 1); 1993 if (rle->start == irq) { 1994 error = PCIB_MAP_MSI(device_get_parent(bus), 1995 dev, irq, &addr, &data); 1996 if (error) 1997 return (error); 1998 pci_disable_msi(dev); 1999 dinfo->cfg.msi.msi_addr = addr; 2000 dinfo->cfg.msi.msi_data = data; 2001 pci_enable_msi(dev, addr, data); 2002 return (0); 2003 } 2004 } 2005 return (ENOENT); 2006 } 2007 2008 /* 2009 * For MSI-X, we check to see if we have this IRQ. If we do, 2010 * we request the updated mapping info. If that works, we go 2011 * through all the slots that use this IRQ and update them. 
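* Vectors with no active handlers are skipped; they will be
 * reprogrammed from the updated msix_vectors[] entry the next time
 * a handler is attached.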
2012 */ 2013 if (cfg->msix.msix_alloc > 0) { 2014 for (i = 0; i < cfg->msix.msix_alloc; i++) { 2015 mv = &cfg->msix.msix_vectors[i]; 2016 if (mv->mv_irq == irq) { 2017 error = PCIB_MAP_MSI(device_get_parent(bus), 2018 dev, irq, &addr, &data); 2019 if (error) 2020 return (error); 2021 mv->mv_address = addr; 2022 mv->mv_data = data; 2023 for (j = 0; j < cfg->msix.msix_table_len; j++) { 2024 mte = &cfg->msix.msix_table[j]; 2025 if (mte->mte_vector != i + 1) 2026 continue; 2027 if (mte->mte_handlers == 0) 2028 continue; 2029 pci_mask_msix(dev, j); 2030 pci_enable_msix(dev, j, addr, data); 2031 pci_unmask_msix(dev, j); 2032 } 2033 } 2034 } 2035 return (ENOENT); 2036 } 2037 2038 return (ENOENT); 2039 } 2040 2041 /* 2042 * Returns true if the specified device is blacklisted because MSI 2043 * doesn't work. 2044 */ 2045 int 2046 pci_msi_device_blacklisted(device_t dev) 2047 { 2048 2049 if (!pci_honor_msi_blacklist) 2050 return (0); 2051 2052 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI)); 2053 } 2054 2055 /* 2056 * Determine if MSI is blacklisted globally on this system. Currently, 2057 * we just check for blacklisted chipsets as represented by the 2058 * host-PCI bridge at device 0:0:0. In the future, it may become 2059 * necessary to check other system attributes, such as the kenv values 2060 * that give the motherboard manufacturer and model number. 2061 */ 2062 static int 2063 pci_msi_blacklisted(void) 2064 { 2065 device_t dev; 2066 2067 if (!pci_honor_msi_blacklist) 2068 return (0); 2069 2070 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ 2071 if (!(pcie_chipset || pcix_chipset)) { 2072 if (vm_guest != VM_GUEST_NO) { 2073 /* 2074 * Whitelist older chipsets in virtual 2075 * machines known to support MSI. 2076 */ 2077 dev = pci_find_bsf(0, 0, 0); 2078 if (dev != NULL) 2079 return (!pci_has_quirk(pci_get_devid(dev), 2080 PCI_QUIRK_ENABLE_MSI_VM)); 2081 } 2082 return (1); 2083 } 2084 2085 dev = pci_find_bsf(0, 0, 0); 2086 if (dev != NULL) 2087 return (pci_msi_device_blacklisted(dev)); 2088 return (0); 2089 } 2090 2091 /* 2092 * Returns true if the specified device is blacklisted because MSI-X 2093 * doesn't work. Note that this assumes that if MSI doesn't work, 2094 * MSI-X doesn't either. 2095 */ 2096 int 2097 pci_msix_device_blacklisted(device_t dev) 2098 { 2099 2100 if (!pci_honor_msi_blacklist) 2101 return (0); 2102 2103 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX)) 2104 return (1); 2105 2106 return (pci_msi_device_blacklisted(dev)); 2107 } 2108 2109 /* 2110 * Determine if MSI-X is blacklisted globally on this system. If MSI 2111 * is blacklisted, assume that MSI-X is as well. Check for additional 2112 * chipsets where MSI works but MSI-X does not. 2113 */ 2114 static int 2115 pci_msix_blacklisted(void) 2116 { 2117 device_t dev; 2118 2119 if (!pci_honor_msi_blacklist) 2120 return (0); 2121 2122 dev = pci_find_bsf(0, 0, 0); 2123 if (dev != NULL && pci_has_quirk(pci_get_devid(dev), 2124 PCI_QUIRK_DISABLE_MSIX)) 2125 return (1); 2126 2127 return (pci_msi_blacklisted()); 2128 } 2129 2130 /* 2131 * Attempt to allocate *count MSI messages. The actual number allocated is 2132 * returned in *count. After this function returns, each message will be 2133 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
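*
 * As an illustrative sketch (not part of this interface; a real
 * driver would typically fall back to INTx on failure), usage from a
 * driver looks like:
 *
 *	count = 4;
 *	if (pci_alloc_msi(dev, &count) == 0) {
 *		device_printf(dev, "using %d MSI messages\n", count);
 *		... allocate SYS_RES_IRQ resources with rids 1..count ...
 *	}
 *
 * where count may come back smaller than requested.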
2134 */ 2135 int 2136 pci_alloc_msi_method(device_t dev, device_t child, int *count) 2137 { 2138 struct pci_devinfo *dinfo = device_get_ivars(child); 2139 pcicfgregs *cfg = &dinfo->cfg; 2140 struct resource_list_entry *rle; 2141 int actual, error, i, irqs[32]; 2142 uint16_t ctrl; 2143 2144 /* Don't let count == 0 get us into trouble. */ 2145 if (*count == 0) 2146 return (EINVAL); 2147 2148 /* If rid 0 is allocated, then fail. */ 2149 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); 2150 if (rle != NULL && rle->res != NULL) 2151 return (ENXIO); 2152 2153 /* Already have allocated messages? */ 2154 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) 2155 return (ENXIO); 2156 2157 /* If MSI is blacklisted for this system, fail. */ 2158 if (pci_msi_blacklisted()) 2159 return (ENXIO); 2160 2161 /* MSI capability present? */ 2162 if (cfg->msi.msi_location == 0 || !pci_do_msi) 2163 return (ENODEV); 2164 2165 if (bootverbose) 2166 device_printf(child, 2167 "attempting to allocate %d MSI vectors (%d supported)\n", 2168 *count, cfg->msi.msi_msgnum); 2169 2170 /* Don't ask for more than the device supports. */ 2171 actual = min(*count, cfg->msi.msi_msgnum); 2172 2173 /* Don't ask for more than 32 messages. */ 2174 actual = min(actual, 32); 2175 2176 /* MSI requires power of 2 number of messages. */ 2177 if (!powerof2(actual)) 2178 return (EINVAL); 2179 2180 for (;;) { 2181 /* Try to allocate N messages. */ 2182 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual, 2183 actual, irqs); 2184 if (error == 0) 2185 break; 2186 if (actual == 1) 2187 return (error); 2188 2189 /* Try N / 2. */ 2190 actual >>= 1; 2191 } 2192 2193 /* 2194 * We now have N actual messages mapped onto SYS_RES_IRQ 2195 * resources in the irqs[] array, so add new resources 2196 * starting at rid 1. 2197 */ 2198 for (i = 0; i < actual; i++) 2199 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, 2200 irqs[i], irqs[i], 1); 2201 2202 if (bootverbose) { 2203 if (actual == 1) 2204 device_printf(child, "using IRQ %d for MSI\n", irqs[0]); 2205 else { 2206 int run; 2207 2208 /* 2209 * Be fancy and try to print contiguous runs 2210 * of IRQ values as ranges. 'run' is true if 2211 * we are in a range. 2212 */ 2213 device_printf(child, "using IRQs %d", irqs[0]); 2214 run = 0; 2215 for (i = 1; i < actual; i++) { 2216 2217 /* Still in a run? */ 2218 if (irqs[i] == irqs[i - 1] + 1) { 2219 run = 1; 2220 continue; 2221 } 2222 2223 /* Finish previous range. */ 2224 if (run) { 2225 printf("-%d", irqs[i - 1]); 2226 run = 0; 2227 } 2228 2229 /* Start new range. */ 2230 printf(",%d", irqs[i]); 2231 } 2232 2233 /* Unfinished range? */ 2234 if (run) 2235 printf("-%d", irqs[actual - 1]); 2236 printf(" for MSI\n"); 2237 } 2238 } 2239 2240 /* Update control register with actual count. */ 2241 ctrl = cfg->msi.msi_ctrl; 2242 ctrl &= ~PCIM_MSICTRL_MME_MASK; 2243 ctrl |= (ffs(actual) - 1) << 4; 2244 cfg->msi.msi_ctrl = ctrl; 2245 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); 2246 2247 /* Update counts of alloc'd messages. */ 2248 cfg->msi.msi_alloc = actual; 2249 cfg->msi.msi_handlers = 0; 2250 *count = actual; 2251 return (0); 2252 } 2253 2254 /* Release the MSI messages associated with this device. */ 2255 int 2256 pci_release_msi_method(device_t dev, device_t child) 2257 { 2258 struct pci_devinfo *dinfo = device_get_ivars(child); 2259 struct pcicfg_msi *msi = &dinfo->cfg.msi; 2260 struct resource_list_entry *rle; 2261 int error, i, irqs[32]; 2262 2263 /* Try MSI-X first. 
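pci_release_msix() returns ENODEV when no MSI-X messages are outstanding, in which case we fall through and release any plain MSI messages below.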
*/ 2264 error = pci_release_msix(dev, child); 2265 if (error != ENODEV) 2266 return (error); 2267 2268 /* Do we have any messages to release? */ 2269 if (msi->msi_alloc == 0) 2270 return (ENODEV); 2271 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); 2272 2273 /* Make sure none of the resources are allocated. */ 2274 if (msi->msi_handlers > 0) 2275 return (EBUSY); 2276 for (i = 0; i < msi->msi_alloc; i++) { 2277 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); 2278 KASSERT(rle != NULL, ("missing MSI resource")); 2279 if (rle->res != NULL) 2280 return (EBUSY); 2281 irqs[i] = rle->start; 2282 } 2283 2284 /* Update control register with 0 count. */ 2285 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), 2286 ("%s: MSI still enabled", __func__)); 2287 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; 2288 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, 2289 msi->msi_ctrl, 2); 2290 2291 /* Release the messages. */ 2292 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); 2293 for (i = 0; i < msi->msi_alloc; i++) 2294 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); 2295 2296 /* Update alloc count. */ 2297 msi->msi_alloc = 0; 2298 msi->msi_addr = 0; 2299 msi->msi_data = 0; 2300 return (0); 2301 } 2302 2303 /* 2304 * Return the maximum number of MSI messages this device supports. 2305 * Basically, assuming the MD code can alloc messages, this function 2306 * should return the maximum value that pci_alloc_msi() can return. 2307 * Thus, it is subject to the tunables, etc. 2308 */ 2309 int 2310 pci_msi_count_method(device_t dev, device_t child) 2311 { 2312 struct pci_devinfo *dinfo = device_get_ivars(child); 2313 struct pcicfg_msi *msi = &dinfo->cfg.msi; 2314 2315 if (pci_do_msi && msi->msi_location != 0) 2316 return (msi->msi_msgnum); 2317 return (0); 2318 } 2319 2320 /* free pcicfgregs structure and all dependent data structures */ 2321 2322 int 2323 pci_freecfg(struct pci_devinfo *dinfo) 2324 { 2325 struct devlist *devlist_head; 2326 struct pci_map *pm, *next; 2327 int i; 2328 2329 devlist_head = &pci_devq; 2330 2331 if (dinfo->cfg.vpd.vpd_reg) { 2332 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF); 2333 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++) 2334 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF); 2335 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF); 2336 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++) 2337 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF); 2338 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF); 2339 } 2340 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { 2341 free(pm, M_DEVBUF); 2342 } 2343 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); 2344 free(dinfo, M_DEVBUF); 2345 2346 /* increment the generation count */ 2347 pci_generation++; 2348 2349 /* we're losing one device */ 2350 pci_numdevs--; 2351 return (0); 2352 } 2353 2354 /* 2355 * PCI power management 2356 */ 2357 int 2358 pci_set_powerstate_method(device_t dev, device_t child, int state) 2359 { 2360 struct pci_devinfo *dinfo = device_get_ivars(child); 2361 pcicfgregs *cfg = &dinfo->cfg; 2362 uint16_t status; 2363 int result, oldstate, highest, delay; 2364 2365 if (cfg->pp.pp_cap == 0) 2366 return (EOPNOTSUPP); 2367 2368 /* 2369 * Optimize away a request that makes no state change. While it 2370 * would be OK to write to the hardware in theory, some devices have 2371 * shown odd behavior when going from D3 -> D3.
2372 */ 2373 oldstate = pci_get_powerstate(child); 2374 if (oldstate == state) 2375 return (0); 2376 2377 /* 2378 * The PCI power management specification states that after a state 2379 * transition between PCI power states, system software must 2380 * guarantee a minimal delay before the function accesses the device. 2381 * Compute the worst case delay that we need to guarantee before we 2382 * access the device. Many devices will be responsive much more 2383 * quickly than this delay, but there are some that don't respond 2384 * instantly to state changes. Transitions to/from D3 state require 2385 * 10ms, while D2 requires 200us, and D0/1 require none. The delay 2386 * is done below with DELAY rather than a sleeper function because 2387 * this function can be called from contexts where we cannot sleep. 2388 */ 2389 highest = (oldstate > state) ? oldstate : state; 2390 if (highest == PCI_POWERSTATE_D3) 2391 delay = 10000; 2392 else if (highest == PCI_POWERSTATE_D2) 2393 delay = 200; 2394 else 2395 delay = 0; 2396 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) 2397 & ~PCIM_PSTAT_DMASK; 2398 result = 0; 2399 switch (state) { 2400 case PCI_POWERSTATE_D0: 2401 status |= PCIM_PSTAT_D0; 2402 break; 2403 case PCI_POWERSTATE_D1: 2404 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) 2405 return (EOPNOTSUPP); 2406 status |= PCIM_PSTAT_D1; 2407 break; 2408 case PCI_POWERSTATE_D2: 2409 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) 2410 return (EOPNOTSUPP); 2411 status |= PCIM_PSTAT_D2; 2412 break; 2413 case PCI_POWERSTATE_D3: 2414 status |= PCIM_PSTAT_D3; 2415 break; 2416 default: 2417 return (EINVAL); 2418 } 2419 2420 if (bootverbose) 2421 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, 2422 state); 2423 2424 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); 2425 if (delay) 2426 DELAY(delay); 2427 return (0); 2428 } 2429 2430 int 2431 pci_get_powerstate_method(device_t dev, device_t child) 2432 { 2433 struct pci_devinfo *dinfo = device_get_ivars(child); 2434 pcicfgregs *cfg = &dinfo->cfg; 2435 uint16_t status; 2436 int result; 2437 2438 if (cfg->pp.pp_cap != 0) { 2439 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); 2440 switch (status & PCIM_PSTAT_DMASK) { 2441 case PCIM_PSTAT_D0: 2442 result = PCI_POWERSTATE_D0; 2443 break; 2444 case PCIM_PSTAT_D1: 2445 result = PCI_POWERSTATE_D1; 2446 break; 2447 case PCIM_PSTAT_D2: 2448 result = PCI_POWERSTATE_D2; 2449 break; 2450 case PCIM_PSTAT_D3: 2451 result = PCI_POWERSTATE_D3; 2452 break; 2453 default: 2454 result = PCI_POWERSTATE_UNKNOWN; 2455 break; 2456 } 2457 } else { 2458 /* No support, device is always at D0 */ 2459 result = PCI_POWERSTATE_D0; 2460 } 2461 return (result); 2462 } 2463 2464 /* 2465 * Some convenience functions for PCI device drivers. 
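*
 * For instance, a driver that performs DMA will typically call
 *
 *	pci_enable_busmaster(dev);
 *
 * from its attach routine (an illustrative sketch; whether it is
 * needed depends on the device) to set PCIM_CMD_BUSMASTEREN via
 * pci_set_command_bit() below.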
2466 */ 2467 2468 static __inline void 2469 pci_set_command_bit(device_t dev, device_t child, uint16_t bit) 2470 { 2471 uint16_t command; 2472 2473 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2474 command |= bit; 2475 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); 2476 } 2477 2478 static __inline void 2479 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) 2480 { 2481 uint16_t command; 2482 2483 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); 2484 command &= ~bit; 2485 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); 2486 } 2487 2488 int 2489 pci_enable_busmaster_method(device_t dev, device_t child) 2490 { 2491 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); 2492 return (0); 2493 } 2494 2495 int 2496 pci_disable_busmaster_method(device_t dev, device_t child) 2497 { 2498 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN); 2499 return (0); 2500 } 2501 2502 int 2503 pci_enable_io_method(device_t dev, device_t child, int space) 2504 { 2505 uint16_t bit; 2506 2507 switch(space) { 2508 case SYS_RES_IOPORT: 2509 bit = PCIM_CMD_PORTEN; 2510 break; 2511 case SYS_RES_MEMORY: 2512 bit = PCIM_CMD_MEMEN; 2513 break; 2514 default: 2515 return (EINVAL); 2516 } 2517 pci_set_command_bit(dev, child, bit); 2518 return (0); 2519 } 2520 2521 int 2522 pci_disable_io_method(device_t dev, device_t child, int space) 2523 { 2524 uint16_t bit; 2525 2526 switch(space) { 2527 case SYS_RES_IOPORT: 2528 bit = PCIM_CMD_PORTEN; 2529 break; 2530 case SYS_RES_MEMORY: 2531 bit = PCIM_CMD_MEMEN; 2532 break; 2533 default: 2534 return (EINVAL); 2535 } 2536 pci_clear_command_bit(dev, child, bit); 2537 return (0); 2538 } 2539 2540 /* 2541 * New style pci driver. Parent device is either a pci-host-bridge or a 2542 * pci-pci-bridge. Both kinds are represented by instances of pcib. 2543 */ 2544 2545 void 2546 pci_print_verbose(struct pci_devinfo *dinfo) 2547 { 2548 2549 if (bootverbose) { 2550 pcicfgregs *cfg = &dinfo->cfg; 2551 2552 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", 2553 cfg->vendor, cfg->device, cfg->revid); 2554 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n", 2555 cfg->domain, cfg->bus, cfg->slot, cfg->func); 2556 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", 2557 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, 2558 cfg->mfdev); 2559 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n", 2560 cfg->cmdreg, cfg->statreg, cfg->cachelnsz); 2561 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n", 2562 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, 2563 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); 2564 if (cfg->intpin > 0) 2565 printf("\tintpin=%c, irq=%d\n", 2566 cfg->intpin +'a' -1, cfg->intline); 2567 if (cfg->pp.pp_cap) { 2568 uint16_t status; 2569 2570 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); 2571 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n", 2572 cfg->pp.pp_cap & PCIM_PCAP_SPEC, 2573 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", 2574 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", 2575 status & PCIM_PSTAT_DMASK); 2576 } 2577 if (cfg->msi.msi_location) { 2578 int ctrl; 2579 2580 ctrl = cfg->msi.msi_ctrl; 2581 printf("\tMSI supports %d message%s%s%s\n", 2582 cfg->msi.msi_msgnum, 2583 (cfg->msi.msi_msgnum == 1) ? "" : "s", 2584 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "", 2585 (ctrl & PCIM_MSICTRL_VECTOR) ? 
", vector masks":""); 2586 } 2587 if (cfg->msix.msix_location) { 2588 printf("\tMSI-X supports %d message%s ", 2589 cfg->msix.msix_msgnum, 2590 (cfg->msix.msix_msgnum == 1) ? "" : "s"); 2591 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) 2592 printf("in map 0x%x\n", 2593 cfg->msix.msix_table_bar); 2594 else 2595 printf("in maps 0x%x and 0x%x\n", 2596 cfg->msix.msix_table_bar, 2597 cfg->msix.msix_pba_bar); 2598 } 2599 } 2600 } 2601 2602 static int 2603 pci_porten(device_t dev) 2604 { 2605 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; 2606 } 2607 2608 static int 2609 pci_memen(device_t dev) 2610 { 2611 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; 2612 } 2613 2614 static void 2615 pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp) 2616 { 2617 struct pci_devinfo *dinfo; 2618 pci_addr_t map, testval; 2619 int ln2range; 2620 uint16_t cmd; 2621 2622 /* 2623 * The device ROM BAR is special. It is always a 32-bit 2624 * memory BAR. Bit 0 is special and should not be set when 2625 * sizing the BAR. 2626 */ 2627 dinfo = device_get_ivars(dev); 2628 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { 2629 map = pci_read_config(dev, reg, 4); 2630 pci_write_config(dev, reg, 0xfffffffe, 4); 2631 testval = pci_read_config(dev, reg, 4); 2632 pci_write_config(dev, reg, map, 4); 2633 *mapp = map; 2634 *testvalp = testval; 2635 return; 2636 } 2637 2638 map = pci_read_config(dev, reg, 4); 2639 ln2range = pci_maprange(map); 2640 if (ln2range == 64) 2641 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; 2642 2643 /* 2644 * Disable decoding via the command register before 2645 * determining the BAR's length since we will be placing it in 2646 * a weird state. 2647 */ 2648 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 2649 pci_write_config(dev, PCIR_COMMAND, 2650 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); 2651 2652 /* 2653 * Determine the BAR's length by writing all 1's. The bottom 2654 * log_2(size) bits of the BAR will stick as 0 when we read 2655 * the value back. 2656 */ 2657 pci_write_config(dev, reg, 0xffffffff, 4); 2658 testval = pci_read_config(dev, reg, 4); 2659 if (ln2range == 64) { 2660 pci_write_config(dev, reg + 4, 0xffffffff, 4); 2661 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; 2662 } 2663 2664 /* 2665 * Restore the original value of the BAR. We may have reprogrammed 2666 * the BAR of the low-level console device and when booting verbose, 2667 * we need the console device addressable. 2668 */ 2669 pci_write_config(dev, reg, map, 4); 2670 if (ln2range == 64) 2671 pci_write_config(dev, reg + 4, map >> 32, 4); 2672 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 2673 2674 *mapp = map; 2675 *testvalp = testval; 2676 } 2677 2678 static void 2679 pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base) 2680 { 2681 struct pci_devinfo *dinfo; 2682 int ln2range; 2683 2684 /* The device ROM BAR is always a 32-bit memory BAR. 
*/ 2685 dinfo = device_get_ivars(dev); 2686 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) 2687 ln2range = 32; 2688 else 2689 ln2range = pci_maprange(pm->pm_value); 2690 pci_write_config(dev, pm->pm_reg, base, 4); 2691 if (ln2range == 64) 2692 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); 2693 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); 2694 if (ln2range == 64) 2695 pm->pm_value |= (pci_addr_t)pci_read_config(dev, 2696 pm->pm_reg + 4, 4) << 32; 2697 } 2698 2699 struct pci_map * 2700 pci_find_bar(device_t dev, int reg) 2701 { 2702 struct pci_devinfo *dinfo; 2703 struct pci_map *pm; 2704 2705 dinfo = device_get_ivars(dev); 2706 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { 2707 if (pm->pm_reg == reg) 2708 return (pm); 2709 } 2710 return (NULL); 2711 } 2712 2713 int 2714 pci_bar_enabled(device_t dev, struct pci_map *pm) 2715 { 2716 struct pci_devinfo *dinfo; 2717 uint16_t cmd; 2718 2719 dinfo = device_get_ivars(dev); 2720 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && 2721 !(pm->pm_value & PCIM_BIOS_ENABLE)) 2722 return (0); 2723 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 2724 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) 2725 return ((cmd & PCIM_CMD_MEMEN) != 0); 2726 else 2727 return ((cmd & PCIM_CMD_PORTEN) != 0); 2728 } 2729 2730 static struct pci_map * 2731 pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size) 2732 { 2733 struct pci_devinfo *dinfo; 2734 struct pci_map *pm, *prev; 2735 2736 dinfo = device_get_ivars(dev); 2737 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO); 2738 pm->pm_reg = reg; 2739 pm->pm_value = value; 2740 pm->pm_size = size; 2741 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { 2742 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", 2743 reg)); 2744 if (STAILQ_NEXT(prev, pm_link) == NULL || 2745 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) 2746 break; 2747 } 2748 if (prev != NULL) 2749 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); 2750 else 2751 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); 2752 return (pm); 2753 } 2754 2755 static void 2756 pci_restore_bars(device_t dev) 2757 { 2758 struct pci_devinfo *dinfo; 2759 struct pci_map *pm; 2760 int ln2range; 2761 2762 dinfo = device_get_ivars(dev); 2763 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { 2764 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) 2765 ln2range = 32; 2766 else 2767 ln2range = pci_maprange(pm->pm_value); 2768 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); 2769 if (ln2range == 64) 2770 pci_write_config(dev, pm->pm_reg + 4, 2771 pm->pm_value >> 32, 4); 2772 } 2773 } 2774 2775 /* 2776 * Add a resource based on a pci map register. Return 1 if the map 2777 * register is a 32bit map register or 2 if it is a 64bit register. 2778 */ 2779 static int 2780 pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, 2781 int force, int prefetch) 2782 { 2783 struct pci_map *pm; 2784 pci_addr_t base, map, testval; 2785 pci_addr_t start, end, count; 2786 int barlen, basezero, flags, maprange, mapsize, type; 2787 uint16_t cmd; 2788 struct resource *res; 2789 2790 /* 2791 * The BAR may already exist if the device is a CardBus card 2792 * whose CIS is stored in this BAR. 2793 */ 2794 pm = pci_find_bar(dev, reg); 2795 if (pm != NULL) { 2796 maprange = pci_maprange(pm->pm_value); 2797 barlen = maprange == 64 ? 
2 : 1; 2798 return (barlen); 2799 } 2800 2801 pci_read_bar(dev, reg, &map, &testval); 2802 if (PCI_BAR_MEM(map)) { 2803 type = SYS_RES_MEMORY; 2804 if (map & PCIM_BAR_MEM_PREFETCH) 2805 prefetch = 1; 2806 } else 2807 type = SYS_RES_IOPORT; 2808 mapsize = pci_mapsize(testval); 2809 base = pci_mapbase(map); 2810 #ifdef __PCI_BAR_ZERO_VALID 2811 basezero = 0; 2812 #else 2813 basezero = base == 0; 2814 #endif 2815 maprange = pci_maprange(map); 2816 barlen = maprange == 64 ? 2 : 1; 2817 2818 /* 2819 * For I/O registers, if bottom bit is set, and the next bit up 2820 * isn't clear, we know we have a BAR that doesn't conform to the 2821 * spec, so ignore it. Also, sanity check the size of the data 2822 * areas to the type of memory involved. Memory must be at least 2823 * 16 bytes in size, while I/O ranges must be at least 4. 2824 */ 2825 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) 2826 return (barlen); 2827 if ((type == SYS_RES_MEMORY && mapsize < 4) || 2828 (type == SYS_RES_IOPORT && mapsize < 2)) 2829 return (barlen); 2830 2831 /* Save a record of this BAR. */ 2832 pm = pci_add_bar(dev, reg, map, mapsize); 2833 if (bootverbose) { 2834 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", 2835 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); 2836 if (type == SYS_RES_IOPORT && !pci_porten(dev)) 2837 printf(", port disabled\n"); 2838 else if (type == SYS_RES_MEMORY && !pci_memen(dev)) 2839 printf(", memory disabled\n"); 2840 else 2841 printf(", enabled\n"); 2842 } 2843 2844 /* 2845 * If base is 0, then we have problems if this architecture does 2846 * not allow that. It is best to ignore such entries for the 2847 * moment. These will be allocated later if the driver specifically 2848 * requests them. However, some removable busses look better when 2849 * all resources are allocated, so allow '0' to be overridden. 2850 * 2851 * Similarly, treat maps whose value is the same as the test value 2852 * read back. These maps have had all f's written to them by the 2853 * BIOS in an attempt to disable the resources. 2854 */ 2855 if (!force && (basezero || map == testval)) 2856 return (barlen); 2857 if ((u_long)base != base) { 2858 device_printf(bus, 2859 "pci%d:%d:%d:%d bar %#x too many address bits\n", 2860 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), 2861 pci_get_function(dev), reg); 2862 return (barlen); 2863 } 2864 2865 /* 2866 * This code theoretically does the right thing, but has 2867 * undesirable side effects in some cases where peripherals 2868 * respond oddly to having these bits enabled. Let the user 2869 * be able to turn them off (since pci_enable_io_modes is 1 by 2870 * default).
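* (pci_enable_io_modes is settable at boot time via the
 * hw.pci.enable_io_modes tunable.)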
2871 */ 2872 if (pci_enable_io_modes) { 2873 /* Turn on resources that have been left off by a lazy BIOS */ 2874 if (type == SYS_RES_IOPORT && !pci_porten(dev)) { 2875 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 2876 cmd |= PCIM_CMD_PORTEN; 2877 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 2878 } 2879 if (type == SYS_RES_MEMORY && !pci_memen(dev)) { 2880 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 2881 cmd |= PCIM_CMD_MEMEN; 2882 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 2883 } 2884 } else { 2885 if (type == SYS_RES_IOPORT && !pci_porten(dev)) 2886 return (barlen); 2887 if (type == SYS_RES_MEMORY && !pci_memen(dev)) 2888 return (barlen); 2889 } 2890 2891 count = (pci_addr_t)1 << mapsize; 2892 flags = RF_ALIGNMENT_LOG2(mapsize); 2893 if (prefetch) 2894 flags |= RF_PREFETCHABLE; 2895 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) { 2896 start = 0; /* Let the parent decide. */ 2897 end = ~0ul; 2898 } else { 2899 start = base; 2900 end = base + count - 1; 2901 } 2902 resource_list_add(rl, type, reg, start, end, count); 2903 2904 /* 2905 * Try to allocate the resource for this BAR from our parent 2906 * so that this resource range is already reserved. The 2907 * driver for this device will later inherit this resource in 2908 * pci_alloc_resource(). 2909 */ 2910 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count, 2911 flags); 2912 if (pci_do_realloc_bars && res == NULL && (start != 0 || end != ~0ul)) { 2913 /* 2914 * If the allocation fails, try to allocate a resource for 2915 * this BAR using any available range. The firmware felt 2916 * it was important enough to assign a resource, so don't 2917 * disable decoding if we can help it. 2918 */ 2919 resource_list_delete(rl, type, reg); 2920 resource_list_add(rl, type, reg, 0, ~0ul, count); 2921 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0ul, 2922 count, flags); 2923 } 2924 if (res == NULL) { 2925 /* 2926 * If the allocation fails, delete the resource list entry 2927 * and disable decoding for this device. 2928 * 2929 * If the driver requests this resource in the future, 2930 * pci_reserve_map() will try to allocate a fresh 2931 * resource range. 2932 */ 2933 resource_list_delete(rl, type, reg); 2934 pci_disable_io(dev, type); 2935 if (bootverbose) 2936 device_printf(bus, 2937 "pci%d:%d:%d:%d bar %#x failed to allocate\n", 2938 pci_get_domain(dev), pci_get_bus(dev), 2939 pci_get_slot(dev), pci_get_function(dev), reg); 2940 } else { 2941 start = rman_get_start(res); 2942 pci_write_bar(dev, pm, start); 2943 } 2944 return (barlen); 2945 } 2946 2947 /* 2948 * For ATA devices we need to decide early what addressing mode to use. 2949 * Legacy demands that the primary and secondary ATA ports sit on the 2950 * same addresses that old ISA hardware did. This dictates that we use 2951 * those addresses and ignore the BARs if we cannot set PCI native 2952 * addressing mode.
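* In compatibility mode that means the fixed legacy ranges,
 * 0x1f0-0x1f7/0x3f6 for the primary channel and 0x170-0x177/0x376
 * for the secondary one, which pci_ata_maps() below reserves
 * directly instead of sizing the BARs.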
2953 */ 2954 static void 2955 pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, 2956 uint32_t prefetchmask) 2957 { 2958 struct resource *r; 2959 int rid, type, progif; 2960 #if 0 2961 /* if this device supports PCI native addressing use it */ 2962 progif = pci_read_config(dev, PCIR_PROGIF, 1); 2963 if ((progif & 0x8a) == 0x8a) { 2964 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && 2965 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { 2966 printf("Trying ATA native PCI addressing mode\n"); 2967 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); 2968 } 2969 } 2970 #endif 2971 progif = pci_read_config(dev, PCIR_PROGIF, 1); 2972 type = SYS_RES_IOPORT; 2973 if (progif & PCIP_STORAGE_IDE_MODEPRIM) { 2974 pci_add_map(bus, dev, PCIR_BAR(0), rl, force, 2975 prefetchmask & (1 << 0)); 2976 pci_add_map(bus, dev, PCIR_BAR(1), rl, force, 2977 prefetchmask & (1 << 1)); 2978 } else { 2979 rid = PCIR_BAR(0); 2980 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); 2981 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0, 2982 0x1f7, 8, 0); 2983 rid = PCIR_BAR(1); 2984 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); 2985 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6, 2986 0x3f6, 1, 0); 2987 } 2988 if (progif & PCIP_STORAGE_IDE_MODESEC) { 2989 pci_add_map(bus, dev, PCIR_BAR(2), rl, force, 2990 prefetchmask & (1 << 2)); 2991 pci_add_map(bus, dev, PCIR_BAR(3), rl, force, 2992 prefetchmask & (1 << 3)); 2993 } else { 2994 rid = PCIR_BAR(2); 2995 resource_list_add(rl, type, rid, 0x170, 0x177, 8); 2996 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x170, 2997 0x177, 8, 0); 2998 rid = PCIR_BAR(3); 2999 resource_list_add(rl, type, rid, 0x376, 0x376, 1); 3000 r = resource_list_reserve(rl, bus, dev, type, &rid, 0x376, 3001 0x376, 1, 0); 3002 } 3003 pci_add_map(bus, dev, PCIR_BAR(4), rl, force, 3004 prefetchmask & (1 << 4)); 3005 pci_add_map(bus, dev, PCIR_BAR(5), rl, force, 3006 prefetchmask & (1 << 5)); 3007 } 3008 3009 static void 3010 pci_assign_interrupt(device_t bus, device_t dev, int force_route) 3011 { 3012 struct pci_devinfo *dinfo = device_get_ivars(dev); 3013 pcicfgregs *cfg = &dinfo->cfg; 3014 char tunable_name[64]; 3015 int irq; 3016 3017 /* Has to have an intpin to have an interrupt. */ 3018 if (cfg->intpin == 0) 3019 return; 3020 3021 /* Let the user override the IRQ with a tunable. */ 3022 irq = PCI_INVALID_IRQ; 3023 snprintf(tunable_name, sizeof(tunable_name), 3024 "hw.pci%d.%d.%d.INT%c.irq", 3025 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); 3026 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0)) 3027 irq = PCI_INVALID_IRQ; 3028 3029 /* 3030 * If we didn't get an IRQ via the tunable, then we either use the 3031 * IRQ value in the intline register or we ask the bus to route an 3032 * interrupt for us. If force_route is true, then we only use the 3033 * value in the intline register if the bus was unable to assign an 3034 * IRQ. 3035 */ 3036 if (!PCI_INTERRUPT_VALID(irq)) { 3037 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) 3038 irq = PCI_ASSIGN_INTERRUPT(bus, dev); 3039 if (!PCI_INTERRUPT_VALID(irq)) 3040 irq = cfg->intline; 3041 } 3042 3043 /* If after all that we don't have an IRQ, just bail. */ 3044 if (!PCI_INTERRUPT_VALID(irq)) 3045 return; 3046 3047 /* Update the config register if it changed. */ 3048 if (irq != cfg->intline) { 3049 cfg->intline = irq; 3050 pci_write_config(dev, PCIR_INTLINE, irq, 1); 3051 } 3052 3053 /* Add this IRQ as rid 0 interrupt resource. 
*/ 3054 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); 3055 } 3056 3057 /* Perform early OHCI takeover from SMM. */ 3058 static void 3059 ohci_early_takeover(device_t self) 3060 { 3061 struct resource *res; 3062 uint32_t ctl; 3063 int rid; 3064 int i; 3065 3066 rid = PCIR_BAR(0); 3067 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); 3068 if (res == NULL) 3069 return; 3070 3071 ctl = bus_read_4(res, OHCI_CONTROL); 3072 if (ctl & OHCI_IR) { 3073 if (bootverbose) 3074 printf("ohci early: " 3075 "SMM active, request owner change\n"); 3076 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR); 3077 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) { 3078 DELAY(1000); 3079 ctl = bus_read_4(res, OHCI_CONTROL); 3080 } 3081 if (ctl & OHCI_IR) { 3082 if (bootverbose) 3083 printf("ohci early: " 3084 "SMM does not respond, resetting\n"); 3085 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET); 3086 } 3087 /* Disable interrupts */ 3088 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS); 3089 } 3090 3091 bus_release_resource(self, SYS_RES_MEMORY, rid, res); 3092 } 3093 3094 /* Perform early UHCI takeover from SMM. */ 3095 static void 3096 uhci_early_takeover(device_t self) 3097 { 3098 struct resource *res; 3099 int rid; 3100 3101 /* 3102 * Set the PIRQD enable bit and switch off all the others. We don't 3103 * want legacy support to interfere with us XXX Does this also mean 3104 * that the BIOS won't touch the keyboard anymore if it is connected 3105 * to the ports of the root hub? 3106 */ 3107 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2); 3108 3109 /* Disable interrupts */ 3110 rid = PCI_UHCI_BASE_REG; 3111 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE); 3112 if (res != NULL) { 3113 bus_write_2(res, UHCI_INTR, 0); 3114 bus_release_resource(self, SYS_RES_IOPORT, rid, res); 3115 } 3116 } 3117 3118 /* Perform early EHCI takeover from SMM. */ 3119 static void 3120 ehci_early_takeover(device_t self) 3121 { 3122 struct resource *res; 3123 uint32_t cparams; 3124 uint32_t eec; 3125 uint8_t eecp; 3126 uint8_t bios_sem; 3127 uint8_t offs; 3128 int rid; 3129 int i; 3130 3131 rid = PCIR_BAR(0); 3132 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); 3133 if (res == NULL) 3134 return; 3135 3136 cparams = bus_read_4(res, EHCI_HCCPARAMS); 3137 3138 /* Synchronise with the BIOS if it owns the controller. */ 3139 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0; 3140 eecp = EHCI_EECP_NEXT(eec)) { 3141 eec = pci_read_config(self, eecp, 4); 3142 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) { 3143 continue; 3144 } 3145 bios_sem = pci_read_config(self, eecp + 3146 EHCI_LEGSUP_BIOS_SEM, 1); 3147 if (bios_sem == 0) { 3148 continue; 3149 } 3150 if (bootverbose) 3151 printf("ehci early: " 3152 "SMM active, request owner change\n"); 3153 3154 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1); 3155 3156 for (i = 0; (i < 100) && (bios_sem != 0); i++) { 3157 DELAY(1000); 3158 bios_sem = pci_read_config(self, eecp + 3159 EHCI_LEGSUP_BIOS_SEM, 1); 3160 } 3161 3162 if (bios_sem != 0) { 3163 if (bootverbose) 3164 printf("ehci early: " 3165 "SMM does not respond\n"); 3166 } 3167 /* Disable interrupts */ 3168 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION)); 3169 bus_write_4(res, offs + EHCI_USBINTR, 0); 3170 } 3171 bus_release_resource(self, SYS_RES_MEMORY, rid, res); 3172 } 3173 3174 /* Perform early XHCI takeover from SMM. 
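As with EHCI, the OS requests ownership by setting its semaphore in the USB legacy support extended capability and then waits for the BIOS to release its own.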
*/ 3175 static void 3176 xhci_early_takeover(device_t self) 3177 { 3178 struct resource *res; 3179 uint32_t cparams; 3180 uint32_t eec; 3181 uint8_t eecp; 3182 uint8_t bios_sem; 3183 uint8_t offs; 3184 int rid; 3185 int i; 3186 3187 rid = PCIR_BAR(0); 3188 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); 3189 if (res == NULL) 3190 return; 3191 3192 cparams = bus_read_4(res, XHCI_HCSPARAMS0); 3193 3194 eec = -1; 3195 3196 /* Synchronise with the BIOS if it owns the controller. */ 3197 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec); 3198 eecp += XHCI_XECP_NEXT(eec) << 2) { 3199 eec = bus_read_4(res, eecp); 3200 3201 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY) 3202 continue; 3203 3204 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM); 3205 if (bios_sem == 0) 3206 continue; 3207 3208 if (bootverbose) 3209 printf("xhci early: " 3210 "SMM active, request owner change\n"); 3211 3212 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1); 3213 3214 /* wait a maximum of 5 seconds */ 3215 3216 for (i = 0; (i < 5000) && (bios_sem != 0); i++) { 3217 DELAY(1000); 3218 bios_sem = bus_read_1(res, eecp + 3219 XHCI_XECP_BIOS_SEM); 3220 } 3221 3222 if (bios_sem != 0) { 3223 if (bootverbose) 3224 printf("xhci early: " 3225 "SMM does not respond\n"); 3226 } 3227 3228 /* Disable interrupts */ 3229 offs = bus_read_1(res, XHCI_CAPLENGTH); 3230 bus_write_4(res, offs + XHCI_USBCMD, 0); 3231 bus_read_4(res, offs + XHCI_USBSTS); 3232 } 3233 bus_release_resource(self, SYS_RES_MEMORY, rid, res); 3234 } 3235 3236 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 3237 static void 3238 pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg, 3239 struct resource_list *rl) 3240 { 3241 struct resource *res; 3242 char *cp; 3243 u_long start, end, count; 3244 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus; 3245 3246 switch (cfg->hdrtype & PCIM_HDRTYPE) { 3247 case PCIM_HDRTYPE_BRIDGE: 3248 sec_reg = PCIR_SECBUS_1; 3249 sub_reg = PCIR_SUBBUS_1; 3250 break; 3251 case PCIM_HDRTYPE_CARDBUS: 3252 sec_reg = PCIR_SECBUS_2; 3253 sub_reg = PCIR_SUBBUS_2; 3254 break; 3255 default: 3256 return; 3257 } 3258 3259 /* 3260 * If the existing bus range is valid, attempt to reserve it 3261 * from our parent. If this fails for any reason, clear the 3262 * secbus and subbus registers. 3263 * 3264 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus? 3265 * This would at least preserve the existing sec_bus if it is 3266 * valid. 3267 */ 3268 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1); 3269 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1); 3270 3271 /* Quirk handling. */ 3272 switch (pci_get_devid(dev)) { 3273 case 0x12258086: /* Intel 82454KX/GX (Orion) */ 3274 sup_bus = pci_read_config(dev, 0x41, 1); 3275 if (sup_bus != 0xff) { 3276 sec_bus = sup_bus + 1; 3277 sub_bus = sup_bus + 1; 3278 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1); 3279 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); 3280 } 3281 break; 3282 3283 case 0x00dd10de: 3284 /* Compaq R3000 BIOS sets wrong subordinate bus number.
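The override below is only applied when the smbios.planar.* kenv strings identify the affected Compal-built board.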
*/ 3285 if ((cp = kern_getenv("smbios.planar.maker")) == NULL) 3286 break; 3287 if (strncmp(cp, "Compal", 6) != 0) { 3288 freeenv(cp); 3289 break; 3290 } 3291 freeenv(cp); 3292 if ((cp = kern_getenv("smbios.planar.product")) == NULL) 3293 break; 3294 if (strncmp(cp, "08A0", 4) != 0) { 3295 freeenv(cp); 3296 break; 3297 } 3298 freeenv(cp); 3299 if (sub_bus < 0xa) { 3300 sub_bus = 0xa; 3301 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1); 3302 } 3303 break; 3304 } 3305 3306 if (bootverbose) 3307 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus); 3308 if (sec_bus > 0 && sub_bus >= sec_bus) { 3309 start = sec_bus; 3310 end = sub_bus; 3311 count = end - start + 1; 3312 3313 resource_list_add(rl, PCI_RES_BUS, 0, 0ul, ~0ul, count); 3314 3315 /* 3316 * If requested, clear secondary bus registers in 3317 * bridge devices to force a complete renumbering 3318 * rather than reserving the existing range. However, 3319 * preserve the existing size. 3320 */ 3321 if (pci_clear_buses) 3322 goto clear; 3323 3324 rid = 0; 3325 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid, 3326 start, end, count, 0); 3327 if (res != NULL) 3328 return; 3329 3330 if (bootverbose) 3331 device_printf(bus, 3332 "pci%d:%d:%d:%d secbus failed to allocate\n", 3333 pci_get_domain(dev), pci_get_bus(dev), 3334 pci_get_slot(dev), pci_get_function(dev)); 3335 } 3336 3337 clear: 3338 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1); 3339 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1); 3340 } 3341 3342 static struct resource * 3343 pci_alloc_secbus(device_t dev, device_t child, int *rid, u_long start, 3344 u_long end, u_long count, u_int flags) 3345 { 3346 struct pci_devinfo *dinfo; 3347 pcicfgregs *cfg; 3348 struct resource_list *rl; 3349 struct resource *res; 3350 int sec_reg, sub_reg; 3351 3352 dinfo = device_get_ivars(child); 3353 cfg = &dinfo->cfg; 3354 rl = &dinfo->resources; 3355 switch (cfg->hdrtype & PCIM_HDRTYPE) { 3356 case PCIM_HDRTYPE_BRIDGE: 3357 sec_reg = PCIR_SECBUS_1; 3358 sub_reg = PCIR_SUBBUS_1; 3359 break; 3360 case PCIM_HDRTYPE_CARDBUS: 3361 sec_reg = PCIR_SECBUS_2; 3362 sub_reg = PCIR_SUBBUS_2; 3363 break; 3364 default: 3365 return (NULL); 3366 } 3367 3368 if (*rid != 0) 3369 return (NULL); 3370 3371 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL) 3372 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count); 3373 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) { 3374 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid, 3375 start, end, count, flags & ~RF_ACTIVE); 3376 if (res == NULL) { 3377 resource_list_delete(rl, PCI_RES_BUS, *rid); 3378 device_printf(child, "allocating %lu bus%s failed\n", 3379 count, count == 1 ? "" : "es"); 3380 return (NULL); 3381 } 3382 if (bootverbose) 3383 device_printf(child, 3384 "Lazy allocation of %lu bus%s at %lu\n", count, 3385 count == 1 ? 
"" : "es", rman_get_start(res)); 3386 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1); 3387 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1); 3388 } 3389 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start, 3390 end, count, flags)); 3391 } 3392 #endif 3393 3394 void 3395 pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask) 3396 { 3397 struct pci_devinfo *dinfo; 3398 pcicfgregs *cfg; 3399 struct resource_list *rl; 3400 const struct pci_quirk *q; 3401 uint32_t devid; 3402 int i; 3403 3404 dinfo = device_get_ivars(dev); 3405 cfg = &dinfo->cfg; 3406 rl = &dinfo->resources; 3407 devid = (cfg->device << 16) | cfg->vendor; 3408 3409 /* ATA devices needs special map treatment */ 3410 if ((pci_get_class(dev) == PCIC_STORAGE) && 3411 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) && 3412 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) || 3413 (!pci_read_config(dev, PCIR_BAR(0), 4) && 3414 !pci_read_config(dev, PCIR_BAR(2), 4))) ) 3415 pci_ata_maps(bus, dev, rl, force, prefetchmask); 3416 else 3417 for (i = 0; i < cfg->nummaps;) { 3418 /* 3419 * Skip quirked resources. 3420 */ 3421 for (q = &pci_quirks[0]; q->devid != 0; q++) 3422 if (q->devid == devid && 3423 q->type == PCI_QUIRK_UNMAP_REG && 3424 q->arg1 == PCIR_BAR(i)) 3425 break; 3426 if (q->devid != 0) { 3427 i++; 3428 continue; 3429 } 3430 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force, 3431 prefetchmask & (1 << i)); 3432 } 3433 3434 /* 3435 * Add additional, quirked resources. 3436 */ 3437 for (q = &pci_quirks[0]; q->devid != 0; q++) 3438 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) 3439 pci_add_map(bus, dev, q->arg1, rl, force, 0); 3440 3441 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { 3442 #ifdef __PCI_REROUTE_INTERRUPT 3443 /* 3444 * Try to re-route interrupts. Sometimes the BIOS or 3445 * firmware may leave bogus values in these registers. 3446 * If the re-route fails, then just stick with what we 3447 * have. 3448 */ 3449 pci_assign_interrupt(bus, dev, 1); 3450 #else 3451 pci_assign_interrupt(bus, dev, 0); 3452 #endif 3453 } 3454 3455 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS && 3456 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) { 3457 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI) 3458 xhci_early_takeover(dev); 3459 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI) 3460 ehci_early_takeover(dev); 3461 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI) 3462 ohci_early_takeover(dev); 3463 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI) 3464 uhci_early_takeover(dev); 3465 } 3466 3467 #if defined(NEW_PCIB) && defined(PCI_RES_BUS) 3468 /* 3469 * Reserve resources for secondary bus ranges behind bridge 3470 * devices. 
3471 */ 3472 pci_reserve_secbus(bus, dev, cfg, rl); 3473 #endif 3474 } 3475 3476 static struct pci_devinfo * 3477 pci_identify_function(device_t pcib, device_t dev, int domain, int busno, 3478 int slot, int func, size_t dinfo_size) 3479 { 3480 struct pci_devinfo *dinfo; 3481 3482 dinfo = pci_read_device(pcib, domain, busno, slot, func, dinfo_size); 3483 if (dinfo != NULL) 3484 pci_add_child(dev, dinfo); 3485 3486 return (dinfo); 3487 } 3488 3489 void 3490 pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size) 3491 { 3492 #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) 3493 device_t pcib = device_get_parent(dev); 3494 struct pci_devinfo *dinfo; 3495 int maxslots; 3496 int s, f, pcifunchigh; 3497 uint8_t hdrtype; 3498 int first_func; 3499 3500 /* 3501 * Try to detect a device at slot 0, function 0. If it exists, try to 3502 * enable ARI. We must enable ARI before detecting the rest of the 3503 * functions on this bus as ARI changes the set of slots and functions 3504 * that are legal on this bus. 3505 */ 3506 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0, 3507 dinfo_size); 3508 if (dinfo != NULL && pci_enable_ari) 3509 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); 3510 3511 /* 3512 * Start looking for new devices on slot 0 at function 1 because we 3513 * just identified the device at slot 0, function 0. 3514 */ 3515 first_func = 1; 3516 3517 KASSERT(dinfo_size >= sizeof(struct pci_devinfo), 3518 ("dinfo_size too small")); 3519 maxslots = PCIB_MAXSLOTS(pcib); 3520 for (s = 0; s <= maxslots; s++, first_func = 0) { 3521 pcifunchigh = 0; 3522 f = 0; 3523 DELAY(1); 3524 hdrtype = REG(PCIR_HDRTYPE, 1); 3525 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) 3526 continue; 3527 if (hdrtype & PCIM_MFDEV) 3528 pcifunchigh = PCIB_MAXFUNCS(pcib); 3529 for (f = first_func; f <= pcifunchigh; f++) 3530 pci_identify_function(pcib, dev, domain, busno, s, f, 3531 dinfo_size); 3532 } 3533 #undef REG 3534 } 3535 3536 void 3537 pci_add_child(device_t bus, struct pci_devinfo *dinfo) 3538 { 3539 dinfo->cfg.dev = device_add_child(bus, NULL, -1); 3540 device_set_ivars(dinfo->cfg.dev, dinfo); 3541 resource_list_init(&dinfo->resources); 3542 pci_cfg_save(dinfo->cfg.dev, dinfo, 0); 3543 pci_cfg_restore(dinfo->cfg.dev, dinfo); 3544 pci_print_verbose(dinfo); 3545 pci_add_resources(bus, dinfo->cfg.dev, 0, 0); 3546 pci_child_added(dinfo->cfg.dev); 3547 } 3548 3549 void 3550 pci_child_added_method(device_t dev, device_t child) 3551 { 3552 3553 } 3554 3555 static int 3556 pci_probe(device_t dev) 3557 { 3558 3559 device_set_desc(dev, "PCI bus"); 3560 3561 /* Allow other subclasses to override this driver. 
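Returning BUS_PROBE_GENERIC lets a more specific bus driver (an ACPI-aware one, for example) win the probe when both match.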
*/ 3562 return (BUS_PROBE_GENERIC); 3563 } 3564 3565 int 3566 pci_attach_common(device_t dev) 3567 { 3568 struct pci_softc *sc; 3569 int busno, domain; 3570 #ifdef PCI_DMA_BOUNDARY 3571 int error, tag_valid; 3572 #endif 3573 #ifdef PCI_RES_BUS 3574 int rid; 3575 #endif 3576 3577 sc = device_get_softc(dev); 3578 domain = pcib_get_domain(dev); 3579 busno = pcib_get_bus(dev); 3580 #ifdef PCI_RES_BUS 3581 rid = 0; 3582 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, 3583 1, 0); 3584 if (sc->sc_bus == NULL) { 3585 device_printf(dev, "failed to allocate bus number\n"); 3586 return (ENXIO); 3587 } 3588 #endif 3589 if (bootverbose) 3590 device_printf(dev, "domain=%d, physical bus=%d\n", 3591 domain, busno); 3592 #ifdef PCI_DMA_BOUNDARY 3593 tag_valid = 0; 3594 if (device_get_devclass(device_get_parent(device_get_parent(dev))) != 3595 devclass_find("pci")) { 3596 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 3597 PCI_DMA_BOUNDARY, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 3598 NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, 3599 BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_dma_tag); 3600 if (error) 3601 device_printf(dev, "Failed to create DMA tag: %d\n", 3602 error); 3603 else 3604 tag_valid = 1; 3605 } 3606 if (!tag_valid) 3607 #endif 3608 sc->sc_dma_tag = bus_get_dma_tag(dev); 3609 return (0); 3610 } 3611 3612 static int 3613 pci_attach(device_t dev) 3614 { 3615 int busno, domain, error; 3616 3617 error = pci_attach_common(dev); 3618 if (error) 3619 return (error); 3620 3621 /* 3622 * Since there can be multiple independently numbered PCI 3623 * busses on systems with multiple PCI domains, we can't use 3624 * the unit number to decide which bus we are probing. We ask 3625 * the parent pcib what our domain and bus numbers are. 3626 */ 3627 domain = pcib_get_domain(dev); 3628 busno = pcib_get_bus(dev); 3629 pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo)); 3630 return (bus_generic_attach(dev)); 3631 } 3632 3633 #ifdef PCI_RES_BUS 3634 static int 3635 pci_detach(device_t dev) 3636 { 3637 struct pci_softc *sc; 3638 int error; 3639 3640 error = bus_generic_detach(dev); 3641 if (error) 3642 return (error); 3643 sc = device_get_softc(dev); 3644 return (bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus)); 3645 } 3646 #endif 3647 3648 static void 3649 pci_set_power_child(device_t dev, device_t child, int state) 3650 { 3651 struct pci_devinfo *dinfo; 3652 device_t pcib; 3653 int dstate; 3654 3655 /* 3656 * Set the device to the given state. If the firmware suggests 3657 * a different power state, use it instead. If power management 3658 * is not present, the firmware is responsible for managing 3659 * device power. Skip children that aren't attached since they 3660 * are handled separately. 3661 */ 3662 pcib = device_get_parent(dev); 3663 dinfo = device_get_ivars(child); 3664 dstate = state; 3665 if (device_is_attached(child) && 3666 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0) 3667 pci_set_powerstate(child, dstate); 3668 } 3669 3670 int 3671 pci_suspend_child(device_t dev, device_t child) 3672 { 3673 struct pci_devinfo *dinfo; 3674 int error; 3675 3676 dinfo = device_get_ivars(child); 3677 3678 /* 3679 * Save the PCI configuration space for the child and set the 3680 * device in the appropriate power state for this sleep state. 3681 */ 3682 pci_cfg_save(child, dinfo, 0); 3683 3684 /* Suspend devices before potentially powering them down.
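The order matters: the child's suspend method may still need to touch the device, so it is only moved to D3 after bus_generic_suspend_child() has succeeded.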
*/ 3685 error = bus_generic_suspend_child(dev, child); 3686 3687 if (error) 3688 return (error); 3689 3690 if (pci_do_power_suspend) 3691 pci_set_power_child(dev, child, PCI_POWERSTATE_D3); 3692 3693 return (0); 3694 } 3695 3696 int 3697 pci_resume_child(device_t dev, device_t child) 3698 { 3699 struct pci_devinfo *dinfo; 3700 3701 if (pci_do_power_resume) 3702 pci_set_power_child(dev, child, PCI_POWERSTATE_D0); 3703 3704 dinfo = device_get_ivars(child); 3705 pci_cfg_restore(child, dinfo); 3706 if (!device_is_attached(child)) 3707 pci_cfg_save(child, dinfo, 1); 3708 3709 bus_generic_resume_child(dev, child); 3710 3711 return (0); 3712 } 3713 3714 int 3715 pci_resume(device_t dev) 3716 { 3717 device_t child, *devlist; 3718 int error, i, numdevs; 3719 3720 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0) 3721 return (error); 3722 3723 /* 3724 * Resume critical devices first, then everything else later. 3725 */ 3726 for (i = 0; i < numdevs; i++) { 3727 child = devlist[i]; 3728 switch (pci_get_class(child)) { 3729 case PCIC_DISPLAY: 3730 case PCIC_MEMORY: 3731 case PCIC_BRIDGE: 3732 case PCIC_BASEPERIPH: 3733 BUS_RESUME_CHILD(dev, child); 3734 break; 3735 } 3736 } 3737 for (i = 0; i < numdevs; i++) { 3738 child = devlist[i]; 3739 switch (pci_get_class(child)) { 3740 case PCIC_DISPLAY: 3741 case PCIC_MEMORY: 3742 case PCIC_BRIDGE: 3743 case PCIC_BASEPERIPH: 3744 break; 3745 default: 3746 BUS_RESUME_CHILD(dev, child); 3747 } 3748 } 3749 free(devlist, M_TEMP); 3750 return (0); 3751 } 3752 3753 static void 3754 pci_load_vendor_data(void) 3755 { 3756 caddr_t data; 3757 void *ptr; 3758 size_t sz; 3759 3760 data = preload_search_by_type("pci_vendor_data"); 3761 if (data != NULL) { 3762 ptr = preload_fetch_addr(data); 3763 sz = preload_fetch_size(data); 3764 if (ptr != NULL && sz != 0) { 3765 pci_vendordata = ptr; 3766 pci_vendordata_size = sz; 3767 /* terminate the database */ 3768 pci_vendordata[pci_vendordata_size] = '\n'; 3769 } 3770 } 3771 } 3772 3773 void 3774 pci_driver_added(device_t dev, driver_t *driver) 3775 { 3776 int numdevs; 3777 device_t *devlist; 3778 device_t child; 3779 struct pci_devinfo *dinfo; 3780 int i; 3781 3782 if (bootverbose) 3783 device_printf(dev, "driver added\n"); 3784 DEVICE_IDENTIFY(driver, dev); 3785 if (device_get_children(dev, &devlist, &numdevs) != 0) 3786 return; 3787 for (i = 0; i < numdevs; i++) { 3788 child = devlist[i]; 3789 if (device_get_state(child) != DS_NOTPRESENT) 3790 continue; 3791 dinfo = device_get_ivars(child); 3792 pci_print_verbose(dinfo); 3793 if (bootverbose) 3794 pci_printf(&dinfo->cfg, "reprobing on driver added\n"); 3795 pci_cfg_restore(child, dinfo); 3796 if (device_probe_and_attach(child) != 0) 3797 pci_child_detached(dev, child); 3798 } 3799 free(devlist, M_TEMP); 3800 } 3801 3802 int 3803 pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, 3804 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) 3805 { 3806 struct pci_devinfo *dinfo; 3807 struct msix_table_entry *mte; 3808 struct msix_vector *mv; 3809 uint64_t addr; 3810 uint32_t data; 3811 void *cookie; 3812 int error, rid; 3813 3814 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr, 3815 arg, &cookie); 3816 if (error) 3817 return (error); 3818 3819 /* If this is not a direct child, just bail out. 
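The MSI/MSI-X bookkeeping below lives in the ivars of our immediate children; devices further down the tree are handled by the buses in between.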
*/ 3820 if (device_get_parent(child) != dev) { 3821 *cookiep = cookie; 3822 return(0); 3823 } 3824 3825 rid = rman_get_rid(irq); 3826 if (rid == 0) { 3827 /* Make sure that INTx is enabled */ 3828 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); 3829 } else { 3830 /* 3831 * Check to see if the interrupt is MSI or MSI-X. 3832 * Ask our parent to map the MSI and give 3833 * us the address and data register values. 3834 * If we fail for some reason, teardown the 3835 * interrupt handler. 3836 */ 3837 dinfo = device_get_ivars(child); 3838 if (dinfo->cfg.msi.msi_alloc > 0) { 3839 if (dinfo->cfg.msi.msi_addr == 0) { 3840 KASSERT(dinfo->cfg.msi.msi_handlers == 0, 3841 ("MSI has handlers, but vectors not mapped")); 3842 error = PCIB_MAP_MSI(device_get_parent(dev), 3843 child, rman_get_start(irq), &addr, &data); 3844 if (error) 3845 goto bad; 3846 dinfo->cfg.msi.msi_addr = addr; 3847 dinfo->cfg.msi.msi_data = data; 3848 } 3849 if (dinfo->cfg.msi.msi_handlers == 0) 3850 pci_enable_msi(child, dinfo->cfg.msi.msi_addr, 3851 dinfo->cfg.msi.msi_data); 3852 dinfo->cfg.msi.msi_handlers++; 3853 } else { 3854 KASSERT(dinfo->cfg.msix.msix_alloc > 0, 3855 ("No MSI or MSI-X interrupts allocated")); 3856 KASSERT(rid <= dinfo->cfg.msix.msix_table_len, 3857 ("MSI-X index too high")); 3858 mte = &dinfo->cfg.msix.msix_table[rid - 1]; 3859 KASSERT(mte->mte_vector != 0, ("no message vector")); 3860 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; 3861 KASSERT(mv->mv_irq == rman_get_start(irq), 3862 ("IRQ mismatch")); 3863 if (mv->mv_address == 0) { 3864 KASSERT(mte->mte_handlers == 0, 3865 ("MSI-X table entry has handlers, but vector not mapped")); 3866 error = PCIB_MAP_MSI(device_get_parent(dev), 3867 child, rman_get_start(irq), &addr, &data); 3868 if (error) 3869 goto bad; 3870 mv->mv_address = addr; 3871 mv->mv_data = data; 3872 } 3873 if (mte->mte_handlers == 0) { 3874 pci_enable_msix(child, rid - 1, mv->mv_address, 3875 mv->mv_data); 3876 pci_unmask_msix(child, rid - 1); 3877 } 3878 mte->mte_handlers++; 3879 } 3880 3881 /* 3882 * Make sure that INTx is disabled if we are using MSI/MSI-X, 3883 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG, 3884 * in which case we "enable" INTx so MSI/MSI-X actually works. 3885 */ 3886 if (!pci_has_quirk(pci_get_devid(child), 3887 PCI_QUIRK_MSI_INTX_BUG)) 3888 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); 3889 else 3890 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS); 3891 bad: 3892 if (error) { 3893 (void)bus_generic_teardown_intr(dev, child, irq, 3894 cookie); 3895 return (error); 3896 } 3897 } 3898 *cookiep = cookie; 3899 return (0); 3900 } 3901 3902 int 3903 pci_teardown_intr(device_t dev, device_t child, struct resource *irq, 3904 void *cookie) 3905 { 3906 struct msix_table_entry *mte; 3907 struct resource_list_entry *rle; 3908 struct pci_devinfo *dinfo; 3909 int error, rid; 3910 3911 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) 3912 return (EINVAL); 3913 3914 /* If this isn't a direct child, just bail out */ 3915 if (device_get_parent(child) != dev) 3916 return(bus_generic_teardown_intr(dev, child, irq, cookie)); 3917 3918 rid = rman_get_rid(irq); 3919 if (rid == 0) { 3920 /* Mask INTx */ 3921 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); 3922 } else { 3923 /* 3924 * Check to see if the interrupt is MSI or MSI-X. If so, 3925 * decrement the appropriate handlers count and mask the 3926 * MSI-X message, or disable MSI messages if the count 3927 * drops to 0. 
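* The IRQ resource itself stays allocated either way; returning the
 * message to the parent bridge is pci_release_msi()'s job.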
int
pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct msix_table_entry *mte;
	struct resource_list_entry *rle;
	struct pci_devinfo *dinfo;
	int error, rid;

	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);

	/* If this isn't a direct child, just bail out */
	if (device_get_parent(child) != dev)
		return (bus_generic_teardown_intr(dev, child, irq, cookie));

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Mask INTx */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.  If so,
		 * decrement the appropriate handlers count and mask the
		 * MSI-X message, or disable MSI messages if the count
		 * drops to 0.
		 */
		dinfo = device_get_ivars(child);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		if (rle->res != irq)
			return (EINVAL);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
			    ("MSI index too high"));
			if (dinfo->cfg.msi.msi_handlers == 0)
				return (EINVAL);
			dinfo->cfg.msi.msi_handlers--;
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_disable_msi(child);
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			if (mte->mte_handlers == 0)
				return (EINVAL);
			mte->mte_handlers--;
			if (mte->mte_handlers == 0)
				pci_mask_msix(child, rid - 1);
		}
	}
	error = bus_generic_teardown_intr(dev, child, irq, cookie);
	if (rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI/MSI-X", __func__));
	return (error);
}

int
pci_print_child(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	int retval = 0;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	retval += bus_print_child_header(dev, child);

	retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
	retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
	retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
	if (device_get_flags(dev))
		retval += printf(" flags %#x", device_get_flags(dev));

	retval += printf(" at device %d.%d", pci_get_slot(child),
	    pci_get_function(child));

	retval += bus_print_child_domain(dev, child);
	retval += bus_print_child_footer(dev, child);

	return (retval);
}
"memory"}, 4024 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"}, 4025 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"}, 4026 {PCIC_BRIDGE, -1, 1, "bridge"}, 4027 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"}, 4028 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"}, 4029 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"}, 4030 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"}, 4031 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"}, 4032 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"}, 4033 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"}, 4034 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"}, 4035 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"}, 4036 {PCIC_SIMPLECOMM, -1, 1, "simple comms"}, 4037 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */ 4038 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"}, 4039 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"}, 4040 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"}, 4041 {PCIC_BASEPERIPH, -1, 0, "base peripheral"}, 4042 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"}, 4043 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"}, 4044 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"}, 4045 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"}, 4046 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"}, 4047 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"}, 4048 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"}, 4049 {PCIC_INPUTDEV, -1, 1, "input device"}, 4050 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"}, 4051 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"}, 4052 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"}, 4053 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"}, 4054 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"}, 4055 {PCIC_DOCKING, -1, 1, "docking station"}, 4056 {PCIC_PROCESSOR, -1, 1, "processor"}, 4057 {PCIC_SERIALBUS, -1, 1, "serial bus"}, 4058 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"}, 4059 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"}, 4060 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"}, 4061 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"}, 4062 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"}, 4063 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"}, 4064 {PCIC_WIRELESS, -1, 1, "wireless controller"}, 4065 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"}, 4066 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"}, 4067 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"}, 4068 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"}, 4069 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"}, 4070 {PCIC_SATCOM, -1, 1, "satellite communication"}, 4071 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"}, 4072 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"}, 4073 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"}, 4074 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"}, 4075 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"}, 4076 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"}, 4077 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"}, 4078 {PCIC_DASP, -1, 0, "dasp"}, 4079 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"}, 4080 {0, 0, 0, NULL} 4081 }; 4082 4083 void 4084 pci_probe_nomatch(device_t dev, device_t child) 4085 { 4086 int i, report; 4087 const char *cp, *scp; 4088 char *device; 4089 4090 /* 4091 * Look for a listing for this device in a loaded device database. 
void
pci_probe_nomatch(device_t dev, device_t child)
{
	int i, report;
	const char *cp, *scp;
	char *device;

	/*
	 * Look for a listing for this device in a loaded device database.
	 */
	report = 1;
	if ((device = pci_describe_device(child)) != NULL) {
		device_printf(dev, "<%s>", device);
		free(device, M_DEVBUF);
	} else {
		/*
		 * Scan the class/subclass descriptions for a general
		 * description.
		 */
		cp = "unknown";
		scp = NULL;
		for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
			if (pci_nomatch_tab[i].class == pci_get_class(child)) {
				if (pci_nomatch_tab[i].subclass == -1) {
					cp = pci_nomatch_tab[i].desc;
					report = pci_nomatch_tab[i].report;
				} else if (pci_nomatch_tab[i].subclass ==
				    pci_get_subclass(child)) {
					scp = pci_nomatch_tab[i].desc;
					report = pci_nomatch_tab[i].report;
				}
			}
		}
		if (report || bootverbose) {
			device_printf(dev, "<%s%s%s>",
			    cp ? cp : "",
			    ((cp != NULL) && (scp != NULL)) ? ", " : "",
			    scp ? scp : "");
		}
	}
	if (report || bootverbose) {
		printf(" at device %d.%d (no driver attached)\n",
		    pci_get_slot(child), pci_get_function(child));
	}
	pci_cfg_save(child, device_get_ivars(child), 1);
}

void
pci_child_detached(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;

	/*
	 * Have to deallocate IRQs before releasing any MSI messages and
	 * have to release MSI messages before deallocating any memory
	 * BARs.
	 */
	if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
		pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
	if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
		pci_printf(&dinfo->cfg, "Device leaked MSI vectors\n");
		(void)pci_release_msi(child);
	}
	if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
		pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
	if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
		pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
#ifdef PCI_RES_BUS
	if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
		pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
#endif

	pci_cfg_save(child, dinfo, 1);
}

/*
 * Parse the PCI device database, if loaded, and return a pointer to a
 * description of the device.
 *
 * The database is flat text formatted as follows:
 *
 * Any line not in a valid format is ignored.
 * Lines are terminated with newline '\n' characters.
 *
 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
 * the vendor name.
 *
 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
 * - devices cannot be listed without a corresponding VENDOR line.
 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
 * another TAB, then the device name.
 */
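/*
 * A made-up illustration of the database layout described above (both
 * names and the IDs are hypothetical; fields are TAB-separated):
 *
 *	f00d	Hypothetical Vendor, Inc.
 *		1234	Hypothetical Ethernet Adapter
 */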
/*
 * Assuming (ptr) points to the beginning of a line in the database,
 * return the vendor or device and description of the next entry.
 * The value of (vendor) or (device) inappropriate for the entry type
 * is set to -1.  Returns nonzero at the end of the database.
 *
 * Note that this is not entirely robust in the face of corrupt data;
 * we attempt to safeguard against this by spamming the end of the
 * database with a newline when we initialise.
 */
static int
pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
{
	char *cp = *ptr;
	int left;

	*device = -1;
	*vendor = -1;
	**desc = '\0';
	for (;;) {
		left = pci_vendordata_size - (cp - pci_vendordata);
		if (left <= 0) {
			*ptr = cp;
			return (1);
		}

		/*
		 * Limit the description to 79 characters so the
		 * terminating NUL fits in the caller's 80-byte buffer.
		 */
		/* vendor entry? */
		if (*cp != '\t' &&
		    sscanf(cp, "%x\t%79[^\n]", vendor, *desc) == 2)
			break;
		/* device entry? */
		if (*cp == '\t' &&
		    sscanf(cp, "%x\t%79[^\n]", device, *desc) == 2)
			break;

		/* skip to next line */
		while (*cp != '\n' && left > 0) {
			cp++;
			left--;
		}
		if (*cp == '\n') {
			cp++;
			left--;
		}
	}
	/* skip to next line */
	while (*cp != '\n' && left > 0) {
		cp++;
		left--;
	}
	if (*cp == '\n' && left > 0)
		cp++;
	*ptr = cp;
	return (0);
}

static char *
pci_describe_device(device_t dev)
{
	int vendor, device;
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)
		goto out;

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
			goto out;
		if (vendor == pci_get_vendor(dev))
			break;
	}
	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
			*dp = 0;
			break;
		}
		if (vendor != -1) {
			*dp = 0;
			break;
		}
		if (device == pci_get_device(dev))
			break;
	}
	if (dp[0] == '\0')
		snprintf(dp, 80, "0x%x", pci_get_device(dev));
	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
	    NULL)
		sprintf(desc, "%s, %s", vp, dp);
out:
	if (vp != NULL)
		free(vp, M_DEVBUF);
	if (dp != NULL)
		free(dp, M_DEVBUF);
	return (desc);
}
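/*
 * Drivers normally reach pci_read_ivar() below through the pci_get_*()
 * accessor macros from pcivar.h.  A hedged sketch of a probe routine
 * using them (the IDs and description are hypothetical):
 */
#if 0
static int
foo_probe(device_t dev)
{
	/* pci_get_vendor()/pci_get_device() expand to PCI_IVAR_* reads. */
	if (pci_get_vendor(dev) == 0xf00d && pci_get_device(dev) == 0x1234) {
		device_set_desc(dev, "Hypothetical Ethernet Adapter");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
#endif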
4303 */ 4304 *((uint8_t **) result) = NULL; 4305 return (EINVAL); 4306 case PCI_IVAR_SUBVENDOR: 4307 *result = cfg->subvendor; 4308 break; 4309 case PCI_IVAR_SUBDEVICE: 4310 *result = cfg->subdevice; 4311 break; 4312 case PCI_IVAR_VENDOR: 4313 *result = cfg->vendor; 4314 break; 4315 case PCI_IVAR_DEVICE: 4316 *result = cfg->device; 4317 break; 4318 case PCI_IVAR_DEVID: 4319 *result = (cfg->device << 16) | cfg->vendor; 4320 break; 4321 case PCI_IVAR_CLASS: 4322 *result = cfg->baseclass; 4323 break; 4324 case PCI_IVAR_SUBCLASS: 4325 *result = cfg->subclass; 4326 break; 4327 case PCI_IVAR_PROGIF: 4328 *result = cfg->progif; 4329 break; 4330 case PCI_IVAR_REVID: 4331 *result = cfg->revid; 4332 break; 4333 case PCI_IVAR_INTPIN: 4334 *result = cfg->intpin; 4335 break; 4336 case PCI_IVAR_IRQ: 4337 *result = cfg->intline; 4338 break; 4339 case PCI_IVAR_DOMAIN: 4340 *result = cfg->domain; 4341 break; 4342 case PCI_IVAR_BUS: 4343 *result = cfg->bus; 4344 break; 4345 case PCI_IVAR_SLOT: 4346 *result = cfg->slot; 4347 break; 4348 case PCI_IVAR_FUNCTION: 4349 *result = cfg->func; 4350 break; 4351 case PCI_IVAR_CMDREG: 4352 *result = cfg->cmdreg; 4353 break; 4354 case PCI_IVAR_CACHELNSZ: 4355 *result = cfg->cachelnsz; 4356 break; 4357 case PCI_IVAR_MINGNT: 4358 *result = cfg->mingnt; 4359 break; 4360 case PCI_IVAR_MAXLAT: 4361 *result = cfg->maxlat; 4362 break; 4363 case PCI_IVAR_LATTIMER: 4364 *result = cfg->lattimer; 4365 break; 4366 default: 4367 return (ENOENT); 4368 } 4369 return (0); 4370 } 4371 4372 int 4373 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) 4374 { 4375 struct pci_devinfo *dinfo; 4376 4377 dinfo = device_get_ivars(child); 4378 4379 switch (which) { 4380 case PCI_IVAR_INTPIN: 4381 dinfo->cfg.intpin = value; 4382 return (0); 4383 case PCI_IVAR_ETHADDR: 4384 case PCI_IVAR_SUBVENDOR: 4385 case PCI_IVAR_SUBDEVICE: 4386 case PCI_IVAR_VENDOR: 4387 case PCI_IVAR_DEVICE: 4388 case PCI_IVAR_DEVID: 4389 case PCI_IVAR_CLASS: 4390 case PCI_IVAR_SUBCLASS: 4391 case PCI_IVAR_PROGIF: 4392 case PCI_IVAR_REVID: 4393 case PCI_IVAR_IRQ: 4394 case PCI_IVAR_DOMAIN: 4395 case PCI_IVAR_BUS: 4396 case PCI_IVAR_SLOT: 4397 case PCI_IVAR_FUNCTION: 4398 return (EINVAL); /* disallow for now */ 4399 4400 default: 4401 return (ENOENT); 4402 } 4403 } 4404 4405 #include "opt_ddb.h" 4406 #ifdef DDB 4407 #include <ddb/ddb.h> 4408 #include <sys/cons.h> 4409 4410 /* 4411 * List resources based on pci map registers, used for within ddb 4412 */ 4413 4414 DB_SHOW_COMMAND(pciregs, db_pci_dump) 4415 { 4416 struct pci_devinfo *dinfo; 4417 struct devlist *devlist_head; 4418 struct pci_conf *p; 4419 const char *name; 4420 int i, error, none_count; 4421 4422 none_count = 0; 4423 /* get the head of the device queue */ 4424 devlist_head = &pci_devq; 4425 4426 /* 4427 * Go through the list of devices and print out devices 4428 */ 4429 for (error = 0, i = 0, 4430 dinfo = STAILQ_FIRST(devlist_head); 4431 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit; 4432 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { 4433 4434 /* Populate pd_name and pd_unit */ 4435 name = NULL; 4436 if (dinfo->cfg.dev) 4437 name = device_get_name(dinfo->cfg.dev); 4438 4439 p = &dinfo->conf; 4440 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x " 4441 "chip=0x%08x rev=0x%02x hdr=0x%02x\n", 4442 (name && *name) ? name : "none", 4443 (name && *name) ? 
#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>
#include <sys/cons.h>

/*
 * List resources based on pci map registers, for use within ddb
 */

DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) &&
	     !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
		    "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
		    (name && *name) ? name : "none",
		    (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
		    none_count++,
		    p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
		    p->pc_sel.pc_func, (p->pc_class << 16) |
		    (p->pc_subclass << 8) | p->pc_progif,
		    (p->pc_subdevice << 16) | p->pc_subvendor,
		    (p->pc_device << 16) | p->pc_vendor,
		    p->pc_revid, p->pc_hdr);
	}
}
#endif /* DDB */

static struct resource *
pci_reserve_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource *res;
	struct pci_map *pm;
	pci_addr_t map, testval;
	int mapsize;

	res = NULL;
	pm = pci_find_bar(child, *rid);
	if (pm != NULL) {
		/* This is a BAR that we failed to allocate earlier. */
		mapsize = pm->pm_size;
		map = pm->pm_value;
	} else {
		/*
		 * Weed out the bogons, and figure out how large the
		 * BAR/map is.  BARs that read back 0 here are bogus
		 * and unimplemented.  Note: atapci devices in legacy
		 * mode are special and handled elsewhere in the code.
		 * If you have an atapci device in legacy mode and it
		 * fails here, that other code is broken.
		 */
		pci_read_bar(child, *rid, &map, &testval);

		/*
		 * Determine the size of the BAR and ignore BARs with a size
		 * of 0.  Device ROM BARs use a different mask value.
		 */
		if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
			mapsize = pci_romsize(testval);
		else
			mapsize = pci_mapsize(testval);
		if (mapsize == 0)
			goto out;
		pm = pci_add_bar(child, *rid, map, mapsize);
	}

	if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is a memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	count = (pci_addr_t)1 << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) |
		    RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate the resource, then write back the appropriate
	 * BAR for that resource.
	 */
	resource_list_add(rl, type, *rid, start, end, count);
	res = resource_list_reserve(rl, dev, child, type, rid, start, end,
	    count, flags & ~RF_ACTIVE);
	if (res == NULL) {
		resource_list_delete(rl, type, *rid);
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	map = rman_get_start(res);
	pci_write_bar(child, pm, map);
out:
	return (res);
}
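/*
 * Drivers trigger the lazy BAR reservation above through the normal
 * resource API.  A hedged sketch with a hypothetical softc:
 */
#if 0
static int
foo_map_bar0(device_t dev, struct foo_softc *sc)
{
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->mem_rid, RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENXIO);
	sc->mem_bst = rman_get_bustag(sc->mem_res);
	sc->mem_bsh = rman_get_bushandle(sc->mem_res);
	return (0);
}
#endif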
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;
	struct resource *res;
	pcicfgregs *cfg;

	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Perform lazy resource allocation
	 */
	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	cfg = &dinfo->cfg;
	switch (type) {
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
	case PCI_RES_BUS:
		return (pci_alloc_secbus(dev, child, rid, start, end, count,
		    flags));
#endif
	case SYS_RES_IRQ:
		/*
		 * Can't alloc legacy interrupt once MSI messages have
		 * been allocated.
		 */
		if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
		    cfg->msix.msix_alloc > 0))
			return (NULL);

		/*
		 * If the child device doesn't have an interrupt
		 * routed and is deserving of an interrupt, try to
		 * assign it one.
		 */
		if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
		    (cfg->intpin != 0))
			pci_assign_interrupt(dev, child, 0);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
#ifdef NEW_PCIB
		/*
		 * PCI-PCI bridge I/O window resources are not BARs.
		 * For those allocations just pass the request up the
		 * tree.
		 */
		if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
			switch (*rid) {
			case PCIR_IOBASEL_1:
			case PCIR_MEMBASE_1:
			case PCIR_PMBASEL_1:
				/*
				 * XXX: Should we bother creating a resource
				 * list entry?
				 */
				return (bus_generic_alloc_resource(dev, child,
				    type, rid, start, end, count, flags));
			}
		}
#endif
		/* Reserve resources for this BAR if needed. */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			res = pci_reserve_map(dev, child, type, rid, start,
			    end, count, flags);
			if (res == NULL)
				return (NULL);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
int
pci_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	pcicfgregs *cfg;

	if (device_get_parent(child) != dev)
		return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
		    type, rid, r));

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
#ifdef NEW_PCIB
	/*
	 * PCI-PCI bridge I/O window resources are not BARs.  For
	 * those allocations just pass the request up the tree.
	 */
	if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
	    (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
		switch (rid) {
		case PCIR_IOBASEL_1:
		case PCIR_MEMBASE_1:
		case PCIR_PMBASEL_1:
			return (bus_generic_release_resource(dev, child, type,
			    rid, r));
		}
	}
#endif

	rl = &dinfo->resources;
	return (resource_list_release(rl, dev, child, type, rid, r));
}

int
pci_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct pci_devinfo *dinfo;
	int error;

	error = bus_generic_activate_resource(dev, child, type, rid, r);
	if (error)
		return (error);

	/* Enable decoding in the command register when activating BARs. */
	if (device_get_parent(child) == dev) {
		/* Device ROMs need their decoding explicitly enabled. */
		dinfo = device_get_ivars(child);
		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
			pci_write_bar(child, pci_find_bar(child, rid),
			    rman_get_start(r) | PCIM_BIOS_ENABLE);
		switch (type) {
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			error = PCI_ENABLE_IO(dev, child, type);
			break;
		}
	}
	return (error);
}

int
pci_deactivate_resource(device_t dev, device_t child, int type,
    int rid, struct resource *r)
{
	struct pci_devinfo *dinfo;
	int error;

	error = bus_generic_deactivate_resource(dev, child, type, rid, r);
	if (error)
		return (error);

	/* Disable decoding for device ROMs. */
	if (device_get_parent(child) == dev) {
		dinfo = device_get_ivars(child);
		if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
			pci_write_bar(child, pci_find_bar(child, rid),
			    rman_get_start(r));
	}
	return (0);
}
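/*
 * Device ROMs follow the same allocation pattern with rid PCIR_BIOS;
 * pci_activate_resource() above then sets PCIM_BIOS_ENABLE for the
 * duration of the mapping.  A hedged sketch:
 */
#if 0
static struct resource *
foo_map_rom(device_t dev, int *ridp)
{
	*ridp = PCIR_BIOS;
	return (bus_alloc_resource_any(dev, SYS_RES_MEMORY, ridp, RF_ACTIVE));
}
#endif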
" 4740 "(type=%d, rid=%d, addr=%lx)\n", 4741 rle->type, rle->rid, 4742 rman_get_start(rle->res)); 4743 bus_release_resource(child, rle->type, rle->rid, 4744 rle->res); 4745 } 4746 resource_list_unreserve(rl, dev, child, rle->type, 4747 rle->rid); 4748 } 4749 } 4750 resource_list_free(rl); 4751 4752 device_delete_child(dev, child); 4753 pci_freecfg(dinfo); 4754 } 4755 4756 void 4757 pci_delete_resource(device_t dev, device_t child, int type, int rid) 4758 { 4759 struct pci_devinfo *dinfo; 4760 struct resource_list *rl; 4761 struct resource_list_entry *rle; 4762 4763 if (device_get_parent(child) != dev) 4764 return; 4765 4766 dinfo = device_get_ivars(child); 4767 rl = &dinfo->resources; 4768 rle = resource_list_find(rl, type, rid); 4769 if (rle == NULL) 4770 return; 4771 4772 if (rle->res) { 4773 if (rman_get_flags(rle->res) & RF_ACTIVE || 4774 resource_list_busy(rl, type, rid)) { 4775 device_printf(dev, "delete_resource: " 4776 "Resource still owned by child, oops. " 4777 "(type=%d, rid=%d, addr=%lx)\n", 4778 type, rid, rman_get_start(rle->res)); 4779 return; 4780 } 4781 resource_list_unreserve(rl, dev, child, type, rid); 4782 } 4783 resource_list_delete(rl, type, rid); 4784 } 4785 4786 struct resource_list * 4787 pci_get_resource_list (device_t dev, device_t child) 4788 { 4789 struct pci_devinfo *dinfo = device_get_ivars(child); 4790 4791 return (&dinfo->resources); 4792 } 4793 4794 bus_dma_tag_t 4795 pci_get_dma_tag(device_t bus, device_t dev) 4796 { 4797 struct pci_softc *sc = device_get_softc(bus); 4798 4799 return (sc->sc_dma_tag); 4800 } 4801 4802 uint32_t 4803 pci_read_config_method(device_t dev, device_t child, int reg, int width) 4804 { 4805 struct pci_devinfo *dinfo = device_get_ivars(child); 4806 pcicfgregs *cfg = &dinfo->cfg; 4807 4808 return (PCIB_READ_CONFIG(device_get_parent(dev), 4809 cfg->bus, cfg->slot, cfg->func, reg, width)); 4810 } 4811 4812 void 4813 pci_write_config_method(device_t dev, device_t child, int reg, 4814 uint32_t val, int width) 4815 { 4816 struct pci_devinfo *dinfo = device_get_ivars(child); 4817 pcicfgregs *cfg = &dinfo->cfg; 4818 4819 PCIB_WRITE_CONFIG(device_get_parent(dev), 4820 cfg->bus, cfg->slot, cfg->func, reg, val, width); 4821 } 4822 4823 int 4824 pci_child_location_str_method(device_t dev, device_t child, char *buf, 4825 size_t buflen) 4826 { 4827 4828 snprintf(buf, buflen, "pci%d:%d:%d:%d", pci_get_domain(child), 4829 pci_get_bus(child), pci_get_slot(child), pci_get_function(child)); 4830 return (0); 4831 } 4832 4833 int 4834 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, 4835 size_t buflen) 4836 { 4837 struct pci_devinfo *dinfo; 4838 pcicfgregs *cfg; 4839 4840 dinfo = device_get_ivars(child); 4841 cfg = &dinfo->cfg; 4842 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x " 4843 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, 4844 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, 4845 cfg->progif); 4846 return (0); 4847 } 4848 4849 int 4850 pci_assign_interrupt_method(device_t dev, device_t child) 4851 { 4852 struct pci_devinfo *dinfo = device_get_ivars(child); 4853 pcicfgregs *cfg = &dinfo->cfg; 4854 4855 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, 4856 cfg->intpin)); 4857 } 4858 4859 static void 4860 pci_lookup(void *arg, const char *name, device_t *dev) 4861 { 4862 long val; 4863 char *end; 4864 int domain, bus, slot, func; 4865 4866 if (*dev != NULL) 4867 return; 4868 4869 /* 4870 * Accept pciconf-style selectors of either pciD:B:S:F or 4871 * pciB:S:F. 
static void
pci_lookup(void *arg, const char *name, device_t *dev)
{
	long val;
	char *end;
	int domain, bus, slot, func;

	if (*dev != NULL)
		return;

	/*
	 * Accept pciconf-style selectors of either pciD:B:S:F or
	 * pciB:S:F.  In the latter case, the domain is assumed to
	 * be zero.
	 */
	if (strncmp(name, "pci", 3) != 0)
		return;
	val = strtol(name + 3, &end, 10);
	if (val < 0 || val > INT_MAX || *end != ':')
		return;
	domain = val;
	val = strtol(end + 1, &end, 10);
	if (val < 0 || val > INT_MAX || *end != ':')
		return;
	bus = val;
	val = strtol(end + 1, &end, 10);
	if (val < 0 || val > INT_MAX)
		return;
	slot = val;
	if (*end == ':') {
		val = strtol(end + 1, &end, 10);
		if (val < 0 || val > INT_MAX || *end != '\0')
			return;
		func = val;
	} else if (*end == '\0') {
		func = slot;
		slot = bus;
		bus = domain;
		domain = 0;
	} else
		return;

	if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
	    func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
		return;

	*dev = pci_find_dbsf(domain, bus, slot, func);
}
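/*
 * Example selectors accepted by pci_lookup() above (values hypothetical):
 * "pci0:2:0:0" names domain 0, bus 2, slot 0, function 0, while
 * "pci2:0:0" names the same device with the domain omitted.
 */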
static int
pci_modevent(module_t mod, int what, void *arg)
{
	static struct cdev *pci_cdev;
	static eventhandler_tag tag;

	switch (what) {
	case MOD_LOAD:
		STAILQ_INIT(&pci_devq);
		pci_generation = 0;
		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
		    "pci");
		pci_load_vendor_data();
		tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
		    1000);
		break;

	case MOD_UNLOAD:
		if (tag != NULL)
			EVENTHANDLER_DEREGISTER(dev_lookup, tag);
		destroy_dev(pci_cdev);
		break;
	}

	return (0);
}

static void
pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define	WREG(n, v)	pci_write_config(dev, pos + (n), (v), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl);

	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	     (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl);

	if (version > 1) {
		WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2);
		WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2);
		WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2);
	}
#undef WREG
}

static void
pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
{
	pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
	    dinfo->cfg.pcix.pcix_command, 2);
}

void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power-on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_restore_bars(dev);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/*
	 * Restore extended capabilities for PCI-Express and PCI-X
	 */
	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_restore_pcie(dev, dinfo);
	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_restore_pcix(dev, dinfo);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}

static void
pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
{
#define	RREG(n)	pci_read_config(dev, pos + (n), 2)
	struct pcicfg_pcie *cfg;
	int version, pos;

	cfg = &dinfo->cfg.pcie;
	pos = cfg->pcie_location;

	cfg->pcie_flags = RREG(PCIER_FLAGS);

	version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;

	cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
	    cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
		cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);

	if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
	     (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
		cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);

	if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
	    cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
		cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);

	if (version > 1) {
		cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
		cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
		cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
	}
#undef RREG
}

static void
pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
{
	dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
	    dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
}

void
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
{
	uint32_t cls;
	int ps;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges, which
	 * we know need special treatment.  Type 2 devices are cardbus bridges
	 * which also require special treatment.  Other types are unknown, and
	 * we err on the side of safety by ignoring them.  Powering down
	 * bridges should not be undertaken lightly.
	 */
	if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
		return;

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.  The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0 headers.  In
	 * theory we also need to save/restore the PCI capability structures
	 * we know about, but apart from power we don't know any that are
	 * writable.
	 */
	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);

	if (dinfo->cfg.pcie.pcie_location != 0)
		pci_cfg_save_pcie(dev, dinfo);

	if (dinfo->cfg.pcix.pcix_location != 0)
		pci_cfg_save_pcix(dev, dinfo);

	/*
	 * Don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	if (!setstate)
		return;
	switch (pci_do_power_nodriver)
	{
	case 0:		/* NO powerdown at all */
		return;
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
			return;
		/*FALLTHROUGH*/
	case 2:		/* Aggressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
			return;
		/*FALLTHROUGH*/
	case 3:		/* Power down everything */
		break;
	}
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}
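/*
 * The D-state transition at the end of pci_cfg_save() mirrors what a
 * driver would do by hand; a hedged sketch of the same rule:
 */
#if 0
static void
foo_powerdown(device_t dev)
{
	/* The PCI spec only allows D0 -> D3; leave D1/D2 via D0 first. */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0 &&
	    pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}
#endif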
/* Wrapper APIs suitable for device driver use. */
void
pci_save_state(device_t dev)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_save(dev, dinfo, 0);
}

void
pci_restore_state(device_t dev)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(dev);
	pci_cfg_restore(dev, dinfo);
}

static uint16_t
pci_get_rid_method(device_t dev, device_t child)
{

	return (PCIB_GET_RID(device_get_parent(dev), child));
}
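/*
 * A hedged sketch of how a driver pairs the wrappers above in its
 * suspend and resume methods (the "foo" driver is hypothetical):
 */
#if 0
static int
foo_suspend(device_t dev)
{
	pci_save_state(dev);		/* Snapshot config space. */
	return (bus_generic_suspend(dev));
}

static int
foo_resume(device_t dev)
{
	pci_restore_state(dev);		/* Restore BARs, MSI, PCIe regs. */
	return (bus_generic_resume(dev));
}
#endif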