Lines Matching +full:msi +full:- +full:map

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
88 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
89 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
236 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
237 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
238 #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
239 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
240 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
241 #define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
248 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
255 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
256 * or the CMIC-SL (AKA ServerWorks GC_LE).
262 * MSI doesn't work on earlier Intel chipsets including
274 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
281 * but support MSI just fine. QEMU uses the Intel 82440.
297 * a bug where the MSI interrupt does not assert if the PCIM_CMD_INTxDIS bit
308 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
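The quirk table is keyed by the combined ID that pci_add_resources() computes below as (device << 16) | vendor. A hypothetical entry for the 82371AB case above, assuming a { devid, type, arg1, ... } layout (the IDs are shown for illustration only):

	/* Hypothetical: Intel 82371AB (device 0x7113, vendor 0x8086), hidden BAR at 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },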
325 /* map register information */
326 #define PCI_MAPMEM 0x01 /* memory map */
327 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
328 #define PCI_MAPPORT 0x04 /* port map */
350 "firmware-assigned ranges fail to allocate during the initial device scan.");
363 "Transition from D3 -> D0 on resume.");
368 "Transition from D0 -> D3 on suspend.");
372 "Enable support for MSI interrupts");
376 "Enable support for MSI-X interrupts");
381 "Rewrite entire MSI-X table when updating MSI-X entries");
385 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
400 "Ignore firmware-assigned resources for BARs.");
404 "Ignore firmware-assigned bus numbers.");
437 for (q = &pci_quirks[0]; q->devid; q++) { in pci_has_quirk()
438 if (q->devid == devid && q->type == quirk) in pci_has_quirk()
461 if ((dinfo->cfg.domain == domain) && in pci_find_dbsf()
462 (dinfo->cfg.bus == bus) && in pci_find_dbsf()
463 (dinfo->cfg.slot == slot) && in pci_find_dbsf()
464 (dinfo->cfg.func == func)) { in pci_find_dbsf()
469 return (dinfo != NULL ? dinfo->cfg.dev : NULL); in pci_find_dbsf()
480 if ((dinfo->cfg.vendor == vendor) && in pci_find_device()
481 (dinfo->cfg.device == device)) { in pci_find_device()
482 return (dinfo->cfg.dev); in pci_find_device()
495 if (dinfo->cfg.baseclass == class && in pci_find_class()
496 dinfo->cfg.subclass == subclass) { in pci_find_class()
497 return (dinfo->cfg.dev); in pci_find_class()
512 if (from != dinfo->cfg.dev) in pci_find_class_from()
517 if (dinfo->cfg.baseclass == class && in pci_find_class_from()
518 dinfo->cfg.subclass == subclass) { in pci_find_class_from()
519 return (dinfo->cfg.dev); in pci_find_class_from()
534 if (from != dinfo->cfg.dev) in pci_find_base_class_from()
539 if (dinfo->cfg.baseclass == class) { in pci_find_base_class_from()
540 return (dinfo->cfg.dev); in pci_find_base_class_from()
553 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, in pci_printf()
554 cfg->func); in pci_printf()
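These find helpers walk the global PCI device list. A minimal sketch of how other kernel code might call them (the vendor/device IDs are placeholders, not taken from this file):

	device_t dev;

	/* Look a function up by domain/bus/slot/function... */
	dev = pci_find_dbsf(0, 0, 31, 0);

	/* ...or by vendor and device ID (placeholder values). */
	if (dev == NULL)
		dev = pci_find_device(0x8086, 0x1234);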
561 /* return base address of memory or port map */
573 /* return map type of memory or port map */
586 /* return log2 of map size decoded for memory or port map */
614 /* return log2 of map size decoded for device ROM */
633 /* return log2 of address range supported by map register */
662 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) in pci_fixancient()
666 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) in pci_fixancient()
667 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; in pci_fixancient()
676 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_hdrtypedata()
678 cfg->subvendor = REG(PCIR_SUBVEND_0, 2); in pci_hdrtypedata()
679 cfg->subdevice = REG(PCIR_SUBDEV_0, 2); in pci_hdrtypedata()
680 cfg->mingnt = REG(PCIR_MINGNT, 1); in pci_hdrtypedata()
681 cfg->maxlat = REG(PCIR_MAXLAT, 1); in pci_hdrtypedata()
682 cfg->nummaps = PCI_MAXMAPS_0; in pci_hdrtypedata()
685 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); in pci_hdrtypedata()
686 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); in pci_hdrtypedata()
687 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); in pci_hdrtypedata()
688 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); in pci_hdrtypedata()
689 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); in pci_hdrtypedata()
690 cfg->nummaps = PCI_MAXMAPS_1; in pci_hdrtypedata()
693 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); in pci_hdrtypedata()
694 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); in pci_hdrtypedata()
695 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); in pci_hdrtypedata()
696 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); in pci_hdrtypedata()
697 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); in pci_hdrtypedata()
698 cfg->subvendor = REG(PCIR_SUBVEND_2, 2); in pci_hdrtypedata()
699 cfg->subdevice = REG(PCIR_SUBDEV_2, 2); in pci_hdrtypedata()
700 cfg->nummaps = PCI_MAXMAPS_2; in pci_hdrtypedata()
739 cfg = &devlist_entry->cfg; in pci_fill_devinfo()
741 cfg->domain = d; in pci_fill_devinfo()
742 cfg->bus = b; in pci_fill_devinfo()
743 cfg->slot = s; in pci_fill_devinfo()
744 cfg->func = f; in pci_fill_devinfo()
745 cfg->vendor = vid; in pci_fill_devinfo()
746 cfg->device = did; in pci_fill_devinfo()
747 cfg->cmdreg = REG(PCIR_COMMAND, 2); in pci_fill_devinfo()
748 cfg->statreg = REG(PCIR_STATUS, 2); in pci_fill_devinfo()
749 cfg->baseclass = REG(PCIR_CLASS, 1); in pci_fill_devinfo()
750 cfg->subclass = REG(PCIR_SUBCLASS, 1); in pci_fill_devinfo()
751 cfg->progif = REG(PCIR_PROGIF, 1); in pci_fill_devinfo()
752 cfg->revid = REG(PCIR_REVID, 1); in pci_fill_devinfo()
753 cfg->hdrtype = REG(PCIR_HDRTYPE, 1); in pci_fill_devinfo()
754 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); in pci_fill_devinfo()
755 cfg->lattimer = REG(PCIR_LATTIMER, 1); in pci_fill_devinfo()
756 cfg->intpin = REG(PCIR_INTPIN, 1); in pci_fill_devinfo()
757 cfg->intline = REG(PCIR_INTLINE, 1); in pci_fill_devinfo()
759 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; in pci_fill_devinfo()
760 cfg->hdrtype &= ~PCIM_MFDEV; in pci_fill_devinfo()
761 STAILQ_INIT(&cfg->maps); in pci_fill_devinfo()
763 cfg->iov = NULL; in pci_fill_devinfo()
773 devlist_entry->conf.pc_sel.pc_domain = cfg->domain; in pci_fill_devinfo()
774 devlist_entry->conf.pc_sel.pc_bus = cfg->bus; in pci_fill_devinfo()
775 devlist_entry->conf.pc_sel.pc_dev = cfg->slot; in pci_fill_devinfo()
776 devlist_entry->conf.pc_sel.pc_func = cfg->func; in pci_fill_devinfo()
777 devlist_entry->conf.pc_hdr = cfg->hdrtype; in pci_fill_devinfo()
779 devlist_entry->conf.pc_subvendor = cfg->subvendor; in pci_fill_devinfo()
780 devlist_entry->conf.pc_subdevice = cfg->subdevice; in pci_fill_devinfo()
781 devlist_entry->conf.pc_vendor = cfg->vendor; in pci_fill_devinfo()
782 devlist_entry->conf.pc_device = cfg->device; in pci_fill_devinfo()
784 devlist_entry->conf.pc_class = cfg->baseclass; in pci_fill_devinfo()
785 devlist_entry->conf.pc_subclass = cfg->subclass; in pci_fill_devinfo()
786 devlist_entry->conf.pc_progif = cfg->progif; in pci_fill_devinfo()
787 devlist_entry->conf.pc_revid = cfg->revid; in pci_fill_devinfo()
799 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ in pci_ea_fill_info()
800 cfg->ea.ea_location + (n), w) in pci_ea_fill_info()
810 if (cfg->ea.ea_location == 0) in pci_ea_fill_info()
813 STAILQ_INIT(&cfg->ea.ea_entries); in pci_ea_fill_info()
823 if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) in pci_ea_fill_info()
828 eae->eae_cfg_offset = cfg->ea.ea_location + ptr; in pci_ea_fill_info()
840 eae->eae_flags = val; in pci_ea_fill_info()
841 eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; in pci_ea_fill_info()
856 eae->eae_base = base; in pci_ea_fill_info()
857 eae->eae_max_offset = max_offset; in pci_ea_fill_info()
859 STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); in pci_ea_fill_info()
863 cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, in pci_ea_fill_info()
864 (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); in pci_ea_fill_info()
873 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) in pci_read_cap()
874 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) in pci_read_cap()
881 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_read_cap()
911 cfg->pp.pp_location = ptr; in pci_read_cap()
912 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); in pci_read_cap()
915 /* Determine HT-specific capability type. */ in pci_read_cap()
919 cfg->ht.ht_slave = ptr; in pci_read_cap()
933 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", in pci_read_cap()
934 cfg->domain, cfg->bus, in pci_read_cap()
935 cfg->slot, cfg->func, in pci_read_cap()
940 cfg->ht.ht_msimap = ptr; in pci_read_cap()
941 cfg->ht.ht_msictrl = val; in pci_read_cap()
942 cfg->ht.ht_msiaddr = addr; in pci_read_cap()
947 case PCIY_MSI: /* PCI MSI */ in pci_read_cap()
948 cfg->msi.msi_location = ptr; in pci_read_cap()
949 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); in pci_read_cap()
951 case PCIY_MSIX: /* PCI MSI-X */ in pci_read_cap()
952 cfg->msix.msix_location = ptr; in pci_read_cap()
953 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); in pci_read_cap()
955 cfg->msix.msix_table_bar = PCIR_BAR(val & in pci_read_cap()
957 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; in pci_read_cap()
959 cfg->msix.msix_pba_bar = PCIR_BAR(val & in pci_read_cap()
961 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; in pci_read_cap()
964 cfg->vpd.vpd_reg = ptr; in pci_read_cap()
968 if ((cfg->hdrtype & PCIM_HDRTYPE) == in pci_read_cap()
971 cfg->subvendor = val & 0xffff; in pci_read_cap()
972 cfg->subdevice = val >> 16; in pci_read_cap()
975 case PCIY_PCIX: /* PCI-X */ in pci_read_cap()
977 * Assume we have a PCI-X chipset if we have in pci_read_cap()
978 * at least one PCI-PCI bridge with a PCI-X in pci_read_cap()
980 * PCI-express or HT chipsets might match on in pci_read_cap()
983 if ((cfg->hdrtype & PCIM_HDRTYPE) == in pci_read_cap()
986 cfg->pcix.pcix_location = ptr; in pci_read_cap()
988 case PCIY_EXPRESS: /* PCI-express */ in pci_read_cap()
990 * Assume we have a PCI-express chipset if we have in pci_read_cap()
991 * at least one PCI-express device. in pci_read_cap()
994 cfg->pcie.pcie_location = ptr; in pci_read_cap()
996 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; in pci_read_cap()
999 cfg->ea.ea_location = ptr; in pci_read_cap()
1009 * Enable the MSI mapping window for all HyperTransport in pci_read_cap()
1010 * slaves. PCI-PCI bridges have their windows enabled via in pci_read_cap()
1013 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && in pci_read_cap()
1014 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { in pci_read_cap()
1016 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n", in pci_read_cap()
1017 cfg->domain, cfg->bus, cfg->slot, cfg->func); in pci_read_cap()
1018 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; in pci_read_cap()
1019 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, in pci_read_cap()
1039 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); in pci_read_vpd_reg()
1041 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { in pci_read_vpd_reg()
1042 if (--count < 0) in pci_read_vpd_reg()
1046 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); in pci_read_vpd_reg()
1059 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
1060 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
1061 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
1062 if (--count < 0)
1082 /* return 0 and one byte in *data if no read error, -1 else */
1089 if (vrs->bytesinval == 0) { in vpd_nextbyte()
1090 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg)) in vpd_nextbyte()
1091 return (-1); in vpd_nextbyte()
1092 vrs->val = le32toh(reg); in vpd_nextbyte()
1093 vrs->off += 4; in vpd_nextbyte()
1094 byte = vrs->val & 0xff; in vpd_nextbyte()
1095 vrs->bytesinval = 3; in vpd_nextbyte()
1097 vrs->val = vrs->val >> 8; in vpd_nextbyte()
1098 byte = vrs->val & 0xff; in vpd_nextbyte()
1099 vrs->bytesinval--; in vpd_nextbyte()
1102 vrs->cksum += byte; in vpd_nextbyte()
1107 /* return 0 on match, -1 and "unget" byte on no match */
1114 return (-1); in vpd_expectbyte()
1119 vrs->cksum -= data; in vpd_expectbyte()
1120 vrs->val = (vrs->val << 8) + data; in vpd_expectbyte()
1121 vrs->bytesinval++; in vpd_expectbyte()
1122 return (-1); in vpd_expectbyte()
1125 /* return size if tag matches, -1 on no match, -2 on read error */
1132 return (-1); in vpd_read_tag_size()
1138 return (-2); in vpd_read_tag_size()
1140 return (-2); in vpd_read_tag_size()
1161 /* read VPD keyword and return element size, return -1 on read error */
1168 return (-1); in vpd_read_elem_head()
1170 return (-1); in vpd_read_elem_head()
1172 return (-1); in vpd_read_elem_head()
1206 return (-1); in vpd_read_elem_data()
1222 vrs->cksum -= fixup; in vpd_fixup_cksum()
1225 /* fetch one read-only element and return size of heading + data */
1234 cfg = vrs->cfg; in next_vpd_ro_elem()
1235 vpd = &cfg->vpd; in next_vpd_ro_elem()
1238 return (-1); in next_vpd_ro_elem()
1239 vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt); in next_vpd_ro_elem()
1240 vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt]; in next_vpd_ro_elem()
1241 maxsize -= 3; in next_vpd_ro_elem()
1242 len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize); in next_vpd_ro_elem()
1243 if (vpd_ros->value == NULL) in next_vpd_ro_elem()
1244 return (-1); in next_vpd_ro_elem()
1245 vpd_ros->len = len; in next_vpd_ro_elem()
1246 if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') { in next_vpd_ro_elem()
1247 vpd_fixup_cksum(vrs, vpd_ros->value, len); in next_vpd_ro_elem()
1248 if (vrs->cksum != 0) { in next_vpd_ro_elem()
1250 "invalid VPD checksum %#hhx\n", vrs->cksum); in next_vpd_ro_elem()
1251 return (-1); in next_vpd_ro_elem()
1254 vpd->vpd_rocnt++; in next_vpd_ro_elem()
1268 cfg = vrs->cfg; in next_vpd_rw_elem()
1269 vpd = &cfg->vpd; in next_vpd_rw_elem()
1272 return (-1); in next_vpd_rw_elem()
1273 vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt); in next_vpd_rw_elem()
1274 if (vpd->vpd_w == NULL) { in next_vpd_rw_elem()
1276 return (-1); in next_vpd_rw_elem()
1278 vpd_w = &vpd->vpd_w[vpd->vpd_wcnt]; in next_vpd_rw_elem()
1279 maxsize -= 3; in next_vpd_rw_elem()
1280 vpd_w->start = vrs->off + 3 - vrs->bytesinval; in next_vpd_rw_elem()
1281 len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize); in next_vpd_rw_elem()
1282 if (vpd_w->value == NULL) in next_vpd_rw_elem()
1283 return (-1); in next_vpd_rw_elem()
1284 vpd_w->len = len; in next_vpd_rw_elem()
1285 vpd->vpd_wcnt++; in next_vpd_rw_elem()
1296 free(vpd->vpd_ident, M_DEVBUF); in vpd_free()
1297 for (i = 0; i < vpd->vpd_rocnt; i++) in vpd_free()
1298 free(vpd->vpd_ros[i].value, M_DEVBUF); in vpd_free()
1299 free(vpd->vpd_ros, M_DEVBUF); in vpd_free()
1300 vpd->vpd_rocnt = 0; in vpd_free()
1301 for (i = 0; i < vpd->vpd_wcnt; i++) in vpd_free()
1302 free(vpd->vpd_w[i].value, M_DEVBUF); in vpd_free()
1303 free(vpd->vpd_w, M_DEVBUF); in vpd_free()
1304 vpd->vpd_wcnt = 0; in vpd_free()
1326 /* read VPD ident element - mandatory */ in pci_parse_vpd()
1332 cfg->vpd.vpd_ident = vpd_read_value(&vrs, size); in pci_parse_vpd()
1333 if (cfg->vpd.vpd_ident == NULL) { in pci_parse_vpd()
1338 /* read VPD RO elements - mandatory */ in pci_parse_vpd()
1341 pci_printf(cfg, "no read-only VPD data found\n"); in pci_parse_vpd()
1347 pci_printf(cfg, "error accessing read-only VPD data\n"); in pci_parse_vpd()
1348 return (-1); in pci_parse_vpd()
1350 size -= elem_size; in pci_parse_vpd()
1354 return (-1); in pci_parse_vpd()
1356 /* read VPD RW elements - optional */ in pci_parse_vpd()
1358 if (size == -2) in pci_parse_vpd()
1359 return (-1); in pci_parse_vpd()
1364 return (-1); in pci_parse_vpd()
1366 size -= elem_size; in pci_parse_vpd()
1369 /* read empty END tag - mandatory */ in pci_parse_vpd()
1384 vpd_free(&cfg->vpd); in pci_read_vpd()
1385 cfg->vpd.vpd_cached = 1; in pci_read_vpd()
1394 pcicfgregs *cfg = &dinfo->cfg; in pci_get_vpd_ident_method()
1396 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_get_vpd_ident_method()
1399 *identptr = cfg->vpd.vpd_ident; in pci_get_vpd_ident_method()
1412 pcicfgregs *cfg = &dinfo->cfg; in pci_get_vpd_readonly_method()
1415 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_get_vpd_readonly_method()
1418 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) in pci_get_vpd_readonly_method()
1419 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, in pci_get_vpd_readonly_method()
1420 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { in pci_get_vpd_readonly_method()
1421 *vptr = cfg->vpd.vpd_ros[i].value; in pci_get_vpd_readonly_method()
1433 pcicfgregs *cfg = &dinfo->cfg; in pci_fetch_vpd_list()
1435 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_fetch_vpd_list()
1437 return (&cfg->vpd); in pci_fetch_vpd_list()
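Drivers reach the VPD cached above through the accessor wrappers rather than these methods directly. A hedged sketch, assumed to run in a driver method where dev is the PCI child device (the "SN" keyword is only an example):

	const char *ident, *sn;

	/* Identifier string from the mandatory VPD ident element. */
	if (pci_get_vpd_ident(dev, &ident) == 0)
		device_printf(dev, "VPD ident: %s\n", ident);

	/* A read-only keyword, e.g. a serial-number element. */
	if (pci_get_vpd_readonly(dev, "SN", &sn) == 0)
		device_printf(dev, "serial: %s\n", sn);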
1531 pcicfgregs *cfg = &dinfo->cfg; in pci_find_cap_method()
1546 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_find_cap_method()
1565 for (cnt = 0; ptr != 0 && cnt < (PCIE_REGMAX - 0x40) / 2; cnt++) { in pci_find_cap_method()
1614 pcicfgregs *cfg = &dinfo->cfg; in pci_find_extcap_method()
1618 /* Only supported for PCI-express devices. */ in pci_find_extcap_method()
1619 if (cfg->pcie.pcie_location == 0) in pci_find_extcap_method()
1651 pcicfgregs *cfg = &dinfo->cfg; in pci_find_next_extcap_method()
1655 /* Only supported for PCI-express devices. */ in pci_find_next_extcap_method()
1656 if (cfg->pcie.pcie_location == 0) in pci_find_next_extcap_method()
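A short, hedged example of the lookups these methods back, as a driver would use them (again assuming dev is the PCI child device): finding a standard capability and a PCIe extended capability.

	int cap, aer;

	/* Standard capability list entry (here, the PCIe capability). */
	if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0)
		device_printf(dev, "PCIe cap at 0x%x\n", cap);

	/* Extended capabilities exist only on PCI-express devices. */
	if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0)
		device_printf(dev, "AER extended cap at 0x%x\n", aer);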
1677 * Support for MSI-X message interrupts.
1683 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_write_msix_entry()
1686 KASSERT(msix->msix_table_len > index, ("bogus index")); in pci_write_msix_entry()
1687 offset = msix->msix_table_offset + index * 16; in pci_write_msix_entry()
1688 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); in pci_write_msix_entry()
1689 bus_write_4(msix->msix_table_res, offset + 4, address >> 32); in pci_write_msix_entry()
1690 bus_write_4(msix->msix_table_res, offset + 8, data); in pci_write_msix_entry()
1700 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_enable_msix_method()
1711 msix->msix_location + PCIR_MSIX_CTRL, in pci_enable_msix_method()
1712 msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); in pci_enable_msix_method()
1717 /* Enable MSI -> HT mapping. */ in pci_enable_msix_method()
1725 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_mask_msix()
1728 KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index")); in pci_mask_msix()
1729 offset = msix->msix_table_offset + index * 16 + 12; in pci_mask_msix()
1730 val = bus_read_4(msix->msix_table_res, offset); in pci_mask_msix()
1737 bus_write_4(msix->msix_table_res, offset, val); in pci_mask_msix()
1744 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_unmask_msix()
1747 KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index")); in pci_unmask_msix()
1748 offset = msix->msix_table_offset + index * 16 + 12; in pci_unmask_msix()
1749 val = bus_read_4(msix->msix_table_res, offset); in pci_unmask_msix()
1756 bus_write_4(msix->msix_table_res, offset, val); in pci_unmask_msix()
1763 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_pending_msix()
1766 KASSERT(msix->msix_table_len > index, ("bogus index")); in pci_pending_msix()
1767 offset = msix->msix_pba_offset + (index / 32) * 4; in pci_pending_msix()
1769 return (bus_read_4(msix->msix_pba_res, offset) & bit); in pci_pending_msix()
1773 * Restore MSI-X registers and table during resume. If MSI-X is
1774 * enabled then walk the virtual table to restore the actual MSI-X
1781 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_resume_msix()
1786 if (msix->msix_alloc > 0) { in pci_resume_msix()
1787 msgnum = PCI_MSIX_MSGNUM(msix->msix_ctrl); in pci_resume_msix()
1794 for (i = 0; i < msix->msix_table_len; i++) { in pci_resume_msix()
1795 mte = &msix->msix_table[i]; in pci_resume_msix()
1796 if (mte->mte_vector == 0 || mte->mte_handlers == 0) in pci_resume_msix()
1798 mv = &msix->msix_vectors[mte->mte_vector - 1]; in pci_resume_msix()
1799 pci_write_msix_entry(dev, i, mv->mv_address, in pci_resume_msix()
1800 mv->mv_data); in pci_resume_msix()
1804 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, in pci_resume_msix()
1805 msix->msix_ctrl, 2); in pci_resume_msix()
1809 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1817 pcicfgregs *cfg = &dinfo->cfg; in pci_alloc_msix_method()
1828 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_alloc_msix_method()
1829 if (rle != NULL && rle->res != NULL) in pci_alloc_msix_method()
1833 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) in pci_alloc_msix_method()
1836 /* If MSI-X is blacklisted for this system, fail. */ in pci_alloc_msix_method()
1840 /* MSI-X capability present? */ in pci_alloc_msix_method()
1841 if (cfg->msix.msix_location == 0 || !pci_do_msix) in pci_alloc_msix_method()
1845 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, in pci_alloc_msix_method()
1846 cfg->msix.msix_table_bar); in pci_alloc_msix_method()
1847 if (rle == NULL || rle->res == NULL || in pci_alloc_msix_method()
1848 !(rman_get_flags(rle->res) & RF_ACTIVE)) in pci_alloc_msix_method()
1850 cfg->msix.msix_table_res = rle->res; in pci_alloc_msix_method()
1851 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { in pci_alloc_msix_method()
1852 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, in pci_alloc_msix_method()
1853 cfg->msix.msix_pba_bar); in pci_alloc_msix_method()
1854 if (rle == NULL || rle->res == NULL || in pci_alloc_msix_method()
1855 !(rman_get_flags(rle->res) & RF_ACTIVE)) in pci_alloc_msix_method()
1858 cfg->msix.msix_pba_res = rle->res; in pci_alloc_msix_method()
1860 ctrl = pci_read_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, in pci_alloc_msix_method()
1865 "attempting to allocate %d MSI-X vectors (%d supported)\n", in pci_alloc_msix_method()
1876 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, in pci_alloc_msix_method()
1882 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); in pci_alloc_msix_method()
1884 device_printf(child, "using IRQ %ju for MSI-X\n", in pci_alloc_msix_method()
1885 rle->start); in pci_alloc_msix_method()
1894 device_printf(child, "using IRQs %ju", rle->start); in pci_alloc_msix_method()
1895 irq = rle->start; in pci_alloc_msix_method()
1898 rle = resource_list_find(&dinfo->resources, in pci_alloc_msix_method()
1902 if (rle->start == irq + 1) { in pci_alloc_msix_method()
1910 printf("-%d", irq); in pci_alloc_msix_method()
1915 printf(",%ju", rle->start); in pci_alloc_msix_method()
1916 irq = rle->start; in pci_alloc_msix_method()
1921 printf("-%d", irq); in pci_alloc_msix_method()
1922 printf(" for MSI-X\n"); in pci_alloc_msix_method()
1931 cfg->msix.msix_vectors = mallocarray(actual, sizeof(struct msix_vector), in pci_alloc_msix_method()
1933 cfg->msix.msix_table = mallocarray(actual, in pci_alloc_msix_method()
1936 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_alloc_msix_method()
1937 cfg->msix.msix_vectors[i].mv_irq = rle->start; in pci_alloc_msix_method()
1938 cfg->msix.msix_table[i].mte_vector = i + 1; in pci_alloc_msix_method()
1941 /* Update control register to enable MSI-X. */ in pci_alloc_msix_method()
1943 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, in pci_alloc_msix_method()
1945 cfg->msix.msix_ctrl = ctrl; in pci_alloc_msix_method()
1948 cfg->msix.msix_alloc = actual; in pci_alloc_msix_method()
1949 cfg->msix.msix_table_len = actual; in pci_alloc_msix_method()
1956 * resources consecutively to the first N messages in the MSI-X table.
1959 * populate the MSI-X table sparsely. This method allows the driver
1965 * maps directly to the MSI-X table in that index 0 in the array
1966 * specifies the vector for the first message in the MSI-X table, etc.
1973 * On successful return, each message with a non-zero vector will have
1979 * For example, suppose a driver has a MSI-X table with 6 messages and
1983 * have an MSI-X table of ABC--- (where - means no vector assigned).
1985 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1989 * In any case, the SYS_RES_IRQ rid X will always map to the message
1990 * at MSI-X table index X - 1 and will only be valid if a vector is
1998 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_remap_msix_method()
2005 * table can't be bigger than the actual MSI-X table in the in pci_remap_msix_method()
2008 if (count < 1 || count > PCI_MSIX_MSGNUM(msix->msix_ctrl)) in pci_remap_msix_method()
2013 if (vectors[i] > msix->msix_alloc) in pci_remap_msix_method()
2021 used = mallocarray(msix->msix_alloc, sizeof(*used), M_DEVBUF, M_WAITOK | in pci_remap_msix_method()
2025 used[vectors[i] - 1] = true; in pci_remap_msix_method()
2026 for (i = 0; i < msix->msix_alloc - 1; i++) in pci_remap_msix_method()
2037 for (i = 0; i < msix->msix_table_len; i++) { in pci_remap_msix_method()
2038 if (msix->msix_table[i].mte_vector == 0) in pci_remap_msix_method()
2040 if (msix->msix_table[i].mte_handlers > 0) { in pci_remap_msix_method()
2044 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_remap_msix_method()
2046 if (rle->res != NULL) { in pci_remap_msix_method()
2053 for (i = 0; i < msix->msix_table_len; i++) { in pci_remap_msix_method()
2054 if (msix->msix_table[i].mte_vector == 0) in pci_remap_msix_method()
2056 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_remap_msix_method()
2063 free(msix->msix_table, M_DEVBUF); in pci_remap_msix_method()
2064 msix->msix_table = mallocarray(count, sizeof(struct msix_table_entry), in pci_remap_msix_method()
2067 msix->msix_table[i].mte_vector = vectors[i]; in pci_remap_msix_method()
2068 msix->msix_table_len = count; in pci_remap_msix_method()
2071 j = msix->msix_alloc - 1; in pci_remap_msix_method()
2077 msix->msix_vectors[j].mv_irq); in pci_remap_msix_method()
2078 j--; in pci_remap_msix_method()
2082 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * in pci_remap_msix_method()
2084 free(msix->msix_vectors, M_DEVBUF); in pci_remap_msix_method()
2085 msix->msix_vectors = vec; in pci_remap_msix_method()
2086 msix->msix_alloc = j + 1; in pci_remap_msix_method()
2090 /* Map the IRQs onto the rids. */ in pci_remap_msix_method()
2094 irq = msix->msix_vectors[vectors[i] - 1].mv_irq; in pci_remap_msix_method()
2095 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, in pci_remap_msix_method()
2100 device_printf(child, "Remapped MSI-X IRQs as: "); in pci_remap_msix_method()
2105 printf("---"); in pci_remap_msix_method()
2108 msix->msix_vectors[vectors[i] - 1].mv_irq); in pci_remap_msix_method()
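A hedged sketch of the remapping contract described above: a driver that allocated two vectors but wants table slot 2 (rid 2) left unused could pass the array below.

	/* 0 means "no vector at this table entry". */
	u_int vectors[3] = { 1, 0, 2 };

	if (pci_remap_msix(dev, 3, vectors) == 0) {
		/* rid 1 -> table entry 0, rid 3 -> table entry 2;
		 * rid 2 has no vector and must not be used. */
	}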
2120 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_release_msix()
2125 if (msix->msix_alloc == 0) in pci_release_msix()
2129 for (i = 0; i < msix->msix_table_len; i++) { in pci_release_msix()
2130 if (msix->msix_table[i].mte_vector == 0) in pci_release_msix()
2132 if (msix->msix_table[i].mte_handlers > 0) in pci_release_msix()
2134 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msix()
2136 if (rle->res != NULL) in pci_release_msix()
2140 /* Update control register to disable MSI-X. */ in pci_release_msix()
2141 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; in pci_release_msix()
2142 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, in pci_release_msix()
2143 msix->msix_ctrl, 2); in pci_release_msix()
2146 for (i = 0; i < msix->msix_table_len; i++) { in pci_release_msix()
2147 if (msix->msix_table[i].mte_vector == 0) in pci_release_msix()
2149 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msix()
2151 free(msix->msix_table, M_DEVBUF); in pci_release_msix()
2152 msix->msix_table_len = 0; in pci_release_msix()
2155 for (i = 0; i < msix->msix_alloc; i++) in pci_release_msix()
2157 msix->msix_vectors[i].mv_irq); in pci_release_msix()
2158 free(msix->msix_vectors, M_DEVBUF); in pci_release_msix()
2159 msix->msix_alloc = 0; in pci_release_msix()
2164 * Return the maximum number of MSI-X messages this device supports.
2173 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_count_method()
2176 if (pci_do_msix && msix->msix_location != 0) { in pci_msix_count_method()
2177 ctrl = pci_read_config(child, msix->msix_location + in pci_msix_count_method()
2188 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_pba_bar_method()
2190 if (pci_do_msix && msix->msix_location != 0) in pci_msix_pba_bar_method()
2191 return (msix->msix_pba_bar); in pci_msix_pba_bar_method()
2192 return (-1); in pci_msix_pba_bar_method()
2199 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_table_bar_method()
2201 if (pci_do_msix && msix->msix_location != 0) in pci_msix_table_bar_method()
2202 return (msix->msix_table_bar); in pci_msix_table_bar_method()
2203 return (-1); in pci_msix_table_bar_method()
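Because pci_alloc_msix() above requires the BAR containing the MSI-X table to be allocated and active, drivers typically grab it first; a minimal sketch:

	struct resource *table_res;
	int rid;

	rid = pci_msix_table_bar(dev);
	if (rid != -1)
		table_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &rid, RF_ACTIVE);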
2207 * HyperTransport MSI mapping control
2213 struct pcicfg_ht *ht = &dinfo->cfg.ht; in pci_ht_map_msi()
2215 if (!ht->ht_msimap) in pci_ht_map_msi()
2218 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && in pci_ht_map_msi()
2219 ht->ht_msiaddr >> 20 == addr >> 20) { in pci_ht_map_msi()
2220 /* Enable MSI -> HT mapping. */ in pci_ht_map_msi()
2221 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; in pci_ht_map_msi()
2222 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, in pci_ht_map_msi()
2223 ht->ht_msictrl, 2); in pci_ht_map_msi()
2226 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { in pci_ht_map_msi()
2227 /* Disable MSI -> HT mapping. */ in pci_ht_map_msi()
2228 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; in pci_ht_map_msi()
2229 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, in pci_ht_map_msi()
2230 ht->ht_msictrl, 2); in pci_ht_map_msi()
2241 cap = dinfo->cfg.pcie.pcie_location; in pci_get_relaxed_ordering_enabled()
2256 cap = dinfo->cfg.pcie.pcie_location; in pci_get_max_payload()
2272 cap = dinfo->cfg.pcie.pcie_location; in pci_get_max_read_req()
2288 cap = dinfo->cfg.pcie.pcie_location; in pci_set_max_read_req()
2295 size = (1 << (fls(size) - 1)); in pci_set_max_read_req()
2298 val |= (fls(size) - 8) << 12; in pci_set_max_read_req()
2309 cap = dinfo->cfg.pcie.pcie_location; in pcie_read_config()
2325 cap = dinfo->cfg.pcie.pcie_location; in pcie_write_config()
2332 * Adjusts a PCI-e capability register by clearing the bits in mask
2346 cap = dinfo->cfg.pcie.pcie_location; in pcie_adjust_config()
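A hedged example of these PCIe config helpers; the 2048-byte read request size and the relaxed-ordering bit are only illustrative:

	uint16_t devctl;

	/* Read the PCIe device control register via the capability. */
	devctl = pcie_read_config(dev, PCIER_DEVICE_CTL, 2);

	/* Raise the maximum read request size (rounded to a power of two). */
	pci_set_max_read_req(dev, 2048);

	/* Clear the relaxed-ordering enable bit (mask it out, OR in zero). */
	pcie_adjust_config(dev, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE, 0, 2);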
2361 * Support for MSI message signalled interrupts.
2368 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_enable_msi_method() local
2371 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, in pci_enable_msi_method()
2373 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { in pci_enable_msi_method()
2374 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, in pci_enable_msi_method()
2376 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, in pci_enable_msi_method()
2379 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, in pci_enable_msi_method()
2382 /* Enable MSI in the control register. */ in pci_enable_msi_method()
2383 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; in pci_enable_msi_method()
2384 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_enable_msi_method()
2385 msi->msi_ctrl, 2); in pci_enable_msi_method()
2387 /* Enable MSI -> HT mapping. */ in pci_enable_msi_method()
2395 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_disable_msi_method() local
2397 /* Disable MSI -> HT mapping. */ in pci_disable_msi_method()
2400 /* Disable MSI in the control register. */ in pci_disable_msi_method()
2401 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; in pci_disable_msi_method()
2402 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_disable_msi_method()
2403 msi->msi_ctrl, 2); in pci_disable_msi_method()
2407 * Restore MSI registers during resume. If MSI is enabled then
2415 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_resume_msi() local
2419 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { in pci_resume_msi()
2420 address = msi->msi_addr; in pci_resume_msi()
2421 data = msi->msi_data; in pci_resume_msi()
2422 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, in pci_resume_msi()
2424 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { in pci_resume_msi()
2425 pci_write_config(dev, msi->msi_location + in pci_resume_msi()
2427 pci_write_config(dev, msi->msi_location + in pci_resume_msi()
2430 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, in pci_resume_msi()
2433 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, in pci_resume_msi()
2441 pcicfgregs *cfg = &dinfo->cfg; in pci_remap_intr_method()
2451 * Handle MSI first. We try to find this IRQ among our list in pci_remap_intr_method()
2452 * of MSI IRQs. If we find it, we request updated address and in pci_remap_intr_method()
2455 if (cfg->msi.msi_alloc > 0) { in pci_remap_intr_method()
2457 if (cfg->msi.msi_handlers == 0) in pci_remap_intr_method()
2459 for (i = 0; i < cfg->msi.msi_alloc; i++) { in pci_remap_intr_method()
2460 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, in pci_remap_intr_method()
2462 if (rle->start == irq) { in pci_remap_intr_method()
2468 dinfo->cfg.msi.msi_addr = addr; in pci_remap_intr_method()
2469 dinfo->cfg.msi.msi_data = data; in pci_remap_intr_method()
2478 * For MSI-X, we check to see if we have this IRQ. If we do, in pci_remap_intr_method()
2482 if (cfg->msix.msix_alloc > 0) { in pci_remap_intr_method()
2485 for (i = 0; i < cfg->msix.msix_alloc; i++) { in pci_remap_intr_method()
2486 mv = &cfg->msix.msix_vectors[i]; in pci_remap_intr_method()
2487 if (mv->mv_irq == irq) { in pci_remap_intr_method()
2492 mv->mv_address = addr; in pci_remap_intr_method()
2493 mv->mv_data = data; in pci_remap_intr_method()
2494 for (j = 0; j < cfg->msix.msix_table_len; j++) { in pci_remap_intr_method()
2495 mte = &cfg->msix.msix_table[j]; in pci_remap_intr_method()
2496 if (mte->mte_vector != i + 1) in pci_remap_intr_method()
2498 if (mte->mte_handlers == 0) in pci_remap_intr_method()
2514 * Returns true if the specified device is blacklisted because MSI
2528 * Determine if MSI is blacklisted globally on this system. Currently,
2530 * host-PCI bridge at device 0:0:0. In the future, it may become
2542 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ in pci_msi_blacklisted()
2547 * machines known to support MSI. in pci_msi_blacklisted()
2564 * Returns true if the specified device is blacklisted because MSI-X
2565 * doesn't work. Note that this assumes that if MSI doesn't work,
2566 * MSI-X doesn't either.
2582 * Determine if MSI-X is blacklisted globally on this system. If MSI
2583 * is blacklisted, assume that MSI-X is as well. Check for additional
2584 * chipsets where MSI works but MSI-X does not.
2603 * Attempt to allocate *count MSI messages. The actual number allocated is
2611 pcicfgregs *cfg = &dinfo->cfg; in pci_alloc_msi_method()
2622 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_alloc_msi_method()
2623 if (rle != NULL && rle->res != NULL) in pci_alloc_msi_method()
2627 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) in pci_alloc_msi_method()
2630 /* If MSI is blacklisted for this system, fail. */ in pci_alloc_msi_method()
2634 /* MSI capability present? */ in pci_alloc_msi_method()
2635 if (cfg->msi.msi_location == 0 || !pci_do_msi) in pci_alloc_msi_method()
2638 ctrl = pci_read_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, 2); in pci_alloc_msi_method()
2642 "attempting to allocate %d MSI vectors (%u supported)\n", in pci_alloc_msi_method()
2651 /* MSI requires power of 2 number of messages. */ in pci_alloc_msi_method()
2674 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, in pci_alloc_msi_method()
2679 device_printf(child, "using IRQ %d for MSI\n", irqs[0]); in pci_alloc_msi_method()
2692 if (irqs[i] == irqs[i - 1] + 1) { in pci_alloc_msi_method()
2699 printf("-%d", irqs[i - 1]); in pci_alloc_msi_method()
2709 printf("-%d", irqs[actual - 1]); in pci_alloc_msi_method()
2710 printf(" for MSI\n"); in pci_alloc_msi_method()
2716 ctrl |= (ffs(actual) - 1) << 4; in pci_alloc_msi_method()
2717 cfg->msi.msi_ctrl = ctrl; in pci_alloc_msi_method()
2718 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); in pci_alloc_msi_method()
2721 cfg->msi.msi_alloc = actual; in pci_alloc_msi_method()
2722 cfg->msi.msi_handlers = 0; in pci_alloc_msi_method()
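The plain MSI path is similar from a driver's perspective; a minimal sketch with INTx fallback, using the rid conventions visible above (MSI rids start at 1, legacy INTx is rid 0):

	struct resource *irq;
	int count, rid;

	count = 1;
	if (pci_msi_count(dev) > 0 && pci_alloc_msi(dev, &count) == 0)
		rid = 1;	/* first (and only) MSI message */
	else
		rid = 0;	/* fall back to legacy INTx */
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid == 0 ? RF_SHAREABLE : 0));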
2727 /* Release the MSI messages associated with this device. */
2732 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_release_msi_method() local
2737 /* Try MSI-X first. */ in pci_release_msi_method()
2743 if (msi->msi_alloc == 0) in pci_release_msi_method()
2745 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); in pci_release_msi_method()
2748 if (msi->msi_handlers > 0) in pci_release_msi_method()
2750 for (i = 0; i < msi->msi_alloc; i++) { in pci_release_msi_method()
2751 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msi_method()
2752 KASSERT(rle != NULL, ("missing MSI resource")); in pci_release_msi_method()
2753 if (rle->res != NULL) in pci_release_msi_method()
2755 irqs[i] = rle->start; in pci_release_msi_method()
2759 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), in pci_release_msi_method()
2760 ("%s: MSI still enabled", __func__)); in pci_release_msi_method()
2761 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; in pci_release_msi_method()
2762 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_release_msi_method()
2763 msi->msi_ctrl, 2); in pci_release_msi_method()
2766 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); in pci_release_msi_method()
2767 for (i = 0; i < msi->msi_alloc; i++) in pci_release_msi_method()
2768 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msi_method()
2771 msi->msi_alloc = 0; in pci_release_msi_method()
2772 msi->msi_addr = 0; in pci_release_msi_method()
2773 msi->msi_data = 0; in pci_release_msi_method()
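On detach the order matters: the release path above bails out while a handler is still installed or an IRQ resource is still allocated, so drivers tear down first. A hedged sketch, where sc->irq_res, sc->irq_rid and sc->intrhand are placeholder softc fields:

	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res);
	pci_release_msi(dev);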
2778 * Return the maximum number of MSI messages this device supports.
2787 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_msi_count_method() local
2790 if (pci_do_msi && msi->msi_location != 0) { in pci_msi_count_method()
2791 ctrl = pci_read_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_msi_count_method()
2808 if (dinfo->cfg.vpd.vpd_reg) in pci_freecfg()
2809 vpd_free(&dinfo->cfg.vpd); in pci_freecfg()
2811 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { in pci_freecfg()
2821 pci_numdevs--; in pci_freecfg()
2832 pcicfgregs *cfg = &dinfo->cfg; in pci_set_powerstate_method()
2836 if (cfg->pp.pp_location == 0) in pci_set_powerstate_method()
2842 * behavior when going from D3 -> D3. in pci_set_powerstate_method()
2867 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location + in pci_set_powerstate_method()
2874 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) in pci_set_powerstate_method()
2879 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) in pci_set_powerstate_method()
2894 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS, in pci_set_powerstate_method()
2905 pcicfgregs *cfg = &dinfo->cfg; in pci_get_powerstate_method()
2909 if (cfg->pp.pp_location != 0) { in pci_get_powerstate_method()
2910 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location + in pci_get_powerstate_method()
2941 pcicfgregs *cfg = &dinfo->cfg; in pci_clear_pme()
2944 if (cfg->pp.pp_location != 0) { in pci_clear_pme()
2945 status = pci_read_config(dev, dinfo->cfg.pp.pp_location + in pci_clear_pme()
2949 pci_write_config(dev, dinfo->cfg.pp.pp_location + in pci_clear_pme()
2959 pcicfgregs *cfg = &dinfo->cfg; in pci_enable_pme()
2962 if (cfg->pp.pp_location != 0) { in pci_enable_pme()
2963 status = pci_read_config(dev, dinfo->cfg.pp.pp_location + in pci_enable_pme()
2966 pci_write_config(dev, dinfo->cfg.pp.pp_location + in pci_enable_pme()
2975 pcicfgregs *cfg = &dinfo->cfg; in pci_has_pm()
2977 return (cfg->pp.pp_location != 0); in pci_has_pm()
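Drivers usually go through the powerstate accessors rather than these registers directly; a small hedged example for suspend/resume paths (assuming dev is the PCI child device):

	/* Drop to D3hot on suspend... */
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);

	/* ...and bring the function back to D0 on resume. */
	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
		device_printf(dev, "failed to return to D0\n");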
3057 * New style pci driver. Parent device is either a pci-host-bridge or a
3058 * pci-pci-bridge. Both kinds are represented by instances of pcib.
3066 pcicfgregs *cfg = &dinfo->cfg; in pci_print_verbose()
3068 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", in pci_print_verbose()
3069 cfg->vendor, cfg->device, cfg->revid); in pci_print_verbose()
3071 cfg->domain, cfg->bus, cfg->slot, cfg->func); in pci_print_verbose()
3072 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", in pci_print_verbose()
3073 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, in pci_print_verbose()
3074 cfg->mfdev); in pci_print_verbose()
3076 cfg->cmdreg, cfg->statreg, cfg->cachelnsz); in pci_print_verbose()
3078 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, in pci_print_verbose()
3079 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); in pci_print_verbose()
3080 if (cfg->intpin > 0) in pci_print_verbose()
3082 cfg->intpin + 'a' - 1, cfg->intline); in pci_print_verbose()
3083 if (cfg->pp.pp_location) { in pci_print_verbose()
3086 status = pci_read_config(cfg->dev, cfg->pp.pp_location + in pci_print_verbose()
3089 cfg->pp.pp_cap & PCIM_PCAP_SPEC, in pci_print_verbose()
3090 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", in pci_print_verbose()
3091 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", in pci_print_verbose()
3094 if (cfg->msi.msi_location) { in pci_print_verbose()
3097 ctrl = cfg->msi.msi_ctrl; in pci_print_verbose()
3104 if (cfg->msix.msix_location) { in pci_print_verbose()
3107 msgnum = PCI_MSIX_MSGNUM(cfg->msix.msix_ctrl); in pci_print_verbose()
3108 printf("\tMSI-X supports %d message%s ", in pci_print_verbose()
3110 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) in pci_print_verbose()
3111 printf("in map 0x%x\n", in pci_print_verbose()
3112 cfg->msix.msix_table_bar); in pci_print_verbose()
3115 cfg->msix.msix_table_bar, in pci_print_verbose()
3116 cfg->msix.msix_pba_bar); in pci_print_verbose()
3138 pci_addr_t map, testval; in pci_read_bar() local
3143 * The device ROM BAR is special. It is always a 32-bit in pci_read_bar()
3148 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { in pci_read_bar()
3149 map = pci_read_config(dev, reg, 4); in pci_read_bar()
3152 pci_write_config(dev, reg, map, 4); in pci_read_bar()
3153 *mapp = map; in pci_read_bar()
3160 map = pci_read_config(dev, reg, 4); in pci_read_bar()
3161 ln2range = pci_maprange(map); in pci_read_bar()
3163 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; in pci_read_bar()
3172 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); in pci_read_bar()
3181 * and combines the result into a 64-bit value." (section 6.2.5.1) in pci_read_bar()
3196 * the BAR of the low-level console device and when booting verbose, in pci_read_bar()
3199 pci_write_config(dev, reg, map, 4); in pci_read_bar()
3201 pci_write_config(dev, reg + 4, map >> 32, 4); in pci_read_bar()
3204 *mapp = map; in pci_read_bar()
3216 /* The device ROM BAR is always a 32-bit memory BAR. */ in pci_write_bar()
3218 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) in pci_write_bar()
3221 ln2range = pci_maprange(pm->pm_value); in pci_write_bar()
3222 pci_write_config(dev, pm->pm_reg, base, 4); in pci_write_bar()
3224 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); in pci_write_bar()
3225 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); in pci_write_bar()
3227 pm->pm_value |= (pci_addr_t)pci_read_config(dev, in pci_write_bar()
3228 pm->pm_reg + 4, 4) << 32; in pci_write_bar()
3238 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { in pci_find_bar()
3239 if (pm->pm_reg == reg) in pci_find_bar()
3251 return (STAILQ_FIRST(&dinfo->cfg.maps)); in pci_first_bar()
3267 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && in pci_bar_enabled()
3268 !(pm->pm_value & PCIM_BIOS_ENABLE)) in pci_bar_enabled()
3271 if ((dinfo->cfg.flags & PCICFG_VF) != 0) { in pci_bar_enabled()
3274 iov = dinfo->cfg.iov; in pci_bar_enabled()
3275 cmd = pci_read_config(iov->iov_pf, in pci_bar_enabled()
3276 iov->iov_pos + PCIR_SRIOV_CTL, 2); in pci_bar_enabled()
3281 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) in pci_bar_enabled()
3295 pm->pm_reg = reg; in pci_add_bar()
3296 pm->pm_value = value; in pci_add_bar()
3297 pm->pm_size = size; in pci_add_bar()
3298 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { in pci_add_bar()
3299 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", in pci_add_bar()
3302 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) in pci_add_bar()
3306 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); in pci_add_bar()
3308 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); in pci_add_bar()
3320 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { in pci_restore_bars()
3321 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) in pci_restore_bars()
3324 ln2range = pci_maprange(pm->pm_value); in pci_restore_bars()
3325 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); in pci_restore_bars()
3327 pci_write_config(dev, pm->pm_reg + 4, in pci_restore_bars()
3328 pm->pm_value >> 32, 4); in pci_restore_bars()
3333 * Add a resource based on a pci map register. Return 1 if the map
3334 * register is a 32bit map register or 2 if it is a 64bit register.
3341 pci_addr_t base, map, testval; in pci_add_map() local
3353 maprange = pci_maprange(pm->pm_value); in pci_add_map()
3358 pci_read_bar(dev, reg, &map, &testval, NULL); in pci_add_map()
3359 if (PCI_BAR_MEM(map)) { in pci_add_map()
3361 if (map & PCIM_BAR_MEM_PREFETCH) in pci_add_map()
3366 base = pci_mapbase(map); in pci_add_map()
3372 maprange = pci_maprange(map); in pci_add_map()
3389 pm = pci_add_bar(dev, reg, map, mapsize); in pci_add_map()
3392 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); in pci_add_map()
3412 if (!force && (basezero || map == testval)) in pci_add_map()
3457 end = base + count - 1; in pci_add_map()
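The SYS_RES_MEMORY / SYS_RES_IOPORT entries that pci_add_map() registers here are what a child driver later claims by BAR rid, for example in its attach routine; a hedged sketch:

	struct resource *mem;
	int rid;

	rid = PCIR_BAR(0);	/* first BAR of the device */
	mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (mem == NULL)
		return (ENXIO);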
3571 pcicfgregs *cfg = &dinfo->cfg; in pci_assign_interrupt()
3576 if (cfg->intpin == 0) in pci_assign_interrupt()
3583 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); in pci_assign_interrupt()
3595 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) in pci_assign_interrupt()
3598 irq = cfg->intline; in pci_assign_interrupt()
3606 if (irq != cfg->intline) { in pci_assign_interrupt()
3607 cfg->intline = irq; in pci_assign_interrupt()
3612 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); in pci_assign_interrupt()
3752 eec = -1; in xhci_early_takeover()
3803 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_reserve_secbus()
3868 count = end - start + 1; in pci_reserve_secbus()
3910 cfg = &dinfo->cfg; in pci_alloc_secbus()
3911 rl = &dinfo->resources; in pci_alloc_secbus()
3912 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_alloc_secbus()
3959 iov = dinfo->cfg.iov; in pci_ea_bei_to_rid()
3961 iov_pos = iov->iov_pos; in pci_ea_bei_to_rid()
3979 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + in pci_ea_bei_to_rid()
3983 return (-1); in pci_ea_bei_to_rid()
3994 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { in pci_ea_is_enabled()
3995 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) in pci_ea_is_enabled()
3996 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); in pci_ea_is_enabled()
4017 rl = &dinfo->resources; in pci_add_resources_ea()
4021 iov = dinfo->cfg.iov; in pci_add_resources_ea()
4024 if (dinfo->cfg.ea.ea_location == 0) in pci_add_resources_ea()
4027 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { in pci_add_resources_ea()
4029 * TODO: Ignore EA-BAR if is not enabled. in pci_add_resources_ea()
4034 * a legacy-BAR mechanism. in pci_add_resources_ea()
4036 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) in pci_add_resources_ea()
4039 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { in pci_add_resources_ea()
4058 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || in pci_add_resources_ea()
4059 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) in pci_add_resources_ea()
4066 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || in pci_add_resources_ea()
4067 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && in pci_add_resources_ea()
4068 (ea->eae_bei != PCIM_EA_BEI_ROM)) in pci_add_resources_ea()
4072 rid = pci_ea_bei_to_rid(dev, ea->eae_bei); in pci_add_resources_ea()
4081 start = ea->eae_base; in pci_add_resources_ea()
4082 count = ea->eae_max_offset + 1; in pci_add_resources_ea()
4085 count = count * iov->iov_num_vfs; in pci_add_resources_ea()
4087 end = start + count - 1; in pci_add_resources_ea()
4102 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); in pci_add_resources_ea()
4104 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); in pci_add_resources_ea()
4110 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); in pci_add_resources_ea()
4131 cfg = &dinfo->cfg; in pci_add_resources()
4132 rl = &dinfo->resources; in pci_add_resources()
4133 devid = (cfg->device << 16) | cfg->vendor; in pci_add_resources()
4138 /* ATA devices need special map treatment */ in pci_add_resources()
4146 for (i = 0; i < cfg->nummaps;) { in pci_add_resources()
4158 for (q = &pci_quirks[0]; q->devid != 0; q++) in pci_add_resources()
4159 if (q->devid == devid && in pci_add_resources()
4160 q->type == PCI_QUIRK_UNMAP_REG && in pci_add_resources()
4161 q->arg1 == PCIR_BAR(i)) in pci_add_resources()
4163 if (q->devid != 0) { in pci_add_resources()
4174 for (q = &pci_quirks[0]; q->devid != 0; q++) in pci_add_resources()
4175 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) in pci_add_resources()
4176 pci_add_map(bus, dev, q->arg1, rl, force, 0); in pci_add_resources()
4178 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { in pci_add_resources()
4180 * Try to re-route interrupts. Sometimes the BIOS or in pci_add_resources()
4182 * If the re-route fails, then just stick with what we in pci_add_resources()
4239 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); in pci_add_children()
4378 vf_dinfo->cfg.flags |= PCICFG_VF; in pci_add_iov_child()
4381 return (vf_dinfo->cfg.dev); in pci_add_iov_child()
4403 if (dinfo->cfg.pcie.pcie_location == 0) in pcie_setup_mps()
4439 if (dinfo->cfg.pcie.pcie_location != 0 && in pci_add_child_clear_aer()
4440 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { in pci_add_child_clear_aer()
4441 r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4445 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4452 pci_printf(&dinfo->cfg, in pci_add_child_clear_aer()
4453 "clearing AER UC 0x%08x -> 0x%08x\n", in pci_add_child_clear_aer()
4481 pci_printf(&dinfo->cfg, in pci_add_child_clear_aer()
4482 "clearing AER COR 0x%08x -> 0x%08x\n", in pci_add_child_clear_aer()
4498 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4502 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4512 dinfo->cfg.dev = dev = device_add_child(bus, NULL, DEVICE_UNIT_ANY); in pci_add_child()
4514 resource_list_init(&dinfo->resources); in pci_add_child()
4522 pci_child_added(dinfo->cfg.dev); in pci_add_child()
4527 EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); in pci_add_child()
4557 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, in pci_attach_common()
4559 if (sc->sc_bus == NULL) { in pci_attach_common()
4566 sc->sc_dma_tag = bus_get_dma_tag(dev); in pci_attach_common()
4602 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); in pci_detach()
4685 * as MSI/MSI-X interrupts are never shared. in pci_suspend_child()
4687 rle = resource_list_find(&dinfo->resources, in pci_suspend_child()
4689 if (rle != NULL && rle->res != NULL) in pci_suspend_child()
4690 (void)bus_suspend_intr(child, rle->res); in pci_suspend_child()
4719 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_resume_child()
4720 if (rle != NULL && rle->res != NULL) in pci_resume_child()
4721 (void)bus_resume_intr(child, rle->res); in pci_resume_child()
4807 pci_printf(&dinfo->cfg, "reprobing on driver added\n"); in pci_driver_added()
4844 * Check to see if the interrupt is MSI or MSI-X. in pci_setup_intr()
4845 * Ask our parent to map the MSI and give in pci_setup_intr()
4851 if (dinfo->cfg.msi.msi_alloc > 0) { in pci_setup_intr()
4852 if (dinfo->cfg.msi.msi_addr == 0) { in pci_setup_intr()
4853 KASSERT(dinfo->cfg.msi.msi_handlers == 0, in pci_setup_intr()
4854 ("MSI has handlers, but vectors not mapped")); in pci_setup_intr()
4859 dinfo->cfg.msi.msi_addr = addr; in pci_setup_intr()
4860 dinfo->cfg.msi.msi_data = data; in pci_setup_intr()
4862 if (dinfo->cfg.msi.msi_handlers == 0) in pci_setup_intr()
4863 pci_enable_msi(child, dinfo->cfg.msi.msi_addr, in pci_setup_intr()
4864 dinfo->cfg.msi.msi_data); in pci_setup_intr()
4865 dinfo->cfg.msi.msi_handlers++; in pci_setup_intr()
4867 KASSERT(dinfo->cfg.msix.msix_alloc > 0, in pci_setup_intr()
4868 ("No MSI or MSI-X interrupts allocated")); in pci_setup_intr()
4869 KASSERT(rid <= dinfo->cfg.msix.msix_table_len, in pci_setup_intr()
4870 ("MSI-X index too high")); in pci_setup_intr()
4871 mte = &dinfo->cfg.msix.msix_table[rid - 1]; in pci_setup_intr()
4872 KASSERT(mte->mte_vector != 0, ("no message vector")); in pci_setup_intr()
4873 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; in pci_setup_intr()
4874 KASSERT(mv->mv_irq == rman_get_start(irq), in pci_setup_intr()
4876 if (mv->mv_address == 0) { in pci_setup_intr()
4877 KASSERT(mte->mte_handlers == 0, in pci_setup_intr()
4878 ("MSI-X table entry has handlers, but vector not mapped")); in pci_setup_intr()
4883 mv->mv_address = addr; in pci_setup_intr()
4884 mv->mv_data = data; in pci_setup_intr()
4894 mte->mte_handlers++; in pci_setup_intr()
4895 if (mte->mte_handlers == 1) { in pci_setup_intr()
4896 pci_enable_msix(child, rid - 1, mv->mv_address, in pci_setup_intr()
4897 mv->mv_data); in pci_setup_intr()
4898 pci_unmask_msix(child, rid - 1); in pci_setup_intr()
4903 * Make sure that INTx is disabled if we are using MSI/MSI-X, in pci_setup_intr()
4905 * in which case we "enable" INTx so MSI/MSI-X actually works. in pci_setup_intr()
4945 * Check to see if the interrupt is MSI or MSI-X. If so, in pci_teardown_intr()
4947 * MSI-X message, or disable MSI messages if the count in pci_teardown_intr()
4951 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); in pci_teardown_intr()
4952 if (rle->res != irq) in pci_teardown_intr()
4954 if (dinfo->cfg.msi.msi_alloc > 0) { in pci_teardown_intr()
4955 KASSERT(rid <= dinfo->cfg.msi.msi_alloc, in pci_teardown_intr()
4956 ("MSI-X index too high")); in pci_teardown_intr()
4957 if (dinfo->cfg.msi.msi_handlers == 0) in pci_teardown_intr()
4959 dinfo->cfg.msi.msi_handlers--; in pci_teardown_intr()
4960 if (dinfo->cfg.msi.msi_handlers == 0) in pci_teardown_intr()
4963 KASSERT(dinfo->cfg.msix.msix_alloc > 0, in pci_teardown_intr()
4964 ("No MSI or MSI-X interrupts allocated")); in pci_teardown_intr()
4965 KASSERT(rid <= dinfo->cfg.msix.msix_table_len, in pci_teardown_intr()
4966 ("MSI-X index too high")); in pci_teardown_intr()
4967 mte = &dinfo->cfg.msix.msix_table[rid - 1]; in pci_teardown_intr()
4968 if (mte->mte_handlers == 0) in pci_teardown_intr()
4970 mte->mte_handlers--; in pci_teardown_intr()
4971 if (mte->mte_handlers == 0) in pci_teardown_intr()
4972 pci_mask_msix(child, rid - 1); in pci_teardown_intr()
4978 ("%s: generic teardown failed for MSI/MSI-X", __func__)); in pci_teardown_intr()
4990 rl = &dinfo->resources; in pci_print_child()
5016 {PCIC_OLD, -1, 1, "old"},
5017 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
5018 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
5019 {PCIC_STORAGE, -1, 1, "mass storage"},
5029 {PCIC_NETWORK, -1, 1, "network"},
5035 {PCIC_DISPLAY, -1, 1, "display"},
5039 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
5044 {PCIC_MEMORY, -1, 1, "memory"},
5047 {PCIC_BRIDGE, -1, 1, "bridge"},
5048 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
5049 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
5050 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
5051 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
5052 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
5053 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
5054 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
5055 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
5056 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
5057 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
5062 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
5067 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
5070 {PCIC_INPUTDEV, -1, 1, "input device"},
5076 {PCIC_DOCKING, -1, 1, "docking station"},
5077 {PCIC_PROCESSOR, -1, 1, "processor"},
5078 {PCIC_SERIALBUS, -1, 1, "serial bus"},
5085 {PCIC_WIRELESS, -1, 1, "wireless controller"},
5089 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
5091 {PCIC_SATCOM, -1, 1, "satellite communication"},
5096 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
5099 {PCIC_DASP, -1, 0, "dasp"},
5104 {PCIC_INSTRUMENT, -1, 0, "non-essential instrumentation"},
5131 if (pci_nomatch_tab[i].subclass == -1) { in pci_probe_nomatch()
5162 rl = &dinfo->resources; in pci_child_detached()
5165 * Have to deallocate IRQs before releasing any MSI messages and in pci_child_detached()
5166 * have to release MSI messages before deallocating any memory in pci_child_detached()
5170 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); in pci_child_detached()
5171 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { in pci_child_detached()
5172 if (dinfo->cfg.msi.msi_alloc != 0) in pci_child_detached()
5173 pci_printf(&dinfo->cfg, "Device leaked %d MSI " in pci_child_detached()
5174 "vectors\n", dinfo->cfg.msi.msi_alloc); in pci_child_detached()
5176 pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " in pci_child_detached()
5177 "vectors\n", dinfo->cfg.msix.msix_alloc); in pci_child_detached()
5181 pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); in pci_child_detached()
5183 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); in pci_child_detached()
5185 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); in pci_child_detached()
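/*
 * Illustrative sketch, not part of this file: the release order that the
 * leak checks above expect from a detaching driver - interrupt handlers
 * first, then the IRQ resources, then the MSI/MSI-X allocation, and only
 * then the memory/IO BARs.  The foo_* names and softc fields are
 * hypothetical.
 */
#ifdef PCI_EXAMPLE_SKETCH
static int
foo_detach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
	bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res);
	pci_release_msi(dev);		/* only after all IRQ resources are gone */
	bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->mem_res);
	return (0);
}
#endif /* PCI_EXAMPLE_SKETCH */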
5203 * - devices cannot be listed without a corresponding VENDOR line.
5212 * is set to -1. Returns nonzero at the end of the database.
5224 *device = -1; in pci_describe_parse_line()
5225 *vendor = -1; in pci_describe_parse_line()
5228 left = pci_vendordata_size - (cp - pci_vendordata); in pci_describe_parse_line()
5246 left--; in pci_describe_parse_line()
5250 left--; in pci_describe_parse_line()
5256 left--; in pci_describe_parse_line()
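/*
 * Illustrative note, not quoted from the actual database: the parser above
 * walks the preloaded vendor/device data, which follows the familiar
 * pci.ids-style layout, roughly:
 *
 *	8086  Intel Corporation
 *		1237  440FX - 82441FX PMC [Natoma]
 *
 * Vendor lines begin in column 0 with a 16-bit hex ID; device lines are
 * indented beneath their vendor, which is why a device entry without a
 * preceding VENDOR line is rejected.
 */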
5297 if (vendor != -1) { in pci_describe_device()
5324 cfg = &dinfo->cfg; in pci_read_ivar()
5335 *result = cfg->subvendor; in pci_read_ivar()
5338 *result = cfg->subdevice; in pci_read_ivar()
5341 *result = cfg->vendor; in pci_read_ivar()
5344 *result = cfg->device; in pci_read_ivar()
5347 *result = (cfg->device << 16) | cfg->vendor; in pci_read_ivar()
5350 *result = cfg->baseclass; in pci_read_ivar()
5353 *result = cfg->subclass; in pci_read_ivar()
5356 *result = cfg->progif; in pci_read_ivar()
5359 *result = cfg->revid; in pci_read_ivar()
5362 *result = cfg->intpin; in pci_read_ivar()
5365 *result = cfg->intline; in pci_read_ivar()
5368 *result = cfg->domain; in pci_read_ivar()
5371 *result = cfg->bus; in pci_read_ivar()
5374 *result = cfg->slot; in pci_read_ivar()
5377 *result = cfg->func; in pci_read_ivar()
5380 *result = cfg->cmdreg; in pci_read_ivar()
5383 *result = cfg->cachelnsz; in pci_read_ivar()
5386 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { in pci_read_ivar()
5387 *result = -1; in pci_read_ivar()
5390 *result = cfg->mingnt; in pci_read_ivar()
5393 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { in pci_read_ivar()
5394 *result = -1; in pci_read_ivar()
5397 *result = cfg->maxlat; in pci_read_ivar()
5400 *result = cfg->lattimer; in pci_read_ivar()
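/*
 * Illustrative sketch, not part of this file: the ivars above back the
 * pci_get_*() accessors that drivers normally use, for example in a probe
 * routine.  The foo_* name and the vendor/device IDs are placeholders.
 */
#ifdef PCI_EXAMPLE_SKETCH
static int
foo_probe(device_t dev)
{

	if (pci_get_vendor(dev) == 0x8086 && pci_get_device(dev) == 0x100e &&
	    pci_get_revid(dev) >= 2) {
		device_set_desc(dev, "Foo example adapter");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
#endif /* PCI_EXAMPLE_SKETCH */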
5417 dinfo->cfg.intpin = value; in pci_write_ivar()
5447  * List resources based on pci map registers, used from within ddb
5471 if (dinfo->cfg.dev) in DB_SHOW_COMMAND_FLAGS()
5472 name = device_get_name(dinfo->cfg.dev); in DB_SHOW_COMMAND_FLAGS()
5474 p = &dinfo->conf; in DB_SHOW_COMMAND_FLAGS()
5478 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : in DB_SHOW_COMMAND_FLAGS()
5480 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, in DB_SHOW_COMMAND_FLAGS()
5481 p->pc_sel.pc_func, (p->pc_class << 16) | in DB_SHOW_COMMAND_FLAGS()
5482 (p->pc_subclass << 8) | p->pc_progif, in DB_SHOW_COMMAND_FLAGS()
5483 (p->pc_subdevice << 16) | p->pc_subvendor, in DB_SHOW_COMMAND_FLAGS()
5484 (p->pc_device << 16) | p->pc_vendor, in DB_SHOW_COMMAND_FLAGS()
5485 p->pc_revid, p->pc_hdr); in DB_SHOW_COMMAND_FLAGS()
5496 struct resource_list *rl = &dinfo->resources; in pci_reserve_map()
5500 	pci_addr_t map, testval; in pci_reserve_map()
5512 mapsize = pm->pm_size; in pci_reserve_map()
5513 map = pm->pm_value; in pci_reserve_map()
5517 * BAR/map is. BARs that read back 0 here are bogus in pci_reserve_map()
5523 pci_read_bar(child, *rid, &map, &testval, NULL); in pci_reserve_map()
5529 if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) in pci_reserve_map()
5535 pm = pci_add_bar(child, *rid, map, mapsize); in pci_reserve_map()
5538 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { in pci_reserve_map()
5568 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) in pci_reserve_map()
5593 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); in pci_reserve_map()
5595 map = rman_get_start(res); in pci_reserve_map()
5596 pci_write_bar(child, pm, map); in pci_reserve_map()
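/*
 * Illustrative sketch, not part of this file: what a driver does with a BAR
 * that pci_reserve_map() sizes and reserves above - allocate it through the
 * resource list and access it with bus_read/bus_write.  The foo_* names,
 * softc fields, and register offset are hypothetical.
 */
#ifdef PCI_EXAMPLE_SKETCH
static int
foo_attach_bar(device_t dev, struct foo_softc *sc)
{
	int rid;

	rid = PCIR_BAR(0);		/* config-space offset of BAR 0 */
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENXIO);
	/* Registers are now accessible through the activated resource. */
	sc->chip_rev = bus_read_4(sc->mem_res, 0x08);
	return (0);
}
#endif /* PCI_EXAMPLE_SKETCH */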
5619 rl = &dinfo->resources; in pci_alloc_multi_resource()
5620 cfg = &dinfo->cfg; in pci_alloc_multi_resource()
5627 * Can't alloc legacy interrupt once MSI messages have in pci_alloc_multi_resource()
5630 if (*rid == 0 && (cfg->msi.msi_alloc > 0 || in pci_alloc_multi_resource()
5631 cfg->msix.msix_alloc > 0)) in pci_alloc_multi_resource()
5639 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && in pci_alloc_multi_resource()
5640 (cfg->intpin != 0)) in pci_alloc_multi_resource()
5646 * PCI-PCI bridge I/O window resources are not BARs. in pci_alloc_multi_resource()
5650 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { in pci_alloc_multi_resource()
5690 if (dinfo->cfg.flags & PCICFG_VF) { in pci_alloc_resource()
5719 cfg = &dinfo->cfg; in pci_release_resource()
5722 if (cfg->flags & PCICFG_VF) { in pci_release_resource()
5736 * PCI-PCI bridge I/O window resources are not BARs. For in pci_release_resource()
5739 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && in pci_release_resource()
5750 rl = &dinfo->resources; in pci_release_resource()
5765 if (dinfo->cfg.flags & PCICFG_VF) { in pci_activate_resource()
5788 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) in pci_activate_resource()
5813 if (dinfo->cfg.flags & PCICFG_VF) { in pci_deactivate_resource()
5835 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) in pci_deactivate_resource()
5853 if (dinfo->cfg.flags & PCICFG_VF) { in pci_adjust_resource()
5872 	    struct resource_map_request *argsp, struct resource_map *map) in pci_map_resource()
5879 map)); in pci_map_resource()
5882 if (dinfo->cfg.flags & PCICFG_VF) { in pci_map_resource()
5889 map)); in pci_map_resource()
5896 return (bus_generic_map_resource(dev, child, r, argsp, map)); in pci_map_resource()
5901 struct resource_map *map) in pci_unmap_resource() argument
5907 return (bus_generic_unmap_resource(dev, child, r, map)); in pci_unmap_resource()
5910 if (dinfo->cfg.flags & PCICFG_VF) { in pci_unmap_resource()
5916 return (pci_vf_unmap_mem_resource(dev, child, r, map)); in pci_unmap_resource()
5923 return (bus_generic_unmap_resource(dev, child, r, map)); in pci_unmap_resource()
5934 rl = &dinfo->resources; in pci_child_deleted()
5948 if (rle->res) { in pci_child_deleted()
5949 if (rman_get_flags(rle->res) & RF_ACTIVE || in pci_child_deleted()
5950 resource_list_busy(rl, rle->type, rle->rid)) { in pci_child_deleted()
5951 pci_printf(&dinfo->cfg, in pci_child_deleted()
5954 rle->type, rle->rid, in pci_child_deleted()
5955 rman_get_start(rle->res)); in pci_child_deleted()
5956 bus_release_resource(child, rle->type, rle->rid, in pci_child_deleted()
5957 rle->res); in pci_child_deleted()
5959 resource_list_unreserve(rl, dev, child, rle->type, in pci_child_deleted()
5960 rle->rid); in pci_child_deleted()
5979 rl = &dinfo->resources; in pci_delete_resource()
5984 if (rle->res) { in pci_delete_resource()
5985 if (rman_get_flags(rle->res) & RF_ACTIVE || in pci_delete_resource()
5990 type, rid, rman_get_start(rle->res)); in pci_delete_resource()
6003 return (&dinfo->resources); in pci_get_resource_list()
6020 tag = sc->sc_dma_tag; in pci_get_dma_tag()
6030 return (sc->sc_dma_tag); in pci_get_dma_tag()
6038 pcicfgregs *cfg = &dinfo->cfg; in pci_read_config_method()
6042 * SR-IOV VFs don't implement the VID or DID registers, so we have to in pci_read_config_method()
6045 if (cfg->flags & PCICFG_VF) { in pci_read_config_method()
6049 return (cfg->device << 16 | cfg->vendor); in pci_read_config_method()
6051 return (cfg->vendor); in pci_read_config_method()
6053 return (cfg->vendor & 0xff); in pci_read_config_method()
6059 /* Note that an unaligned 4-byte read is an error. */ in pci_read_config_method()
6061 return (cfg->device); in pci_read_config_method()
6063 return (cfg->device & 0xff); in pci_read_config_method()
6072 cfg->bus, cfg->slot, cfg->func, reg, width)); in pci_read_config_method()
6080 pcicfgregs *cfg = &dinfo->cfg; in pci_write_config_method()
6083 cfg->bus, cfg->slot, cfg->func, reg, val, width); in pci_write_config_method()
6103 cfg = &dinfo->cfg; in pci_child_pnpinfo_method()
6105 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, in pci_child_pnpinfo_method()
6106 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, in pci_child_pnpinfo_method()
6107 cfg->progif); in pci_child_pnpinfo_method()
6133 pcicfgregs *cfg = &dinfo->cfg; in pci_assign_interrupt_method()
6136 cfg->intpin)); in pci_assign_interrupt_method()
6150 * Accept pciconf-style selectors of either pciD:B:S:F or in pci_lookup()
6222 cfg = &dinfo->cfg.pcie; in pci_cfg_restore_pcie()
6223 pos = cfg->pcie_location; in pci_cfg_restore_pcie()
6225 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; in pci_cfg_restore_pcie()
6227 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); in pci_cfg_restore_pcie()
6229 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6230 cfg->pcie_type == PCIEM_TYPE_ENDPOINT || in pci_cfg_restore_pcie()
6231 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) in pci_cfg_restore_pcie()
6232 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); in pci_cfg_restore_pcie()
6234 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6235 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && in pci_cfg_restore_pcie()
6236 (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) in pci_cfg_restore_pcie()
6237 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); in pci_cfg_restore_pcie()
6239 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6240 cfg->pcie_type == PCIEM_TYPE_ROOT_EC) in pci_cfg_restore_pcie()
6241 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); in pci_cfg_restore_pcie()
6244 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); in pci_cfg_restore_pcie()
6245 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); in pci_cfg_restore_pcie()
6246 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); in pci_cfg_restore_pcie()
6254 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, in pci_cfg_restore_pcix()
6255 dinfo->cfg.pcix.pcix_command, 2); in pci_cfg_restore_pcix()
6272 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); in pci_cfg_restore()
6273 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); in pci_cfg_restore()
6274 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); in pci_cfg_restore()
6275 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); in pci_cfg_restore()
6276 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); in pci_cfg_restore()
6277 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); in pci_cfg_restore()
6278 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { in pci_cfg_restore()
6280 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); in pci_cfg_restore()
6281 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); in pci_cfg_restore()
6285 dinfo->cfg.bridge.br_seclat, 1); in pci_cfg_restore()
6287 dinfo->cfg.bridge.br_subbus, 1); in pci_cfg_restore()
6289 dinfo->cfg.bridge.br_secbus, 1); in pci_cfg_restore()
6291 dinfo->cfg.bridge.br_pribus, 1); in pci_cfg_restore()
6293 dinfo->cfg.bridge.br_control, 2); in pci_cfg_restore()
6297 dinfo->cfg.bridge.br_seclat, 1); in pci_cfg_restore()
6299 dinfo->cfg.bridge.br_subbus, 1); in pci_cfg_restore()
6301 dinfo->cfg.bridge.br_secbus, 1); in pci_cfg_restore()
6303 dinfo->cfg.bridge.br_pribus, 1); in pci_cfg_restore()
6305 dinfo->cfg.bridge.br_control, 2); in pci_cfg_restore()
6310 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) in pci_cfg_restore()
6311 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); in pci_cfg_restore()
6314 * Restore extended capabilities for PCI-Express and PCI-X in pci_cfg_restore()
6316 if (dinfo->cfg.pcie.pcie_location != 0) in pci_cfg_restore()
6318 if (dinfo->cfg.pcix.pcix_location != 0) in pci_cfg_restore()
6321 /* Restore MSI and MSI-X configurations if they are present. */ in pci_cfg_restore()
6322 if (dinfo->cfg.msi.msi_location != 0) in pci_cfg_restore()
6324 if (dinfo->cfg.msix.msix_location != 0) in pci_cfg_restore()
6328 if (dinfo->cfg.iov != NULL) in pci_cfg_restore()
6340 cfg = &dinfo->cfg.pcie; in pci_cfg_save_pcie()
6341 pos = cfg->pcie_location; in pci_cfg_save_pcie()
6343 cfg->pcie_flags = RREG(PCIER_FLAGS); in pci_cfg_save_pcie()
6345 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; in pci_cfg_save_pcie()
6347 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); in pci_cfg_save_pcie()
6349 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6350 cfg->pcie_type == PCIEM_TYPE_ENDPOINT || in pci_cfg_save_pcie()
6351 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) in pci_cfg_save_pcie()
6352 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); in pci_cfg_save_pcie()
6354 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6355 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && in pci_cfg_save_pcie()
6356 (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) in pci_cfg_save_pcie()
6357 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); in pci_cfg_save_pcie()
6359 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6360 cfg->pcie_type == PCIEM_TYPE_ROOT_EC) in pci_cfg_save_pcie()
6361 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); in pci_cfg_save_pcie()
6364 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); in pci_cfg_save_pcie()
6365 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); in pci_cfg_save_pcie()
6366 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); in pci_cfg_save_pcie()
6374 dinfo->cfg.pcix.pcix_command = pci_read_config(dev, in pci_cfg_save_pcix()
6375 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); in pci_cfg_save_pcix()
6391 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); in pci_cfg_save()
6392 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); in pci_cfg_save()
6393 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); in pci_cfg_save()
6394 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); in pci_cfg_save()
6395 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); in pci_cfg_save()
6396 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); in pci_cfg_save()
6397 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); in pci_cfg_save()
6398 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); in pci_cfg_save()
6399 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); in pci_cfg_save()
6400 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); in pci_cfg_save()
6401 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); in pci_cfg_save()
6402 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { in pci_cfg_save()
6404 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); in pci_cfg_save()
6405 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); in pci_cfg_save()
6406 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); in pci_cfg_save()
6407 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); in pci_cfg_save()
6410 dinfo->cfg.bridge.br_seclat = pci_read_config(dev, in pci_cfg_save()
6412 dinfo->cfg.bridge.br_subbus = pci_read_config(dev, in pci_cfg_save()
6414 dinfo->cfg.bridge.br_secbus = pci_read_config(dev, in pci_cfg_save()
6416 dinfo->cfg.bridge.br_pribus = pci_read_config(dev, in pci_cfg_save()
6418 dinfo->cfg.bridge.br_control = pci_read_config(dev, in pci_cfg_save()
6422 dinfo->cfg.bridge.br_seclat = pci_read_config(dev, in pci_cfg_save()
6424 dinfo->cfg.bridge.br_subbus = pci_read_config(dev, in pci_cfg_save()
6426 dinfo->cfg.bridge.br_secbus = pci_read_config(dev, in pci_cfg_save()
6428 dinfo->cfg.bridge.br_pribus = pci_read_config(dev, in pci_cfg_save()
6430 dinfo->cfg.bridge.br_control = pci_read_config(dev, in pci_cfg_save()
6432 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); in pci_cfg_save()
6433 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); in pci_cfg_save()
6437 if (dinfo->cfg.pcie.pcie_location != 0) in pci_cfg_save()
6440 if (dinfo->cfg.pcix.pcix_location != 0) in pci_cfg_save()
6444 if (dinfo->cfg.iov != NULL) in pci_cfg_save()
6523 ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); in pci_find_pcie_root_port()
6526 * Walk the bridge hierarchy until we find a PCI-e root in pci_find_pcie_root_port()
6527 * port or a non-PCI device. in pci_find_pcie_root_port()
6540 * PCI-PCI bridge. in pci_find_pcie_root_port()
6546 if (dinfo->cfg.pcie.pcie_location != 0 && in pci_find_pcie_root_port()
6547 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) in pci_find_pcie_root_port()
6555 * Wait for pending transactions to complete on a PCI-express function.
6561 * exceeded. If dev is not a PCI-express function, this returns true.
6570 cap = dinfo->cfg.pcie.pcie_location; in pcie_wait_for_pending_transactions()
6582 max_delay -= 100; in pcie_wait_for_pending_transactions()
6597 * For non-PCI-express functions this returns 0.
6605 cap = dinfo->cfg.pcie.pcie_location; in pcie_get_max_completion_timeout()
6614 if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || in pcie_get_max_completion_timeout()
6654 s = "Uncorrectable (Non-Fatal)"; in pcie_apei_error()
6690 if (dinfo->cfg.pcie.pcie_location != 0) { in pcie_apei_error()
6691 rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pcie_apei_error()
6696 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pcie_apei_error()
6711 * If dev is not a PCI-express function or does not support FLR, this
6716 * PCI-standard registers via pci_save_state() and
6727 cap = dinfo->cfg.pcie.pcie_location; in pcie_flr()
6738 * which will re-enable busmastering. in pcie_flr()
6747 pci_printf(&dinfo->cfg, in pcie_flr()
6752 * Extend the post-FLR delay to cover the maximum in pcie_flr()
6773 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); in pcie_flr()
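/*
 * Illustrative sketch, not part of this file: a typical pcie_flr() call
 * site.  As the comment above notes, callers are expected to save and
 * restore standard config state around the reset; the delay here is derived
 * from the device's advertised completion timeout.  The foo_* name is
 * hypothetical.
 */
#ifdef PCI_EXAMPLE_SKETCH
static void
foo_reset_function(device_t dev)
{
	u_int max_delay;

	/* Completion timeout is reported in microseconds; FLR wants ms. */
	max_delay = pcie_get_max_completion_timeout(dev) / 1000;
	pci_save_state(dev);
	if (!pcie_flr(dev, max(max_delay, 10), true))
		device_printf(dev, "FLR failed or not supported\n");
	pci_restore_state(dev);
}
#endif /* PCI_EXAMPLE_SKETCH */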
6778 * Attempt a power-management reset by cycling the device in/out of D3
6878 while (nelt-- > 0) { in pci_match_device()
6880 if (id->match_flag_vendor) in pci_match_device()
6881 match &= vendor == id->vendor; in pci_match_device()
6882 if (id->match_flag_device) in pci_match_device()
6883 match &= device == id->device; in pci_match_device()
6884 if (id->match_flag_subvendor) in pci_match_device()
6885 match &= subvendor == id->subvendor; in pci_match_device()
6886 if (id->match_flag_subdevice) in pci_match_device()
6887 match &= subdevice == id->subdevice; in pci_match_device()
6888 if (id->match_flag_class) in pci_match_device()
6889 match &= class == id->class_id; in pci_match_device()
6890 if (id->match_flag_subclass) in pci_match_device()
6891 match &= subclass == id->subclass; in pci_match_device()
6892 if (id->match_flag_revid) in pci_match_device()
6893 match &= revid == id->revid; in pci_match_device()
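/*
 * Illustrative sketch, not part of this file: how a driver typically feeds
 * pci_match_device() above.  Real drivers usually build the table with the
 * convenience initializer macros from pcivar.h (PCI_DEV(), PCI_DESCR(),
 * PCI_MATCH()); the raw designated initializers are spelled out here only
 * to show which match_flag_* fields the loop above consults.  The foo_*
 * names and IDs are hypothetical.
 */
#ifdef PCI_EXAMPLE_SKETCH
static const struct pci_device_table foo_devs[] = {
	{ .match_flag_vendor = 1, .vendor = 0x8086,
	  .match_flag_device = 1, .device = 0x100e },
	{ .match_flag_vendor = 1, .vendor = 0x8086,
	  .match_flag_device = 1, .device = 0x100f },
};

static int
foo_table_probe(device_t dev)
{

	if (pci_match_device(dev, foo_devs, nitems(foo_devs)) == NULL)
		return (ENXIO);
	device_set_desc(dev, "Foo example adapter");
	return (BUS_PROBE_DEFAULT);
}
#endif /* PCI_EXAMPLE_SKETCH */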
6907 dev = dinfo->cfg.dev; in pci_print_faulted_dev_name()
6908 printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, in pci_print_faulted_dev_name()
6909 dinfo->cfg.slot, dinfo->cfg.func); in pci_print_faulted_dev_name()
6925 dev = dinfo->cfg.dev; in pci_print_faulted_dev()
6934 if (dinfo->cfg.pcie.pcie_location != 0) { in pci_print_faulted_dev()
6936 dinfo->cfg.pcie.pcie_location + in pci_print_faulted_dev()
6944 dinfo->cfg.pcie.pcie_location + in pci_print_faulted_dev()
6988 dev = dinfo->cfg.dev; in db_clear_pcie_errors()
6989 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in db_clear_pcie_errors()
6991 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in db_clear_pcie_errors()
7011 dev = dinfo->cfg.dev; in DB_COMMAND_FLAGS()
7020 if (dinfo->cfg.pcie.pcie_location != 0) in DB_COMMAND_FLAGS()