Lines matching: irqs, map, range
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
88 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
89 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
236 #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
237 #define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
239 #define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
240 #define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
248 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
255 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
256 * or the CMIC-SL (AKA ServerWorks GC_LE).
274 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
318 * HPE Gen 10 VGA has a memory range that can't be allocated in the
325 /* map register information */
326 #define PCI_MAPMEM 0x01 /* memory map */
327 #define PCI_MAPMEMP 0x02 /* prefetchable memory map */
328 #define PCI_MAPPORT 0x04 /* port map */
349 "Attempt to allocate a new range for any BARs whose original "
350 "firmware-assigned ranges fail to allocate during the initial device scan.");
363 "Transition from D3 -> D0 on resume.");
368 "Transition from D0 -> D3 on suspend.");
376 "Enable support for MSI-X interrupts");
381 "Rewrite entire MSI-X table when updating MSI-X entries");
385 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
400 "Ignore firmware-assigned resources for BARs.");
404 "Ignore firmware-assigned bus numbers.");
437 for (q = &pci_quirks[0]; q->devid; q++) { in pci_has_quirk()
438 if (q->devid == devid && q->type == quirk) in pci_has_quirk()
461 if ((dinfo->cfg.domain == domain) && in pci_find_dbsf()
462 (dinfo->cfg.bus == bus) && in pci_find_dbsf()
463 (dinfo->cfg.slot == slot) && in pci_find_dbsf()
464 (dinfo->cfg.func == func)) { in pci_find_dbsf()
469 return (dinfo != NULL ? dinfo->cfg.dev : NULL); in pci_find_dbsf()
480 if ((dinfo->cfg.vendor == vendor) && in pci_find_device()
481 (dinfo->cfg.device == device)) { in pci_find_device()
482 return (dinfo->cfg.dev); in pci_find_device()
495 if (dinfo->cfg.baseclass == class && in pci_find_class()
496 dinfo->cfg.subclass == subclass) { in pci_find_class()
497 return (dinfo->cfg.dev); in pci_find_class()
512 if (from != dinfo->cfg.dev) in pci_find_class_from()
517 if (dinfo->cfg.baseclass == class && in pci_find_class_from()
518 dinfo->cfg.subclass == subclass) { in pci_find_class_from()
519 return (dinfo->cfg.dev); in pci_find_class_from()
532 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, in pci_printf()
533 cfg->func); in pci_printf()
540 /* return base address of memory or port map */
552 /* return map type of memory or port map */
565 /* return log2 of map size decoded for memory or port map */
593 /* return log2 of map size decoded for device ROM */
612 /* return log2 of address range supported by map register */
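As a worked illustration of the helpers documented above (not code from this file), the hypothetical sketch below mirrors the size-decoding logic for a 32-bit memory BAR: bit 0 clear means memory space, bit 3 set means prefetchable, and the number of trailing zero bits in the masked value is the log2 size.

/*
 * Sketch only: with an assumed sizing readback of 0xfff00008 (32-bit
 * prefetchable memory BAR), masking off the flag bits leaves 0xfff00000,
 * whose lowest set bit is bit 20, i.e. a 2^20 (1 MB) decode.
 */
static int
example_mem_bar_ln2size(uint64_t testval)
{
	int ln2size = 0;

	testval &= ~(uint64_t)0xf;	/* drop the memory BAR flag bits */
	while (testval != 0 && (testval & 1) == 0) {
		ln2size++;		/* count trailing zero bits */
		testval >>= 1;
	}
	return (ln2size);
}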
641 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) in pci_fixancient()
645 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) in pci_fixancient()
646 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE; in pci_fixancient()
655 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_hdrtypedata()
657 cfg->subvendor = REG(PCIR_SUBVEND_0, 2); in pci_hdrtypedata()
658 cfg->subdevice = REG(PCIR_SUBDEV_0, 2); in pci_hdrtypedata()
659 cfg->mingnt = REG(PCIR_MINGNT, 1); in pci_hdrtypedata()
660 cfg->maxlat = REG(PCIR_MAXLAT, 1); in pci_hdrtypedata()
661 cfg->nummaps = PCI_MAXMAPS_0; in pci_hdrtypedata()
664 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1); in pci_hdrtypedata()
665 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1); in pci_hdrtypedata()
666 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1); in pci_hdrtypedata()
667 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1); in pci_hdrtypedata()
668 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2); in pci_hdrtypedata()
669 cfg->nummaps = PCI_MAXMAPS_1; in pci_hdrtypedata()
672 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1); in pci_hdrtypedata()
673 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1); in pci_hdrtypedata()
674 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1); in pci_hdrtypedata()
675 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1); in pci_hdrtypedata()
676 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2); in pci_hdrtypedata()
677 cfg->subvendor = REG(PCIR_SUBVEND_2, 2); in pci_hdrtypedata()
678 cfg->subdevice = REG(PCIR_SUBDEV_2, 2); in pci_hdrtypedata()
679 cfg->nummaps = PCI_MAXMAPS_2; in pci_hdrtypedata()
718 cfg = &devlist_entry->cfg; in pci_fill_devinfo()
720 cfg->domain = d; in pci_fill_devinfo()
721 cfg->bus = b; in pci_fill_devinfo()
722 cfg->slot = s; in pci_fill_devinfo()
723 cfg->func = f; in pci_fill_devinfo()
724 cfg->vendor = vid; in pci_fill_devinfo()
725 cfg->device = did; in pci_fill_devinfo()
726 cfg->cmdreg = REG(PCIR_COMMAND, 2); in pci_fill_devinfo()
727 cfg->statreg = REG(PCIR_STATUS, 2); in pci_fill_devinfo()
728 cfg->baseclass = REG(PCIR_CLASS, 1); in pci_fill_devinfo()
729 cfg->subclass = REG(PCIR_SUBCLASS, 1); in pci_fill_devinfo()
730 cfg->progif = REG(PCIR_PROGIF, 1); in pci_fill_devinfo()
731 cfg->revid = REG(PCIR_REVID, 1); in pci_fill_devinfo()
732 cfg->hdrtype = REG(PCIR_HDRTYPE, 1); in pci_fill_devinfo()
733 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); in pci_fill_devinfo()
734 cfg->lattimer = REG(PCIR_LATTIMER, 1); in pci_fill_devinfo()
735 cfg->intpin = REG(PCIR_INTPIN, 1); in pci_fill_devinfo()
736 cfg->intline = REG(PCIR_INTLINE, 1); in pci_fill_devinfo()
738 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; in pci_fill_devinfo()
739 cfg->hdrtype &= ~PCIM_MFDEV; in pci_fill_devinfo()
740 STAILQ_INIT(&cfg->maps); in pci_fill_devinfo()
742 cfg->iov = NULL; in pci_fill_devinfo()
752 devlist_entry->conf.pc_sel.pc_domain = cfg->domain; in pci_fill_devinfo()
753 devlist_entry->conf.pc_sel.pc_bus = cfg->bus; in pci_fill_devinfo()
754 devlist_entry->conf.pc_sel.pc_dev = cfg->slot; in pci_fill_devinfo()
755 devlist_entry->conf.pc_sel.pc_func = cfg->func; in pci_fill_devinfo()
756 devlist_entry->conf.pc_hdr = cfg->hdrtype; in pci_fill_devinfo()
758 devlist_entry->conf.pc_subvendor = cfg->subvendor; in pci_fill_devinfo()
759 devlist_entry->conf.pc_subdevice = cfg->subdevice; in pci_fill_devinfo()
760 devlist_entry->conf.pc_vendor = cfg->vendor; in pci_fill_devinfo()
761 devlist_entry->conf.pc_device = cfg->device; in pci_fill_devinfo()
763 devlist_entry->conf.pc_class = cfg->baseclass; in pci_fill_devinfo()
764 devlist_entry->conf.pc_subclass = cfg->subclass; in pci_fill_devinfo()
765 devlist_entry->conf.pc_progif = cfg->progif; in pci_fill_devinfo()
766 devlist_entry->conf.pc_revid = cfg->revid; in pci_fill_devinfo()
778 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \ in pci_ea_fill_info()
779 cfg->ea.ea_location + (n), w) in pci_ea_fill_info()
789 if (cfg->ea.ea_location == 0) in pci_ea_fill_info()
792 STAILQ_INIT(&cfg->ea.ea_entries); in pci_ea_fill_info()
802 if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE) in pci_ea_fill_info()
807 eae->eae_cfg_offset = cfg->ea.ea_location + ptr; in pci_ea_fill_info()
819 eae->eae_flags = val; in pci_ea_fill_info()
820 eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET; in pci_ea_fill_info()
835 eae->eae_base = base; in pci_ea_fill_info()
836 eae->eae_max_offset = max_offset; in pci_ea_fill_info()
838 STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link); in pci_ea_fill_info()
842 cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags, in pci_ea_fill_info()
843 (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset); in pci_ea_fill_info()
852 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) in pci_read_cap()
853 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) in pci_read_cap()
860 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_read_cap()
890 if (cfg->pp.pp_cap == 0) { in pci_read_cap()
891 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); in pci_read_cap()
892 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; in pci_read_cap()
893 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE; in pci_read_cap()
894 if ((nextptr - ptr) > PCIR_POWER_DATA) in pci_read_cap()
895 cfg->pp.pp_data = ptr + PCIR_POWER_DATA; in pci_read_cap()
899 /* Determine HT-specific capability type. */ in pci_read_cap()
903 cfg->ht.ht_slave = ptr; in pci_read_cap()
917 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", in pci_read_cap()
918 cfg->domain, cfg->bus, in pci_read_cap()
919 cfg->slot, cfg->func, in pci_read_cap()
924 cfg->ht.ht_msimap = ptr; in pci_read_cap()
925 cfg->ht.ht_msictrl = val; in pci_read_cap()
926 cfg->ht.ht_msiaddr = addr; in pci_read_cap()
932 cfg->msi.msi_location = ptr; in pci_read_cap()
933 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); in pci_read_cap()
934 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & in pci_read_cap()
937 case PCIY_MSIX: /* PCI MSI-X */ in pci_read_cap()
938 cfg->msix.msix_location = ptr; in pci_read_cap()
939 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); in pci_read_cap()
940 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & in pci_read_cap()
943 cfg->msix.msix_table_bar = PCIR_BAR(val & in pci_read_cap()
945 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; in pci_read_cap()
947 cfg->msix.msix_pba_bar = PCIR_BAR(val & in pci_read_cap()
949 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; in pci_read_cap()
952 cfg->vpd.vpd_reg = ptr; in pci_read_cap()
956 if ((cfg->hdrtype & PCIM_HDRTYPE) == in pci_read_cap()
959 cfg->subvendor = val & 0xffff; in pci_read_cap()
960 cfg->subdevice = val >> 16; in pci_read_cap()
963 case PCIY_PCIX: /* PCI-X */ in pci_read_cap()
965 * Assume we have a PCI-X chipset if we have in pci_read_cap()
966 * at least one PCI-PCI bridge with a PCI-X in pci_read_cap()
968 * PCI-express or HT chipsets might match on in pci_read_cap()
971 if ((cfg->hdrtype & PCIM_HDRTYPE) == in pci_read_cap()
974 cfg->pcix.pcix_location = ptr; in pci_read_cap()
976 case PCIY_EXPRESS: /* PCI-express */ in pci_read_cap()
978 * Assume we have a PCI-express chipset if we have in pci_read_cap()
979 * at least one PCI-express device. in pci_read_cap()
982 cfg->pcie.pcie_location = ptr; in pci_read_cap()
984 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE; in pci_read_cap()
987 cfg->ea.ea_location = ptr; in pci_read_cap()
998 * slaves. PCI-PCI bridges have their windows enabled via in pci_read_cap()
1001 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 && in pci_read_cap()
1002 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) { in pci_read_cap()
1005 cfg->domain, cfg->bus, cfg->slot, cfg->func); in pci_read_cap()
1006 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; in pci_read_cap()
1007 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl, in pci_read_cap()
1027 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); in pci_read_vpd_reg()
1029 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { in pci_read_vpd_reg()
1030 if (--count < 0) in pci_read_vpd_reg()
1034 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); in pci_read_vpd_reg()
1047 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
1048 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
1049 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
1050 if (--count < 0)
1070 /* return 0 and one byte in *data if no read error, -1 else */
1077 if (vrs->bytesinval == 0) { in vpd_nextbyte()
1078 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg)) in vpd_nextbyte()
1079 return (-1); in vpd_nextbyte()
1080 vrs->val = le32toh(reg); in vpd_nextbyte()
1081 vrs->off += 4; in vpd_nextbyte()
1082 byte = vrs->val & 0xff; in vpd_nextbyte()
1083 vrs->bytesinval = 3; in vpd_nextbyte()
1085 vrs->val = vrs->val >> 8; in vpd_nextbyte()
1086 byte = vrs->val & 0xff; in vpd_nextbyte()
1087 vrs->bytesinval--; in vpd_nextbyte()
1090 vrs->cksum += byte; in vpd_nextbyte()
1095 /* return 0 on match, -1 and "unget" byte on no match */
1102 return (-1); in vpd_expectbyte()
1107 vrs->cksum -= data; in vpd_expectbyte()
1108 vrs->val = (vrs->val << 8) + data; in vpd_expectbyte()
1109 vrs->bytesinval++; in vpd_expectbyte()
1110 return (-1); in vpd_expectbyte()
1113 /* return size if tag matches, -1 on no match, -2 on read error */
1120 return (-1); in vpd_read_tag_size()
1126 return (-2); in vpd_read_tag_size()
1128 return (-2); in vpd_read_tag_size()
1149 /* read VPD keyword and return element size, return -1 on read error */
1156 return (-1); in vpd_read_elem_head()
1158 return (-1); in vpd_read_elem_head()
1160 return (-1); in vpd_read_elem_head()
1194 return (-1); in vpd_read_elem_data()
1210 vrs->cksum -= fixup; in vpd_fixup_cksum()
1213 /* fetch one read-only element and return size of heading + data */
1222 cfg = vrs->cfg; in next_vpd_ro_elem()
1223 vpd = &cfg->vpd; in next_vpd_ro_elem()
1226 return (-1); in next_vpd_ro_elem()
1227 vpd->vpd_ros = alloc_buffer(vpd->vpd_ros, sizeof(*vpd->vpd_ros), vpd->vpd_rocnt); in next_vpd_ro_elem()
1228 vpd_ros = &vpd->vpd_ros[vpd->vpd_rocnt]; in next_vpd_ro_elem()
1229 maxsize -= 3; in next_vpd_ro_elem()
1230 len = vpd_read_elem_data(vrs, vpd_ros->keyword, &vpd_ros->value, maxsize); in next_vpd_ro_elem()
1231 if (vpd_ros->value == NULL) in next_vpd_ro_elem()
1232 return (-1); in next_vpd_ro_elem()
1233 vpd_ros->len = len; in next_vpd_ro_elem()
1234 if (vpd_ros->keyword[0] == 'R' && vpd_ros->keyword[1] == 'V') { in next_vpd_ro_elem()
1235 vpd_fixup_cksum(vrs, vpd_ros->value, len); in next_vpd_ro_elem()
1236 if (vrs->cksum != 0) { in next_vpd_ro_elem()
1238 "invalid VPD checksum %#hhx\n", vrs->cksum); in next_vpd_ro_elem()
1239 return (-1); in next_vpd_ro_elem()
1242 vpd->vpd_rocnt++; in next_vpd_ro_elem()
1256 cfg = vrs->cfg; in next_vpd_rw_elem()
1257 vpd = &cfg->vpd; in next_vpd_rw_elem()
1260 return (-1); in next_vpd_rw_elem()
1261 vpd->vpd_w = alloc_buffer(vpd->vpd_w, sizeof(*vpd->vpd_w), vpd->vpd_wcnt); in next_vpd_rw_elem()
1262 if (vpd->vpd_w == NULL) { in next_vpd_rw_elem()
1264 return (-1); in next_vpd_rw_elem()
1266 vpd_w = &vpd->vpd_w[vpd->vpd_wcnt]; in next_vpd_rw_elem()
1267 maxsize -= 3; in next_vpd_rw_elem()
1268 vpd_w->start = vrs->off + 3 - vrs->bytesinval; in next_vpd_rw_elem()
1269 len = vpd_read_elem_data(vrs, vpd_w->keyword, &vpd_w->value, maxsize); in next_vpd_rw_elem()
1270 if (vpd_w->value == NULL) in next_vpd_rw_elem()
1271 return (-1); in next_vpd_rw_elem()
1272 vpd_w->len = len; in next_vpd_rw_elem()
1273 vpd->vpd_wcnt++; in next_vpd_rw_elem()
1284 free(vpd->vpd_ident, M_DEVBUF); in vpd_free()
1285 for (i = 0; i < vpd->vpd_rocnt; i++) in vpd_free()
1286 free(vpd->vpd_ros[i].value, M_DEVBUF); in vpd_free()
1287 free(vpd->vpd_ros, M_DEVBUF); in vpd_free()
1288 vpd->vpd_rocnt = 0; in vpd_free()
1289 for (i = 0; i < vpd->vpd_wcnt; i++) in vpd_free()
1290 free(vpd->vpd_w[i].value, M_DEVBUF); in vpd_free()
1291 free(vpd->vpd_w, M_DEVBUF); in vpd_free()
1292 vpd->vpd_wcnt = 0; in vpd_free()
1314 /* read VPD ident element - mandatory */ in pci_parse_vpd()
1320 cfg->vpd.vpd_ident = vpd_read_value(&vrs, size); in pci_parse_vpd()
1321 if (cfg->vpd.vpd_ident == NULL) { in pci_parse_vpd()
1326 /* read VPD RO elements - mandatory */ in pci_parse_vpd()
1329 pci_printf(cfg, "no read-only VPD data found\n"); in pci_parse_vpd()
1335 pci_printf(cfg, "error accessing read-only VPD data\n"); in pci_parse_vpd()
1336 return (-1); in pci_parse_vpd()
1338 size -= elem_size; in pci_parse_vpd()
1342 return (-1); in pci_parse_vpd()
1344 /* read VPD RW elements - optional */ in pci_parse_vpd()
1346 if (size == -2) in pci_parse_vpd()
1347 return (-1); in pci_parse_vpd()
1352 return (-1); in pci_parse_vpd()
1354 size -= elem_size; in pci_parse_vpd()
1357 /* read empty END tag - mandatory */ in pci_parse_vpd()
1372 vpd_free(&cfg->vpd); in pci_read_vpd()
1373 cfg->vpd.vpd_cached = 1; in pci_read_vpd()
1382 pcicfgregs *cfg = &dinfo->cfg; in pci_get_vpd_ident_method()
1384 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_get_vpd_ident_method()
1387 *identptr = cfg->vpd.vpd_ident; in pci_get_vpd_ident_method()
1400 pcicfgregs *cfg = &dinfo->cfg; in pci_get_vpd_readonly_method()
1403 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_get_vpd_readonly_method()
1406 for (i = 0; i < cfg->vpd.vpd_rocnt; i++) in pci_get_vpd_readonly_method()
1407 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, in pci_get_vpd_readonly_method()
1408 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { in pci_get_vpd_readonly_method()
1409 *vptr = cfg->vpd.vpd_ros[i].value; in pci_get_vpd_readonly_method()
1421 pcicfgregs *cfg = &dinfo->cfg; in pci_fetch_vpd_list()
1423 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) in pci_fetch_vpd_list()
1425 return (&cfg->vpd); in pci_fetch_vpd_list()
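A sketch of how a child driver typically consumes the VPD data cached by the parsing code above, using the pci_get_vpd_ident()/pci_get_vpd_readonly() accessors; "PN" is the standard read-only part-number keyword, and the helper name is hypothetical.

static void
example_print_vpd(device_t dev)
{
	const char *ident, *pn;

	if (pci_get_vpd_ident(dev, &ident) == 0)
		device_printf(dev, "VPD ident: %s\n", ident);
	if (pci_get_vpd_readonly(dev, "PN", &pn) == 0)
		device_printf(dev, "VPD part number: %s\n", pn);
}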
1519 pcicfgregs *cfg = &dinfo->cfg; in pci_find_cap_method()
1533 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_find_cap_method()
1599 pcicfgregs *cfg = &dinfo->cfg; in pci_find_extcap_method()
1603 /* Only supported for PCI-express devices. */ in pci_find_extcap_method()
1604 if (cfg->pcie.pcie_location == 0) in pci_find_extcap_method()
1636 pcicfgregs *cfg = &dinfo->cfg; in pci_find_next_extcap_method()
1640 /* Only supported for PCI-express devices. */ in pci_find_next_extcap_method()
1641 if (cfg->pcie.pcie_location == 0) in pci_find_next_extcap_method()
1662 * Support for MSI-X message interrupts.
1668 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_write_msix_entry()
1671 KASSERT(msix->msix_table_len > index, ("bogus index")); in pci_write_msix_entry()
1672 offset = msix->msix_table_offset + index * 16; in pci_write_msix_entry()
1673 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); in pci_write_msix_entry()
1674 bus_write_4(msix->msix_table_res, offset + 4, address >> 32); in pci_write_msix_entry()
1675 bus_write_4(msix->msix_table_res, offset + 8, data); in pci_write_msix_entry()
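For reference, each table slot written above is a 16-byte entry laid out per the PCI specification; the struct below is purely illustrative (the driver addresses the fields by byte offset, as here and in pci_mask_msix()).

struct msix_entry_layout {		/* illustrative only */
	uint32_t addr_lo;		/* +0:  Message Address */
	uint32_t addr_hi;		/* +4:  Message Upper Address */
	uint32_t data;			/* +8:  Message Data */
	uint32_t vctrl;			/* +12: Vector Control, bit 0 = mask */
};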
1685 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_enable_msix_method()
1696 msix->msix_location + PCIR_MSIX_CTRL, in pci_enable_msix_method()
1697 msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2); in pci_enable_msix_method()
1702 /* Enable MSI -> HT mapping. */ in pci_enable_msix_method()
1710 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_mask_msix()
1713 KASSERT(msix->msix_msgnum > index, ("bogus index")); in pci_mask_msix()
1714 offset = msix->msix_table_offset + index * 16 + 12; in pci_mask_msix()
1715 val = bus_read_4(msix->msix_table_res, offset); in pci_mask_msix()
1722 bus_write_4(msix->msix_table_res, offset, val); in pci_mask_msix()
1729 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_unmask_msix()
1732 KASSERT(msix->msix_table_len > index, ("bogus index")); in pci_unmask_msix()
1733 offset = msix->msix_table_offset + index * 16 + 12; in pci_unmask_msix()
1734 val = bus_read_4(msix->msix_table_res, offset); in pci_unmask_msix()
1741 bus_write_4(msix->msix_table_res, offset, val); in pci_unmask_msix()
1748 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_pending_msix()
1751 KASSERT(msix->msix_table_len > index, ("bogus index")); in pci_pending_msix()
1752 offset = msix->msix_pba_offset + (index / 32) * 4; in pci_pending_msix()
1754 return (bus_read_4(msix->msix_pba_res, offset) & bit); in pci_pending_msix()
1758 * Restore MSI-X registers and table during resume. If MSI-X is
1759 * enabled then walk the virtual table to restore the actual MSI-X
1766 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_resume_msix()
1771 if (msix->msix_alloc > 0) { in pci_resume_msix()
1773 for (i = 0; i < msix->msix_msgnum; i++) in pci_resume_msix()
1777 for (i = 0; i < msix->msix_table_len; i++) { in pci_resume_msix()
1778 mte = &msix->msix_table[i]; in pci_resume_msix()
1779 if (mte->mte_vector == 0 || mte->mte_handlers == 0) in pci_resume_msix()
1781 mv = &msix->msix_vectors[mte->mte_vector - 1]; in pci_resume_msix()
1782 pci_write_msix_entry(dev, i, mv->mv_address, in pci_resume_msix()
1783 mv->mv_data); in pci_resume_msix()
1787 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL, in pci_resume_msix()
1788 msix->msix_ctrl, 2); in pci_resume_msix()
1792 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1800 pcicfgregs *cfg = &dinfo->cfg; in pci_alloc_msix_method()
1809 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_alloc_msix_method()
1810 if (rle != NULL && rle->res != NULL) in pci_alloc_msix_method()
1814 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) in pci_alloc_msix_method()
1817 /* If MSI-X is blacklisted for this system, fail. */ in pci_alloc_msix_method()
1821 /* MSI-X capability present? */ in pci_alloc_msix_method()
1822 if (cfg->msix.msix_location == 0 || !pci_do_msix) in pci_alloc_msix_method()
1826 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, in pci_alloc_msix_method()
1827 cfg->msix.msix_table_bar); in pci_alloc_msix_method()
1828 if (rle == NULL || rle->res == NULL || in pci_alloc_msix_method()
1829 !(rman_get_flags(rle->res) & RF_ACTIVE)) in pci_alloc_msix_method()
1831 cfg->msix.msix_table_res = rle->res; in pci_alloc_msix_method()
1832 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) { in pci_alloc_msix_method()
1833 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, in pci_alloc_msix_method()
1834 cfg->msix.msix_pba_bar); in pci_alloc_msix_method()
1835 if (rle == NULL || rle->res == NULL || in pci_alloc_msix_method()
1836 !(rman_get_flags(rle->res) & RF_ACTIVE)) in pci_alloc_msix_method()
1839 cfg->msix.msix_pba_res = rle->res; in pci_alloc_msix_method()
1843 "attempting to allocate %d MSI-X vectors (%d supported)\n", in pci_alloc_msix_method()
1844 *count, cfg->msix.msix_msgnum); in pci_alloc_msix_method()
1845 max = min(*count, cfg->msix.msix_msgnum); in pci_alloc_msix_method()
1854 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, in pci_alloc_msix_method()
1860 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1); in pci_alloc_msix_method()
1862 device_printf(child, "using IRQ %ju for MSI-X\n", in pci_alloc_msix_method()
1863 rle->start); in pci_alloc_msix_method()
1870 * 'run' is true if we are in a range. in pci_alloc_msix_method()
1872 device_printf(child, "using IRQs %ju", rle->start); in pci_alloc_msix_method()
1873 irq = rle->start; in pci_alloc_msix_method()
1876 rle = resource_list_find(&dinfo->resources, in pci_alloc_msix_method()
1880 if (rle->start == irq + 1) { in pci_alloc_msix_method()
1886 /* Finish previous range. */ in pci_alloc_msix_method()
1888 printf("-%d", irq); in pci_alloc_msix_method()
1892 /* Start new range. */ in pci_alloc_msix_method()
1893 printf(",%ju", rle->start); in pci_alloc_msix_method()
1894 irq = rle->start; in pci_alloc_msix_method()
1897 /* Unfinished range? */ in pci_alloc_msix_method()
1899 printf("-%d", irq); in pci_alloc_msix_method()
1900 printf(" for MSI-X\n"); in pci_alloc_msix_method()
1905 for (i = 0; i < cfg->msix.msix_msgnum; i++) in pci_alloc_msix_method()
1909 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual, in pci_alloc_msix_method()
1911 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual, in pci_alloc_msix_method()
1914 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_alloc_msix_method()
1915 cfg->msix.msix_vectors[i].mv_irq = rle->start; in pci_alloc_msix_method()
1916 cfg->msix.msix_table[i].mte_vector = i + 1; in pci_alloc_msix_method()
1919 /* Update control register to enable MSI-X. */ in pci_alloc_msix_method()
1920 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; in pci_alloc_msix_method()
1921 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL, in pci_alloc_msix_method()
1922 cfg->msix.msix_ctrl, 2); in pci_alloc_msix_method()
1925 cfg->msix.msix_alloc = actual; in pci_alloc_msix_method()
1926 cfg->msix.msix_table_len = actual; in pci_alloc_msix_method()
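The driver-facing counterpart of the allocator above looks roughly like this sketch, using the standard pci_msix_count()/pci_alloc_msix() accessors (helper name hypothetical, error handling abbreviated).

static int
example_setup_msix(device_t dev)
{
	int count, error;

	count = pci_msix_count(dev);	/* vectors the device advertises */
	if (count == 0)
		return (ENXIO);
	error = pci_alloc_msix(dev, &count);	/* count is reduced to what was granted */
	if (error != 0)
		return (error);
	/* SYS_RES_IRQ rids 1..count may now be allocated with bus_alloc_resource_any(). */
	return (0);
}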
1933 * resources consecutively to the first N messages in the MSI-X table.
1936 * populate the MSI-X table sparsely. This method allows the driver
1942 * maps directly to the MSI-X table in that index 0 in the array
1943 * specifies the vector for the first message in the MSI-X table, etc.
1950 * On successful return, each message with a non-zero vector will have
1952 * 1. Additionally, if any of the IRQs allocated via the previous
1953 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1956 * For example, suppose a driver has an MSI-X table with 6 messages and
1960 * have an MSI-X table of ABC--- (where - means no vector assigned).
1962 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1966 * In any case, the SYS_RES_IRQ rid X will always map to the message
1967 * at MSI-X table index X - 1 and will only be valid if a vector is
1975 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_remap_msix_method()
1981 * table can't be bigger than the actual MSI-X table in the in pci_remap_msix_method()
1984 if (count == 0 || count > msix->msix_msgnum) in pci_remap_msix_method()
1989 if (vectors[i] > msix->msix_alloc) in pci_remap_msix_method()
1997 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | in pci_remap_msix_method()
2001 used[vectors[i] - 1] = 1; in pci_remap_msix_method()
2002 for (i = 0; i < msix->msix_alloc - 1; i++) in pci_remap_msix_method()
2013 for (i = 0; i < msix->msix_table_len; i++) { in pci_remap_msix_method()
2014 if (msix->msix_table[i].mte_vector == 0) in pci_remap_msix_method()
2016 if (msix->msix_table[i].mte_handlers > 0) { in pci_remap_msix_method()
2020 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_remap_msix_method()
2022 if (rle->res != NULL) { in pci_remap_msix_method()
2029 for (i = 0; i < msix->msix_table_len; i++) { in pci_remap_msix_method()
2030 if (msix->msix_table[i].mte_vector == 0) in pci_remap_msix_method()
2032 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_remap_msix_method()
2039 free(msix->msix_table, M_DEVBUF); in pci_remap_msix_method()
2040 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, in pci_remap_msix_method()
2043 msix->msix_table[i].mte_vector = vectors[i]; in pci_remap_msix_method()
2044 msix->msix_table_len = count; in pci_remap_msix_method()
2046 /* Free any unused IRQs and resize the vectors array if necessary. */ in pci_remap_msix_method()
2047 j = msix->msix_alloc - 1; in pci_remap_msix_method()
2053 msix->msix_vectors[j].mv_irq); in pci_remap_msix_method()
2054 j--; in pci_remap_msix_method()
2058 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * in pci_remap_msix_method()
2060 free(msix->msix_vectors, M_DEVBUF); in pci_remap_msix_method()
2061 msix->msix_vectors = vec; in pci_remap_msix_method()
2062 msix->msix_alloc = j + 1; in pci_remap_msix_method()
2066 /* Map the IRQs onto the rids. */ in pci_remap_msix_method()
2070 irq = msix->msix_vectors[vectors[i] - 1].mv_irq; in pci_remap_msix_method()
2071 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, in pci_remap_msix_method()
2076 device_printf(child, "Remapped MSI-X IRQs as: "); in pci_remap_msix_method()
2081 printf("---"); in pci_remap_msix_method()
2084 msix->msix_vectors[vectors[i] - 1].mv_irq); in pci_remap_msix_method()
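To make the ABC--- example from the comment above concrete, a driver that allocated three vectors and wants the A-AB-B layout would pass a vector array like the one below (sketch only; helper name hypothetical).

static int
example_remap(device_t dev)
{
	/* Slot i gets allocated vector vectors[i]; 0 leaves the slot unused. */
	u_int vectors[6] = { 1, 0, 1, 2, 0, 2 };	/* A-AB-B; vector C's IRQ is freed */

	return (pci_remap_msix(dev, 6, vectors));
}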
2096 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_release_msix()
2101 if (msix->msix_alloc == 0) in pci_release_msix()
2105 for (i = 0; i < msix->msix_table_len; i++) { in pci_release_msix()
2106 if (msix->msix_table[i].mte_vector == 0) in pci_release_msix()
2108 if (msix->msix_table[i].mte_handlers > 0) in pci_release_msix()
2110 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msix()
2112 if (rle->res != NULL) in pci_release_msix()
2116 /* Update control register to disable MSI-X. */ in pci_release_msix()
2117 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE; in pci_release_msix()
2118 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL, in pci_release_msix()
2119 msix->msix_ctrl, 2); in pci_release_msix()
2122 for (i = 0; i < msix->msix_table_len; i++) { in pci_release_msix()
2123 if (msix->msix_table[i].mte_vector == 0) in pci_release_msix()
2125 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msix()
2127 free(msix->msix_table, M_DEVBUF); in pci_release_msix()
2128 msix->msix_table_len = 0; in pci_release_msix()
2130 /* Release the IRQs. */ in pci_release_msix()
2131 for (i = 0; i < msix->msix_alloc; i++) in pci_release_msix()
2133 msix->msix_vectors[i].mv_irq); in pci_release_msix()
2134 free(msix->msix_vectors, M_DEVBUF); in pci_release_msix()
2135 msix->msix_alloc = 0; in pci_release_msix()
2140 * Return the maximum number of MSI-X messages this device supports.
2149 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_count_method()
2151 if (pci_do_msix && msix->msix_location != 0) in pci_msix_count_method()
2152 return (msix->msix_msgnum); in pci_msix_count_method()
2160 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_pba_bar_method()
2162 if (pci_do_msix && msix->msix_location != 0) in pci_msix_pba_bar_method()
2163 return (msix->msix_pba_bar); in pci_msix_pba_bar_method()
2164 return (-1); in pci_msix_pba_bar_method()
2171 struct pcicfg_msix *msix = &dinfo->cfg.msix; in pci_msix_table_bar_method()
2173 if (pci_do_msix && msix->msix_location != 0) in pci_msix_table_bar_method()
2174 return (msix->msix_table_bar); in pci_msix_table_bar_method()
2175 return (-1); in pci_msix_table_bar_method()
2185 struct pcicfg_ht *ht = &dinfo->cfg.ht; in pci_ht_map_msi()
2187 if (!ht->ht_msimap) in pci_ht_map_msi()
2190 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && in pci_ht_map_msi()
2191 ht->ht_msiaddr >> 20 == addr >> 20) { in pci_ht_map_msi()
2192 /* Enable MSI -> HT mapping. */ in pci_ht_map_msi()
2193 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; in pci_ht_map_msi()
2194 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, in pci_ht_map_msi()
2195 ht->ht_msictrl, 2); in pci_ht_map_msi()
2198 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { in pci_ht_map_msi()
2199 /* Disable MSI -> HT mapping. */ in pci_ht_map_msi()
2200 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; in pci_ht_map_msi()
2201 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, in pci_ht_map_msi()
2202 ht->ht_msictrl, 2); in pci_ht_map_msi()
2213 cap = dinfo->cfg.pcie.pcie_location; in pci_get_relaxed_ordering_enabled()
2228 cap = dinfo->cfg.pcie.pcie_location; in pci_get_max_payload()
2244 cap = dinfo->cfg.pcie.pcie_location; in pci_get_max_read_req()
2260 cap = dinfo->cfg.pcie.pcie_location; in pci_set_max_read_req()
2267 size = (1 << (fls(size) - 1)); in pci_set_max_read_req()
2270 val |= (fls(size) - 8) << 12; in pci_set_max_read_req()
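A worked example of the arithmetic above (illustrative, helper name hypothetical): a request of 600 bytes rounds down to 512 because fls(600) is 10, and 512 then encodes as (fls(512) - 8) = 2 in bits 14:12 of the PCIe Device Control register.

static void
example_set_mrrs(device_t dev)
{
	int actual;

	actual = pci_set_max_read_req(dev, 600);
	/* actual is 512: the 512-byte encoding (field value 2) was programmed. */
	(void)actual;
}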
2281 cap = dinfo->cfg.pcie.pcie_location; in pcie_read_config()
2297 cap = dinfo->cfg.pcie.pcie_location; in pcie_write_config()
2304 * Adjusts a PCI-e capability register by clearing the bits in mask
2318 cap = dinfo->cfg.pcie.pcie_location; in pcie_adjust_config()
2340 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_enable_msi_method()
2343 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR, in pci_enable_msi_method()
2345 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { in pci_enable_msi_method()
2346 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH, in pci_enable_msi_method()
2348 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT, in pci_enable_msi_method()
2351 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data, in pci_enable_msi_method()
2355 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE; in pci_enable_msi_method()
2356 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_enable_msi_method()
2357 msi->msi_ctrl, 2); in pci_enable_msi_method()
2359 /* Enable MSI -> HT mapping. */ in pci_enable_msi_method()
2367 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_disable_msi_method()
2369 /* Disable MSI -> HT mapping. */ in pci_disable_msi_method()
2373 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; in pci_disable_msi_method()
2374 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_disable_msi_method()
2375 msi->msi_ctrl, 2); in pci_disable_msi_method()
2387 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_resume_msi()
2391 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) { in pci_resume_msi()
2392 address = msi->msi_addr; in pci_resume_msi()
2393 data = msi->msi_data; in pci_resume_msi()
2394 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR, in pci_resume_msi()
2396 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) { in pci_resume_msi()
2397 pci_write_config(dev, msi->msi_location + in pci_resume_msi()
2399 pci_write_config(dev, msi->msi_location + in pci_resume_msi()
2402 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, in pci_resume_msi()
2405 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, in pci_resume_msi()
2413 pcicfgregs *cfg = &dinfo->cfg; in pci_remap_intr_method()
2423 * of MSI IRQs. If we find it, we request updated address and in pci_remap_intr_method()
2426 if (cfg->msi.msi_alloc > 0) { in pci_remap_intr_method()
2428 if (cfg->msi.msi_handlers == 0) in pci_remap_intr_method()
2430 for (i = 0; i < cfg->msi.msi_alloc; i++) { in pci_remap_intr_method()
2431 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, in pci_remap_intr_method()
2433 if (rle->start == irq) { in pci_remap_intr_method()
2439 dinfo->cfg.msi.msi_addr = addr; in pci_remap_intr_method()
2440 dinfo->cfg.msi.msi_data = data; in pci_remap_intr_method()
2449 * For MSI-X, we check to see if we have this IRQ. If we do, in pci_remap_intr_method()
2453 if (cfg->msix.msix_alloc > 0) { in pci_remap_intr_method()
2456 for (i = 0; i < cfg->msix.msix_alloc; i++) { in pci_remap_intr_method()
2457 mv = &cfg->msix.msix_vectors[i]; in pci_remap_intr_method()
2458 if (mv->mv_irq == irq) { in pci_remap_intr_method()
2463 mv->mv_address = addr; in pci_remap_intr_method()
2464 mv->mv_data = data; in pci_remap_intr_method()
2465 for (j = 0; j < cfg->msix.msix_table_len; j++) { in pci_remap_intr_method()
2466 mte = &cfg->msix.msix_table[j]; in pci_remap_intr_method()
2467 if (mte->mte_vector != i + 1) in pci_remap_intr_method()
2469 if (mte->mte_handlers == 0) in pci_remap_intr_method()
2501 * host-PCI bridge at device 0:0:0. In the future, it may become
2513 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ in pci_msi_blacklisted()
2535 * Returns true if the specified device is blacklisted because MSI-X
2537 * MSI-X doesn't either.
2553 * Determine if MSI-X is blacklisted globally on this system. If MSI
2554 * is blacklisted, assume that MSI-X is as well. Check for additional
2555 * chipsets where MSI works but MSI-X does not.
2582 pcicfgregs *cfg = &dinfo->cfg; in pci_alloc_msi_method()
2584 int actual, error, i, irqs[32]; in pci_alloc_msi_method() local
2592 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_alloc_msi_method()
2593 if (rle != NULL && rle->res != NULL) in pci_alloc_msi_method()
2597 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0) in pci_alloc_msi_method()
2605 if (cfg->msi.msi_location == 0 || !pci_do_msi) in pci_alloc_msi_method()
2611 *count, cfg->msi.msi_msgnum); in pci_alloc_msi_method()
2614 actual = min(*count, cfg->msi.msi_msgnum); in pci_alloc_msi_method()
2626 actual, irqs); in pci_alloc_msi_method()
2638 * resources in the irqs[] array, so add new resources in pci_alloc_msi_method()
2642 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, in pci_alloc_msi_method()
2643 irqs[i], irqs[i], 1); in pci_alloc_msi_method()
2647 device_printf(child, "using IRQ %d for MSI\n", irqs[0]); in pci_alloc_msi_method()
2654 * we are in a range. in pci_alloc_msi_method()
2656 device_printf(child, "using IRQs %d", irqs[0]); in pci_alloc_msi_method()
2660 if (irqs[i] == irqs[i - 1] + 1) { in pci_alloc_msi_method()
2665 /* Finish previous range. */ in pci_alloc_msi_method()
2667 printf("-%d", irqs[i - 1]); in pci_alloc_msi_method()
2671 /* Start new range. */ in pci_alloc_msi_method()
2672 printf(",%d", irqs[i]); in pci_alloc_msi_method()
2675 /* Unfinished range? */ in pci_alloc_msi_method()
2677 printf("-%d", irqs[actual - 1]); in pci_alloc_msi_method()
2683 ctrl = cfg->msi.msi_ctrl; in pci_alloc_msi_method()
2685 ctrl |= (ffs(actual) - 1) << 4; in pci_alloc_msi_method()
2686 cfg->msi.msi_ctrl = ctrl; in pci_alloc_msi_method()
2687 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2); in pci_alloc_msi_method()
2690 cfg->msi.msi_alloc = actual; in pci_alloc_msi_method()
2691 cfg->msi.msi_handlers = 0; in pci_alloc_msi_method()
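The driver-facing counterpart of the MSI allocator above, sketched with the standard accessors (single message, falling back to legacy INTx when MSI is unavailable; helper name hypothetical).

static int
example_setup_msi(device_t dev, driver_intr_t handler, void *arg,
    struct resource **irqp, void **cookiep)
{
	int count = 1, rid;

	if (pci_msi_count(dev) > 0 && pci_alloc_msi(dev, &count) == 0)
		rid = 1;		/* MSI messages use SYS_RES_IRQ rids from 1 */
	else
		rid = 0;		/* fall back to the legacy INTx line */
	*irqp = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | (rid == 0 ? RF_SHAREABLE : 0));
	if (*irqp == NULL)
		return (ENXIO);
	return (bus_setup_intr(dev, *irqp, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, handler, arg, cookiep));
}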
2701 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_release_msi_method()
2703 int error, i, irqs[32]; in pci_release_msi_method() local
2705 /* Try MSI-X first. */ in pci_release_msi_method()
2711 if (msi->msi_alloc == 0) in pci_release_msi_method()
2713 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages")); in pci_release_msi_method()
2716 if (msi->msi_handlers > 0) in pci_release_msi_method()
2718 for (i = 0; i < msi->msi_alloc; i++) { in pci_release_msi_method()
2719 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msi_method()
2721 if (rle->res != NULL) in pci_release_msi_method()
2723 irqs[i] = rle->start; in pci_release_msi_method()
2727 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE), in pci_release_msi_method()
2729 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK; in pci_release_msi_method()
2730 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL, in pci_release_msi_method()
2731 msi->msi_ctrl, 2); in pci_release_msi_method()
2734 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs); in pci_release_msi_method()
2735 for (i = 0; i < msi->msi_alloc; i++) in pci_release_msi_method()
2736 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); in pci_release_msi_method()
2739 msi->msi_alloc = 0; in pci_release_msi_method()
2740 msi->msi_addr = 0; in pci_release_msi_method()
2741 msi->msi_data = 0; in pci_release_msi_method()
2755 struct pcicfg_msi *msi = &dinfo->cfg.msi; in pci_msi_count_method()
2757 if (pci_do_msi && msi->msi_location != 0) in pci_msi_count_method()
2758 return (msi->msi_msgnum); in pci_msi_count_method()
2772 if (dinfo->cfg.vpd.vpd_reg) in pci_freecfg()
2773 vpd_free(&dinfo->cfg.vpd); in pci_freecfg()
2775 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) { in pci_freecfg()
2785 pci_numdevs--; in pci_freecfg()
2796 pcicfgregs *cfg = &dinfo->cfg; in pci_set_powerstate_method()
2800 if (cfg->pp.pp_cap == 0) in pci_set_powerstate_method()
2806 * behavior when going from D3 -> D3. in pci_set_powerstate_method()
2831 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) in pci_set_powerstate_method()
2838 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) in pci_set_powerstate_method()
2843 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) in pci_set_powerstate_method()
2858 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); in pci_set_powerstate_method()
2868 pcicfgregs *cfg = &dinfo->cfg; in pci_get_powerstate_method()
2872 if (cfg->pp.pp_cap != 0) { in pci_get_powerstate_method()
2873 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); in pci_get_powerstate_method()
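A sketch of how a driver drives the accessors above during suspend (pci_set_powerstate()/pci_get_powerstate() are the pcivar.h wrappers, PCI_POWERSTATE_D3 comes from sys/bus.h; helper name hypothetical).

static int
example_suspend(device_t dev)
{
	/* Device context is lost below D0, so soft state must be saved first. */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
	return (0);
}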
2975 * New style pci driver. Parent device is either a pci-host-bridge or a
2976 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2984 pcicfgregs *cfg = &dinfo->cfg; in pci_print_verbose()
2986 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n", in pci_print_verbose()
2987 cfg->vendor, cfg->device, cfg->revid); in pci_print_verbose()
2989 cfg->domain, cfg->bus, cfg->slot, cfg->func); in pci_print_verbose()
2990 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n", in pci_print_verbose()
2991 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype, in pci_print_verbose()
2992 cfg->mfdev); in pci_print_verbose()
2994 cfg->cmdreg, cfg->statreg, cfg->cachelnsz); in pci_print_verbose()
2996 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt, in pci_print_verbose()
2997 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250); in pci_print_verbose()
2998 if (cfg->intpin > 0) in pci_print_verbose()
3000 cfg->intpin +'a' -1, cfg->intline); in pci_print_verbose()
3001 if (cfg->pp.pp_cap) { in pci_print_verbose()
3004 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2); in pci_print_verbose()
3006 cfg->pp.pp_cap & PCIM_PCAP_SPEC, in pci_print_verbose()
3007 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "", in pci_print_verbose()
3008 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "", in pci_print_verbose()
3011 if (cfg->msi.msi_location) { in pci_print_verbose()
3014 ctrl = cfg->msi.msi_ctrl; in pci_print_verbose()
3016 cfg->msi.msi_msgnum, in pci_print_verbose()
3017 (cfg->msi.msi_msgnum == 1) ? "" : "s", in pci_print_verbose()
3021 if (cfg->msix.msix_location) { in pci_print_verbose()
3022 printf("\tMSI-X supports %d message%s ", in pci_print_verbose()
3023 cfg->msix.msix_msgnum, in pci_print_verbose()
3024 (cfg->msix.msix_msgnum == 1) ? "" : "s"); in pci_print_verbose()
3025 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar) in pci_print_verbose()
3026 printf("in map 0x%x\n", in pci_print_verbose()
3027 cfg->msix.msix_table_bar); in pci_print_verbose()
3030 cfg->msix.msix_table_bar, in pci_print_verbose()
3031 cfg->msix.msix_pba_bar); in pci_print_verbose()
3053 pci_addr_t map, testval; in pci_read_bar() local
3058 * The device ROM BAR is special. It is always a 32-bit in pci_read_bar()
3063 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) { in pci_read_bar()
3064 map = pci_read_config(dev, reg, 4); in pci_read_bar()
3067 pci_write_config(dev, reg, map, 4); in pci_read_bar()
3068 *mapp = map; in pci_read_bar()
3075 map = pci_read_config(dev, reg, 4); in pci_read_bar()
3076 ln2range = pci_maprange(map); in pci_read_bar()
3078 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32; in pci_read_bar()
3087 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); in pci_read_bar()
3096 * and combines the result into a 64-bit value." (section 6.2.5.1) in pci_read_bar()
3111 * the BAR of the low-level console device and when booting verbose, in pci_read_bar()
3114 pci_write_config(dev, reg, map, 4); in pci_read_bar()
3116 pci_write_config(dev, reg + 4, map >> 32, 4); in pci_read_bar()
3119 *mapp = map; in pci_read_bar()
3131 /* The device ROM BAR is always a 32-bit memory BAR. */ in pci_write_bar()
3133 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) in pci_write_bar()
3136 ln2range = pci_maprange(pm->pm_value); in pci_write_bar()
3137 pci_write_config(dev, pm->pm_reg, base, 4); in pci_write_bar()
3139 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4); in pci_write_bar()
3140 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4); in pci_write_bar()
3142 pm->pm_value |= (pci_addr_t)pci_read_config(dev, in pci_write_bar()
3143 pm->pm_reg + 4, 4) << 32; in pci_write_bar()
3153 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { in pci_find_bar()
3154 if (pm->pm_reg == reg) in pci_find_bar()
3166 return (STAILQ_FIRST(&dinfo->cfg.maps)); in pci_first_bar()
3182 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) && in pci_bar_enabled()
3183 !(pm->pm_value & PCIM_BIOS_ENABLE)) in pci_bar_enabled()
3186 if ((dinfo->cfg.flags & PCICFG_VF) != 0) { in pci_bar_enabled()
3189 iov = dinfo->cfg.iov; in pci_bar_enabled()
3190 cmd = pci_read_config(iov->iov_pf, in pci_bar_enabled()
3191 iov->iov_pos + PCIR_SRIOV_CTL, 2); in pci_bar_enabled()
3196 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value)) in pci_bar_enabled()
3210 pm->pm_reg = reg; in pci_add_bar()
3211 pm->pm_value = value; in pci_add_bar()
3212 pm->pm_size = size; in pci_add_bar()
3213 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) { in pci_add_bar()
3214 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x", in pci_add_bar()
3217 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg) in pci_add_bar()
3221 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link); in pci_add_bar()
3223 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link); in pci_add_bar()
3235 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) { in pci_restore_bars()
3236 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg)) in pci_restore_bars()
3239 ln2range = pci_maprange(pm->pm_value); in pci_restore_bars()
3240 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4); in pci_restore_bars()
3242 pci_write_config(dev, pm->pm_reg + 4, in pci_restore_bars()
3243 pm->pm_value >> 32, 4); in pci_restore_bars()
3248 * Add a resource based on a pci map register. Return 1 if the map
3249 * register is a 32bit map register or 2 if it is a 64bit register.
3256 pci_addr_t base, map, testval; in pci_add_map() local
3268 maprange = pci_maprange(pm->pm_value); in pci_add_map()
3273 pci_read_bar(dev, reg, &map, &testval, NULL); in pci_add_map()
3274 if (PCI_BAR_MEM(map)) { in pci_add_map()
3276 if (map & PCIM_BAR_MEM_PREFETCH) in pci_add_map()
3281 base = pci_mapbase(map); in pci_add_map()
3287 maprange = pci_maprange(map); in pci_add_map()
3304 pm = pci_add_bar(dev, reg, map, mapsize); in pci_add_map()
3306 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", in pci_add_map()
3307 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); in pci_add_map()
3327 if (!force && (basezero || map == testval)) in pci_add_map()
3372 end = base + count - 1; in pci_add_map()
3378 * so that this resource range is already reserved. The in pci_add_map()
3389 * this BAR using any available range. The firmware felt in pci_add_map()
3405 * resource range. in pci_add_map()
3486 pcicfgregs *cfg = &dinfo->cfg; in pci_assign_interrupt()
3491 if (cfg->intpin == 0) in pci_assign_interrupt()
3498 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1); in pci_assign_interrupt()
3510 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route) in pci_assign_interrupt()
3513 irq = cfg->intline; in pci_assign_interrupt()
3521 if (irq != cfg->intline) { in pci_assign_interrupt()
3522 cfg->intline = irq; in pci_assign_interrupt()
3527 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1); in pci_assign_interrupt()
3667 eec = -1; in xhci_early_takeover()
3718 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_reserve_secbus()
3732 * If the existing bus range is valid, attempt to reserve it in pci_reserve_secbus()
3783 count = end - start + 1; in pci_reserve_secbus()
3790 * rather than reserving the existing range. However, in pci_reserve_secbus()
3825 cfg = &dinfo->cfg; in pci_alloc_secbus()
3826 rl = &dinfo->resources; in pci_alloc_secbus()
3827 switch (cfg->hdrtype & PCIM_HDRTYPE) { in pci_alloc_secbus()
3874 iov = dinfo->cfg.iov; in pci_ea_bei_to_rid()
3876 iov_pos = iov->iov_pos; in pci_ea_bei_to_rid()
3894 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) + in pci_ea_bei_to_rid()
3898 return (-1); in pci_ea_bei_to_rid()
3909 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { in pci_ea_is_enabled()
3910 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid) in pci_ea_is_enabled()
3911 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0); in pci_ea_is_enabled()
3932 rl = &dinfo->resources; in pci_add_resources_ea()
3936 iov = dinfo->cfg.iov; in pci_add_resources_ea()
3939 if (dinfo->cfg.ea.ea_location == 0) in pci_add_resources_ea()
3942 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) { in pci_add_resources_ea()
3944 * TODO: Ignore EA-BAR if it is not enabled. in pci_add_resources_ea()
3949 * a legacy-BAR mechanism. in pci_add_resources_ea()
3951 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0) in pci_add_resources_ea()
3954 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) { in pci_add_resources_ea()
3973 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) || in pci_add_resources_ea()
3974 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5)) in pci_add_resources_ea()
3981 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) || in pci_add_resources_ea()
3982 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) && in pci_add_resources_ea()
3983 (ea->eae_bei != PCIM_EA_BEI_ROM)) in pci_add_resources_ea()
3987 rid = pci_ea_bei_to_rid(dev, ea->eae_bei); in pci_add_resources_ea()
3996 start = ea->eae_base; in pci_add_resources_ea()
3997 count = ea->eae_max_offset + 1; in pci_add_resources_ea()
4000 count = count * iov->iov_num_vfs; in pci_add_resources_ea()
4002 end = start + count - 1; in pci_add_resources_ea()
4017 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4); in pci_add_resources_ea()
4019 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4); in pci_add_resources_ea()
4025 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4); in pci_add_resources_ea()
4046 cfg = &dinfo->cfg; in pci_add_resources()
4047 rl = &dinfo->resources; in pci_add_resources()
4048 devid = (cfg->device << 16) | cfg->vendor; in pci_add_resources()
4053 /* ATA devices need special map treatment */ in pci_add_resources()
4061 for (i = 0; i < cfg->nummaps;) { in pci_add_resources()
4073 for (q = &pci_quirks[0]; q->devid != 0; q++) in pci_add_resources()
4074 if (q->devid == devid && in pci_add_resources()
4075 q->type == PCI_QUIRK_UNMAP_REG && in pci_add_resources()
4076 q->arg1 == PCIR_BAR(i)) in pci_add_resources()
4078 if (q->devid != 0) { in pci_add_resources()
4089 for (q = &pci_quirks[0]; q->devid != 0; q++) in pci_add_resources()
4090 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG) in pci_add_resources()
4091 pci_add_map(bus, dev, q->arg1, rl, force, 0); in pci_add_resources()
4093 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) { in pci_add_resources()
4096 * Try to re-route interrupts. Sometimes the BIOS or in pci_add_resources()
4098 * If the re-route fails, then just stick with what we in pci_add_resources()
4158 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev); in pci_add_children()
4297 vf_dinfo->cfg.flags |= PCICFG_VF; in pci_add_iov_child()
4300 return (vf_dinfo->cfg.dev); in pci_add_iov_child()
4322 if (dinfo->cfg.pcie.pcie_location == 0) in pcie_setup_mps()
4358 if (dinfo->cfg.pcie.pcie_location != 0 && in pci_add_child_clear_aer()
4359 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { in pci_add_child_clear_aer()
4360 r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4364 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4371 pci_printf(&dinfo->cfg, in pci_add_child_clear_aer()
4372 "clearing AER UC 0x%08x -> 0x%08x\n", in pci_add_child_clear_aer()
4400 pci_printf(&dinfo->cfg, in pci_add_child_clear_aer()
4401 "clearing AER COR 0x%08x -> 0x%08x\n", in pci_add_child_clear_aer()
4417 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4421 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pci_add_child_clear_aer()
4431 dinfo->cfg.dev = dev = device_add_child(bus, NULL, DEVICE_UNIT_ANY); in pci_add_child()
4433 resource_list_init(&dinfo->resources); in pci_add_child()
4440 pci_child_added(dinfo->cfg.dev); in pci_add_child()
4445 EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev); in pci_add_child()
4475 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno, in pci_attach_common()
4477 if (sc->sc_bus == NULL) { in pci_attach_common()
4484 sc->sc_dma_tag = bus_get_dma_tag(dev); in pci_attach_common()
4520 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus); in pci_detach()
4603 * as MSI/MSI-X interrupts are never shared. in pci_suspend_child()
4605 rle = resource_list_find(&dinfo->resources, in pci_suspend_child()
4607 if (rle != NULL && rle->res != NULL) in pci_suspend_child()
4608 (void)bus_suspend_intr(child, rle->res); in pci_suspend_child()
4636 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0); in pci_resume_child()
4637 if (rle != NULL && rle->res != NULL) in pci_resume_child()
4638 (void)bus_resume_intr(child, rle->res); in pci_resume_child()
4724 pci_printf(&dinfo->cfg, "reprobing on driver added\n"); in pci_driver_added()
4761 * Check to see if the interrupt is MSI or MSI-X. in pci_setup_intr()
4762 * Ask our parent to map the MSI and give in pci_setup_intr()
4768 if (dinfo->cfg.msi.msi_alloc > 0) { in pci_setup_intr()
4769 if (dinfo->cfg.msi.msi_addr == 0) { in pci_setup_intr()
4770 KASSERT(dinfo->cfg.msi.msi_handlers == 0, in pci_setup_intr()
4776 dinfo->cfg.msi.msi_addr = addr; in pci_setup_intr()
4777 dinfo->cfg.msi.msi_data = data; in pci_setup_intr()
4779 if (dinfo->cfg.msi.msi_handlers == 0) in pci_setup_intr()
4780 pci_enable_msi(child, dinfo->cfg.msi.msi_addr, in pci_setup_intr()
4781 dinfo->cfg.msi.msi_data); in pci_setup_intr()
4782 dinfo->cfg.msi.msi_handlers++; in pci_setup_intr()
4784 KASSERT(dinfo->cfg.msix.msix_alloc > 0, in pci_setup_intr()
4785 ("No MSI or MSI-X interrupts allocated")); in pci_setup_intr()
4786 KASSERT(rid <= dinfo->cfg.msix.msix_table_len, in pci_setup_intr()
4787 ("MSI-X index too high")); in pci_setup_intr()
4788 mte = &dinfo->cfg.msix.msix_table[rid - 1]; in pci_setup_intr()
4789 KASSERT(mte->mte_vector != 0, ("no message vector")); in pci_setup_intr()
4790 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1]; in pci_setup_intr()
4791 KASSERT(mv->mv_irq == rman_get_start(irq), in pci_setup_intr()
4793 if (mv->mv_address == 0) { in pci_setup_intr()
4794 KASSERT(mte->mte_handlers == 0, in pci_setup_intr()
4795 ("MSI-X table entry has handlers, but vector not mapped")); in pci_setup_intr()
4800 mv->mv_address = addr; in pci_setup_intr()
4801 mv->mv_data = data; in pci_setup_intr()
4811 mte->mte_handlers++; in pci_setup_intr()
4812 if (mte->mte_handlers == 1) { in pci_setup_intr()
4813 pci_enable_msix(child, rid - 1, mv->mv_address, in pci_setup_intr()
4814 mv->mv_data); in pci_setup_intr()
4815 pci_unmask_msix(child, rid - 1); in pci_setup_intr()
4820 * Make sure that INTx is disabled if we are using MSI/MSI-X, in pci_setup_intr()
4822 * in which case we "enable" INTx so MSI/MSI-X actually works. in pci_setup_intr()
4862 * Check to see if the interrupt is MSI or MSI-X. If so, in pci_teardown_intr()
4864 * MSI-X message, or disable MSI messages if the count in pci_teardown_intr()
4868 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); in pci_teardown_intr()
4869 if (rle->res != irq) in pci_teardown_intr()
4871 if (dinfo->cfg.msi.msi_alloc > 0) { in pci_teardown_intr()
4872 KASSERT(rid <= dinfo->cfg.msi.msi_alloc, in pci_teardown_intr()
4873 ("MSI-X index too high")); in pci_teardown_intr()
4874 if (dinfo->cfg.msi.msi_handlers == 0) in pci_teardown_intr()
4876 dinfo->cfg.msi.msi_handlers--; in pci_teardown_intr()
4877 if (dinfo->cfg.msi.msi_handlers == 0) in pci_teardown_intr()
4880 KASSERT(dinfo->cfg.msix.msix_alloc > 0, in pci_teardown_intr()
4881 ("No MSI or MSI-X interrupts allocated")); in pci_teardown_intr()
4882 KASSERT(rid <= dinfo->cfg.msix.msix_table_len, in pci_teardown_intr()
4883 ("MSI-X index too high")); in pci_teardown_intr()
4884 mte = &dinfo->cfg.msix.msix_table[rid - 1]; in pci_teardown_intr()
4885 if (mte->mte_handlers == 0) in pci_teardown_intr()
4887 mte->mte_handlers--; in pci_teardown_intr()
4888 if (mte->mte_handlers == 0) in pci_teardown_intr()
4889 pci_mask_msix(child, rid - 1); in pci_teardown_intr()
4895 ("%s: generic teardown failed for MSI/MSI-X", __func__)); in pci_teardown_intr()
4907 rl = &dinfo->resources; in pci_print_child()
4933 {PCIC_OLD, -1, 1, "old"},
4934 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4935 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4936 {PCIC_STORAGE, -1, 1, "mass storage"},
4946 {PCIC_NETWORK, -1, 1, "network"},
4952 {PCIC_DISPLAY, -1, 1, "display"},
4956 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4961 {PCIC_MEMORY, -1, 1, "memory"},
4964 {PCIC_BRIDGE, -1, 1, "bridge"},
4965 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4966 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4967 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4968 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4969 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4970 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4971 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4972 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4973 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4974 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4979 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4984 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4987 {PCIC_INPUTDEV, -1, 1, "input device"},
4993 {PCIC_DOCKING, -1, 1, "docking station"},
4994 {PCIC_PROCESSOR, -1, 1, "processor"},
4995 {PCIC_SERIALBUS, -1, 1, "serial bus"},
5002 {PCIC_WIRELESS, -1, 1, "wireless controller"},
5006 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
5008 {PCIC_SATCOM, -1, 1, "satellite communication"},
5013 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
5016 {PCIC_DASP, -1, 0, "dasp"},
5021 {PCIC_INSTRUMENT, -1, 0, "non-essential instrumentation"},
5048 if (pci_nomatch_tab[i].subclass == -1) { in pci_probe_nomatch()
5079 rl = &dinfo->resources; in pci_child_detached()
5082 * Have to deallocate IRQs before releasing any MSI messages and in pci_child_detached()
5087 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n"); in pci_child_detached()
5088 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) { in pci_child_detached()
5089 if (dinfo->cfg.msi.msi_alloc != 0) in pci_child_detached()
5090 pci_printf(&dinfo->cfg, "Device leaked %d MSI " in pci_child_detached()
5091 "vectors\n", dinfo->cfg.msi.msi_alloc); in pci_child_detached()
5093 pci_printf(&dinfo->cfg, "Device leaked %d MSI-X " in pci_child_detached()
5094 "vectors\n", dinfo->cfg.msix.msix_alloc); in pci_child_detached()
5098 pci_printf(&dinfo->cfg, "Device leaked memory resources\n"); in pci_child_detached()
5100 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n"); in pci_child_detached()
5102 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n"); in pci_child_detached()
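/*
 * Sketch of the detach-side ordering the leak checks above expect:
 * interrupt handlers are torn down and IRQ resources released before the
 * MSI/MSI-X allocation is returned, and BAR resources are released last.
 * Uses the hypothetical softc from the MSI-X sketch earlier.
 */
static int
mydrv_detach(device_t dev)
{
	struct mydrv_softc *sc = device_get_softc(dev);

	if (sc->irq_cookie != NULL)
		bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie);
	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->irq_res);
	pci_release_msi(dev);		/* only after every IRQ is released */
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem_res);
	return (0);
}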
5120 * - devices cannot be listed without a corresponding VENDOR line.
5129 * is set to -1. Returns nonzero at the end of the database.
5141 *device = -1; in pci_describe_parse_line()
5142 *vendor = -1; in pci_describe_parse_line()
5145 left = pci_vendordata_size - (cp - pci_vendordata); in pci_describe_parse_line()
5163 left--; in pci_describe_parse_line()
5167 left--; in pci_describe_parse_line()
5173 left--; in pci_describe_parse_line()
5214 if (vendor != -1) { in pci_describe_device()
5241 cfg = &dinfo->cfg; in pci_read_ivar()
5252 *result = cfg->subvendor; in pci_read_ivar()
5255 *result = cfg->subdevice; in pci_read_ivar()
5258 *result = cfg->vendor; in pci_read_ivar()
5261 *result = cfg->device; in pci_read_ivar()
5264 *result = (cfg->device << 16) | cfg->vendor; in pci_read_ivar()
5267 *result = cfg->baseclass; in pci_read_ivar()
5270 *result = cfg->subclass; in pci_read_ivar()
5273 *result = cfg->progif; in pci_read_ivar()
5276 *result = cfg->revid; in pci_read_ivar()
5279 *result = cfg->intpin; in pci_read_ivar()
5282 *result = cfg->intline; in pci_read_ivar()
5285 *result = cfg->domain; in pci_read_ivar()
5288 *result = cfg->bus; in pci_read_ivar()
5291 *result = cfg->slot; in pci_read_ivar()
5294 *result = cfg->func; in pci_read_ivar()
5297 *result = cfg->cmdreg; in pci_read_ivar()
5300 *result = cfg->cachelnsz; in pci_read_ivar()
5303 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { in pci_read_ivar()
5304 *result = -1; in pci_read_ivar()
5307 *result = cfg->mingnt; in pci_read_ivar()
5310 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) { in pci_read_ivar()
5311 *result = -1; in pci_read_ivar()
5314 *result = cfg->maxlat; in pci_read_ivar()
5317 *result = cfg->lattimer; in pci_read_ivar()
5334 dinfo->cfg.intpin = value; in pci_write_ivar()
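/*
 * Sketch: drivers normally read these ivars through the pci_get_*()
 * accessors from <dev/pci/pcivar.h> rather than calling BUS_READ_IVAR()
 * directly.  The vendor/device IDs below are placeholders.
 */
static int
mydrv_probe(device_t dev)
{
	if (pci_get_vendor(dev) == 0x1234 && pci_get_device(dev) == 0x5678) {
		device_set_desc(dev, "Hypothetical example device");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}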
5364  * List resources based on pci map registers, used from within ddb
5388 if (dinfo->cfg.dev) in DB_SHOW_COMMAND_FLAGS()
5389 name = device_get_name(dinfo->cfg.dev); in DB_SHOW_COMMAND_FLAGS()
5391 p = &dinfo->conf; in DB_SHOW_COMMAND_FLAGS()
5395 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) : in DB_SHOW_COMMAND_FLAGS()
5397 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev, in DB_SHOW_COMMAND_FLAGS()
5398 p->pc_sel.pc_func, (p->pc_class << 16) | in DB_SHOW_COMMAND_FLAGS()
5399 (p->pc_subclass << 8) | p->pc_progif, in DB_SHOW_COMMAND_FLAGS()
5400 (p->pc_subdevice << 16) | p->pc_subvendor, in DB_SHOW_COMMAND_FLAGS()
5401 (p->pc_device << 16) | p->pc_vendor, in DB_SHOW_COMMAND_FLAGS()
5402 p->pc_revid, p->pc_hdr); in DB_SHOW_COMMAND_FLAGS()
5413 struct resource_list *rl = &dinfo->resources; in pci_reserve_map()
5417 pci_addr_t map, testval; in pci_reserve_map() local
5429 mapsize = pm->pm_size; in pci_reserve_map()
5430 map = pm->pm_value; in pci_reserve_map()
5434 * BAR/map is. BARs that read back 0 here are bogus in pci_reserve_map()
5440 pci_read_bar(child, *rid, &map, &testval, NULL); in pci_reserve_map()
5446 if (PCIR_IS_BIOS(&dinfo->cfg, *rid)) in pci_reserve_map()
5452 pm = pci_add_bar(child, *rid, map, mapsize); in pci_reserve_map()
5455 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) { in pci_reserve_map()
5485 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH)) in pci_reserve_map()
5510 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2); in pci_reserve_map()
5512 map = rman_get_start(res); in pci_reserve_map()
5513 pci_write_bar(child, pm, map); in pci_reserve_map()
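/*
 * Sketch: the reservation logic above is what backs an ordinary driver BAR
 * allocation such as this (typically done in attach; softc fields as in the
 * earlier sketches).
 */
	sc->mem_rid = PCIR_BAR(0);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENXIO);
	/* Registers are then accessed with bus_read_4()/bus_write_4(). */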
5536 rl = &dinfo->resources; in pci_alloc_multi_resource()
5537 cfg = &dinfo->cfg; in pci_alloc_multi_resource()
5547 if (*rid == 0 && (cfg->msi.msi_alloc > 0 || in pci_alloc_multi_resource()
5548 cfg->msix.msix_alloc > 0)) in pci_alloc_multi_resource()
5556 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) && in pci_alloc_multi_resource()
5557 (cfg->intpin != 0)) in pci_alloc_multi_resource()
5563 * PCI-PCI bridge I/O window resources are not BARs. in pci_alloc_multi_resource()
5567 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) { in pci_alloc_multi_resource()
5607 if (dinfo->cfg.flags & PCICFG_VF) { in pci_alloc_resource()
5636 cfg = &dinfo->cfg; in pci_release_resource()
5639 if (cfg->flags & PCICFG_VF) { in pci_release_resource()
5653 * PCI-PCI bridge I/O window resources are not BARs. For in pci_release_resource()
5656 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE && in pci_release_resource()
5667 rl = &dinfo->resources; in pci_release_resource()
5682 if (dinfo->cfg.flags & PCICFG_VF) { in pci_activate_resource()
5705 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) in pci_activate_resource()
5730 if (dinfo->cfg.flags & PCICFG_VF) { in pci_deactivate_resource()
5752 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid)) in pci_deactivate_resource()
5770 if (dinfo->cfg.flags & PCICFG_VF) { in pci_adjust_resource()
5789 struct resource_map_request *argsp, struct resource_map *map) in pci_map_resource() argument
5796 map)); in pci_map_resource()
5799 if (dinfo->cfg.flags & PCICFG_VF) { in pci_map_resource()
5806 map)); in pci_map_resource()
5813 return (bus_generic_map_resource(dev, child, r, argsp, map)); in pci_map_resource()
5818 struct resource_map *map) in pci_unmap_resource() argument
5824 return (bus_generic_unmap_resource(dev, child, r, map)); in pci_unmap_resource()
5827 if (dinfo->cfg.flags & PCICFG_VF) { in pci_unmap_resource()
5833 return (pci_vf_unmap_mem_resource(dev, child, r, map)); in pci_unmap_resource()
5840 return (bus_generic_unmap_resource(dev, child, r, map)); in pci_unmap_resource()
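/*
 * Sketch of the consumer side of the map/unmap methods above, per
 * bus_map_resource(9): allocate the BAR unmapped, then request an explicit
 * mapping.  The write-combining attribute (from <vm/vm.h>) and the BAR
 * number are illustrative; structure fields follow the manual page.
 */
	struct resource_map_request req;
	struct resource_map map;

	sc->mem_rid = PCIR_BAR(2);
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE | RF_UNMAPPED);
	if (sc->mem_res == NULL)
		return (ENXIO);
	resource_init_map_request(&req);
	req.memattr = VM_MEMATTR_WRITE_COMBINING;
	if (bus_map_resource(dev, sc->mem_res, &req, &map) != 0)
		return (ENXIO);
	/* ... use the mapping ... */
	bus_unmap_resource(dev, sc->mem_res, &map);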
5851 rl = &dinfo->resources; in pci_child_deleted()
5865 if (rle->res) { in pci_child_deleted()
5866 if (rman_get_flags(rle->res) & RF_ACTIVE || in pci_child_deleted()
5867 resource_list_busy(rl, rle->type, rle->rid)) { in pci_child_deleted()
5868 pci_printf(&dinfo->cfg, in pci_child_deleted()
5871 rle->type, rle->rid, in pci_child_deleted()
5872 rman_get_start(rle->res)); in pci_child_deleted()
5873 bus_release_resource(child, rle->type, rle->rid, in pci_child_deleted()
5874 rle->res); in pci_child_deleted()
5876 resource_list_unreserve(rl, dev, child, rle->type, in pci_child_deleted()
5877 rle->rid); in pci_child_deleted()
5896 rl = &dinfo->resources; in pci_delete_resource()
5901 if (rle->res) { in pci_delete_resource()
5902 if (rman_get_flags(rle->res) & RF_ACTIVE || in pci_delete_resource()
5907 type, rid, rman_get_start(rle->res)); in pci_delete_resource()
5920 return (&dinfo->resources); in pci_get_resource_list()
5937 tag = sc->sc_dma_tag; in pci_get_dma_tag()
5947 return (sc->sc_dma_tag); in pci_get_dma_tag()
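/*
 * Sketch: the bus DMA tag returned above becomes the parent of a driver's
 * own tags via bus_get_dma_tag(9)/bus_dma_tag_create(9).  The sizes and the
 * hypothetical sc->dma_tag member are illustrative.
 */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent: PCI bus tag */
	    PAGE_SIZE, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE, 1,		/* maxsize, nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->dma_tag);
	if (error != 0)
		return (error);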
5955 pcicfgregs *cfg = &dinfo->cfg; in pci_read_config_method()
5959 * SR-IOV VFs don't implement the VID or DID registers, so we have to in pci_read_config_method()
5962 if (cfg->flags & PCICFG_VF) { in pci_read_config_method()
5966 return (cfg->device << 16 | cfg->vendor); in pci_read_config_method()
5968 return (cfg->vendor); in pci_read_config_method()
5970 return (cfg->vendor & 0xff); in pci_read_config_method()
5976 /* Note that an unaligned 4-byte read is an error. */ in pci_read_config_method()
5978 return (cfg->device); in pci_read_config_method()
5980 return (cfg->device & 0xff); in pci_read_config_method()
5989 cfg->bus, cfg->slot, cfg->func, reg, width)); in pci_read_config_method()
5997 pcicfgregs *cfg = &dinfo->cfg; in pci_write_config_method()
6000 cfg->bus, cfg->slot, cfg->func, reg, val, width); in pci_write_config_method()
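/*
 * Sketch: typical driver-level use of the config-space accessors above,
 * e.g. a read-modify-write of the command register to turn on bus
 * mastering.  pci_enable_busmaster(dev) from pci(9) performs the same
 * update.
 */
	uint16_t cmd;

	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);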
6020 cfg = &dinfo->cfg; in pci_child_pnpinfo_method()
6022 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, in pci_child_pnpinfo_method()
6023 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, in pci_child_pnpinfo_method()
6024 cfg->progif); in pci_child_pnpinfo_method()
6050 pcicfgregs *cfg = &dinfo->cfg; in pci_assign_interrupt_method()
6053 cfg->intpin)); in pci_assign_interrupt_method()
6067 * Accept pciconf-style selectors of either pciD:B:S:F or in pci_lookup()
6139 cfg = &dinfo->cfg.pcie; in pci_cfg_restore_pcie()
6140 pos = cfg->pcie_location; in pci_cfg_restore_pcie()
6142 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; in pci_cfg_restore_pcie()
6144 WREG(PCIER_DEVICE_CTL, cfg->pcie_device_ctl); in pci_cfg_restore_pcie()
6146 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6147 cfg->pcie_type == PCIEM_TYPE_ENDPOINT || in pci_cfg_restore_pcie()
6148 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) in pci_cfg_restore_pcie()
6149 WREG(PCIER_LINK_CTL, cfg->pcie_link_ctl); in pci_cfg_restore_pcie()
6151 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6152 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && in pci_cfg_restore_pcie()
6153 (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) in pci_cfg_restore_pcie()
6154 WREG(PCIER_SLOT_CTL, cfg->pcie_slot_ctl); in pci_cfg_restore_pcie()
6156 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_restore_pcie()
6157 cfg->pcie_type == PCIEM_TYPE_ROOT_EC) in pci_cfg_restore_pcie()
6158 WREG(PCIER_ROOT_CTL, cfg->pcie_root_ctl); in pci_cfg_restore_pcie()
6161 WREG(PCIER_DEVICE_CTL2, cfg->pcie_device_ctl2); in pci_cfg_restore_pcie()
6162 WREG(PCIER_LINK_CTL2, cfg->pcie_link_ctl2); in pci_cfg_restore_pcie()
6163 WREG(PCIER_SLOT_CTL2, cfg->pcie_slot_ctl2); in pci_cfg_restore_pcie()
6171 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, in pci_cfg_restore_pcix()
6172 dinfo->cfg.pcix.pcix_command, 2); in pci_cfg_restore_pcix()
6189 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1); in pci_cfg_restore()
6190 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1); in pci_cfg_restore()
6191 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1); in pci_cfg_restore()
6192 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1); in pci_cfg_restore()
6193 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1); in pci_cfg_restore()
6194 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1); in pci_cfg_restore()
6195 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { in pci_cfg_restore()
6197 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1); in pci_cfg_restore()
6198 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1); in pci_cfg_restore()
6202 dinfo->cfg.bridge.br_seclat, 1); in pci_cfg_restore()
6204 dinfo->cfg.bridge.br_subbus, 1); in pci_cfg_restore()
6206 dinfo->cfg.bridge.br_secbus, 1); in pci_cfg_restore()
6208 dinfo->cfg.bridge.br_pribus, 1); in pci_cfg_restore()
6210 dinfo->cfg.bridge.br_control, 2); in pci_cfg_restore()
6214 dinfo->cfg.bridge.br_seclat, 1); in pci_cfg_restore()
6216 dinfo->cfg.bridge.br_subbus, 1); in pci_cfg_restore()
6218 dinfo->cfg.bridge.br_secbus, 1); in pci_cfg_restore()
6220 dinfo->cfg.bridge.br_pribus, 1); in pci_cfg_restore()
6222 dinfo->cfg.bridge.br_control, 2); in pci_cfg_restore()
6227 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE) in pci_cfg_restore()
6228 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2); in pci_cfg_restore()
6231 * Restore extended capabilities for PCI-Express and PCI-X in pci_cfg_restore()
6233 if (dinfo->cfg.pcie.pcie_location != 0) in pci_cfg_restore()
6235 if (dinfo->cfg.pcix.pcix_location != 0) in pci_cfg_restore()
6238 /* Restore MSI and MSI-X configurations if they are present. */ in pci_cfg_restore()
6239 if (dinfo->cfg.msi.msi_location != 0) in pci_cfg_restore()
6241 if (dinfo->cfg.msix.msix_location != 0) in pci_cfg_restore()
6245 if (dinfo->cfg.iov != NULL) in pci_cfg_restore()
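/*
 * Sketch: drivers reach pci_cfg_save()/pci_cfg_restore() through the
 * pci_save_state()/pci_restore_state() wrappers, typically bracketing a
 * device-specific reset that may clobber standard config registers.
 */
	pci_save_state(dev);
	/* ... device-specific reset ... */
	pci_restore_state(dev);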
6257 cfg = &dinfo->cfg.pcie; in pci_cfg_save_pcie()
6258 pos = cfg->pcie_location; in pci_cfg_save_pcie()
6260 cfg->pcie_flags = RREG(PCIER_FLAGS); in pci_cfg_save_pcie()
6262 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION; in pci_cfg_save_pcie()
6264 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL); in pci_cfg_save_pcie()
6266 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6267 cfg->pcie_type == PCIEM_TYPE_ENDPOINT || in pci_cfg_save_pcie()
6268 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT) in pci_cfg_save_pcie()
6269 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL); in pci_cfg_save_pcie()
6271 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6272 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT && in pci_cfg_save_pcie()
6273 (cfg->pcie_flags & PCIEM_FLAGS_SLOT)))) in pci_cfg_save_pcie()
6274 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL); in pci_cfg_save_pcie()
6276 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT || in pci_cfg_save_pcie()
6277 cfg->pcie_type == PCIEM_TYPE_ROOT_EC) in pci_cfg_save_pcie()
6278 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL); in pci_cfg_save_pcie()
6281 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2); in pci_cfg_save_pcie()
6282 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2); in pci_cfg_save_pcie()
6283 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2); in pci_cfg_save_pcie()
6291 dinfo->cfg.pcix.pcix_command = pci_read_config(dev, in pci_cfg_save_pcix()
6292 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2); in pci_cfg_save_pcix()
6308 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2); in pci_cfg_save()
6309 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2); in pci_cfg_save()
6310 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2); in pci_cfg_save()
6311 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1); in pci_cfg_save()
6312 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1); in pci_cfg_save()
6313 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); in pci_cfg_save()
6314 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); in pci_cfg_save()
6315 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1); in pci_cfg_save()
6316 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1); in pci_cfg_save()
6317 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1); in pci_cfg_save()
6318 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1); in pci_cfg_save()
6319 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) { in pci_cfg_save()
6321 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2); in pci_cfg_save()
6322 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2); in pci_cfg_save()
6323 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1); in pci_cfg_save()
6324 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1); in pci_cfg_save()
6327 dinfo->cfg.bridge.br_seclat = pci_read_config(dev, in pci_cfg_save()
6329 dinfo->cfg.bridge.br_subbus = pci_read_config(dev, in pci_cfg_save()
6331 dinfo->cfg.bridge.br_secbus = pci_read_config(dev, in pci_cfg_save()
6333 dinfo->cfg.bridge.br_pribus = pci_read_config(dev, in pci_cfg_save()
6335 dinfo->cfg.bridge.br_control = pci_read_config(dev, in pci_cfg_save()
6339 dinfo->cfg.bridge.br_seclat = pci_read_config(dev, in pci_cfg_save()
6341 dinfo->cfg.bridge.br_subbus = pci_read_config(dev, in pci_cfg_save()
6343 dinfo->cfg.bridge.br_secbus = pci_read_config(dev, in pci_cfg_save()
6345 dinfo->cfg.bridge.br_pribus = pci_read_config(dev, in pci_cfg_save()
6347 dinfo->cfg.bridge.br_control = pci_read_config(dev, in pci_cfg_save()
6349 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2); in pci_cfg_save()
6350 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2); in pci_cfg_save()
6354 if (dinfo->cfg.pcie.pcie_location != 0) in pci_cfg_save()
6357 if (dinfo->cfg.pcix.pcix_location != 0) in pci_cfg_save()
6361 if (dinfo->cfg.iov != NULL) in pci_cfg_save()
6440 ("%s: non-pci device %s", __func__, device_get_nameunit(dev))); in pci_find_pcie_root_port()
6443 * Walk the bridge hierarchy until we find a PCI-e root in pci_find_pcie_root_port()
6444 * port or a non-PCI device. in pci_find_pcie_root_port()
6457 * PCI-PCI bridge. in pci_find_pcie_root_port()
6463 if (dinfo->cfg.pcie.pcie_location != 0 && in pci_find_pcie_root_port()
6464 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) in pci_find_pcie_root_port()
6472 * Wait for pending transactions to complete on a PCI-express function.
6478 * exceeded. If dev is not a PCI-express function, this returns true.
6487 cap = dinfo->cfg.pcie.pcie_location; in pcie_wait_for_pending_transactions()
6499 max_delay -= 100; in pcie_wait_for_pending_transactions()
6514 * For non-PCI-express functions this returns 0.
6522 cap = dinfo->cfg.pcie.pcie_location; in pcie_get_max_completion_timeout()
6527 * Functions using the 1.x spec use the default timeout range of in pcie_get_max_completion_timeout()
6529 * support programmable timeouts also use this range. in pcie_get_max_completion_timeout()
6531 if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 || in pcie_get_max_completion_timeout()
6571 s = "Uncorrectable (Non-Fatal)"; in pcie_apei_error()
6607 if (dinfo->cfg.pcie.pcie_location != 0) { in pcie_apei_error()
6608 rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in pcie_apei_error()
6613 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in pcie_apei_error()
6628 * If dev is not a PCI-express function or does not support FLR, this
6633 * PCI-standard registers via pci_save_state() and
6644 cap = dinfo->cfg.pcie.pcie_location; in pcie_flr()
6655 * which will re-enable busmastering. in pcie_flr()
6664 pci_printf(&dinfo->cfg, in pcie_flr()
6669 * Extend the post-FLR delay to cover the maximum in pcie_flr()
6690 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n"); in pcie_flr()
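/*
 * Sketch of the calling pattern described above: save the standard config
 * registers, issue the FLR with a millisecond budget derived from the
 * completion timeout, then restore.  The 10 ms floor mirrors in-tree
 * callers but is illustrative.
 */
	u_int max_delay;

	max_delay = MAX(pcie_get_max_completion_timeout(dev) / 1000, 10);
	pci_save_state(dev);
	if (!pcie_flr(dev, max_delay, true))
		device_printf(dev, "FLR failed or not supported\n");
	pci_restore_state(dev);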
6695 * Attempt a power-management reset by cycling the device in/out of D3
6795 while (nelt-- > 0) { in pci_match_device()
6797 if (id->match_flag_vendor) in pci_match_device()
6798 match &= vendor == id->vendor; in pci_match_device()
6799 if (id->match_flag_device) in pci_match_device()
6800 match &= device == id->device; in pci_match_device()
6801 if (id->match_flag_subvendor) in pci_match_device()
6802 match &= subvendor == id->subvendor; in pci_match_device()
6803 if (id->match_flag_subdevice) in pci_match_device()
6804 match &= subdevice == id->subdevice; in pci_match_device()
6805 if (id->match_flag_class) in pci_match_device()
6806 match &= class == id->class_id; in pci_match_device()
6807 if (id->match_flag_subclass) in pci_match_device()
6808 match &= subclass == id->subclass; in pci_match_device()
6809 if (id->match_flag_revid) in pci_match_device()
6810 match &= revid == id->revid; in pci_match_device()
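/*
 * Sketch of the consumer-side pattern for the matching logic above,
 * assuming the PCI_DEV()/PCI_DESCR()/PCI_MATCH() convenience macros from
 * <dev/pci/pcivar.h>; a table-driven variant of the probe sketched earlier.
 * Vendor/device IDs are placeholders.
 */
static const struct pci_device_table mydrv_devs[] = {
	{ PCI_DEV(0x1234, 0x5678), PCI_DESCR("Hypothetical example device") },
};

static int
mydrv_table_probe(device_t dev)
{
	const struct pci_device_table *id;

	id = PCI_MATCH(dev, mydrv_devs);
	if (id == NULL)
		return (ENXIO);
	device_set_desc(dev, id->descr);
	return (BUS_PROBE_DEFAULT);
}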
6824 dev = dinfo->cfg.dev; in pci_print_faulted_dev_name()
6825 printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus, in pci_print_faulted_dev_name()
6826 dinfo->cfg.slot, dinfo->cfg.func); in pci_print_faulted_dev_name()
6842 dev = dinfo->cfg.dev; in pci_print_faulted_dev()
6851 if (dinfo->cfg.pcie.pcie_location != 0) { in pci_print_faulted_dev()
6853 dinfo->cfg.pcie.pcie_location + in pci_print_faulted_dev()
6861 dinfo->cfg.pcie.pcie_location + in pci_print_faulted_dev()
6905 dev = dinfo->cfg.dev; in db_clear_pcie_errors()
6906 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location + in db_clear_pcie_errors()
6908 pci_write_config(dev, dinfo->cfg.pcie.pcie_location + in db_clear_pcie_errors()
6928 dev = dinfo->cfg.dev; in DB_COMMAND_FLAGS()
6937 if (dinfo->cfg.pcie.pcie_location != 0) in DB_COMMAND_FLAGS()