Lines matching +full:bridge +full:- +full:enable (all matches below are from drivers/pci/pci.c)

1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
6 * David Mosberger-Tang
8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
82 unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay); in pci_dev_d3_sleep()
95 return dev->reset_methods[0] != 0; in pci_reset_supported()
114 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
125 /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
142 * measured in 32-bit words, not bytes.
184 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
195 max = bus->busn_res.end; in pci_bus_max_busnr()
196 list_for_each_entry(tmp, &bus->children, node) { in pci_bus_max_busnr()
206 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
218 return -EIO; in pci_status_get_and_clear_errors()
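
The fragment above is from pci_status_get_and_clear_errors(); below is a minimal, hypothetical driver-side sketch of its use. The mydrv_* naming and the particular status bits checked are illustrative assumptions, not taken from pci.c.

#include <linux/pci.h>

/* Hypothetical helper: fetch and clear PCI_STATUS error bits after an I/O burst. */
static void mydrv_check_pci_errors(struct pci_dev *pdev)
{
        int status = pci_status_get_and_clear_errors(pdev);

        if (status < 0)
                return;         /* config space read failed (-EIO) */

        if (status & PCI_STATUS_DETECTED_PARITY)
                dev_warn(&pdev->dev, "parity error detected\n");
        if (status & PCI_STATUS_SIG_TARGET_ABORT)
                dev_warn(&pdev->dev, "target abort signaled\n");
}
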
232 struct resource *res = &pdev->resource[bar]; in __pci_ioremap_resource()
233 resource_size_t start = res->start; in __pci_ioremap_resource()
239 if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { in __pci_ioremap_resource()
264 * pci_dev_str_match_path - test if a path string matches a device
275 * A path for a device can be obtained using 'lspci -t'. Using a path
292 wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); in pci_dev_str_match_path()
294 return -ENOMEM; in pci_dev_str_match_path()
302 ret = -EINVAL; in pci_dev_str_match_path()
306 if (dev->devfn != PCI_DEVFN(slot, func)) { in pci_dev_str_match_path()
313 * bridge because we hold a reference to the top level in pci_dev_str_match_path()
314 * device which should hold a reference to the bridge, in pci_dev_str_match_path()
332 ret = -EINVAL; in pci_dev_str_match_path()
337 ret = (seg == pci_domain_nr(dev->bus) && in pci_dev_str_match_path()
338 bus == dev->bus->number && in pci_dev_str_match_path()
339 dev->devfn == PCI_DEVFN(slot, func)); in pci_dev_str_match_path()
347 * pci_dev_str_match - test if a string matches a device
364 * through the use of 'lspci -t'.
369 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
391 return -EINVAL; in pci_dev_str_match()
399 if ((!vendor || vendor == dev->vendor) && in pci_dev_str_match()
400 (!device || device == dev->device) && in pci_dev_str_match()
402 subsystem_vendor == dev->subsystem_vendor) && in pci_dev_str_match()
404 subsystem_device == dev->subsystem_device)) in pci_dev_str_match()
434 return __pci_find_next_cap(dev->bus, dev->devfn, in pci_find_next_capability()
460 * pci_find_capability - query for devices' capabilities
475 * %PCI_CAP_ID_PCIX PCI-X
482 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); in pci_find_capability()
484 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); in pci_find_capability()
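
A short hypothetical use of pci_find_capability() as documented above; the capability chosen (PCI power management) and the mydrv_* name are assumptions for illustration.

#include <linux/pci.h>

/* Hypothetical helper: locate the PM capability and dump its PMC register. */
static void mydrv_dump_pm_cap(struct pci_dev *pdev)
{
        u8 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
        u16 pmc;

        if (!pos)
                return;         /* capability not implemented */

        pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
        dev_info(&pdev->dev, "PM capability at %#x, PMC %#06x\n", pos, pmc);
}
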
491 * pci_bus_find_capability - query for devices' capabilities
518 * pci_find_next_ext_capability - Find an extended capability
526 * vendor-specific capability, and this provides a way to find them all.
530 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) in pci_find_next_ext_capability()
534 dev->bus, dev->devfn); in pci_find_next_ext_capability()
539 * pci_find_ext_capability - Find an extended capability
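
The extended-capability counterpart from the lines above, sketched as a hypothetical check for AER support.

#include <linux/pci.h>

/* Hypothetical helper: true if the device exposes an AER extended capability. */
static bool mydrv_has_aer(struct pci_dev *pdev)
{
        return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}
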
559 * pci_get_dsn - Read and return the 8-byte Device Serial Number
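
pci_get_dsn(), referenced above, returns 0 when no Device Serial Number capability is present; a hypothetical logging helper:

#include <linux/pci.h>

/* Hypothetical helper: log the 8-byte Device Serial Number, if any. */
static void mydrv_log_dsn(struct pci_dev *pdev)
{
        u64 dsn = pci_get_dsn(pdev);

        if (dsn)
                dev_info(&pdev->dev, "device serial number %016llx\n", dsn);
}
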
603 PCI_CAP_ID_HT, dev->bus, dev->devfn); in __pci_find_next_ht_cap()
614 PCI_CAP_ID_HT, dev->bus, in __pci_find_next_ht_cap()
615 dev->devfn); in __pci_find_next_ht_cap()
622 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
641 * pci_find_ht_capability - query a device's HyperTransport capabilities
655 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); in pci_find_ht_capability()
664 * pci_find_vsec_capability - Find a vendor-specific extended capability
667 * @cap: Vendor-specific capability ID
679 if (vendor != dev->vendor) in pci_find_vsec_capability()
697 * pci_find_dvsec_capability - Find DVSEC for vendor
700 * @dvsec: Designated Vendor-specific capability ID
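
The vendor-specific lookups above can be sketched as follows; MYDRV_DVSEC_ID is a made-up vendor-assigned DVSEC ID, not something defined by the PCI core.

#include <linux/pci.h>

/* Hypothetical DVSEC ID assigned by the device vendor. */
#define MYDRV_DVSEC_ID  0x23

/* Hypothetical helper: return the config offset of our DVSEC, or 0. */
static u16 mydrv_find_dvsec(struct pci_dev *pdev)
{
        return pci_find_dvsec_capability(pdev, pdev->vendor, MYDRV_DVSEC_ID);
}
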
729 * pci_find_parent_resource - return resource region of parent bus of given
740 const struct pci_bus *bus = dev->bus; in pci_find_parent_resource()
752 if (r->flags & IORESOURCE_PREFETCH && in pci_find_parent_resource()
753 !(res->flags & IORESOURCE_PREFETCH)) in pci_find_parent_resource()
757 * If we're below a transparent bridge, there may in pci_find_parent_resource()
758 * be both a positively-decoded aperture and a in pci_find_parent_resource()
759 * subtractively-decoded region that contain the BAR. in pci_find_parent_resource()
760 * We want the positively-decoded one, so this depends in pci_find_parent_resource()
772 * pci_find_resource - Return matching PCI device resource
785 struct resource *r = &dev->resource[i]; in pci_find_resource()
787 if (r->start && resource_contains(r, res)) in pci_find_resource()
796 * pci_resource_name - Return the name of the PCI resource
820 "bridge window", /* "io" included in %pR */ in pci_resource_name()
821 "bridge window", /* "mem" included in %pR */ in pci_resource_name()
822 "bridge window", /* "mem pref" included in %pR */ in pci_resource_name()
839 "CardBus bridge window 0", /* I/O */ in pci_resource_name()
840 "CardBus bridge window 1", /* I/O */ in pci_resource_name()
841 "CardBus bridge window 0", /* mem */ in pci_resource_name()
842 "CardBus bridge window 1", /* mem */ in pci_resource_name()
845 if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS && in pci_resource_name()
856 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
871 msleep((1 << (i - 1)) * 100); in pci_wait_for_pending()
884 * pci_request_acs - ask for ACS to be enabled if supported
919 end = delimit - p - 1; in __pci_config_acs()
923 while (end > -1) { in __pci_config_acs()
927 end--; in __pci_config_acs()
932 end--; in __pci_config_acs()
935 end--; in __pci_config_acs()
978 pci_dbg(dev, "ACS control = %#06x\n", caps->ctrl); in __pci_config_acs()
979 pci_dbg(dev, "ACS fw_ctrl = %#06x\n", caps->fw_ctrl); in __pci_config_acs()
985 caps->ctrl = (caps->fw_ctrl & ~mask) | (flags & mask); in __pci_config_acs()
987 pci_info(dev, "Configured ACS to %#06x\n", caps->ctrl); in __pci_config_acs()
991 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
998 caps->ctrl |= (caps->cap & PCI_ACS_SV); in pci_std_enable_acs()
1001 caps->ctrl |= (caps->cap & PCI_ACS_RR); in pci_std_enable_acs()
1004 caps->ctrl |= (caps->cap & PCI_ACS_CR); in pci_std_enable_acs()
1007 caps->ctrl |= (caps->cap & PCI_ACS_UF); in pci_std_enable_acs()
1009 /* Enable Translation Blocking for external devices and noats */ in pci_std_enable_acs()
1010 if (pci_ats_disabled() || dev->external_facing || dev->untrusted) in pci_std_enable_acs()
1011 caps->ctrl |= (caps->cap & PCI_ACS_TB); in pci_std_enable_acs()
1015 * pci_enable_acs - enable ACS if hardware support it
1030 pos = dev->acs_cap; in pci_enable_acs()
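
pci_request_acs() (line 884 above) is the hook IOMMU code uses to ask for ACS; a minimal sketch, assuming it runs from early IOMMU initialization before PCI devices are enumerated:

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical early init: request ACS so pci_enable_acs() acts at enumeration. */
static int __init mydrv_iommu_init(void)
{
        pci_request_acs();
        return 0;
}
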
1054 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
1107 static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) in platform_pci_set_wakeup() argument
1112 return acpi_pci_wakeup(dev, enable); in platform_pci_set_wakeup()
1132 * pci_update_current_state - Read power state of given device and cache it
1146 dev->current_state = PCI_D3cold; in pci_update_current_state()
1147 } else if (dev->pm_cap) { in pci_update_current_state()
1150 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_update_current_state()
1152 dev->current_state = PCI_D3cold; in pci_update_current_state()
1155 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; in pci_update_current_state()
1157 dev->current_state = state; in pci_update_current_state()
1162 * pci_refresh_power_state - Refresh the given device's power state data
1171 pci_update_current_state(dev, dev->current_state); in pci_refresh_power_state()
1175 * pci_platform_power_transition - Use platform to change device power state
1186 else if (!dev->pm_cap) /* Fall back to PCI_D0 */ in pci_platform_power_transition()
1187 dev->current_state = PCI_D0; in pci_platform_power_transition()
1195 pm_request_resume(&pci_dev->dev); in pci_resume_one()
1200 * pci_resume_bus - Walk given bus and runtime resume devices on it
1213 struct pci_dev *root, *bridge; in pci_dev_wait() local
1218 bridge = pci_upstream_bridge(dev); in pci_dev_wait()
1219 if (bridge) in pci_dev_wait()
1232 * Vendor ID until we get non-RRS status. in pci_dev_wait()
1239 * ID for VFs and non-existent devices also returns ~0, so read the in pci_dev_wait()
1247 return -ENOTTY; in pci_dev_wait()
1250 if (root && root->config_rrs_sv) { in pci_dev_wait()
1262 delay - 1, reset_type); in pci_dev_wait()
1263 return -ENOTTY; in pci_dev_wait()
1269 if (pcie_failed_link_retrain(bridge) == 0) { in pci_dev_wait()
1275 delay - 1, reset_type); in pci_dev_wait()
1283 pci_info(dev, "ready %dms after %s\n", delay - 1, in pci_dev_wait()
1286 pci_dbg(dev, "ready %dms after %s\n", delay - 1, in pci_dev_wait()
1293 * pci_power_up - Put the given device into D0
1301 * put the device in D0 via non-PCI means.
1311 if (!dev->pm_cap) { in pci_power_up()
1314 dev->current_state = PCI_D0; in pci_power_up()
1316 dev->current_state = state; in pci_power_up()
1318 return -EIO; in pci_power_up()
1322 dev->current_state = PCI_D3cold; in pci_power_up()
1323 return -EIO; in pci_power_up()
1326 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_power_up()
1329 pci_power_name(dev->current_state)); in pci_power_up()
1330 dev->current_state = PCI_D3cold; in pci_power_up()
1331 return -EIO; in pci_power_up()
1336 need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && in pci_power_up()
1346 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); in pci_power_up()
1355 dev->current_state = PCI_D0; in pci_power_up()
1363 * pci_set_full_power_state - Put a PCI device into D0 and update its state
1382 if (dev->current_state == PCI_D0) in pci_set_full_power_state()
1388 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_set_full_power_state()
1389 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; in pci_set_full_power_state()
1390 if (dev->current_state != PCI_D0) { in pci_set_full_power_state()
1392 pci_power_name(dev->current_state)); in pci_set_full_power_state()
1410 if (dev->bus->self) in pci_set_full_power_state()
1411 pcie_aspm_pm_state_change(dev->bus->self, locked); in pci_set_full_power_state()
1417 * __pci_dev_set_current_state - Set current state of a PCI device
1425 dev->current_state = state; in __pci_dev_set_current_state()
1430 * pci_bus_set_current_state - Walk given bus and set current state of devices
1452 * pci_set_low_power_state - Put a PCI device into a low-power state.
1457 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1460 * -EINVAL if the requested state is invalid.
1461 * -EIO if device does not support PCI PM or its PM capabilities register has a
1470 if (!dev->pm_cap) in pci_set_low_power_state()
1471 return -EIO; in pci_set_low_power_state()
1475 * we're already in a low-power state, we can only go deeper. E.g., in pci_set_low_power_state()
1479 if (dev->current_state <= PCI_D3cold && dev->current_state > state) { in pci_set_low_power_state()
1481 pci_power_name(dev->current_state), in pci_set_low_power_state()
1483 return -EINVAL; in pci_set_low_power_state()
1487 if ((state == PCI_D1 && !dev->d1_support) in pci_set_low_power_state()
1488 || (state == PCI_D2 && !dev->d2_support)) in pci_set_low_power_state()
1489 return -EIO; in pci_set_low_power_state()
1491 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_set_low_power_state()
1494 pci_power_name(dev->current_state), in pci_set_low_power_state()
1496 dev->current_state = PCI_D3cold; in pci_set_low_power_state()
1497 return -EIO; in pci_set_low_power_state()
1504 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); in pci_set_low_power_state()
1512 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_set_low_power_state()
1513 dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; in pci_set_low_power_state()
1514 if (dev->current_state != state) in pci_set_low_power_state()
1516 pci_power_name(dev->current_state), in pci_set_low_power_state()
1519 if (dev->bus->self) in pci_set_low_power_state()
1520 pcie_aspm_pm_state_change(dev->bus->self, locked); in pci_set_low_power_state()
1537 * If the device or the parent bridge do not support PCI in __pci_set_power_state()
1545 if (dev->current_state == state) in __pci_set_power_state()
1555 if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) in __pci_set_power_state()
1568 /* Powering off a bridge may power off the whole hierarchy */ in __pci_set_power_state()
1569 if (dev->current_state == PCI_D3cold) in __pci_set_power_state()
1570 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked); in __pci_set_power_state()
1582 * pci_set_power_state - Set the power state of a PCI device
1590 * -EINVAL if the requested state is invalid.
1591 * -EIO if device does not support PCI PM or its PM capabilities register has a
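
pci_set_power_state(), documented above, is the usual entry point for D-state changes from drivers; a hypothetical round trip into D3hot and back:

#include <linux/pci.h>

/* Hypothetical helpers: explicit D3hot entry/exit with basic error reporting. */
static int mydrv_enter_d3hot(struct pci_dev *pdev)
{
        int ret = pci_set_power_state(pdev, PCI_D3hot);

        if (ret)
                dev_warn(&pdev->dev, "D3hot entry failed: %d\n", ret);
        return ret;
}

static int mydrv_back_to_d0(struct pci_dev *pdev)
{
        return pci_set_power_state(pdev, PCI_D0);
}
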
1619 hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { in _pci_find_saved_cap()
1620 if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) in _pci_find_saved_cap()
1648 return -ENOMEM; in pci_save_pcie_state()
1651 cap = (u16 *)&save_state->cap.data[0]; in pci_save_pcie_state()
1684 * Downstream ports reset the LTR enable bit when link goes down. in pci_restore_pcie_state()
1685 * Check and re-configure the bit here before restoring device. in pci_restore_pcie_state()
1690 cap = (u16 *)&save_state->cap.data[0]; in pci_restore_pcie_state()
1712 return -ENOMEM; in pci_save_pcix_state()
1716 (u16 *)save_state->cap.data); in pci_save_pcix_state()
1731 cap = (u16 *)&save_state->cap.data[0]; in pci_restore_pcix_state()
1737 * pci_save_state - save the PCI configuration space of a device before
1746 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); in pci_save_state()
1748 i * 4, dev->saved_config_space[i]); in pci_save_state()
1750 dev->state_saved = true; in pci_save_state()
1778 pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n", in pci_restore_config_dword()
1781 if (retry-- <= 0) in pci_restore_config_dword()
1798 for (index = end; index >= start; index--) in pci_restore_config_space_range()
1800 pdev->saved_config_space[index], in pci_restore_config_space_range()
1806 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { in pci_restore_config_space()
1811 } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { in pci_restore_config_space()
1831 pos = pdev->rebar_cap; in pci_restore_rebar_state()
1853 * pci_restore_state - Restore the saved state of a PCI device
1858 if (!dev->state_saved) in pci_restore_state()
1883 dev->state_saved = false; in pci_restore_state()
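
One common pairing of pci_save_state()/pci_restore_state() from the fragments above is AER error recovery; a sketch assuming the driver took an initial snapshot with pci_save_state() in probe():

#include <linux/pci.h>

/*
 * Hypothetical AER callback: restore the snapshot, then re-save it so a
 * later reset can restore again.
 */
static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
        pci_restore_state(pdev);
        pci_save_state(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}
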
1893 * pci_store_saved_state - Allocate and return an opaque struct containing
1906 if (!dev->state_saved) in pci_store_saved_state()
1911 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) in pci_store_saved_state()
1912 size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; in pci_store_saved_state()
1918 memcpy(state->config_space, dev->saved_config_space, in pci_store_saved_state()
1919 sizeof(state->config_space)); in pci_store_saved_state()
1921 cap = state->cap; in pci_store_saved_state()
1922 hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { in pci_store_saved_state()
1923 size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; in pci_store_saved_state()
1924 memcpy(cap, &tmp->cap, len); in pci_store_saved_state()
1934 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1943 dev->state_saved = false; in pci_load_saved_state()
1948 memcpy(dev->saved_config_space, state->config_space, in pci_load_saved_state()
1949 sizeof(state->config_space)); in pci_load_saved_state()
1951 cap = state->cap; in pci_load_saved_state()
1952 while (cap->size) { in pci_load_saved_state()
1955 tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); in pci_load_saved_state()
1956 if (!tmp || tmp->cap.size != cap->size) in pci_load_saved_state()
1957 return -EINVAL; in pci_load_saved_state()
1959 memcpy(tmp->cap.data, cap->data, tmp->cap.size); in pci_load_saved_state()
1961 sizeof(struct pci_cap_saved_data) + cap->size); in pci_load_saved_state()
1964 dev->state_saved = true; in pci_load_saved_state()
1970 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1992 struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); in pci_host_bridge_enable_device()
1995 if (host_bridge && host_bridge->enable_device) { in pci_host_bridge_enable_device()
1996 err = host_bridge->enable_device(host_bridge, dev); in pci_host_bridge_enable_device()
2006 struct pci_host_bridge *host_bridge = pci_find_host_bridge(dev->bus); in pci_host_bridge_disable_device()
2008 if (host_bridge && host_bridge->disable_device) in pci_host_bridge_disable_device()
2009 host_bridge->disable_device(host_bridge, dev); in pci_host_bridge_disable_device()
2015 struct pci_dev *bridge; in do_pci_enable_device() local
2020 if (err < 0 && err != -EIO) in do_pci_enable_device()
2023 bridge = pci_upstream_bridge(dev); in do_pci_enable_device()
2024 if (bridge) in do_pci_enable_device()
2025 pcie_aspm_powersave_config_link(bridge); in do_pci_enable_device()
2036 if (dev->msi_enabled || dev->msix_enabled) in do_pci_enable_device()
2057 * pci_reenable_device - Resume abandoned device
2066 return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); in pci_reenable_device()
2073 struct pci_dev *bridge; in pci_enable_bridge() local
2076 bridge = pci_upstream_bridge(dev); in pci_enable_bridge()
2077 if (bridge) in pci_enable_bridge()
2078 pci_enable_bridge(bridge); in pci_enable_bridge()
2081 if (!dev->is_busmaster) in pci_enable_bridge()
2088 pci_err(dev, "Error enabling bridge (%d), continuing\n", in pci_enable_bridge()
2095 struct pci_dev *bridge; in pci_enable_device_flags() local
2103 * (e.g. if the device really is in D0 at enable time). in pci_enable_device_flags()
2105 pci_update_current_state(dev, dev->current_state); in pci_enable_device_flags()
2107 if (atomic_inc_return(&dev->enable_cnt) > 1) in pci_enable_device_flags()
2110 bridge = pci_upstream_bridge(dev); in pci_enable_device_flags()
2111 if (bridge) in pci_enable_device_flags()
2112 pci_enable_bridge(bridge); in pci_enable_device_flags()
2116 if (dev->resource[i].flags & flags) in pci_enable_device_flags()
2119 if (dev->resource[i].flags & flags) in pci_enable_device_flags()
2124 atomic_dec(&dev->enable_cnt); in pci_enable_device_flags()
2129 * pci_enable_device_mem - Initialize a device for use with Memory space
2132 * Initialize device before it's used by a driver. Ask low-level code
2133 * to enable Memory resources. Wake up the device if it was suspended.
2143 * pci_enable_device - Initialize device before it's used by a driver.
2146 * Initialize device before it's used by a driver. Ask low-level code
2147 * to enable I/O and memory. Wake up the device if it was suspended.
2150 * Note we don't actually enable the device many times if we call
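
Taken together, the enable helpers above are typically used from probe(); a hypothetical sketch in which the driver name, the use of BAR 0, and the error paths are assumptions, not from pci.c:

#include <linux/pci.h>

/* Hypothetical probe: enable MMIO decode, reserve regions, map BAR 0, enable DMA. */
static int mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem *regs;
        int ret;

        ret = pci_enable_device_mem(pdev);
        if (ret)
                return ret;

        ret = pci_request_regions(pdev, "mydrv");
        if (ret)
                goto err_disable;

        regs = pci_iomap(pdev, 0, 0);   /* map all of BAR 0 */
        if (!regs) {
                ret = -ENOMEM;
                goto err_release;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, regs);
        return 0;

err_release:
        pci_release_regions(pdev);
err_disable:
        pci_disable_device(pdev);
        return ret;
}
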
2160 * pcibios_device_add - provide arch specific hooks when adding device dev
2173 * pcibios_release_device - provide arch specific hooks when releasing
2184 * pcibios_disable_device - disable arch specific PCI resources for device dev
2207 * pci_disable_enabled_device - Disable device without updating enable_cnt
2220 * pci_disable_device - Disable PCI device after use
2224 * anymore. This only involves disabling PCI bus-mastering, if active.
2231 dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, in pci_disable_device()
2232 "disabling already-disabled device"); in pci_disable_device()
2234 if (atomic_dec_return(&dev->enable_cnt) != 0) in pci_disable_device()
2241 dev->is_busmaster = 0; in pci_disable_device()
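
And the matching teardown for the probe sketch earlier, ending with pci_disable_device() as described above:

#include <linux/pci.h>

/* Hypothetical remove: undo the probe sketch in reverse order. */
static void mydrv_remove(struct pci_dev *pdev)
{
        void __iomem *regs = pci_get_drvdata(pdev);

        pci_clear_master(pdev);
        pci_iounmap(pdev, regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}
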
2246 * pcibios_set_pcie_reset_state - set reset state for device dev
2256 return -EINVAL; in pcibios_set_pcie_reset_state()
2260 * pci_set_pcie_reset_state - set reset state for device dev
2283 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2292 * pci_check_pme_status - Check if given device has generated PME.
2295 * Check the PME status of the device and if set, clear it and clear PME enable
2296 * (if set). Return 'true' if PME status and PME enable were both set or
2305 if (!dev->pm_cap) in pci_check_pme_status()
2308 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; in pci_check_pme_status()
2327 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2336 if (pme_poll_reset && dev->pme_poll) in pci_pme_wakeup()
2337 dev->pme_poll = false; in pci_pme_wakeup()
2341 pm_request_resume(&dev->dev); in pci_pme_wakeup()
2347 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2358 * pci_pme_capable - check the capability of PCI device to generate PME#
2364 if (!dev->pm_cap) in pci_pme_capable()
2367 return !!(dev->pme_support & (1 << state)); in pci_pme_capable()
2377 struct pci_dev *pdev = pme_dev->dev; in pci_pme_list_scan()
2379 if (pdev->pme_poll) { in pci_pme_list_scan()
2380 struct pci_dev *bridge = pdev->bus->self; in pci_pme_list_scan() local
2381 struct device *dev = &pdev->dev; in pci_pme_list_scan()
2382 struct device *bdev = bridge ? &bridge->dev : NULL; in pci_pme_list_scan()
2386 * If we have a bridge, it should be in an active/D0 in pci_pme_list_scan()
2396 if (bridge->current_state != PCI_D0) in pci_pme_list_scan()
2406 pdev->current_state != PCI_D3cold) in pci_pme_list_scan()
2413 list_del(&pme_dev->list); in pci_pme_list_scan()
2423 static void __pci_pme_active(struct pci_dev *dev, bool enable) in __pci_pme_active() argument
2427 if (!dev->pme_support) in __pci_pme_active()
2430 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in __pci_pme_active()
2431 /* Clear PME_Status by writing 1 to it and enable PME# */ in __pci_pme_active()
2433 if (!enable) in __pci_pme_active()
2436 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); in __pci_pme_active()
2440 * pci_pme_restore - Restore PME configuration after config space restore.
2447 if (!dev->pme_support) in pci_pme_restore()
2450 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); in pci_pme_restore()
2451 if (dev->wakeup_prepared) { in pci_pme_restore()
2458 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); in pci_pme_restore()
2462 * pci_pme_active - enable or disable PCI device's PME# function
2464 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2467 * calling this function with @enable equal to 'true'.
2469 void pci_pme_active(struct pci_dev *dev, bool enable) in pci_pme_active() argument
2471 __pci_pme_active(dev, enable); in pci_pme_active()
2484 * Although PCIe uses in-band PME message instead of PME# line in pci_pme_active()
2493 if (dev->pme_poll) { in pci_pme_active()
2495 if (enable) { in pci_pme_active()
2499 pci_warn(dev, "can't enable PME#\n"); in pci_pme_active()
2502 pme_dev->dev = dev; in pci_pme_active()
2504 list_add(&pme_dev->list, &pci_pme_list); in pci_pme_active()
2513 if (pme_dev->dev == dev) { in pci_pme_active()
2514 list_del(&pme_dev->list); in pci_pme_active()
2523 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled"); in pci_pme_active()
2528 * __pci_enable_wake - enable PCI device as wakeup event source
2531 * @enable: True to enable event generation; false to disable
2534 * When such events involves platform-specific hooks, those hooks are
2542 * -EINVAL is returned if device is not supposed to wake up the system
2544 * the native mechanism fail to enable the generation of wake-up events
2546 static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) in __pci_enable_wake() argument
2551 * Bridges that are not power-manageable directly only signal in __pci_enable_wake()
2554 * power-manageable may signal wakeup for themselves (for example, in __pci_enable_wake()
2561 if (!!enable == !!dev->wakeup_prepared) in __pci_enable_wake()
2566 * Anderson we should be doing PME# wake enable followed by ACPI wake in __pci_enable_wake()
2567 * enable. To disable wake-up we call the platform first, for symmetry. in __pci_enable_wake()
2570 if (enable) { in __pci_enable_wake()
2574 * Enable PME signaling if the device can signal PME from in __pci_enable_wake()
2588 dev->wakeup_prepared = true; in __pci_enable_wake()
2592 dev->wakeup_prepared = false; in __pci_enable_wake()
2599 * pci_enable_wake - change wakeup settings for a PCI device
2602 * @enable: Whether or not to enable event generation
2604 * If @enable is set, check device_may_wakeup() for the device before calling
2607 int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) in pci_enable_wake() argument
2609 if (enable && !device_may_wakeup(&pci_dev->dev)) in pci_enable_wake()
2610 return -EINVAL; in pci_enable_wake()
2612 return __pci_enable_wake(pci_dev, state, enable); in pci_enable_wake()
2617 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2619 * @enable: True to enable wake-up event generation; false to disable
2622 * and this function allows them to set that up cleanly - pci_enable_wake()
2623 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2628 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2630 int pci_wake_from_d3(struct pci_dev *dev, bool enable) in pci_wake_from_d3() argument
2633 pci_enable_wake(dev, PCI_D3cold, enable) : in pci_wake_from_d3()
2634 pci_enable_wake(dev, PCI_D3hot, enable); in pci_wake_from_d3()
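
A hypothetical suspend-path helper built on pci_enable_wake()/pci_wake_from_d3() from the lines above; device_may_wakeup() reflects the user-visible wakeup policy:

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

/* Hypothetical helper: arm or disarm wake-up according to the sysfs policy. */
static int mydrv_setup_wake(struct pci_dev *pdev)
{
        bool may_wake = device_may_wakeup(&pdev->dev);

        return pci_wake_from_d3(pdev, may_wake);
}
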
2639 * pci_target_state - find an appropriate low power state for a given PCI dev
2670 * If the device is in D3cold even though it's not power-manageable by in pci_target_state()
2671 * the platform, it may have been powered down by non-standard means. in pci_target_state()
2674 if (dev->current_state == PCI_D3cold) in pci_target_state()
2676 else if (!dev->pm_cap) in pci_target_state()
2679 if (wakeup && dev->pme_support) { in pci_target_state()
2686 while (state && !(dev->pme_support & (1 << state))) in pci_target_state()
2687 state--; in pci_target_state()
2691 else if (dev->pme_support & 1) in pci_target_state()
2699 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2709 bool wakeup = device_may_wakeup(&dev->dev); in pci_prepare_to_sleep()
2714 return -EIO; in pci_prepare_to_sleep()
2728 * pci_back_from_sleep - turn PCI device on during system-wide transition
2732 * Disable device's system wake-up capability and put it into D0.
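
Alternatively, a driver can let the PCI core pick the target state; a minimal sketch using pci_prepare_to_sleep() and pci_back_from_sleep() from above:

#include <linux/pci.h>

/* Hypothetical late-suspend/early-resume pair deferring state choice to the core. */
static int mydrv_suspend_late(struct pci_dev *pdev)
{
        return pci_prepare_to_sleep(pdev);
}

static int mydrv_resume_early(struct pci_dev *pdev)
{
        return pci_back_from_sleep(pdev);
}
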
2747 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2750 * Prepare @dev to generate wake-up events at run time and put it into a low
2758 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); in pci_finish_runtime_suspend()
2760 return -EIO; in pci_finish_runtime_suspend()
2773 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2776 * Return true if the device itself is capable of generating wake-up events
2778 * PME and one of its upstream bridges can generate wake-up events.
2782 struct pci_bus *bus = dev->bus; in pci_dev_run_wake()
2784 if (!dev->pme_support) in pci_dev_run_wake()
2787 /* PME-capable in principle, but not from the target power state */ in pci_dev_run_wake()
2791 if (device_can_wakeup(&dev->dev)) in pci_dev_run_wake()
2794 while (bus->parent) { in pci_dev_run_wake()
2795 struct pci_dev *bridge = bus->self; in pci_dev_run_wake() local
2797 if (device_can_wakeup(&bridge->dev)) in pci_dev_run_wake()
2800 bus = bus->parent; in pci_dev_run_wake()
2804 if (bus->bridge) in pci_dev_run_wake()
2805 return device_can_wakeup(bus->bridge); in pci_dev_run_wake()
2812 * pci_dev_need_resume - Check if it is necessary to resume the device.
2815 * Return 'true' if the device is not runtime-suspended or it has to be
2818 * (system-wide) transition.
2822 struct device *dev = &pci_dev->dev; in pci_dev_need_resume()
2835 return target_state != pci_dev->current_state && in pci_dev_need_resume()
2837 pci_dev->current_state != PCI_D3hot; in pci_dev_need_resume()
2841 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2853 struct device *dev = &pci_dev->dev; in pci_dev_adjust_pme()
2855 spin_lock_irq(&dev->power.lock); in pci_dev_adjust_pme()
2858 pci_dev->current_state < PCI_D3cold) in pci_dev_adjust_pme()
2861 spin_unlock_irq(&dev->power.lock); in pci_dev_adjust_pme()
2865 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2868 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2874 struct device *dev = &pci_dev->dev; in pci_dev_complete_resume()
2879 spin_lock_irq(&dev->power.lock); in pci_dev_complete_resume()
2881 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold) in pci_dev_complete_resume()
2884 spin_unlock_irq(&dev->power.lock); in pci_dev_complete_resume()
2888 * pci_choose_state - Choose the power state of a PCI device.
2905 struct device *dev = &pdev->dev; in pci_config_pm_runtime_get()
2906 struct device *parent = dev->parent; in pci_config_pm_runtime_get()
2912 * pdev->current_state is set to PCI_D3cold during suspending, in pci_config_pm_runtime_get()
2921 if (pdev->current_state == PCI_D3cold) in pci_config_pm_runtime_get()
2927 struct device *dev = &pdev->dev; in pci_config_pm_runtime_put()
2928 struct device *parent = dev->parent; in pci_config_pm_runtime_put()
2944 .ident = "X299 DESIGNARE EX-CF",
2947 DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2965 * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
2967 .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
2969 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
2979 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2980 * @bridge: Bridge to check
2984 * Return: Whether it is possible to move the bridge to D3.
2987 * of the bridge, including its hot-removal.
2989 bool pci_bridge_d3_possible(struct pci_dev *bridge) in pci_bridge_d3_possible() argument
2991 if (!pci_is_pcie(bridge)) in pci_bridge_d3_possible()
2994 switch (pci_pcie_type(bridge)) { in pci_bridge_d3_possible()
3005 if (bridge->is_hotplug_bridge && !bridge->is_pciehp) in pci_bridge_d3_possible()
3009 if (bridge->is_pciehp && !pciehp_is_native(bridge)) in pci_bridge_d3_possible()
3016 if (bridge->is_thunderbolt) in pci_bridge_d3_possible()
3019 /* Platform might know better if the bridge supports D3 */ in pci_bridge_d3_possible()
3020 if (platform_pci_bridge_d3(bridge)) in pci_bridge_d3_possible()
3028 if (bridge->is_pciehp) in pci_bridge_d3_possible()
3051 dev->no_d3cold || !dev->d3cold_allowed || in pci_dev_check_d3cold()
3054 (device_may_wakeup(&dev->dev) && in pci_dev_check_d3cold()
3057 /* If it is a bridge it must be allowed to go to D3. */ in pci_dev_check_d3cold()
3066 * pci_bridge_d3_update - Update bridge D3 capabilities
3069 * Update upstream bridge PM capabilities accordingly depending on if the
3075 bool remove = !device_is_registered(&dev->dev); in pci_bridge_d3_update()
3076 struct pci_dev *bridge; in pci_bridge_d3_update() local
3079 bridge = pci_upstream_bridge(dev); in pci_bridge_d3_update()
3080 if (!bridge || !pci_bridge_d3_possible(bridge)) in pci_bridge_d3_update()
3084 * If D3 is currently allowed for the bridge, removing one of its in pci_bridge_d3_update()
3087 if (remove && bridge->bridge_d3) in pci_bridge_d3_update()
3091 * If D3 is currently allowed for the bridge and a child is added or in pci_bridge_d3_update()
3095 * If D3 is currently not allowed for the bridge, checking the device in pci_bridge_d3_update()
3102 * If D3 is currently not allowed for the bridge, this may be caused in pci_bridge_d3_update()
3107 if (d3cold_ok && !bridge->bridge_d3) in pci_bridge_d3_update()
3108 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, in pci_bridge_d3_update()
3111 if (bridge->bridge_d3 != d3cold_ok) { in pci_bridge_d3_update()
3112 bridge->bridge_d3 = d3cold_ok; in pci_bridge_d3_update()
3114 pci_bridge_d3_update(bridge); in pci_bridge_d3_update()
3119 * pci_d3cold_enable - Enable D3cold for device
3122 * This function can be used in drivers to enable D3cold from the device
3123 * they handle. It also updates upstream PCI bridge PM capabilities
3128 if (dev->no_d3cold) { in pci_d3cold_enable()
3129 dev->no_d3cold = false; in pci_d3cold_enable()
3136 * pci_d3cold_disable - Disable D3cold for device
3140 * they handle. It also updates upstream PCI bridge PM capabilities
3145 if (!dev->no_d3cold) { in pci_d3cold_disable()
3146 dev->no_d3cold = true; in pci_d3cold_disable()
3159 * pci_pm_init - Initialize PM functions of given PCI device
3167 device_enable_async_suspend(&dev->dev); in pci_pm_init()
3168 dev->wakeup_prepared = false; in pci_pm_init()
3170 dev->pm_cap = 0; in pci_pm_init()
3171 dev->pme_support = 0; in pci_pm_init()
3186 dev->pm_cap = pm; in pci_pm_init()
3187 dev->d3hot_delay = PCI_PM_D3HOT_WAIT; in pci_pm_init()
3188 dev->d3cold_delay = PCI_PM_D3COLD_WAIT; in pci_pm_init()
3189 dev->bridge_d3 = pci_bridge_d3_possible(dev); in pci_pm_init()
3190 dev->d3cold_allowed = true; in pci_pm_init()
3192 dev->d1_support = false; in pci_pm_init()
3193 dev->d2_support = false; in pci_pm_init()
3196 dev->d1_support = true; in pci_pm_init()
3198 dev->d2_support = true; in pci_pm_init()
3200 if (dev->d1_support || dev->d2_support) in pci_pm_init()
3202 dev->d1_support ? " D1" : "", in pci_pm_init()
3203 dev->d2_support ? " D2" : ""); in pci_pm_init()
3214 dev->pme_support = FIELD_GET(PCI_PM_CAP_PME_MASK, pmc); in pci_pm_init()
3215 dev->pme_poll = true; in pci_pm_init()
3217 * Make device's PM flags reflect the wake-up capability, but in pci_pm_init()
3218 * let the user space enable it to wake up the system as needed. in pci_pm_init()
3220 device_set_wakeup_capable(&dev->dev, true); in pci_pm_init()
3227 pm_runtime_forbid(&dev->dev); in pci_pm_init()
3228 pm_runtime_set_active(&dev->dev); in pci_pm_init()
3229 pm_runtime_enable(&dev->dev); in pci_pm_init()
3259 return &dev->resource[bei]; in pci_ea_get_resource()
3263 return &dev->resource[PCI_IOV_RESOURCES + in pci_ea_get_resource()
3264 bei - PCI_EA_BEI_VF_BAR0]; in pci_ea_get_resource()
3267 return &dev->resource[PCI_ROM_RESOURCE]; in pci_ea_get_resource()
3327 /* Read Base MSBs (if 64-bit entry) */ in pci_ea_read()
3336 /* entry starts above 32-bit boundary, can't use */ in pci_ea_read()
3346 /* Read MaxOffset MSBs (if 64-bit entry) */ in pci_ea_read()
3368 if (ent_size != ent_offset - offset) { in pci_ea_read()
3370 ent_size, ent_offset - offset); in pci_ea_read()
3374 res->name = pci_name(dev); in pci_ea_read()
3375 res->start = start; in pci_ea_read()
3376 res->end = end; in pci_ea_read()
3377 res->flags = flags; in pci_ea_read()
3410 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT, in pci_ea_init()
3417 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) in pci_ea_init()
3428 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); in pci_add_saved_cap()
3432 * _pci_add_cap_save_buffer - allocate buffer for saving given
3455 return -ENOMEM; in _pci_add_cap_save_buffer()
3457 save_state->cap.cap_nr = cap; in _pci_add_cap_save_buffer()
3458 save_state->cap.cap_extended = extended; in _pci_add_cap_save_buffer()
3459 save_state->cap.size = size; in _pci_add_cap_save_buffer()
3476 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3490 pci_err(dev, "unable to preallocate PCI-X save buffer\n"); in pci_allocate_cap_save_buffers()
3505 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) in pci_free_cap_save_buffers()
3510 * pci_configure_ari - enable or disable ARI forwarding
3513 * If @dev and its upstream bridge both support ARI, enable ARI in the
3514 * bridge. Otherwise, disable ARI in the bridge.
3519 struct pci_dev *bridge; in pci_configure_ari() local
3521 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) in pci_configure_ari()
3524 bridge = dev->bus->self; in pci_configure_ari()
3525 if (!bridge) in pci_configure_ari()
3528 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); in pci_configure_ari()
3533 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, in pci_configure_ari()
3535 bridge->ari_enabled = 1; in pci_configure_ari()
3537 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, in pci_configure_ari()
3539 bridge->ari_enabled = 0; in pci_configure_ari()
3548 pos = pdev->acs_cap; in pci_acs_flags_enabled()
3555 * capability field can therefore be assumed as hard-wired enabled. in pci_acs_flags_enabled()
3565 * pci_acs_enabled - test ACS against required flags for a given device
3575 * opportunity for peer-to-peer access. We therefore return 'true'
3589 * Conventional PCI and PCI-X devices never support ACS, either in pci_acs_enabled()
3598 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, in pci_acs_enabled()
3600 * handle them as we would a non-PCIe device. in pci_acs_enabled()
3614 * implement ACS in order to indicate their peer-to-peer capabilities, in pci_acs_enabled()
3615 * regardless of whether they are single- or multi-function devices. in pci_acs_enabled()
3622 * implemented by the remaining PCIe types to indicate peer-to-peer in pci_acs_enabled()
3631 if (!pdev->multifunction) in pci_acs_enabled()
3645 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3664 if (pci_is_root_bus(pdev->bus)) in pci_acs_path_enabled()
3667 parent = pdev->bus->self; in pci_acs_path_enabled()
3674 * pci_acs_init - Initialize ACS if hardware supports it
3679 dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); in pci_acs_init()
3682 * Attempt to enable ACS regardless of capability because some Root in pci_acs_init()
3692 pdev->rebar_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR); in pci_rebar_init()
3696 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3701 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3702 * Returns -ENOENT if no ctrl register for the BAR could be found.
3713 pos = pdev->rebar_cap; in pci_rebar_find_pos()
3717 return -ENOTSUPP; in pci_rebar_find_pos()
3731 return -ENOENT; in pci_rebar_find_pos()
3735 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3755 if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f && in pci_rebar_get_possible_sizes()
3764 * pci_rebar_get_current_size - get the current size of a BAR
3785 * pci_rebar_set_size - set a new size for a BAR
3810 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3819 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3824 struct pci_bus *bus = dev->bus; in pci_enable_atomic_ops_to_root()
3825 struct pci_dev *bridge; in pci_enable_atomic_ops_to_root() local
3829 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit in pci_enable_atomic_ops_to_root()
3833 if (dev->is_virtfn) in pci_enable_atomic_ops_to_root()
3834 return -EINVAL; in pci_enable_atomic_ops_to_root()
3837 return -EINVAL; in pci_enable_atomic_ops_to_root()
3843 * completers, and no peer-to-peer. in pci_enable_atomic_ops_to_root()
3852 return -EINVAL; in pci_enable_atomic_ops_to_root()
3855 while (bus->parent) { in pci_enable_atomic_ops_to_root()
3856 bridge = bus->self; in pci_enable_atomic_ops_to_root()
3858 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); in pci_enable_atomic_ops_to_root()
3860 switch (pci_pcie_type(bridge)) { in pci_enable_atomic_ops_to_root()
3865 return -EINVAL; in pci_enable_atomic_ops_to_root()
3871 return -EINVAL; in pci_enable_atomic_ops_to_root()
3876 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) { in pci_enable_atomic_ops_to_root()
3877 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, in pci_enable_atomic_ops_to_root()
3880 return -EINVAL; in pci_enable_atomic_ops_to_root()
3883 bus = bus->parent; in pci_enable_atomic_ops_to_root()
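
pci_enable_atomic_ops_to_root(), shown above, takes a mask of required completer capabilities; a hypothetical request for 32- and 64-bit AtomicOp completion, as a GPU-style endpoint driver might issue:

#include <linux/pci.h>

/* Hypothetical helper: true if 32/64-bit AtomicOps to the Root Port are usable. */
static bool mydrv_enable_atomics(struct pci_dev *pdev)
{
        u32 mask = PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
                   PCI_EXP_DEVCAP2_ATOMIC_COMP64;

        return pci_enable_atomic_ops_to_root(pdev, mask) == 0;
}
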
3893 * pci_release_region - Release a PCI bar
3919 * __pci_request_region - Reserved PCI I/O and memory resource
3942 return -EINVAL; in __pci_request_region()
3962 &pdev->resource[bar]); in __pci_request_region()
3963 return -EBUSY; in __pci_request_region()
3967 * pci_request_region - Reserve PCI I/O and memory resource
3988 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4017 while (--i >= 0) in __pci_request_selected_regions()
4021 return -EBUSY; in __pci_request_selected_regions()
4026 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4041 * pci_request_selected_regions_exclusive - Request regions exclusively
4057 * pci_release_regions - Release reserved PCI I/O and memory resources
4067 pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); in pci_release_regions()
4072 * pci_request_regions - Reserve PCI I/O and memory resources
4086 ((1 << PCI_STD_NUM_BARS) - 1), name); in pci_request_regions()
4091 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4110 ((1 << PCI_STD_NUM_BARS) - 1), name); in pci_request_regions_exclusive()
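
Beyond pci_request_regions(), the selected-regions variants above let a driver reserve only some BARs; a hypothetical MMIO-only reservation:

#include <linux/pci.h>

/* Hypothetical helper: reserve only the memory BARs picked by pci_select_bars(). */
static int mydrv_request_mmio(struct pci_dev *pdev)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        return pci_request_selected_regions(pdev, bars, "mydrv");
}
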
4126 return -EINVAL; in pci_register_io_range()
4130 return -ENOMEM; in pci_register_io_range()
4132 range->fwnode = fwnode; in pci_register_io_range()
4133 range->size = size; in pci_register_io_range()
4134 range->hw_start = addr; in pci_register_io_range()
4135 range->flags = LOGIC_PIO_CPU_MMIO; in pci_register_io_range()
4142 if (ret == -EEXIST) in pci_register_io_range()
4166 return (unsigned long)-1; in pci_address_to_pio()
4173 * pci_remap_iospace - Remap the memory mapped I/O space
4186 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; in pci_remap_iospace()
4188 if (!(res->flags & IORESOURCE_IO)) in pci_remap_iospace()
4189 return -EINVAL; in pci_remap_iospace()
4191 if (res->end > IO_SPACE_LIMIT) in pci_remap_iospace()
4192 return -EINVAL; in pci_remap_iospace()
4202 return -ENODEV; in pci_remap_iospace()
4209 * pci_unmap_iospace - Unmap the memory mapped I/O space
4219 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; in pci_unmap_iospace()
4226 static void __pci_set_master(struct pci_dev *dev, bool enable) in __pci_set_master() argument
4231 if (enable) in __pci_set_master()
4237 enable ? "enabling" : "disabling"); in __pci_set_master()
4240 dev->is_busmaster = enable; in __pci_set_master()
4244 * pcibios_setup - process "pci=" kernel boot arguments
4256 * pcibios_set_master - enable PCI bus-mastering for device dev
4257 * @dev: the PCI device to enable
4259 * Enables PCI bus-mastering for the device. This is the default
4283 * pci_set_master - enables bus-mastering for device dev
4284 * @dev: the PCI device to enable
4286 * Enables bus-mastering on the device and calls pcibios_set_master()
4297 * pci_clear_master - disables bus-mastering for device dev
4307 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4312 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4314 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4321 return -EINVAL; in pci_set_cacheline_size()
4340 return -EINVAL; in pci_set_cacheline_size()
4345 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4348 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4350 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4366 pci_dbg(dev, "enabling Mem-Wr-Inval\n"); in pci_set_mwi()
4376 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4379 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4382 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4395 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4398 * Disables PCI Memory-Write-Invalidate transaction on the device
4415 * pci_disable_parity - disable parity checking for device
4432 * pci_intx - enables/disables PCI INTx for device dev
4434 * @enable: boolean: whether to enable or disable PCI INTx
4438 void pci_intx(struct pci_dev *pdev, int enable) in pci_intx() argument
4444 if (enable) in pci_intx()
4457 * pci_wait_for_pending_transaction - wait for pending transaction
4473 * pcie_flr - initiate a PCIe function level reset
4486 if (dev->imm_ready) in pcie_flr()
4501 * pcie_reset_flr - initiate a PCIe function level reset
4509 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) in pcie_reset_flr()
4510 return -ENOTTY; in pcie_reset_flr()
4512 if (!(dev->devcap & PCI_EXP_DEVCAP_FLR)) in pcie_reset_flr()
4513 return -ENOTTY; in pcie_reset_flr()
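
pcie_reset_flr(), from the fragments above, doubles as a support probe (no reset performed) and as the reset itself; a hypothetical wrapper:

#include <linux/pci.h>

/* Hypothetical helper: issue an FLR only if the device advertises support. */
static int mydrv_do_flr(struct pci_dev *pdev)
{
        if (pcie_reset_flr(pdev, PCI_RESET_PROBE))
                return -ENOTTY;         /* no FLR for this device */

        return pcie_reset_flr(pdev, PCI_RESET_DO_RESET);
}
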
4529 return -ENOTTY; in pci_af_flr()
4531 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) in pci_af_flr()
4532 return -ENOTTY; in pci_af_flr()
4536 return -ENOTTY; in pci_af_flr()
4542 * Wait for Transaction Pending bit to clear. A word-aligned test in pci_af_flr()
4552 if (dev->imm_ready) in pci_af_flr()
4567 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4573 * PCI_D0. If that's the case and the device is not in a low-power state
4577 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4585 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET) in pci_pm_reset()
4586 return -ENOTTY; in pci_pm_reset()
4588 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); in pci_pm_reset()
4590 return -ENOTTY; in pci_pm_reset()
4595 if (dev->current_state != PCI_D0) in pci_pm_reset()
4596 return -EINVAL; in pci_pm_reset()
4600 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); in pci_pm_reset()
4605 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); in pci_pm_reset()
4608 return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); in pci_pm_reset()
4612 * pcie_wait_for_link_status - Wait for link status change
4617 * Return 0 if successful, or -ETIMEDOUT if status has not changed within
4638 return -ETIMEDOUT; in pcie_wait_for_link_status()
4642 * pcie_retrain_link - Request a link retrain and wait for it to complete
4655 * Return 0 if successful, or -ETIMEDOUT if training has not completed
4674 if (pdev->clear_retrain_link) { in pcie_retrain_link()
4700 if (pdev->subordinate) in pcie_retrain_link()
4701 pcie_update_link_speed(pdev->subordinate); in pcie_retrain_link()
4707 * pcie_wait_for_link_delay - Wait until link is active or inactive
4708 * @pdev: Bridge device
4723 if (!pdev->link_active_reporting) { in pcie_wait_for_link_delay()
4757 * pcie_wait_for_link - Wait until link is active or inactive
4758 * @pdev: Bridge device
4781 list_for_each_entry(pdev, &bus->devices, bus_list) { in pci_bus_max_d3cold_delay()
4782 if (pdev->d3cold_delay < min_delay) in pci_bus_max_d3cold_delay()
4783 min_delay = pdev->d3cold_delay; in pci_bus_max_d3cold_delay()
4784 if (pdev->d3cold_delay > max_delay) in pci_bus_max_d3cold_delay()
4785 max_delay = pdev->d3cold_delay; in pci_bus_max_d3cold_delay()
4792 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4793 * @dev: PCI bridge
4794 * @reset_type: reset type in human-readable form
4797 * side of the bridge are permitted after D3cold to D0 transition
4804 * Return 0 on success or -ENOTTY if the first device on the secondary bus
4822 * For any hot-added devices the access delay is handled in pciehp in pci_bridge_wait_for_secondary_bus()
4826 if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { in pci_bridge_wait_for_secondary_bus()
4832 delay = pci_bus_max_d3cold_delay(dev->subordinate); in pci_bridge_wait_for_secondary_bus()
4838 child = pci_dev_get(list_first_entry(&dev->subordinate->devices, in pci_bridge_wait_for_secondary_bus()
4843 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before in pci_bridge_wait_for_secondary_bus()
4876 if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) in pci_bridge_wait_for_secondary_bus()
4884 if (!dev->link_active_reporting) in pci_bridge_wait_for_secondary_bus()
4885 return -ENOTTY; in pci_bridge_wait_for_secondary_bus()
4889 return -ENOTTY; in pci_bridge_wait_for_secondary_bus()
4892 PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); in pci_bridge_wait_for_secondary_bus()
4900 return -ENOTTY; in pci_bridge_wait_for_secondary_bus()
4904 PCIE_RESET_READY_POLL_MS - delay); in pci_bridge_wait_for_secondary_bus()
4931 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4932 * @dev: Bridge device
4934 * Use the bridge control register to assert reset on the secondary bus.
4935 * Devices on the secondary bus are left in power-on state.
4939 if (!dev->block_cfg_access) in pci_bridge_secondary_bus_reset()
4952 if (pci_is_root_bus(dev->bus) || dev->subordinate || in pci_parent_bus_reset()
4953 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) in pci_parent_bus_reset()
4954 return -ENOTTY; in pci_parent_bus_reset()
4956 list_for_each_entry(pdev, &dev->bus->devices, bus_list) in pci_parent_bus_reset()
4958 return -ENOTTY; in pci_parent_bus_reset()
4963 return pci_bridge_secondary_bus_reset(dev->bus->self); in pci_parent_bus_reset()
4968 int rc = -ENOTTY; in pci_reset_hotplug_slot()
4970 if (!hotplug || !try_module_get(hotplug->owner)) in pci_reset_hotplug_slot()
4973 if (hotplug->ops->reset_slot) in pci_reset_hotplug_slot()
4974 rc = hotplug->ops->reset_slot(hotplug, probe); in pci_reset_hotplug_slot()
4976 module_put(hotplug->owner); in pci_reset_hotplug_slot()
4983 if (dev->multifunction || dev->subordinate || !dev->slot || in pci_dev_reset_slot_function()
4984 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) in pci_dev_reset_slot_function()
4985 return -ENOTTY; in pci_dev_reset_slot_function()
4987 return pci_reset_hotplug_slot(dev->slot->hotplug, probe); in pci_dev_reset_slot_function()
5011 * bit in Bridge Control has no effect. When 1, the Port generates in cxl_sbr_masked()
5022 struct pci_dev *bridge = pci_upstream_bridge(dev); in pci_reset_bus_function() local
5029 if (bridge && cxl_sbr_masked(bridge)) { in pci_reset_bus_function()
5033 return -ENOTTY; in pci_reset_bus_function()
5037 if (rc != -ENOTTY) in pci_reset_bus_function()
5044 struct pci_dev *bridge; in cxl_reset_bus_function() local
5048 bridge = pci_upstream_bridge(dev); in cxl_reset_bus_function()
5049 if (!bridge) in cxl_reset_bus_function()
5050 return -ENOTTY; in cxl_reset_bus_function()
5052 dvsec = cxl_port_dvsec(bridge); in cxl_reset_bus_function()
5054 return -ENOTTY; in cxl_reset_bus_function()
5059 rc = pci_read_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, &reg); in cxl_reset_bus_function()
5061 return -ENOTTY; in cxl_reset_bus_function()
5067 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, in cxl_reset_bus_function()
5074 pci_write_config_word(bridge, dvsec + PCI_DVSEC_CXL_PORT_CTL, in cxl_reset_bus_function()
5083 device_lock(&dev->dev); in pci_dev_lock()
5091 if (device_trylock(&dev->dev)) { in pci_dev_trylock()
5094 device_unlock(&dev->dev); in pci_dev_trylock()
5104 device_unlock(&dev->dev); in pci_dev_unlock()
5111 dev->driver ? dev->driver->err_handler : NULL; in pci_dev_save_and_disable()
5114 * dev->driver->err_handler->reset_prepare() is protected against in pci_dev_save_and_disable()
5115 * races with ->remove() by the device lock, which must be held by in pci_dev_save_and_disable()
5118 if (err_handler && err_handler->reset_prepare) in pci_dev_save_and_disable()
5119 err_handler->reset_prepare(dev); in pci_dev_save_and_disable()
5120 else if (dev->driver) in pci_dev_save_and_disable()
5124 * Wake-up device prior to save. PM registers default to D0 after in pci_dev_save_and_disable()
5126 * to a non-D0 state anyway. in pci_dev_save_and_disable()
5133 * INTx-disable which is set. This not only disables MMIO and I/O port in pci_dev_save_and_disable()
5135 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3 in pci_dev_save_and_disable()
5136 * compliant devices, INTx-disable prevents legacy interrupts. in pci_dev_save_and_disable()
5144 dev->driver ? dev->driver->err_handler : NULL; in pci_dev_restore()
5149 * dev->driver->err_handler->reset_done() is protected against in pci_dev_restore()
5150 * races with ->remove() by the device lock, which must be held by in pci_dev_restore()
5153 if (err_handler && err_handler->reset_done) in pci_dev_restore()
5154 err_handler->reset_done(dev); in pci_dev_restore()
5155 else if (dev->driver) in pci_dev_restore()
5159 /* dev->reset_methods[] is a 0-terminated list of indices into this array */
5172 * __pci_reset_function_locked - reset a PCI device function while holding
5199 * A reset method returns -ENOTTY if it doesn't support this device and in __pci_reset_function_locked()
5207 m = dev->reset_methods[i]; in __pci_reset_function_locked()
5209 return -ENOTTY; in __pci_reset_function_locked()
5212 pci_dbg(dev, "reset via %s\n", method->name); in __pci_reset_function_locked()
5213 rc = method->reset_fn(dev, PCI_RESET_DO_RESET); in __pci_reset_function_locked()
5217 pci_dbg(dev, "%s failed with %d\n", method->name, rc); in __pci_reset_function_locked()
5218 if (rc != -ENOTTY) in __pci_reset_function_locked()
5222 return -ENOTTY; in __pci_reset_function_locked()
5227 * pci_init_reset_methods - check whether device can be safely reset
5232 * other functions in the same device. The PCI device must be in D0-D3hot
5250 dev->reset_methods[i++] = m; in pci_init_reset_methods()
5251 else if (rc != -ENOTTY) in pci_init_reset_methods()
5255 dev->reset_methods[i] = 0; in pci_init_reset_methods()
5259 * pci_reset_function - quiesce and reset a PCI device function
5276 struct pci_dev *bridge; in pci_reset_function() local
5280 return -ENOTTY; in pci_reset_function()
5283 * If there's no upstream bridge, no locking is needed since there is in pci_reset_function()
5284 * no upstream bridge configuration to hold consistent. in pci_reset_function()
5286 bridge = pci_upstream_bridge(dev); in pci_reset_function()
5287 if (bridge) in pci_reset_function()
5288 pci_dev_lock(bridge); in pci_reset_function()
5298 if (bridge) in pci_reset_function()
5299 pci_dev_unlock(bridge); in pci_reset_function()
5306 * pci_reset_function_locked - quiesce and reset a PCI device function
5327 return -ENOTTY; in pci_reset_function_locked()
5340 * pci_try_reset_function - quiesce and reset a PCI device function
5343 * Same as above, except return -EAGAIN if unable to lock device.
5350 return -ENOTTY; in pci_try_reset_function()
5353 return -EAGAIN; in pci_try_reset_function()
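
pci_reset_function() above saves the device state, resets, and restores it (see pci_dev_save_and_disable()/pci_dev_restore() earlier), while pci_try_reset_function() returns -EAGAIN when the device cannot be locked; a hypothetical recovery helper using the blocking variant:

#include <linux/pci.h>

/* Hypothetical helper: full function reset with config space preserved by the core. */
static int mydrv_recover(struct pci_dev *pdev)
{
        int ret = pci_reset_function(pdev);

        if (ret)
                dev_err(&pdev->dev, "function reset failed: %d\n", ret);
        return ret;
}
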
5370 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) in pci_bus_resettable()
5373 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_resettable()
5374 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || in pci_bus_resettable()
5375 (dev->subordinate && !pci_bus_resettable(dev->subordinate))) in pci_bus_resettable()
5387 pci_dev_lock(bus->self); in pci_bus_lock()
5388 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_lock()
5389 if (dev->subordinate) in pci_bus_lock()
5390 pci_bus_lock(dev->subordinate); in pci_bus_lock()
5401 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_unlock()
5402 if (dev->subordinate) in pci_bus_unlock()
5403 pci_bus_unlock(dev->subordinate); in pci_bus_unlock()
5407 pci_dev_unlock(bus->self); in pci_bus_unlock()
5415 if (!pci_dev_trylock(bus->self)) in pci_bus_trylock()
5418 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_trylock()
5419 if (dev->subordinate) { in pci_bus_trylock()
5420 if (!pci_bus_trylock(dev->subordinate)) in pci_bus_trylock()
5428 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { in pci_bus_trylock()
5429 if (dev->subordinate) in pci_bus_trylock()
5430 pci_bus_unlock(dev->subordinate); in pci_bus_trylock()
5434 pci_dev_unlock(bus->self); in pci_bus_trylock()
5443 if (slot->bus->self && in pci_slot_resettable()
5444 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) in pci_slot_resettable()
5447 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_resettable()
5448 if (!dev->slot || dev->slot != slot) in pci_slot_resettable()
5450 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || in pci_slot_resettable()
5451 (dev->subordinate && !pci_bus_resettable(dev->subordinate))) in pci_slot_resettable()
5463 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_lock()
5464 if (!dev->slot || dev->slot != slot) in pci_slot_lock()
5466 if (dev->subordinate) in pci_slot_lock()
5467 pci_bus_lock(dev->subordinate); in pci_slot_lock()
5478 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_unlock()
5479 if (!dev->slot || dev->slot != slot) in pci_slot_unlock()
5481 if (dev->subordinate) in pci_slot_unlock()
5482 pci_bus_unlock(dev->subordinate); in pci_slot_unlock()
5493 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_trylock()
5494 if (!dev->slot || dev->slot != slot) in pci_slot_trylock()
5496 if (dev->subordinate) { in pci_slot_trylock()
5497 if (!pci_bus_trylock(dev->subordinate)) { in pci_slot_trylock()
5508 &slot->bus->devices, bus_list) { in pci_slot_trylock()
5509 if (!dev->slot || dev->slot != slot) in pci_slot_trylock()
5511 if (dev->subordinate) in pci_slot_trylock()
5512 pci_bus_unlock(dev->subordinate); in pci_slot_trylock()
5527 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_save_and_disable_locked()
5529 if (dev->subordinate) in pci_bus_save_and_disable_locked()
5530 pci_bus_save_and_disable_locked(dev->subordinate); in pci_bus_save_and_disable_locked()
5543 list_for_each_entry(dev, &bus->devices, bus_list) { in pci_bus_restore_locked()
5545 if (dev->subordinate) { in pci_bus_restore_locked()
5547 pci_bus_restore_locked(dev->subordinate); in pci_bus_restore_locked()
5560 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_save_and_disable_locked()
5561 if (!dev->slot || dev->slot != slot) in pci_slot_save_and_disable_locked()
5564 if (dev->subordinate) in pci_slot_save_and_disable_locked()
5565 pci_bus_save_and_disable_locked(dev->subordinate); in pci_slot_save_and_disable_locked()
5578 list_for_each_entry(dev, &slot->bus->devices, bus_list) { in pci_slot_restore_locked()
5579 if (!dev->slot || dev->slot != slot) in pci_slot_restore_locked()
5582 if (dev->subordinate) { in pci_slot_restore_locked()
5584 pci_bus_restore_locked(dev->subordinate); in pci_slot_restore_locked()
5594 return -ENOTTY; in pci_slot_reset()
5601 rc = pci_reset_hotplug_slot(slot->hotplug, probe); in pci_slot_reset()
5610 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5622 * __pci_reset_slot - Try to reset a PCI slot
5634 * Same as above except return -EAGAIN if the slot cannot be locked
5647 rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET); in __pci_reset_slot()
5651 rc = -EAGAIN; in __pci_reset_slot()
5660 if (!bus->self || !pci_bus_resettable(bus)) in pci_bus_reset()
5661 return -ENOTTY; in pci_bus_reset()
5670 ret = pci_bridge_secondary_bus_reset(bus->self); in pci_bus_reset()
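/*
 * Hedged sketch of what pci_bridge_secondary_bus_reset() ultimately does:
 * pulse the Secondary Bus Reset bit in the bridge control register, then
 * wait for the devices below to become accessible again.  The real code
 * goes through arch hooks and a proper readiness wait; this only shows the
 * shape of the register dance, with simplified delays.
 */
#include <linux/pci.h>
#include <linux/delay.h>

static void my_secondary_bus_reset_sketch(struct pci_dev *bridge)
{
        u16 ctrl;

        pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &ctrl);
        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
                              ctrl | PCI_BRIDGE_CTL_BUS_RESET);
        msleep(2);      /* reset must stay asserted for at least 1 ms */

        pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, ctrl);
        msleep(1000);   /* crude settle time; the core polls for readiness */
}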
5678 * pci_bus_error_reset - reset the bridge's subordinate bus
5679 * @bridge: The parent device that connects to the bus to reset
5685 int pci_bus_error_reset(struct pci_dev *bridge) in pci_bus_error_reset() argument
5687 struct pci_bus *bus = bridge->subordinate; in pci_bus_error_reset()
5691 return -ENOTTY; in pci_bus_error_reset()
5694 if (list_empty(&bus->slots)) in pci_bus_error_reset()
5697 list_for_each_entry(slot, &bus->slots, list) in pci_bus_error_reset()
5701 list_for_each_entry(slot, &bus->slots, list) in pci_bus_error_reset()
5709 return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET); in pci_bus_error_reset()
5713 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5725 * __pci_reset_bus - Try to reset a PCI bus
5728 * Same as above except return -EAGAIN if the bus cannot be locked
5741 rc = pci_bridge_secondary_bus_reset(bus->self); in __pci_reset_bus()
5745 rc = -EAGAIN; in __pci_reset_bus()
5751 * pci_reset_bus - Try to reset a PCI bus
5754 * Same as above except return -EAGAIN if the bus cannot be locked
5758 return (!pci_probe_reset_slot(pdev->slot)) ? in pci_reset_bus()
5759 __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); in pci_reset_bus()
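/*
 * Usage sketch (caller hypothetical): pci_reset_bus() is the exported
 * entry point implemented by the fragment above.  It prefers a hotplug
 * slot reset when the device sits in a resettable slot and otherwise falls
 * back to a secondary bus reset of the upstream bridge; both paths return
 * -EAGAIN when the affected devices cannot all be trylocked.
 */
#include <linux/pci.h>

static int my_reset_everything_below(struct pci_dev *pdev)
{
        int ret = pci_reset_bus(pdev);

        if (ret == -EAGAIN)
                pci_warn(pdev, "slot/bus busy, reset not attempted\n");
        return ret;
}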
5764 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5777 return -EINVAL; in pcix_get_max_mmrbc()
5780 return -EINVAL; in pcix_get_max_mmrbc()
5787 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5800 return -EINVAL; in pcix_get_mmrbc()
5803 return -EINVAL; in pcix_get_mmrbc()
5810 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5825 return -EINVAL; in pcix_set_mmrbc()
5827 v = ffs(mmrbc) - 10; in pcix_set_mmrbc()
5831 return -EINVAL; in pcix_set_mmrbc()
5834 return -EINVAL; in pcix_set_mmrbc()
5837 return -E2BIG; in pcix_set_mmrbc()
5840 return -EINVAL; in pcix_set_mmrbc()
5844 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) in pcix_set_mmrbc()
5845 return -EIO; in pcix_set_mmrbc()
5850 return -EIO; in pcix_set_mmrbc()
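/*
 * Worked example of the encoding above (my arithmetic, not kernel text):
 * PCI-X stores the maximum memory read byte count as a 2-bit exponent, so
 * v = ffs(mmrbc) - 10 maps
 *
 *      mmrbc = 512  -> ffs = 10 -> v = 0
 *      mmrbc = 1024 -> ffs = 11 -> v = 1
 *      mmrbc = 2048 -> ffs = 12 -> v = 2
 *      mmrbc = 4096 -> ffs = 13 -> v = 3
 *
 * and the getters decode it the other way as 512 << v.  Values that are
 * not a power of two in [512, 4096] are rejected with -EINVAL before this
 * point.
 */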
5857 * pcie_get_readrq - get PCI Express read request size
5873 * pcie_set_readrq - set PCI Express maximum memory read request
5885 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in pcie_set_readrq() local
5888 return -EINVAL; in pcie_set_readrq()
5892 * size to the max packet size to keep the host bridge from in pcie_set_readrq()
5904 return -EINVAL; in pcie_set_readrq()
5905 v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8); in pcie_set_readrq()
5907 if (bridge->no_inc_mrrs) { in pcie_set_readrq()
5912 return -EINVAL; in pcie_set_readrq()
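/*
 * Usage sketch (caller hypothetical): pcie_set_readrq() takes the MRRS in
 * bytes and stores ffs(rq) - 8 in the 3-bit DEVCTL field, so 128 -> 0,
 * 256 -> 1, ... 4096 -> 5.  Under the "performance" MPS strategy the value
 * is first clamped to the current MPS, and when the host bridge sets
 * no_inc_mrrs an attempt to raise MRRS fails with -EINVAL.
 */
#include <linux/pci.h>

static int my_tune_mrrs(struct pci_dev *pdev)
{
        /* 512 is a power of two in [128, 4096], so the core accepts it. */
        return pcie_set_readrq(pdev, 512);
}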
5924 * pcie_get_mps - get PCI Express maximum payload size
5940 * pcie_set_mps - set PCI Express maximum payload size
5953 return -EINVAL; in pcie_set_mps()
5955 v = ffs(mps) - 8; in pcie_set_mps()
5956 if (v > dev->pcie_mpss) in pcie_set_mps()
5957 return -EINVAL; in pcie_set_mps()
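/*
 * Worked example of the check above (my numbers): a device whose DEVCAP
 * reports dev->pcie_mpss == 1 supports at most a 256-byte payload, so
 * pcie_set_mps(dev, 512) computes v = ffs(512) - 8 = 2 > 1 and fails with
 * -EINVAL, while pcie_set_mps(dev, 256) encodes v = 1 and is written to
 * the DEVCTL payload field.  MPS is normally chosen by the PCI core's bus
 * configuration rather than by individual drivers.
 */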
5986 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6043 * pcie_get_supported_speeds - query Supported Link Speed Vector
6079 /* PCIe r3.0-compliant */ in pcie_get_supported_speeds()
6093 * pcie_get_speed_cap - query for the PCI device's link speed capability
6102 return PCIE_LNKCAP2_SLS2SPEED(dev->supported_speeds); in pcie_get_speed_cap()
6107 * pcie_get_width_cap - query for the PCI device's link width capability
6126 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6149 * __pcie_print_link_status - Report the PCI device's link speed and width
6169 if (dev->bus && dev->bus->flit_mode) in __pcie_print_link_status()
6186 * pcie_print_link_status - Report the PCI device's link speed and width
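/*
 * Usage sketch: bandwidth-hungry drivers commonly call
 * pcie_print_link_status() from probe so users can spot a device that
 * trained below its capability; internally it compares
 * pcie_bandwidth_available() on the path to the root with
 * pcie_bandwidth_capable() of the device itself, per the fragments above.
 */
#include <linux/pci.h>

static void my_probe_report_link(struct pci_dev *pdev)
{
        pcie_print_link_status(pdev);
}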
6198 * pci_select_bars - Make BAR mask from the type of resource
6214 /* Some architectures require additional programming to enable VGA */
6232 * pci_set_vga_state - set VGA decode state on device and parents if requested
6234 * @decode: true = enable decoding, false = disable decoding
6243 struct pci_dev *bridge; in pci_set_vga_state() local
6266 bus = dev->bus; in pci_set_vga_state()
6268 bridge = bus->self; in pci_set_vga_state()
6269 if (bridge) { in pci_set_vga_state()
6270 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, in pci_set_vga_state()
6276 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, in pci_set_vga_state()
6279 bus = bus->parent; in pci_set_vga_state()
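/*
 * Usage sketch (the VGA arbiter is the main in-tree user of this
 * interface): enable legacy VGA I/O and memory decoding on a GPU and set
 * the VGA Enable bit in every bridge on the path to the root, which is the
 * PCI_BRIDGE_CONTROL walk shown in the fragment above.
 */
#include <linux/pci.h>

static int my_claim_legacy_vga(struct pci_dev *gpu)
{
        return pci_set_vga_state(gpu, true,
                                 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
                                 PCI_VGA_STATE_CHANGE_DECODES |
                                 PCI_VGA_STATE_CHANGE_BRIDGE);
}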
6292 adev = ACPI_COMPANION(&pdev->dev); in pci_pr3_present()
6296 return adev->power.flags.power_resources && in pci_pr3_present()
6297 acpi_has_method(adev->handle, "_PR3"); in pci_pr3_present()
6303 * pci_add_dma_alias - Add a DMA devfn alias for a device
6308 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6309 * which is used to program permissible bus-devfn source addresses for DMA
6312 * from their logical bus-devfn. Examples include device quirks where the
6313 * device simply uses the wrong devfn, as well as non-transparent bridges
6327 nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from); in pci_add_dma_alias()
6328 devfn_to = devfn_from + nr_devfns - 1; in pci_add_dma_alias()
6330 if (!dev->dma_alias_mask) in pci_add_dma_alias()
6331 dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL); in pci_add_dma_alias()
6332 if (!dev->dma_alias_mask) { in pci_add_dma_alias()
6337 bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns); in pci_add_dma_alias()
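/*
 * Usage sketch modelled on the quirks in drivers/pci/quirks.c (the vendor
 * and device IDs below are placeholders): a device that issues DMA with a
 * fixed wrong function number gets that devfn registered as an alias, so
 * the IOMMU attaches requests from it to the owning device's domain.
 */
#include <linux/pci.h>

static void my_quirk_dma_alias(struct pci_dev *pdev)
{
        /* Alias one extra devfn: device 0, function 1 on the same bus. */
        pci_add_dma_alias(pdev, PCI_DEVFN(0, 1), 1);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, my_quirk_dma_alias);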
6350 return (dev1->dma_alias_mask && in pci_devs_are_dma_aliases()
6351 test_bit(dev2->devfn, dev1->dma_alias_mask)) || in pci_devs_are_dma_aliases()
6352 (dev2->dma_alias_mask && in pci_devs_are_dma_aliases()
6353 test_bit(dev1->devfn, dev2->dma_alias_mask)) || in pci_devs_are_dma_aliases()
6366 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); in pci_device_is_present()
6372 struct pci_dev *bridge = dev->bus->self; in pci_ignore_hotplug() local
6374 dev->ignore_hotplug = 1; in pci_ignore_hotplug()
6375 /* Propagate the "ignore hotplug" setting to the parent bridge. */ in pci_ignore_hotplug()
6376 if (bridge) in pci_ignore_hotplug()
6377 bridge->ignore_hotplug = 1; in pci_ignore_hotplug()
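/*
 * Usage sketch: runtime-PM capable GPU drivers call pci_ignore_hotplug()
 * before powering the device down so the hotplug bridge above it (which
 * also gets ignore_hotplug set, per the fragment) does not treat the
 * resulting link-down as a surprise removal.
 */
#include <linux/pci.h>

static void my_prepare_for_poweroff(struct pci_dev *pdev)
{
        pci_ignore_hotplug(pdev);
}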
6382 * pci_real_dma_dev - Get PCI DMA device for PCI device
6385 * Permits the platform to provide architecture-specific functionality to
6402 * Arches that don't want to expose struct resource to userland as-is in
6409 *start = rsrc->start; in pci_resource_to_user()
6410 *end = rsrc->end; in pci_resource_to_user()
6417 * pci_specified_resource_alignment - get resource alignment specified by user.
6481 struct resource *r = &dev->resource[bar]; in pci_request_resource_alignment()
6485 if (!(r->flags & IORESOURCE_MEM)) in pci_request_resource_alignment()
6488 if (r->flags & IORESOURCE_PCI_FIXED) { in pci_request_resource_alignment()
6515 * set r->start to the desired alignment. By itself this in pci_request_resource_alignment()
6530 r->start = 0; in pci_request_resource_alignment()
6531 r->end = align - 1; in pci_request_resource_alignment()
6533 r->flags &= ~IORESOURCE_SIZEALIGN; in pci_request_resource_alignment()
6534 r->flags |= IORESOURCE_STARTALIGN; in pci_request_resource_alignment()
6537 r->flags |= IORESOURCE_UNSET; in pci_request_resource_alignment()
6544	 * Later on, the kernel will assign a page-aligned memory resource back
6556 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec in pci_reassigndev_resource_alignment()
6558 * described by the VF BARx register in the PF's SR-IOV capability. in pci_reassigndev_resource_alignment()
6561 if (dev->is_virtfn) in pci_reassigndev_resource_alignment()
6569 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && in pci_reassigndev_resource_alignment()
6570 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { in pci_reassigndev_resource_alignment()
6571 pci_warn(dev, "Can't reassign resources to host bridge\n"); in pci_reassigndev_resource_alignment()
6583		 * Need to disable the bridge's resource window, in pci_reassigndev_resource_alignment()
6584		 * to enable the kernel to reassign a new resource in pci_reassigndev_resource_alignment()
6587 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { in pci_reassigndev_resource_alignment()
6589 r = &dev->resource[i]; in pci_reassigndev_resource_alignment()
6590 if (!(r->flags & IORESOURCE_MEM)) in pci_reassigndev_resource_alignment()
6592 r->flags |= IORESOURCE_UNSET; in pci_reassigndev_resource_alignment()
6593 r->end = resource_size(r) - 1; in pci_reassigndev_resource_alignment()
6594 r->start = 0; in pci_reassigndev_resource_alignment()
6617 if (count >= (PAGE_SIZE - 1)) in resource_alignment_store()
6618 return -EINVAL; in resource_alignment_store()
6622 return -ENOMEM; in resource_alignment_store()
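/*
 * Usage note (addresses and IDs below are only examples): the alignment
 * handled by the fragments above is requested on the kernel command line,
 * per address or per ID, with an optional power-of-two order that defaults
 * to PAGE_SIZE:
 *
 *      pci=resource_alignment=20@0000:03:00.0          (2^20 = 1 MiB)
 *      pci=resource_alignment=pci:8086:10fb            (PAGE_SIZE)
 *
 * The same string can be written at runtime to
 * /sys/bus/pci/resource_alignment (the store shown above); it takes effect
 * for devices enumerated after that, e.g. following a remove/rescan cycle.
 */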
6698 domain_nr = of_get_pci_domain_nr(parent->of_node); in of_pci_bus_find_domain_nr()
6720 if (of_get_pci_domain_nr(parent->of_node) == domain_nr) in of_pci_bus_release_domain_nr()
6741 * pci_ext_cfg_avail - can we access extended PCI config space?