/* SPDX-License-Identifier: GPL-2.0 */
/*
 *	pci.h
 *
 *	PCI defines and function prototypes
 *	Copyright 1994, Drew Eckhardt
 *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
 *
 *	PCI Express ASPM defines and function prototypes
 *	Copyright (c) 2007 Intel Corp.
 *		Zhang Yanmin (yanmin.zhang@intel.com)
 *		Shaohua Li (shaohua.li@intel.com)
 *
 *	For more information, please consult the following manuals (look at
 *	http://www.pcisig.com/ for how to get them):
 *
 *	PCI BIOS Specification
 *	PCI Local Bus Specification
 *	PCI to PCI Bridge Specification
 *	PCI Express Specification
 *	PCI System Design Guide
 */
#ifndef LINUX_PCI_H
#define LINUX_PCI_H

#include <linux/args.h>
#include <linux/mod_devicetable.h>

#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <linux/msi_api.h>
#include <uapi/linux/pci.h>

#include <linux/pci_ids.h>

#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
			       PCI_STATUS_SIG_SYSTEM_ERROR | \
			       PCI_STATUS_REC_MASTER_ABORT | \
			       PCI_STATUS_REC_TARGET_ABORT | \
			       PCI_STATUS_SIG_TARGET_ABORT | \
			       PCI_STATUS_PARITY)

/* Number of reset methods used in pci_reset_fn_methods array in pci.c */
#define PCI_NUM_RESET_METHODS	8

#define PCI_RESET_PROBE		true
#define PCI_RESET_DO_RESET	false

/*
 * The PCI interface treats multi-function devices as independent
 * devices.  The slot/function address of each device is encoded
 * in a single byte as follows:
 *
 *	7:3 = slot
 *	2:0 = function
 *
 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
 * In the interest of not exposing interfaces to user-space unnecessarily,
 * the following kernel-only defines are being added here.
 */
#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
/* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x)		(((x) >> 8) & 0xff)
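
/*
 * For example, assuming a hypothetical device at bus 0x3a, slot 0x1c,
 * function 2, the encodings above combine as follows (PCI_DEVFN(),
 * PCI_SLOT() and PCI_FUNC() come from uapi/linux/pci.h):
 *
 *	u8  devfn = PCI_DEVFN(0x1c, 2);		// 0xe2
 *	u8  slot  = PCI_SLOT(devfn);		// 0x1c
 *	u8  func  = PCI_FUNC(devfn);		// 0x02
 *	u16 devid = PCI_DEVID(0x3a, devfn);	// 0x3ae2
 *	u8  bus   = PCI_BUS_NUM(devid);		// 0x3a
 */
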
/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;
};

static inline const char *pci_slot_name(const struct pci_slot *slot)
{
	return kobject_name(&slot->kobj);
}

/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,
	pci_mmap_mem
};

/* For PCI devices, the region numbers are assigned this way: */
enum {
	/* #0-5: standard PCI resources */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_P2P_BRIDGE_RESOURCE_NUM	3
#define PCI_BRIDGE_RESOURCE_NUM		4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};

/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};

/* The number of legacy PCI INTx interrupts */
#define PCI_NUM_INTX	4

/*
 * Reading from a device that doesn't respond typically returns ~0.  A
 * successful read from a device may also return ~0, so you need additional
 * information to reliably identify errors.
 */
#define PCI_ERROR_RESPONSE		(~0ULL)
#define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
#define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
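
/*
 * For example, a driver reading a register that might race with surprise
 * removal can combine PCI_POSSIBLE_ERROR() with an independent presence
 * check; a minimal sketch (pci_read_config_dword() and
 * pci_device_is_present() are declared later in this header):
 *
 *	u32 id;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
 *	if (PCI_POSSIBLE_ERROR(id) && !pci_device_is_present(pdev))
 *		return -ENODEV;
 */
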
/*
 * pci_power_t values must match the bits in the Capabilities PME_Support
 * and Control/Status PowerState fields in the Power Management capability.
 */
typedef int __bitwise pci_power_t;

#define PCI_D0		((pci_power_t __force) 0)
#define PCI_D1		((pci_power_t __force) 1)
#define PCI_D2		((pci_power_t __force) 2)
#define PCI_D3hot	((pci_power_t __force) 3)
#define PCI_D3cold	((pci_power_t __force) 4)
#define PCI_UNKNOWN	((pci_power_t __force) 5)
#define PCI_POWER_ERROR	((pci_power_t __force) -1)

/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];

static inline const char *pci_power_name(pci_power_t state)
{
	return pci_power_names[1 + (__force int) state];
}

/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};

typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};

typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
};
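
/*
 * These flags are normally set by quirks.  A minimal sketch, assuming a
 * hypothetical my_quirk_no_bus_reset() for a made-up device ID 0x1234
 * (the DECLARE_PCI_FIXUP_EARLY() registration macro is provided further
 * down in this header):
 *
 *	static void my_quirk_no_bus_reset(struct pci_dev *dev)
 *	{
 *		dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
 *	}
 *	DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1234,
 *				my_quirk_no_bus_reset);
 */
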
enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};

typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
};

/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
};

/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCIE_SPEED_64_0GT		= 0x19,
	PCI_SPEED_UNKNOWN		= 0xff,
};

enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);

struct pci_vpd {
	struct mutex	lock;
	unsigned int	len;
	u8		cap;
};

struct irq_affinity;
struct pcie_bwctrl_data;
struct pcie_link_state;
struct pci_sriov;
struct pci_p2pdma;
struct rcec_ea;
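
/*
 * pcie_get_speed_cap() and pcie_get_width_cap() report what the device
 * itself is capable of, not the currently negotiated link.  A minimal
 * sketch that gates a hypothetical driver fast path on a Gen4 x8 capable
 * device:
 *
 *	if (pcie_get_speed_cap(pdev) >= PCIE_SPEED_16_0GT &&
 *	    pcie_get_width_cap(pdev) >= PCIE_LNK_X8)
 *		enable_fast_path = true;
 */
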
/* struct pci_dev - describes a PCI device
 *
 * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
 *			LSB). 0 when the supported speeds cannot be
 *			determined (e.g., for Root Complex Integrated
 *			Endpoints without the relevant Capability
 *			Registers).
 * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
 *			Conventional PCI Hot-Plug, ACPI slot).
 *			Such bridges are allocated additional MMIO and bus
 *			number resources to allow for hierarchy expansion.
 * @is_pciehp:		PCIe Hot-Plug Capable bridge.
 */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;
	unsigned short	device;
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_info	*aer_info;	/* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev	*rcec;		/* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u16		rebar_cap;	/* Resizable BAR capability offset */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers. */
	u64		msi_addr_mask;	/* Mask of the bits of bus address for
					   MSI that this device implements.
					   Normally set based on device
					   capabilities.  You only need to
					   change this if your device claims
					   to support 64-bit MSI but implements
					   fewer than 64 address bits. */

	struct device_dma_parameters dma_parms;

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	pinned:1;	/* Whether this dev is pinned */
	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

	u16		l1ss;		/* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	aspm_l0s_support:1;	/* ASPM L0s support */
	unsigned int	aspm_l1_support:1;	/* ASPM L1 support */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;	/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here.  They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;
	unsigned int	msix_enabled:1;
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
	unsigned int	fm_enabled:1;		/* Flit Mode (segment captured) */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;
	unsigned int	is_physfn:1;
	unsigned int	is_virtfn:1;
	unsigned int	is_hotplug_bridge:1;
	unsigned int	is_pciehp:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar.  They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that.  When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space */
	pci_dev_flags_t dev_flags;
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE];	   /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_responder:1;
	unsigned int	ptm_requester:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
	struct pcie_bwctrl_data	*link_bwctrl;
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc;	/* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
	struct npem	*npem;		/* Native PCIe Enclosure Management */
#endif
#ifdef CONFIG_PCI_IDE
	u16		ide_cap;	/* Link Integrity & Data Encryption */
	u8		nr_ide_mem;	/* Address association resources for streams */
	u8		nr_link_ide;	/* Link Stream count (Selective Stream offset) */
	u16		nr_sel_ide;	/* Selective Stream count (register block allocator) */
	struct ida	ide_stream_ida;
	unsigned int	ide_cfg:1;	/* Config cycles over IDE */
	unsigned int	ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
#endif
#ifdef CONFIG_PCI_TSM
	struct pci_tsm	*tsm;		/* TSM operation state */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	u8		supported_speeds; /* Supported Link Speeds Vector */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char	*driver_override;

	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

#ifdef CONFIG_PCIE_TPH
	u16		tph_cap;	/* TPH capability offset */
	u8		tph_mode;	/* TPH mode */
	u8		tph_req_type;	/* TPH requester type */
#endif
};

static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		dev = dev->physfn;
#endif
	return dev;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus);

#define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
#define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
#define for_each_pci_dev_reverse(d) \
	while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

static inline int pci_channel_offline(struct pci_dev *pdev)
{
	return (pdev->error_state != pci_channel_io_normal);
}

/*
 * Currently in ACPI spec, for each PCI host bridge, PCI Segment
 * Group number is limited to a 16-bit value, therefore (int)-1 is
 * not a valid PCI domain number, and can be used as a sentinel
 * value indicating ->domain_nr is not set by the driver (and
 * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
 * pci_bus_find_domain_nr()).
 */
#define PCI_DOMAIN_NR_NOT_SET (-1)

struct pci_host_bridge {
	struct device	dev;
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;
	struct pci_ops	*child_ops;
	void		*sysdata;
	int		busnr;
	int		domain_nr;
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
#ifdef CONFIG_PCI_IDE
	u16		nr_ide_streams;	/* Max streams possibly active in @ide_stream_ida */
	struct ida	ide_stream_ida;
	struct ida	ide_stream_ids_ida; /* track unique ids per domain */
#endif
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);
	void (*release_fn)(struct pci_host_bridge *);
	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void		*release_data;
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[] ____cacheline_aligned;
};

#define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
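
/*
 * A host controller driver typically allocates the bridge together with
 * its private state and later retrieves that state with
 * pci_host_bridge_priv(); both helpers are declared just below.  A minimal
 * probe sketch, assuming a hypothetical "struct my_pcie" driver structure
 * and that ops/windows are filled in elsewhere (pci_host_probe() is
 * declared further down in this header):
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *pcie;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
 *	if (!bridge)
 *		return -ENOMEM;
 *	pcie = pci_host_bridge_priv(bridge);
 *	pcie->dev = dev;
 *	return pci_host_probe(bridge);
 */
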
static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}

static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv);
void pci_free_host_bridge(struct pci_host_bridge *bridge);
struct device *pci_get_host_bridge_device(struct pci_dev *dev);
struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
				 void (*release_fn)(struct pci_host_bridge *),
				 void *release_data);

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);

#define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */

struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device		*bridge;
	struct device		dev;
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;
	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
	unsigned int		flit_mode:1;	/* Link in Flit mode */
};

#define to_pci_bus(n)	container_of(n, struct pci_bus, dev)

static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}

/*
 * Returns true if the PCI bus is root (behind host-PCI bridge),
 * false otherwise
 *
 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
 * This is incorrect because "virtual" buses added for SR-IOV (via
 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
	return !(pbus->parent);
}

/**
 * pci_is_bridge - check if the PCI device is a bridge
 * @dev: PCI device
 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
 */
static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

/**
 * pci_is_vga - check if the PCI device is a VGA device
 * @pdev: PCI device
 *
 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
 * VGA Base Class and Sub-Classes:
 *
 *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
 *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
 *
 * Return true if the PCI device is a VGA device and uses the legacy VGA
 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
 * aliases).
 */
static inline bool pci_is_vga(struct pci_dev *pdev)
{
	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		return true;

	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
		return true;

	return false;
}

/**
 * pci_is_display - check if the PCI device is a display controller
 * @pdev: PCI device
 *
 * Determine whether the given PCI device corresponds to a display
 * controller. Display controllers are typically used for graphical output
 * and are identified based on their class code.
 *
 * Return: true if the PCI device is a display controller, false otherwise.
 */
static inline bool pci_is_display(struct pci_dev *pdev)
{
	return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
}

#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else

static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
{
	dev = pci_physfn(dev);
	if (pci_is_root_bus(dev->bus))
		return NULL;

	return dev->bus->self;
}

#ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif

/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	switch (err) {
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return -ENOENT;
	case PCIBIOS_BAD_VENDOR_ID:
		return -ENOTTY;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return -ENODEV;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return -EFAULT;
	case PCIBIOS_SET_FAILED:
		return -EIO;
	case PCIBIOS_BUFFER_TOO_SMALL:
		return -ENOSPC;
	}

	return -ERANGE;
}

/* Low-level architecture-dependent routines */

struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);
	void (*remove_bus)(struct pci_bus *bus);
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
	int (*assert_perst)(struct pci_bus *bus, bool assert);
};
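
/*
 * Controllers whose config space is memory-mapped usually implement only
 * ->map_bus() and reuse the generic accessors declared later in this
 * header.  A minimal sketch, assuming a hypothetical my_ecam_map_bus()
 * that returns the mapped config address for (bus, devfn, where):
 *
 *	static struct pci_ops my_pci_ops = {
 *		.map_bus = my_ecam_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */
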

/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 val);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};

static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
{
	return region->end - region->start + 1;
}

struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};


/*
 * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};

/* PCI bus error event callbacks */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};


struct module;
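
/*
 * A minimal error-handler sketch, assuming hypothetical my_error_detected(),
 * my_slot_reset() and my_resume() callbacks in a driver; the shape follows
 * Documentation/PCI/pci-error-recovery.rst:
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected = my_error_detected,
 *		.slot_reset     = my_slot_reset,
 *		.resume         = my_resume,
 *	};
 */
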

/**
 * struct pci_driver - PCI driver structure
 * @name:	Driver name.
 * @id_table:	Pointer to table of device IDs the driver is
 *		interested in.  Most drivers should export this
 *		table using MODULE_DEVICE_TABLE(pci,...).
 * @probe:	This probing function gets called (during execution
 *		of pci_register_driver() for already existing
 *		devices or later if a new device gets inserted) for
 *		all PCI devices which match the ID table and are not
 *		"owned" by the other drivers yet. This function gets
 *		passed a "struct pci_dev \*" for each device whose
 *		entry in the ID table matches the device. The probe
 *		function returns zero when the driver chooses to
 *		take "ownership" of the device or an error code
 *		(negative number) otherwise.
 *		The probe function always gets called from process
 *		context, so it can sleep.
 * @remove:	The remove() function gets called whenever a device
 *		being handled by this driver is removed (either during
 *		deregistration of the driver or when it's manually
 *		pulled out of a hot-pluggable slot).
 *		The remove function always gets called from process
 *		context, so it can sleep.
 * @suspend:	Put device into low power state.
 * @resume:	Wake device from low power state.
 *		(Please see Documentation/power/pci.rst for descriptions
 *		of PCI Power Management and the related functions.)
 * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
 *		Intended to stop any idling DMA operations.
 *		Useful for enabling wake-on-lan (NIC) or changing
 *		the power state of a device before reboot.
 *		e.g. drivers/net/e100.c.
 * @sriov_configure: Optional driver callback to allow configuration of
 *		number of VFs to enable via sysfs "sriov_numvfs" file.
 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
 *		vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
 *		This will change MSI-X Table Size in the VF Message Control
 *		registers.
 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
 *		MSI-X vectors available for distribution to the VFs.
 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
 * @groups:	Sysfs attribute groups.
 * @dev_groups: Attributes attached to the device that will be
 *		created once it is bound to the driver.
 * @driver:	Driver model structure.
 * @dynids:	List of dynamically added device IDs.
 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
 *		For most device drivers, no need to care about this flag
 *		as long as all DMAs are handled through the kernel DMA API.
 *		For some special ones, for example VFIO drivers, they know
 *		how to manage the DMA themselves and set this flag so that
 *		the IOMMU layer will allow them to setup and manage their
 *		own I/O address space.
 */
struct pci_driver {
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
	const struct pci_error_handlers *err_handler;
	const struct attribute_group **groups;
	const struct attribute_group **dev_groups;
	struct device_driver	driver;
	struct pci_dynids	dynids;
	bool driver_managed_dma;
};

#define	to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
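
/*
 * A minimal registration sketch, assuming hypothetical my_probe() and
 * my_remove() callbacks and a my_pci_ids[] table (module_pci_driver() and
 * pci_register_driver() are provided further down in this header):
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name     = "my_pci",
 *		.id_table = my_pci_ids,
 *		.probe    = my_probe,
 *		.remove   = my_remove,
 *	};
 *	module_pci_driver(my_pci_driver);
 */
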

/**
 * PCI_DEVICE - macro used to describe a specific PCI device
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
 *                              override_only flags.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @driver_override: the 32 bit PCI Device override_only
 *
 * This macro is used to create a struct pci_device_id that matches only a
 * driver_override device.  The subvendor and subdevice fields will be set to
 * PCI_ANY_ID.
 */
#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
	.subdevice = PCI_ANY_ID, .override_only = (driver_override)

/**
 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
 *                                   "driver_override" PCI device.
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device. The subvendor and subdevice fields will be set to
 * PCI_ANY_ID and the driver_override will be set to
 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
 */
#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)

/**
 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
 * @vend: the 16 bit PCI Vendor ID
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific device with subsystem information.
 */
#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev)

/**
 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
 * @dev_class: the class, subclass, prog-if triple for this device
 * @dev_class_mask: the class mask for this device
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI class.  The vendor, device, subvendor, and subdevice
 * fields will be set to PCI_ANY_ID.
 */
#define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
	.class = (dev_class), .class_mask = (dev_class_mask), \
	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
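
/*
 * These helpers are meant for use in a driver's pci_device_id table; a
 * minimal sketch matching one Intel device ID (0x1234 is a made-up
 * example) plus every NVMe-class device:
 *
 *	static const struct pci_device_id my_pci_ids[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_pci_ids);
 */
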
/**
 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID. The macro allows the next field to follow as the device
 * private data.
 */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0

/**
 * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
 * @vend: the vendor name
 * @dev: the 16 bit PCI Device ID
 * @subvend: the 16 bit PCI Subvendor ID
 * @subdev: the 16 bit PCI Subdevice ID
 *
 * Generate the pci_device_id struct layout for the specific PCI
 * device/subdevice. Private data may follow the output.
 */
#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev), 0, 0

/**
 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
 * @data: the driver data to be filled
 *
 * This macro is used to create a struct pci_device_id that matches a
 * specific PCI device.  The subvendor, and subdevice fields will be set
 * to PCI_ANY_ID.
 */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
	.driver_data = (kernel_ulong_t)(data)

enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};

#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */

/* These external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI

extern unsigned int pci_flags;

static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }

void pcie_bus_configure_settings(struct pci_bus *bus);

enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};

extern enum pcie_bus_config_types pcie_bus_config;

extern const struct bus_type pci_bus_type;
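
/*
 * The PCI_IRQ_* flags above are typically OR'ed together when asking for
 * interrupt vectors.  A minimal sketch that prefers MSI-X/MSI and falls
 * back to INTx (pci_alloc_irq_vectors() is declared later in this header):
 *
 *	int nvec;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvec < 0)
 *		return nvec;
 */
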
/* Do NOT directly access these two variables, unless you are arch-specific PCI
 * code, or PCI core code. */
extern struct list_head pci_root_buses;	/* List of all known PCI buses */
/* Some device drivers need to know whether PCI has been initialized */
int no_pci_devices(void);

void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
/* Architecture-specific versions may override this (weak) */
char *pcibios_setup(char *str);

/* Used only when drivers/pci/setup.c is used */
resource_size_t pcibios_align_resource(void *, const struct resource *,
				resource_size_t,
				resource_size_t);

/* Generic PCI functions used internally */

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
			     struct resource *res);
void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
			     struct pci_bus_region *region);
void pcibios_scan_specific_bus(int busn);
struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
				    struct pci_ops *ops, void *sysdata,
				    struct list_head *resources);
int pci_host_probe(struct pci_host_bridge *bridge);
void pci_probe_flush_workqueue(void);
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
				  struct pci_ops *ops, void *sysdata,
				  struct list_head *resources);
int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
				 const char *name,
				 struct hotplug_slot *hotplug);
void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
unsigned int pci_scan_child_bus(struct pci_bus *bus);
void pci_bus_add_device(struct pci_dev *dev);
void pci_read_bridge_bases(struct pci_bus *child);
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res);
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
void pci_stop_root_bus(struct pci_bus *bus);
void pci_remove_root_bus(struct pci_bus *bus);
void pci_setup_cardbus(struct pci_bus *bus);
void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))

/* Generic PCI functions exported to card drivers */

u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
u8 pci_find_capability(struct pci_dev *dev, int cap);
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);

u64 pci_get_dsn(struct pci_dev *dev);

struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
			       struct pci_dev *from);
struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
				       struct pci_dev *from);
struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
			       unsigned int ss_vendor, unsigned int ss_device,
			       struct pci_dev *from);
struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);

int pci_dev_present(const struct pci_device_id *ids);

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
			     int where, u8 *val);
int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
			     int where, u16 *val);
int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
			      int where, u32 *val);
int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
			      int where, u8 val);
int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
			      int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
			       int where, u32 val);

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val);
int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val);
int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val);
int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val);

struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);

int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
				    u32 clear, u32 set);
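
/*
 * The config accessors above return PCIBIOS_* codes, not errnos; callers
 * that propagate errors usually convert them with pcibios_err_to_errno().
 * A minimal sketch reading the Subsystem Vendor ID:
 *
 *	u16 subsys_vendor;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsys_vendor);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */
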
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
						u16 clear, u16 set);
int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
					      u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set);

/**
 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
 * @dev:	PCI device structure of the PCI Express device
 * @pos:	PCI Express Capability Register
 * @clear:	Clear bitmask
 * @set:	Set bitmask
 *
 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
 * Capability Registers are accessed concurrently in RMW fashion, hence
 * require locking which is handled transparently to the caller.
 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
						     int pos,
						     u16 clear, u16 set)
{
	switch (pos) {
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_RTCTL:
		return pcie_capability_clear_and_set_word_locked(dev, pos,
								 clear, set);
	default:
		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
								   clear, set);
	}
}

static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}

/* User-space driven config access */
int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);

int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
int __must_check pci_reenable_device(struct pci_dev *);
int __must_check pcim_enable_device(struct pci_dev *pdev);
void pcim_pin_device(struct pci_dev *pdev);
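
/*
 * pcim_enable_device() is the device-managed variant of pci_enable_device():
 * the device is disabled again automatically when the driver unbinds.  A
 * minimal probe-body sketch (pci_set_master() is declared just below):
 *
 *	int ret;
 *
 *	ret = pcim_enable_device(pdev);
 *	if (ret)
 *		return ret;
 *	pci_set_master(pdev);
 */
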
broken. 1404 */ 1405 return !pdev->broken_intx_masking; 1406 } 1407 1408 static inline int pci_is_enabled(struct pci_dev *pdev) 1409 { 1410 return (atomic_read(&pdev->enable_cnt) > 0); 1411 } 1412 1413 static inline int pci_is_managed(struct pci_dev *pdev) 1414 { 1415 return pdev->is_managed; 1416 } 1417 1418 void pci_disable_device(struct pci_dev *dev); 1419 1420 extern unsigned int pcibios_max_latency; 1421 void pci_set_master(struct pci_dev *dev); 1422 void pci_clear_master(struct pci_dev *dev); 1423 1424 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); 1425 int pci_set_cacheline_size(struct pci_dev *dev); 1426 int __must_check pci_set_mwi(struct pci_dev *dev); 1427 int __must_check pcim_set_mwi(struct pci_dev *dev); 1428 int pci_try_set_mwi(struct pci_dev *dev); 1429 void pci_clear_mwi(struct pci_dev *dev); 1430 void pci_disable_parity(struct pci_dev *dev); 1431 void pci_intx(struct pci_dev *dev, int enable); 1432 bool pci_check_and_mask_intx(struct pci_dev *dev); 1433 bool pci_check_and_unmask_intx(struct pci_dev *dev); 1434 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask); 1435 int pci_wait_for_pending_transaction(struct pci_dev *dev); 1436 int pcix_get_max_mmrbc(struct pci_dev *dev); 1437 int pcix_get_mmrbc(struct pci_dev *dev); 1438 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 1439 int pcie_get_readrq(struct pci_dev *dev); 1440 int pcie_set_readrq(struct pci_dev *dev, int rq); 1441 int pcie_get_mps(struct pci_dev *dev); 1442 int pcie_set_mps(struct pci_dev *dev, int mps); 1443 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, 1444 enum pci_bus_speed *speed, 1445 enum pcie_link_width *width); 1446 int pcie_link_speed_mbps(struct pci_dev *pdev); 1447 void pcie_print_link_status(struct pci_dev *dev); 1448 int pcie_reset_flr(struct pci_dev *dev, bool probe); 1449 int pcie_flr(struct pci_dev *dev); 1450 int __pci_reset_function_locked(struct pci_dev *dev); 1451 int pci_reset_function(struct pci_dev *dev); 1452 int pci_reset_function_locked(struct pci_dev *dev); 1453 int pci_try_reset_function(struct pci_dev *dev); 1454 int pci_probe_reset_slot(struct pci_slot *slot); 1455 int pci_probe_reset_bus(struct pci_bus *bus); 1456 int pci_reset_bus(struct pci_dev *dev); 1457 void pci_reset_secondary_bus(struct pci_dev *dev); 1458 void pcibios_reset_secondary_bus(struct pci_dev *dev); 1459 void pci_update_resource(struct pci_dev *dev, int resno); 1460 int __must_check pci_assign_resource(struct pci_dev *dev, int i); 1461 int pci_release_resource(struct pci_dev *dev, int resno); 1462 1463 /* Resizable BAR related routines */ 1464 int pci_rebar_bytes_to_size(u64 bytes); 1465 resource_size_t pci_rebar_size_to_bytes(int size); 1466 u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar); 1467 bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size); 1468 int pci_rebar_get_max_size(struct pci_dev *pdev, int bar); 1469 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size, 1470 int exclude_bars); 1471 1472 int pci_select_bars(struct pci_dev *dev, unsigned long flags); 1473 bool pci_device_is_present(struct pci_dev *pdev); 1474 void pci_ignore_hotplug(struct pci_dev *dev); 1475 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev); 1476 int pci_status_get_and_clear_errors(struct pci_dev *pdev); 1477 1478 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr, 1479 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id, 1480 const char *fmt, ...); 1481 void 
pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id); 1482 1483 /* ROM control related routines */ 1484 int pci_enable_rom(struct pci_dev *pdev); 1485 void pci_disable_rom(struct pci_dev *pdev); 1486 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 1487 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 1488 1489 /* Power management related routines */ 1490 int pci_save_state(struct pci_dev *dev); 1491 void pci_restore_state(struct pci_dev *dev); 1492 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); 1493 int pci_load_saved_state(struct pci_dev *dev, 1494 struct pci_saved_state *state); 1495 int pci_load_and_free_saved_state(struct pci_dev *dev, 1496 struct pci_saved_state **state); 1497 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); 1498 int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 1499 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state); 1500 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 1501 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 1502 void pci_pme_active(struct pci_dev *dev, bool enable); 1503 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable); 1504 int pci_wake_from_d3(struct pci_dev *dev, bool enable); 1505 int pci_prepare_to_sleep(struct pci_dev *dev); 1506 int pci_back_from_sleep(struct pci_dev *dev); 1507 bool pci_dev_run_wake(struct pci_dev *dev); 1508 void pci_d3cold_enable(struct pci_dev *dev); 1509 void pci_d3cold_disable(struct pci_dev *dev); 1510 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev); 1511 void pci_resume_bus(struct pci_bus *bus); 1512 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state); 1513 1514 /* For use by arch with custom probe code */ 1515 void set_pcie_port_type(struct pci_dev *pdev); 1516 void set_pcie_hotplug_bridge(struct pci_dev *pdev); 1517 1518 /* Functions for PCI Hotplug drivers to use */ 1519 unsigned int pci_rescan_bus(struct pci_bus *bus); 1520 void pci_lock_rescan_remove(void); 1521 void pci_unlock_rescan_remove(void); 1522 1523 /* Vital Product Data routines */ 1524 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1525 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1526 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 1527 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); 1528 1529 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 1530 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); 1531 void pci_bus_assign_resources(const struct pci_bus *bus); 1532 void pci_bus_claim_resources(struct pci_bus *bus); 1533 void pci_bus_size_bridges(struct pci_bus *bus); 1534 int pci_claim_resource(struct pci_dev *, int); 1535 int pci_claim_bridge_resource(struct pci_dev *bridge, int i); 1536 void pci_assign_unassigned_resources(void); 1537 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge); 1538 void pci_assign_unassigned_bus_resources(struct pci_bus *bus); 1539 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus); 1540 int pci_enable_resources(struct pci_dev *, int mask); 1541 void pci_assign_irq(struct pci_dev *dev); 1542 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res); 1543 #define HAVE_PCI_REQ_REGIONS 2 1544 int __must_check pci_request_regions(struct pci_dev *, const char *); 1545 
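/*
 * A minimal, non-authoritative sketch of how the region helpers above are
 * typically used at probe time (the "foo" names are illustrative only and
 * not part of this header; real drivers add their own teardown or use the
 * managed pcim_*() helpers declared elsewhere in this header):
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			goto err_disable;
 *
 *		regs = pci_ioremap_bar(pdev, 0);
 *		if (!regs) {
 *			err = -ENOMEM;
 *			goto err_release;
 *		}
 *
 *		pci_set_master(pdev);
 *		return 0;
 *
 *	err_release:
 *		pci_release_regions(pdev);
 *	err_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */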
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
void pci_release_selected_regions(struct pci_dev *, int);

static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
				    unsigned int len, const char *name)
{
	return __request_region(&pdev->driver_exclusive_resource, offset, len,
				name, IORESOURCE_EXCLUSIVE);
}

static inline void pci_release_config_region(struct pci_dev *pdev,
					     unsigned int offset,
					     unsigned int len)
{
	__release_region(&pdev->driver_exclusive_resource, offset, len);
}

/* drivers/pci/bus.c */
void pci_add_resource(struct list_head *resources, struct resource *res);
void pci_add_resource_offset(struct list_head *resources, struct resource *res,
			     resource_size_t offset);
void pci_free_resource_list(struct list_head *resources);
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
void pci_bus_remove_resources(struct pci_bus *bus);
void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
int devm_request_pci_bus_resources(struct device *dev,
				   struct list_head *resources);

/* Temporary until a new and working PCI SBR API is in place */
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);

#define __pci_bus_for_each_res0(bus, res, ...)				\
	for (unsigned int __b = 0;					\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)

#define __pci_bus_for_each_res1(bus, res, __b)				\
	for (__b = 0;							\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)

/**
 * pci_bus_for_each_resource - iterate over PCI bus resources
 * @bus: the PCI bus
 * @res: pointer to the current resource
 * @...: optional index of the current resource
 *
 * Iterate over the resources of a PCI bus. The loop first walks the bus's
 * resource array, which holds at most %PCI_BRIDGE_RESOURCE_NUM entries, and
 * then continues with the separate list of additional resources, if that
 * list is not empty; this is why the loop condition uses a logical OR.
 *
 * Possible usage:
 *
 *	struct pci_bus *bus = ...;
 *	struct resource *res;
 *	unsigned int i;
 *
 *	// With optional index
 *	pci_bus_for_each_resource(bus, res, i)
 *		pr_info("PCI bus resource[%u]: %pR\n", i, res);
 *
 *	// Without index
 *	pci_bus_for_each_resource(bus, res)
 *		_do_something_(res);
 */
#define pci_bus_for_each_resource(bus, res, ...)			\
	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
		    (bus, res, __VA_ARGS__)

int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
			struct resource *res, resource_size_t size,
			resource_size_t align, resource_size_t min,
			unsigned long type_mask,
			resource_alignf alignf,
			void *alignf_data);


int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
			  resource_size_t size);
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
			   phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
				      resource_size_t offset,
				      resource_size_t size);
void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
					  struct resource *res);

static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
	return region.start;
}

/* Proper probing supporting hot-pluggable devices */
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
				       const char *mod_name);

/* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
#define pci_register_driver(driver)		\
	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)

void pci_unregister_driver(struct pci_driver *dev);

/**
 * module_pci_driver() - Helper macro for registering a PCI driver
 * @__pci_driver: pci_driver struct
 *
 * Helper macro for PCI drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and module_exit()
 */
#define module_pci_driver(__pci_driver) \
	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)

/**
 * builtin_pci_driver() - Helper macro for registering a PCI driver
 * @__pci_driver: pci_driver struct
 *
 * Helper macro for PCI drivers which do not do anything special in their
 * init code. This eliminates a lot of boilerplate. Each driver may only
 * use this macro once, and calling it replaces device_initcall(...)
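 *
 * A minimal, non-authoritative sketch of the registration pattern that
 * module_pci_driver() and builtin_pci_driver() wrap (the "foo" names and
 * callbacks are illustrative only, not part of this header):
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_id_table,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_driver);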
1680 */ 1681 #define builtin_pci_driver(__pci_driver) \ 1682 builtin_driver(__pci_driver, pci_register_driver) 1683 1684 struct pci_driver *pci_dev_driver(const struct pci_dev *dev); 1685 int pci_add_dynid(struct pci_driver *drv, 1686 unsigned int vendor, unsigned int device, 1687 unsigned int subvendor, unsigned int subdevice, 1688 unsigned int class, unsigned int class_mask, 1689 unsigned long driver_data); 1690 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 1691 struct pci_dev *dev); 1692 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, 1693 int pass); 1694 1695 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), 1696 void *userdata); 1697 void pci_walk_bus_reverse(struct pci_bus *top, 1698 int (*cb)(struct pci_dev *, void *), void *userdata); 1699 int pci_cfg_space_size(struct pci_dev *dev); 1700 unsigned char pci_bus_max_busnr(struct pci_bus *bus); 1701 resource_size_t pcibios_window_alignment(struct pci_bus *bus, 1702 unsigned long type); 1703 1704 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0) 1705 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1) 1706 1707 int pci_set_vga_state(struct pci_dev *pdev, bool decode, 1708 unsigned int command_bits, u32 flags); 1709 1710 /* 1711 * Virtual interrupts allow for more interrupts to be allocated 1712 * than the device has interrupts for. These are not programmed 1713 * into the device's MSI-X table and must be handled by some 1714 * other driver means. 1715 */ 1716 #define PCI_IRQ_VIRTUAL (1 << 4) 1717 1718 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX) 1719 1720 #include <linux/dmapool.h> 1721 1722 struct msix_entry { 1723 u32 vector; /* Kernel uses to write allocated vector */ 1724 u16 entry; /* Driver uses to specify entry, OS writes */ 1725 }; 1726 1727 #ifdef CONFIG_PCI_MSI 1728 int pci_msi_vec_count(struct pci_dev *dev); 1729 void pci_disable_msi(struct pci_dev *dev); 1730 int pci_msix_vec_count(struct pci_dev *dev); 1731 void pci_disable_msix(struct pci_dev *dev); 1732 void pci_restore_msi_state(struct pci_dev *dev); 1733 bool pci_msi_enabled(void); 1734 int pci_enable_msi(struct pci_dev *dev); 1735 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, 1736 int minvec, int maxvec); 1737 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1738 struct msix_entry *entries, int nvec) 1739 { 1740 int rc = pci_enable_msix_range(dev, entries, nvec, nvec); 1741 if (rc < 0) 1742 return rc; 1743 return 0; 1744 } 1745 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1746 unsigned int max_vecs, unsigned int flags); 1747 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1748 unsigned int max_vecs, unsigned int flags, 1749 struct irq_affinity *affd); 1750 1751 bool pci_msix_can_alloc_dyn(struct pci_dev *dev); 1752 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1753 const struct irq_affinity_desc *affdesc); 1754 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map); 1755 1756 void pci_free_irq_vectors(struct pci_dev *dev); 1757 int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1758 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec); 1759 1760 #else 1761 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1762 static inline void pci_disable_msi(struct pci_dev *dev) { } 1763 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1764 static inline void 
pci_disable_msix(struct pci_dev *dev) { } 1765 static inline void pci_restore_msi_state(struct pci_dev *dev) { } 1766 static inline bool pci_msi_enabled(void) { return false; } 1767 static inline int pci_enable_msi(struct pci_dev *dev) 1768 { return -ENOSYS; } 1769 static inline int pci_enable_msix_range(struct pci_dev *dev, 1770 struct msix_entry *entries, int minvec, int maxvec) 1771 { return -ENOSYS; } 1772 static inline int pci_enable_msix_exact(struct pci_dev *dev, 1773 struct msix_entry *entries, int nvec) 1774 { return -ENOSYS; } 1775 1776 static inline int 1777 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 1778 unsigned int max_vecs, unsigned int flags, 1779 struct irq_affinity *aff_desc) 1780 { 1781 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq) 1782 return 1; 1783 return -ENOSPC; 1784 } 1785 static inline int 1786 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 1787 unsigned int max_vecs, unsigned int flags) 1788 { 1789 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, 1790 flags, NULL); 1791 } 1792 1793 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev) 1794 { return false; } 1795 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, 1796 const struct irq_affinity_desc *affdesc) 1797 { 1798 struct msi_map map = { .index = -ENOSYS, }; 1799 1800 return map; 1801 } 1802 1803 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map) 1804 { 1805 } 1806 1807 static inline void pci_free_irq_vectors(struct pci_dev *dev) 1808 { 1809 } 1810 1811 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 1812 { 1813 if (WARN_ON_ONCE(nr > 0)) 1814 return -EINVAL; 1815 return dev->irq; 1816 } 1817 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, 1818 int vec) 1819 { 1820 return cpu_possible_mask; 1821 } 1822 #endif 1823 1824 /** 1825 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq 1826 * @d: the INTx IRQ domain 1827 * @node: the DT node for the device whose interrupt we're translating 1828 * @intspec: the interrupt specifier data from the DT 1829 * @intsize: the number of entries in @intspec 1830 * @out_hwirq: pointer at which to write the hwirq number 1831 * @out_type: pointer at which to write the interrupt type 1832 * 1833 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as 1834 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range 1835 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the 1836 * INTx value to obtain the hwirq number. 1837 * 1838 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range. 
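 *
 * A hedged sketch of the usual way to hook this up, namely as the ->xlate()
 * callback of a host controller's INTx IRQ domain (the ->map() callback
 * named here is hypothetical and driver specific):
 *
 *	static const struct irq_domain_ops foo_intx_domain_ops = {
 *		.map	= foo_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};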
1839 */ 1840 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 1841 struct device_node *node, 1842 const u32 *intspec, 1843 unsigned int intsize, 1844 unsigned long *out_hwirq, 1845 unsigned int *out_type) 1846 { 1847 const u32 intx = intspec[0]; 1848 1849 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD) 1850 return -EINVAL; 1851 1852 *out_hwirq = intx - PCI_INTERRUPT_INTA; 1853 return 0; 1854 } 1855 1856 #ifdef CONFIG_PCIEPORTBUS 1857 extern bool pcie_ports_disabled; 1858 extern bool pcie_ports_native; 1859 1860 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, 1861 bool use_lt); 1862 #else 1863 #define pcie_ports_disabled true 1864 #define pcie_ports_native false 1865 1866 static inline int pcie_set_target_speed(struct pci_dev *port, 1867 enum pci_bus_speed speed_req, 1868 bool use_lt) 1869 { 1870 return -EOPNOTSUPP; 1871 } 1872 #endif 1873 1874 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ 1875 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */ 1876 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */ 1877 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */ 1878 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */ 1879 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */ 1880 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\ 1881 PCIE_LINK_STATE_L1 |\ 1882 PCIE_LINK_STATE_L1_1 |\ 1883 PCIE_LINK_STATE_L1_2 |\ 1884 PCIE_LINK_STATE_L1_1_PCIPM |\ 1885 PCIE_LINK_STATE_L1_2_PCIPM) 1886 #define PCIE_LINK_STATE_CLKPM BIT(7) 1887 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\ 1888 PCIE_LINK_STATE_CLKPM) 1889 1890 #ifdef CONFIG_PCIEASPM 1891 int pci_disable_link_state(struct pci_dev *pdev, int state); 1892 int pci_disable_link_state_locked(struct pci_dev *pdev, int state); 1893 int pci_enable_link_state(struct pci_dev *pdev, int state); 1894 int pci_enable_link_state_locked(struct pci_dev *pdev, int state); 1895 void pcie_no_aspm(void); 1896 bool pcie_aspm_support_enabled(void); 1897 bool pcie_aspm_enabled(struct pci_dev *pdev); 1898 #else 1899 static inline int pci_disable_link_state(struct pci_dev *pdev, int state) 1900 { return 0; } 1901 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state) 1902 { return 0; } 1903 static inline int pci_enable_link_state(struct pci_dev *pdev, int state) 1904 { return 0; } 1905 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state) 1906 { return 0; } 1907 static inline void pcie_no_aspm(void) { } 1908 static inline bool pcie_aspm_support_enabled(void) { return false; } 1909 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; } 1910 #endif 1911 1912 #ifdef CONFIG_HOTPLUG_PCI 1913 void pci_hp_ignore_link_change(struct pci_dev *pdev); 1914 void pci_hp_unignore_link_change(struct pci_dev *pdev); 1915 #else 1916 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { } 1917 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { } 1918 #endif 1919 1920 #ifdef CONFIG_PCIEAER 1921 bool pci_aer_available(void); 1922 #else 1923 static inline bool pci_aer_available(void) { return false; } 1924 #endif 1925 1926 bool pci_ats_disabled(void); 1927 1928 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0 1929 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1 1930 1931 struct pcie_ptm_ops { 1932 int (*check_capability)(void *drvdata); 1933 int (*context_update_write)(void *drvdata, u8 mode); 1934 int (*context_update_read)(void *drvdata, u8 *mode); 1935 int 
(*context_valid_write)(void *drvdata, bool valid); 1936 int (*context_valid_read)(void *drvdata, bool *valid); 1937 int (*local_clock_read)(void *drvdata, u64 *clock); 1938 int (*master_clock_read)(void *drvdata, u64 *clock); 1939 int (*t1_read)(void *drvdata, u64 *clock); 1940 int (*t2_read)(void *drvdata, u64 *clock); 1941 int (*t3_read)(void *drvdata, u64 *clock); 1942 int (*t4_read)(void *drvdata, u64 *clock); 1943 1944 bool (*context_update_visible)(void *drvdata); 1945 bool (*context_valid_visible)(void *drvdata); 1946 bool (*local_clock_visible)(void *drvdata); 1947 bool (*master_clock_visible)(void *drvdata); 1948 bool (*t1_visible)(void *drvdata); 1949 bool (*t2_visible)(void *drvdata); 1950 bool (*t3_visible)(void *drvdata); 1951 bool (*t4_visible)(void *drvdata); 1952 }; 1953 1954 struct pci_ptm_debugfs { 1955 struct dentry *debugfs; 1956 const struct pcie_ptm_ops *ops; 1957 struct mutex lock; 1958 void *pdata; 1959 }; 1960 1961 #ifdef CONFIG_PCIE_PTM 1962 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); 1963 void pci_disable_ptm(struct pci_dev *dev); 1964 bool pcie_ptm_enabled(struct pci_dev *dev); 1965 #else 1966 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) 1967 { return -EINVAL; } 1968 static inline void pci_disable_ptm(struct pci_dev *dev) { } 1969 static inline bool pcie_ptm_enabled(struct pci_dev *dev) 1970 { return false; } 1971 #endif 1972 1973 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM) 1974 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata, 1975 const struct pcie_ptm_ops *ops); 1976 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs); 1977 #else 1978 static inline struct pci_ptm_debugfs 1979 *pcie_ptm_create_debugfs(struct device *dev, void *pdata, 1980 const struct pcie_ptm_ops *ops) { return NULL; } 1981 static inline void 1982 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { } 1983 #endif 1984 1985 void pci_cfg_access_lock(struct pci_dev *dev); 1986 bool pci_cfg_access_trylock(struct pci_dev *dev); 1987 void pci_cfg_access_unlock(struct pci_dev *dev); 1988 1989 void pci_dev_lock(struct pci_dev *dev); 1990 int pci_dev_trylock(struct pci_dev *dev); 1991 void pci_dev_unlock(struct pci_dev *dev); 1992 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T)) 1993 1994 /* 1995 * PCI domain support. Sometimes called PCI segment (eg by ACPI), 1996 * a PCI domain is defined to be a set of PCI buses which share 1997 * configuration space. 1998 */ 1999 #ifdef CONFIG_PCI_DOMAINS 2000 extern int pci_domains_supported; 2001 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max); 2002 void pci_bus_release_emul_domain_nr(int domain_nr); 2003 #else 2004 enum { pci_domains_supported = 0 }; 2005 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 2006 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 2007 static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max) 2008 { 2009 return 0; 2010 } 2011 static inline void pci_bus_release_emul_domain_nr(int domain_nr) { } 2012 #endif /* CONFIG_PCI_DOMAINS */ 2013 2014 /* 2015 * Generic implementation for PCI domain support. 
If your 2016 * architecture does not need custom management of PCI 2017 * domains then this implementation will be used 2018 */ 2019 #ifdef CONFIG_PCI_DOMAINS_GENERIC 2020 static inline int pci_domain_nr(struct pci_bus *bus) 2021 { 2022 return bus->domain_nr; 2023 } 2024 #ifdef CONFIG_ACPI 2025 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus); 2026 #else 2027 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) 2028 { return 0; } 2029 #endif 2030 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); 2031 void pci_bus_release_domain_nr(struct device *parent, int domain_nr); 2032 #endif 2033 2034 /* Some architectures require additional setup to direct VGA traffic */ 2035 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 2036 unsigned int command_bits, u32 flags); 2037 void pci_register_set_vga_state(arch_set_vga_state_t func); 2038 2039 static inline int 2040 pci_request_io_regions(struct pci_dev *pdev, const char *name) 2041 { 2042 return pci_request_selected_regions(pdev, 2043 pci_select_bars(pdev, IORESOURCE_IO), name); 2044 } 2045 2046 static inline void 2047 pci_release_io_regions(struct pci_dev *pdev) 2048 { 2049 return pci_release_selected_regions(pdev, 2050 pci_select_bars(pdev, IORESOURCE_IO)); 2051 } 2052 2053 static inline int 2054 pci_request_mem_regions(struct pci_dev *pdev, const char *name) 2055 { 2056 return pci_request_selected_regions(pdev, 2057 pci_select_bars(pdev, IORESOURCE_MEM), name); 2058 } 2059 2060 static inline void 2061 pci_release_mem_regions(struct pci_dev *pdev) 2062 { 2063 return pci_release_selected_regions(pdev, 2064 pci_select_bars(pdev, IORESOURCE_MEM)); 2065 } 2066 2067 #else /* CONFIG_PCI is not enabled */ 2068 2069 static inline void pci_set_flags(int flags) { } 2070 static inline void pci_add_flags(int flags) { } 2071 static inline void pci_clear_flags(int flags) { } 2072 static inline int pci_has_flag(int flag) { return 0; } 2073 2074 /* 2075 * If the system does not have PCI, clearly these return errors. Define 2076 * these as simple inline functions to avoid hair in drivers. 
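 *
 * For example, shared code can usually call an accessor unconditionally and
 * act on its return value instead of wrapping the call in #ifdef CONFIG_PCI;
 * a hedged sketch (the stubs below return PCIBIOS_FUNC_NOT_SUPPORTED):
 *
 *	u16 cmd;
 *
 *	if (pci_read_config_word(pdev, PCI_COMMAND, &cmd))
 *		return;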
2077 */ 2078 #define _PCI_NOP(o, s, t) \ 2079 static inline int pci_##o##_config_##s(struct pci_dev *dev, \ 2080 int where, t val) \ 2081 { return PCIBIOS_FUNC_NOT_SUPPORTED; } 2082 2083 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \ 2084 _PCI_NOP(o, word, u16 x) \ 2085 _PCI_NOP(o, dword, u32 x) 2086 _PCI_NOP_ALL(read, *) 2087 _PCI_NOP_ALL(write,) 2088 2089 static inline void pci_probe_flush_workqueue(void) { } 2090 2091 static inline struct pci_dev *pci_get_device(unsigned int vendor, 2092 unsigned int device, 2093 struct pci_dev *from) 2094 { return NULL; } 2095 2096 static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor, 2097 unsigned int device, 2098 struct pci_dev *from) 2099 { return NULL; } 2100 2101 static inline struct pci_dev *pci_get_subsys(unsigned int vendor, 2102 unsigned int device, 2103 unsigned int ss_vendor, 2104 unsigned int ss_device, 2105 struct pci_dev *from) 2106 { return NULL; } 2107 2108 static inline struct pci_dev *pci_get_class(unsigned int class, 2109 struct pci_dev *from) 2110 { return NULL; } 2111 2112 static inline struct pci_dev *pci_get_base_class(unsigned int class, 2113 struct pci_dev *from) 2114 { return NULL; } 2115 2116 static inline int pci_dev_present(const struct pci_device_id *ids) 2117 { return 0; } 2118 2119 #define no_pci_devices() (1) 2120 #define pci_dev_put(dev) do { } while (0) 2121 2122 static inline void pci_set_master(struct pci_dev *dev) { } 2123 static inline void pci_clear_master(struct pci_dev *dev) { } 2124 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } 2125 static inline void pci_disable_device(struct pci_dev *dev) { } 2126 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } 2127 static inline int pci_assign_resource(struct pci_dev *dev, int i) 2128 { return -EBUSY; } 2129 static inline int __must_check __pci_register_driver(struct pci_driver *drv, 2130 struct module *owner, 2131 const char *mod_name) 2132 { return 0; } 2133 static inline int pci_register_driver(struct pci_driver *drv) 2134 { return 0; } 2135 static inline void pci_unregister_driver(struct pci_driver *drv) { } 2136 static inline u8 pci_find_capability(struct pci_dev *dev, int cap) 2137 { return 0; } 2138 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap) 2139 { return 0; } 2140 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap) 2141 { return 0; } 2142 2143 static inline u64 pci_get_dsn(struct pci_dev *dev) 2144 { return 0; } 2145 2146 /* Power management related routines */ 2147 static inline int pci_save_state(struct pci_dev *dev) { return 0; } 2148 static inline void pci_restore_state(struct pci_dev *dev) { } 2149 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) 2150 { return 0; } 2151 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state) 2152 { return 0; } 2153 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable) 2154 { return 0; } 2155 static inline pci_power_t pci_choose_state(struct pci_dev *dev, 2156 pm_message_t state) 2157 { return PCI_D0; } 2158 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, 2159 int enable) 2160 { return 0; } 2161 2162 static inline struct resource *pci_find_resource(struct pci_dev *dev, 2163 struct resource *res) 2164 { return NULL; } 2165 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) 2166 { return -EIO; } 2167 static inline void pci_release_regions(struct pci_dev *dev) { } 
2168 2169 static inline int pci_register_io_range(const struct fwnode_handle *fwnode, 2170 phys_addr_t addr, resource_size_t size) 2171 { return -EINVAL; } 2172 2173 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; } 2174 2175 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from) 2176 { return NULL; } 2177 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, 2178 unsigned int devfn) 2179 { return NULL; } 2180 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, 2181 unsigned int bus, unsigned int devfn) 2182 { return NULL; } 2183 2184 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 2185 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 2186 2187 #define dev_is_pci(d) (false) 2188 #define dev_is_pf(d) (false) 2189 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) 2190 { return false; } 2191 static inline int pci_irqd_intx_xlate(struct irq_domain *d, 2192 struct device_node *node, 2193 const u32 *intspec, 2194 unsigned int intsize, 2195 unsigned long *out_hwirq, 2196 unsigned int *out_type) 2197 { return -EINVAL; } 2198 2199 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, 2200 struct pci_dev *dev) 2201 { return NULL; } 2202 static inline bool pci_ats_disabled(void) { return true; } 2203 2204 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr) 2205 { 2206 return -EINVAL; 2207 } 2208 2209 static inline int 2210 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, 2211 unsigned int max_vecs, unsigned int flags, 2212 struct irq_affinity *aff_desc) 2213 { 2214 return -ENOSPC; 2215 } 2216 static inline int 2217 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, 2218 unsigned int max_vecs, unsigned int flags) 2219 { 2220 return -ENOSPC; 2221 } 2222 2223 static inline void pci_free_irq_vectors(struct pci_dev *dev) 2224 { 2225 } 2226 #endif /* CONFIG_PCI */ 2227 2228 /* Include architecture-dependent settings and functions */ 2229 2230 #include <asm/pci.h> 2231 2232 /* 2233 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff 2234 * is expected to be an offset within that region. 2235 * 2236 */ 2237 int pci_mmap_resource_range(struct pci_dev *dev, int bar, 2238 struct vm_area_struct *vma, 2239 enum pci_mmap_state mmap_state, int write_combine); 2240 2241 #ifndef arch_can_pci_mmap_wc 2242 #define arch_can_pci_mmap_wc() 0 2243 #endif 2244 2245 #ifndef arch_can_pci_mmap_io 2246 #define arch_can_pci_mmap_io() 0 2247 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL) 2248 #else 2249 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); 2250 #endif 2251 2252 #ifndef pci_root_bus_fwnode 2253 #define pci_root_bus_fwnode(bus) NULL 2254 #endif 2255 2256 /* 2257 * These helpers provide future and backwards compatibility 2258 * for accessing popular PCI BAR info 2259 */ 2260 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)]) 2261 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start) 2262 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end) 2263 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags) 2264 #define pci_resource_len(dev,bar) \ 2265 (pci_resource_end((dev), (bar)) ? \ 2266 resource_size(pci_resource_n((dev), (bar))) : 0) 2267 2268 #define __pci_dev_for_each_res0(dev, res, ...) 
\ 2269 for (unsigned int __b = 0; \ 2270 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ 2271 __b++) 2272 2273 #define __pci_dev_for_each_res1(dev, res, __b) \ 2274 for (__b = 0; \ 2275 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \ 2276 __b++) 2277 2278 #define pci_dev_for_each_resource(dev, res, ...) \ 2279 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \ 2280 (dev, res, __VA_ARGS__) 2281 2282 /* 2283 * Similar to the helpers above, these manipulate per-pci_dev 2284 * driver-specific data. They are really just a wrapper around 2285 * the generic device structure functions of these calls. 2286 */ 2287 static inline void *pci_get_drvdata(struct pci_dev *pdev) 2288 { 2289 return dev_get_drvdata(&pdev->dev); 2290 } 2291 2292 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) 2293 { 2294 dev_set_drvdata(&pdev->dev, data); 2295 } 2296 2297 static inline const char *pci_name(const struct pci_dev *pdev) 2298 { 2299 return dev_name(&pdev->dev); 2300 } 2301 2302 void pci_resource_to_user(const struct pci_dev *dev, int bar, 2303 const struct resource *rsrc, 2304 resource_size_t *start, resource_size_t *end); 2305 2306 /* 2307 * The world is not perfect and supplies us with broken PCI devices. 2308 * For at least a part of these bugs we need a work-around, so both 2309 * generic (drivers/pci/quirks.c) and per-architecture code can define 2310 * fixup hooks to be called for particular buggy devices. 2311 */ 2312 2313 struct pci_fixup { 2314 u16 vendor; /* Or PCI_ANY_ID */ 2315 u16 device; /* Or PCI_ANY_ID */ 2316 u32 class; /* Or PCI_ANY_ID */ 2317 unsigned int class_shift; /* should be 0, 8, 16 */ 2318 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2319 int hook_offset; 2320 #else 2321 void (*hook)(struct pci_dev *dev); 2322 #endif 2323 }; 2324 2325 enum pci_fixup_pass { 2326 pci_fixup_early, /* Before probing BARs */ 2327 pci_fixup_header, /* After reading configuration header */ 2328 pci_fixup_final, /* Final phase of device fixups */ 2329 pci_fixup_enable, /* pci_enable_device() time */ 2330 pci_fixup_resume, /* pci_device_resume() */ 2331 pci_fixup_suspend, /* pci_device_suspend() */ 2332 pci_fixup_resume_early, /* pci_device_resume_early() */ 2333 pci_fixup_suspend_late, /* pci_device_suspend_late() */ 2334 }; 2335 2336 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS 2337 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2338 class_shift, hook) \ 2339 __ADDRESSABLE(hook) \ 2340 asm(".section " #sec ", \"a\" \n" \ 2341 ".balign 16 \n" \ 2342 ".short " #vendor ", " #device " \n" \ 2343 ".long " #class ", " #class_shift " \n" \ 2344 ".long " #hook " - . \n" \ 2345 ".previous \n"); 2346 2347 /* 2348 * Clang's LTO may rename static functions in C, but has no way to 2349 * handle such renamings when referenced from inline asm. To work 2350 * around this, create global C stubs for these cases. 
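 *
 * The hooks wired up through the DECLARE_PCI_FIXUP_*() macros below are
 * almost always static functions in quirks.c, which is precisely the case
 * this stub indirection exists for. A hedged, purely illustrative sketch
 * (the vendor/device IDs and names are made up):
 *
 *	static void quirk_foo(struct pci_dev *dev)
 *	{
 *		pci_info(dev, "applying foo workaround\n");
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_foo);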
2351 */ 2352 #ifdef CONFIG_LTO_CLANG 2353 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2354 class_shift, hook, stub) \ 2355 void stub(struct pci_dev *dev); \ 2356 void stub(struct pci_dev *dev) \ 2357 { \ 2358 hook(dev); \ 2359 } \ 2360 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2361 class_shift, stub) 2362 #else 2363 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2364 class_shift, hook, stub) \ 2365 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2366 class_shift, hook) 2367 #endif 2368 2369 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2370 class_shift, hook) \ 2371 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \ 2372 class_shift, hook, __UNIQUE_ID(hook)) 2373 #else 2374 /* Anonymous variables would be nice... */ 2375 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \ 2376 class_shift, hook) \ 2377 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \ 2378 __attribute__((__section__(#section), aligned((sizeof(void *))))) \ 2379 = { vendor, device, class, class_shift, hook }; 2380 #endif 2381 2382 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \ 2383 class_shift, hook) \ 2384 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2385 hook, vendor, device, class, class_shift, hook) 2386 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \ 2387 class_shift, hook) \ 2388 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2389 hook, vendor, device, class, class_shift, hook) 2390 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \ 2391 class_shift, hook) \ 2392 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2393 hook, vendor, device, class, class_shift, hook) 2394 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \ 2395 class_shift, hook) \ 2396 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2397 hook, vendor, device, class, class_shift, hook) 2398 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ 2399 class_shift, hook) \ 2400 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2401 resume##hook, vendor, device, class, class_shift, hook) 2402 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ 2403 class_shift, hook) \ 2404 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2405 resume_early##hook, vendor, device, class, class_shift, hook) 2406 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ 2407 class_shift, hook) \ 2408 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2409 suspend##hook, vendor, device, class, class_shift, hook) 2410 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ 2411 class_shift, hook) \ 2412 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2413 suspend_late##hook, vendor, device, class, class_shift, hook) 2414 2415 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ 2416 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ 2417 hook, vendor, device, PCI_ANY_ID, 0, hook) 2418 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ 2419 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ 2420 hook, vendor, device, PCI_ANY_ID, 0, hook) 2421 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ 2422 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ 2423 hook, vendor, device, PCI_ANY_ID, 0, hook) 2424 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ 2425 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ 2426 hook, vendor, device, PCI_ANY_ID, 0, hook) 2427 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, 
hook) \ 2428 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ 2429 resume##hook, vendor, device, PCI_ANY_ID, 0, hook) 2430 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ 2431 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ 2432 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) 2433 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ 2434 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ 2435 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) 2436 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ 2437 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ 2438 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) 2439 2440 #ifdef CONFIG_PCI_QUIRKS 2441 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 2442 #else 2443 static inline void pci_fixup_device(enum pci_fixup_pass pass, 2444 struct pci_dev *dev) { } 2445 #endif 2446 2447 int pcim_intx(struct pci_dev *pdev, int enabled); 2448 int pcim_request_all_regions(struct pci_dev *pdev, const char *name); 2449 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); 2450 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, 2451 const char *name); 2452 void pcim_iounmap_region(struct pci_dev *pdev, int bar); 2453 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); 2454 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); 2455 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); 2456 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); 2457 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, 2458 unsigned long offset, unsigned long len); 2459 2460 extern int pci_pci_problems; 2461 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ 2462 #define PCIPCI_TRITON 2 2463 #define PCIPCI_NATOMA 4 2464 #define PCIPCI_VIAETBF 8 2465 #define PCIPCI_VSFX 16 2466 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ 2467 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ 2468 2469 extern u8 pci_dfl_cache_line_size; 2470 extern u8 pci_cache_line_size; 2471 2472 /* Architecture-specific versions may override these (weak) */ 2473 void pcibios_disable_device(struct pci_dev *dev); 2474 void pcibios_set_master(struct pci_dev *dev); 2475 int pcibios_set_pcie_reset_state(struct pci_dev *dev, 2476 enum pcie_reset_state state); 2477 int pcibios_device_add(struct pci_dev *dev); 2478 void pcibios_release_device(struct pci_dev *dev); 2479 #ifdef CONFIG_PCI 2480 void pcibios_penalize_isa_irq(int irq, int active); 2481 #else 2482 static inline void pcibios_penalize_isa_irq(int irq, int active) {} 2483 #endif 2484 int pcibios_alloc_irq(struct pci_dev *dev); 2485 void pcibios_free_irq(struct pci_dev *dev); 2486 resource_size_t pcibios_default_alignment(void); 2487 2488 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) 2489 extern int pci_create_resource_files(struct pci_dev *dev); 2490 extern void pci_remove_resource_files(struct pci_dev *dev); 2491 #endif 2492 2493 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG) 2494 void __init pci_mmcfg_early_init(void); 2495 void __init pci_mmcfg_late_init(void); 2496 #else 2497 static inline void pci_mmcfg_early_init(void) { } 2498 static inline void pci_mmcfg_late_init(void) { } 2499 #endif 2500 2501 int pci_ext_cfg_avail(void); 2502 2503 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 2504 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); 2505 2506 #ifdef CONFIG_PCI_IOV 2507 int pci_iov_virtfn_bus(struct pci_dev 
*dev, int id); 2508 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); 2509 int pci_iov_vf_id(struct pci_dev *dev); 2510 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver); 2511 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); 2512 void pci_disable_sriov(struct pci_dev *dev); 2513 2514 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); 2515 int pci_iov_add_virtfn(struct pci_dev *dev, int id); 2516 void pci_iov_remove_virtfn(struct pci_dev *dev, int id); 2517 int pci_num_vf(struct pci_dev *dev); 2518 int pci_vfs_assigned(struct pci_dev *dev); 2519 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); 2520 int pci_sriov_get_totalvfs(struct pci_dev *dev); 2521 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn); 2522 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); 2523 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size); 2524 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs); 2525 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); 2526 2527 /* Arch may override these (weak) */ 2528 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs); 2529 int pcibios_sriov_disable(struct pci_dev *pdev); 2530 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); 2531 #else 2532 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) 2533 { 2534 return -ENOSYS; 2535 } 2536 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) 2537 { 2538 return -ENOSYS; 2539 } 2540 2541 static inline int pci_iov_vf_id(struct pci_dev *dev) 2542 { 2543 return -ENOSYS; 2544 } 2545 2546 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev, 2547 struct pci_driver *pf_driver) 2548 { 2549 return ERR_PTR(-EINVAL); 2550 } 2551 2552 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) 2553 { return -ENODEV; } 2554 2555 static inline int pci_iov_sysfs_link(struct pci_dev *dev, 2556 struct pci_dev *virtfn, int id) 2557 { 2558 return -ENODEV; 2559 } 2560 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) 2561 { 2562 return -ENOSYS; 2563 } 2564 static inline void pci_iov_remove_virtfn(struct pci_dev *dev, 2565 int id) { } 2566 static inline void pci_disable_sriov(struct pci_dev *dev) { } 2567 static inline int pci_num_vf(struct pci_dev *dev) { return 0; } 2568 static inline int pci_vfs_assigned(struct pci_dev *dev) 2569 { return 0; } 2570 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) 2571 { return 0; } 2572 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) 2573 { return 0; } 2574 #define pci_sriov_configure_simple NULL 2575 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) 2576 { return 0; } 2577 static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size) 2578 { return -ENODEV; } 2579 static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs) 2580 { return 0; } 2581 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } 2582 #endif 2583 2584 /** 2585 * pci_pcie_cap - get the saved PCIe capability offset 2586 * @dev: PCI device 2587 * 2588 * PCIe capability offset is calculated at PCI device initialization 2589 * time and saved in the data structure. This function returns saved 2590 * PCIe capability offset. Using this instead of pci_find_capability() 2591 * reduces unnecessary search in the PCI configuration space. 
 * If you need to recompute the PCIe capability offset from the device itself
 * for some reason, use pci_find_capability() instead.
 */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return dev->pcie_cap;
}

/**
 * pci_is_pcie - check if the PCI device is PCI Express capable
 * @dev: PCI device
 *
 * Returns: true if the PCI device is PCI Express capable, false otherwise.
 */
static inline bool pci_is_pcie(struct pci_dev *dev)
{
	return pci_pcie_cap(dev);
}

/**
 * pcie_caps_reg - get the PCIe Capabilities Register
 * @dev: PCI device
 */
static inline u16 pcie_caps_reg(const struct pci_dev *dev)
{
	return dev->pcie_flags_reg;
}

/**
 * pci_pcie_type - get the PCIe device/port type
 * @dev: PCI device
 */
static inline int pci_pcie_type(const struct pci_dev *dev)
{
	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}

/**
 * pcie_find_root_port - Get the PCIe root port device
 * @dev: PCI device
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI/PCIe Device.
 */
static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
{
	while (dev) {
		if (pci_is_pcie(dev) &&
		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
			return dev;
		dev = pci_upstream_bridge(dev);
	}

	return NULL;
}

static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
	/*
	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
	 * and read w/o common lock. READ_ONCE() ensures the compiler cannot
	 * cache the value (e.g. inside the loop in pci_dev_wait()).
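	 *
	 * A hedged usage sketch: callers typically treat this as a cheap
	 * early-out before touching the device, e.g.
	 *
	 *	if (pci_dev_is_disconnected(pdev))
	 *		return -ENODEV;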
2654 */ 2655 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure; 2656 } 2657 2658 void pci_request_acs(void); 2659 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); 2660 bool pci_acs_path_enabled(struct pci_dev *start, 2661 struct pci_dev *end, u16 acs_flags); 2662 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); 2663 2664 #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 2665 #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) 2666 2667 /* Large Resource Data Type Tag Item Names */ 2668 #define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 2669 #define PCI_VPD_LTIN_RO_DATA 0x10 /* Read-Only Data */ 2670 #define PCI_VPD_LTIN_RW_DATA 0x11 /* Read-Write Data */ 2671 2672 #define PCI_VPD_LRDT_ID_STRING PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING) 2673 #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) 2674 #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) 2675 2676 #define PCI_VPD_RO_KEYWORD_PARTNO "PN" 2677 #define PCI_VPD_RO_KEYWORD_SERIALNO "SN" 2678 #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" 2679 #define PCI_VPD_RO_KEYWORD_VENDOR0 "V0" 2680 #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" 2681 2682 /** 2683 * pci_vpd_alloc - Allocate buffer and read VPD into it 2684 * @dev: PCI device 2685 * @size: pointer to field where VPD length is returned 2686 * 2687 * Returns pointer to allocated buffer or an ERR_PTR in case of failure 2688 */ 2689 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); 2690 2691 /** 2692 * pci_vpd_find_id_string - Locate id string in VPD 2693 * @buf: Pointer to buffered VPD data 2694 * @len: The length of the buffer area in which to search 2695 * @size: Pointer to field where length of id string is returned 2696 * 2697 * Returns the index of the id string or -ENOENT if not found. 2698 */ 2699 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); 2700 2701 /** 2702 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section 2703 * @buf: Pointer to buffered VPD data 2704 * @len: The length of the buffer area in which to search 2705 * @kw: The keyword to search for 2706 * @size: Pointer to field where length of found keyword data is returned 2707 * 2708 * Returns the index of the information field keyword data or -ENOENT if 2709 * not found. 2710 */ 2711 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, 2712 const char *kw, unsigned int *size); 2713 2714 /** 2715 * pci_vpd_check_csum - Check VPD checksum 2716 * @buf: Pointer to buffered VPD data 2717 * @len: VPD size 2718 * 2719 * Returns 1 if VPD has no checksum, otherwise 0 or an errno 2720 */ 2721 int pci_vpd_check_csum(const void *buf, unsigned int len); 2722 2723 /* PCI <-> OF binding helpers */ 2724 #ifdef CONFIG_OF 2725 struct device_node; 2726 struct irq_domain; 2727 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 2728 bool pci_host_of_has_msi_map(struct device *dev); 2729 2730 /* Arch may override this (weak) */ 2731 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); 2732 2733 #else /* CONFIG_OF */ 2734 static inline struct irq_domain * 2735 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 2736 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; } 2737 #endif /* CONFIG_OF */ 2738 2739 static inline struct device_node * 2740 pci_device_to_OF_node(const struct pci_dev *pdev) 2741 { 2742 return pdev ? 
pdev->dev.of_node : NULL; 2743 } 2744 2745 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) 2746 { 2747 return bus ? bus->dev.of_node : NULL; 2748 } 2749 2750 #ifdef CONFIG_ACPI 2751 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); 2752 2753 void 2754 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); 2755 bool pci_pr3_present(struct pci_dev *pdev); 2756 #else 2757 static inline struct irq_domain * 2758 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } 2759 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } 2760 #endif 2761 2762 #if defined(CONFIG_X86) && defined(CONFIG_ACPI) 2763 bool arch_pci_dev_is_removable(struct pci_dev *pdev); 2764 #else 2765 static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; } 2766 #endif 2767 2768 #ifdef CONFIG_EEH 2769 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) 2770 { 2771 return pdev->dev.archdata.edev; 2772 } 2773 #endif 2774 2775 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns); 2776 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2); 2777 int pci_for_each_dma_alias(struct pci_dev *pdev, 2778 int (*fn)(struct pci_dev *pdev, 2779 u16 alias, void *data), void *data); 2780 2781 /* Helper functions for operation of device flag */ 2782 static inline void pci_set_dev_assigned(struct pci_dev *pdev) 2783 { 2784 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; 2785 } 2786 static inline void pci_clear_dev_assigned(struct pci_dev *pdev) 2787 { 2788 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; 2789 } 2790 static inline bool pci_is_dev_assigned(struct pci_dev *pdev) 2791 { 2792 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED; 2793 } 2794 2795 /** 2796 * pci_ari_enabled - query ARI forwarding status 2797 * @bus: the PCI bus 2798 * 2799 * Returns true if ARI forwarding is enabled. 2800 */ 2801 static inline bool pci_ari_enabled(struct pci_bus *bus) 2802 { 2803 return bus->self && bus->self->ari_enabled; 2804 } 2805 2806 /** 2807 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain 2808 * @pdev: PCI device to check 2809 * 2810 * Walk upwards from @pdev and check for each encountered bridge if it's part 2811 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not 2812 * Thunderbolt-attached. (But rather soldered to the mainboard usually.) 2813 */ 2814 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) 2815 { 2816 struct pci_dev *parent = pdev; 2817 2818 if (pdev->is_thunderbolt) 2819 return true; 2820 2821 while ((parent = pci_upstream_bridge(parent))) 2822 if (parent->is_thunderbolt) 2823 return true; 2824 2825 return false; 2826 } 2827 2828 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390) 2829 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type); 2830 #endif 2831 2832 #include <linux/dma-mapping.h> 2833 2834 #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) 2835 #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) 2836 #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) 2837 #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) 2838 #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) 2839 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg) 2840 #define pci_notice(pdev, fmt, arg...) 
dev_notice(&(pdev)->dev, fmt, ##arg) 2841 #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) 2842 #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) 2843 2844 #define pci_notice_ratelimited(pdev, fmt, arg...) \ 2845 dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg) 2846 2847 #define pci_info_ratelimited(pdev, fmt, arg...) \ 2848 dev_info_ratelimited(&(pdev)->dev, fmt, ##arg) 2849 2850 #define pci_WARN(pdev, condition, fmt, arg...) \ 2851 WARN(condition, "%s %s: " fmt, \ 2852 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2853 2854 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \ 2855 WARN_ONCE(condition, "%s %s: " fmt, \ 2856 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg) 2857 2858 #endif /* LINUX_PCI_H */ 2859