1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * pci.h
4 *
5 * PCI defines and function prototypes
6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin (yanmin.zhang@intel.com)
12 * Shaohua Li (shaohua.li@intel.com)
13 *
14 * For more information, please consult the following manuals (look at
15 * http://www.pcisig.com/ for how to get them):
16 *
17 * PCI BIOS Specification
18 * PCI Local Bus Specification
19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
21 * PCI System Design Guide
22 */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43
44 #include <linux/pci_ids.h>
45
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
47 PCI_STATUS_SIG_SYSTEM_ERROR | \
48 PCI_STATUS_REC_MASTER_ABORT | \
49 PCI_STATUS_REC_TARGET_ABORT | \
50 PCI_STATUS_SIG_TARGET_ABORT | \
51 PCI_STATUS_PARITY)
52
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55
56 #define PCI_RESET_PROBE true
57 #define PCI_RESET_DO_RESET false
58
59 /*
60 * The PCI interface treats multi-function devices as independent
61 * devices. The slot/function address of each device is encoded
62 * in a single byte as follows:
63 *
64 * 7:3 = slot
65 * 2:0 = function
66 *
67 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68 * In the interest of not exposing interfaces to user-space unnecessarily,
69 * the following kernel-only defines are being added here.
70 */
71 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* Return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
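/*
 * Example (illustrative): compose a devfn for slot 3, function 0 on bus
 * 0x02 (made-up numbers) and take it apart again:
 *
 *	unsigned int devfn = PCI_DEVFN(3, 0);
 *	u16 devid = PCI_DEVID(0x02, devfn);
 *
 *	pr_info("bus %#x slot %u func %u\n",
 *		PCI_BUS_NUM(devid), PCI_SLOT(devfn), PCI_FUNC(devfn));
 */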
74
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 struct pci_bus *bus; /* Bus this slot is on */
78 struct list_head list; /* Node in list of slots */
79 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
80 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
81 struct kobject kobj;
82 };
83
static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 return kobject_name(&slot->kobj);
87 }
88
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 pci_mmap_io,
92 pci_mmap_mem
93 };
94
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 /* #0-5: standard PCI resources */
98 PCI_STD_RESOURCES,
99 PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100
101 /* #6: expansion ROM resource */
102 PCI_ROM_RESOURCE,
103
104 /* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 PCI_IOV_RESOURCES,
107 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
114
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
120
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_P2P_BRIDGE_RESOURCE_NUM 3
123 #define PCI_BRIDGE_RESOURCE_NUM 4
124
125 /* Resources assigned to buses behind the bridge */
126 PCI_BRIDGE_RESOURCES,
127 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
128 PCI_BRIDGE_RESOURCE_NUM - 1,
129
130 /* Total resources associated with a PCI device */
131 PCI_NUM_RESOURCES,
132
133 /* Preserve this for compatibility */
134 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
135 };
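/*
 * Example (illustrative): walking the standard BARs of a bound device
 * (struct pci_dev *pdev) using the indices above; pci_resource_len() and
 * friends are defined further down in this header.
 *
 *	int bar;
 *
 *	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
 *		if (!pci_resource_len(pdev, bar))
 *			continue;
 *		pci_info(pdev, "BAR %d: %pR\n", bar, &pdev->resource[bar]);
 *	}
 */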
136
137 /**
138 * enum pci_interrupt_pin - PCI INTx interrupt values
139 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
140 * @PCI_INTERRUPT_INTA: PCI INTA pin
141 * @PCI_INTERRUPT_INTB: PCI INTB pin
142 * @PCI_INTERRUPT_INTC: PCI INTC pin
143 * @PCI_INTERRUPT_INTD: PCI INTD pin
144 *
145 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
146 * PCI_INTERRUPT_PIN register.
147 */
148 enum pci_interrupt_pin {
149 PCI_INTERRUPT_UNKNOWN,
150 PCI_INTERRUPT_INTA,
151 PCI_INTERRUPT_INTB,
152 PCI_INTERRUPT_INTC,
153 PCI_INTERRUPT_INTD,
154 };
155
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX 4
158
159 /*
160 * Reading from a device that doesn't respond typically returns ~0. A
161 * successful read from a device may also return ~0, so you need additional
162 * information to reliably identify errors.
163 */
164 #define PCI_ERROR_RESPONSE (~0ULL)
165 #define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
166 #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
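/*
 * Example (illustrative): flagging a possible error after a config read.
 * Since ~0 can also be valid data, callers usually combine this with other
 * checks (e.g. reading a register that can never legitimately be ~0).
 *
 *	u32 val;
 *
 *	pci_read_config_dword(pdev, PCI_VENDOR_ID, &val);
 *	if (PCI_POSSIBLE_ERROR(val))
 *		pci_warn(pdev, "config read may have failed\n");
 */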
167
168 /*
169 * pci_power_t values must match the bits in the Capabilities PME_Support
170 * and Control/Status PowerState fields in the Power Management capability.
171 */
172 typedef int __bitwise pci_power_t;
173
174 #define PCI_D0 ((pci_power_t __force) 0)
175 #define PCI_D1 ((pci_power_t __force) 1)
176 #define PCI_D2 ((pci_power_t __force) 2)
177 #define PCI_D3hot ((pci_power_t __force) 3)
178 #define PCI_D3cold ((pci_power_t __force) 4)
179 #define PCI_UNKNOWN ((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
181
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184
static inline const char *pci_power_name(pci_power_t state)
186 {
187 return pci_power_names[1 + (__force int) state];
188 }
189
190 /**
191 * typedef pci_channel_state_t
192 *
193 * The pci_channel state describes connectivity between the CPU and
194 * the PCI device. If some PCI bus between here and the PCI device
195 * has crashed or locked up, this info is reflected here.
196 */
197 typedef unsigned int __bitwise pci_channel_state_t;
198
199 enum {
200 /* I/O channel is in normal state */
201 pci_channel_io_normal = (__force pci_channel_state_t) 1,
202
203 /* I/O to channel is blocked */
204 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
205
206 /* PCI card is dead */
207 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
208 };
209
210 typedef unsigned int __bitwise pcie_reset_state_t;
211
212 enum pcie_reset_state {
213 /* Reset is NOT asserted (Use to deassert reset) */
214 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
215
216 /* Use #PERST to reset PCIe device */
217 pcie_warm_reset = (__force pcie_reset_state_t) 2,
218
219 /* Use PCIe Hot Reset to reset device */
220 pcie_hot_reset = (__force pcie_reset_state_t) 3
221 };
222
223 typedef unsigned short __bitwise pci_dev_flags_t;
224 enum pci_dev_flags {
225 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
226 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
227 /* Device configuration is irrevocably lost if disabled into D3 */
228 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
229 /* Provide indication device is assigned by a Virtual Machine Manager */
230 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
231 /* Flag for quirk use to store if quirk-specific ACS is enabled */
232 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
233 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
234 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
235 /* Do not use bus resets for device */
236 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
237 /* Do not use PM reset even if device advertises NoSoftRst- */
238 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
239 /* Get VPD from function 0 VPD */
240 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
241 /* A non-root bridge where translation occurs, stop alias search here */
242 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
243 /* Do not use FLR even if device advertises PCI_AF_CAP */
244 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
245 /* Don't use Relaxed Ordering for TLPs directed at this device */
246 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
247 /* Device does honor MSI masking despite saying otherwise */
248 PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
249 /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
250 PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
251 };
252
253 enum pci_irq_reroute_variant {
254 INTEL_IRQ_REROUTE_VARIANT = 1,
255 MAX_IRQ_REROUTE_VARIANTS = 3
256 };
257
258 typedef unsigned short __bitwise pci_bus_flags_t;
259 enum pci_bus_flags {
260 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
261 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
262 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
263 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
264 };
265
266 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
267 enum pcie_link_width {
268 PCIE_LNK_WIDTH_RESRV = 0x00,
269 PCIE_LNK_X1 = 0x01,
270 PCIE_LNK_X2 = 0x02,
271 PCIE_LNK_X4 = 0x04,
272 PCIE_LNK_X8 = 0x08,
273 PCIE_LNK_X12 = 0x0c,
274 PCIE_LNK_X16 = 0x10,
275 PCIE_LNK_X32 = 0x20,
276 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
277 };
278
279 /* See matching string table in pci_speed_string() */
280 enum pci_bus_speed {
281 PCI_SPEED_33MHz = 0x00,
282 PCI_SPEED_66MHz = 0x01,
283 PCI_SPEED_66MHz_PCIX = 0x02,
284 PCI_SPEED_100MHz_PCIX = 0x03,
285 PCI_SPEED_133MHz_PCIX = 0x04,
286 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
287 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
288 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
289 PCI_SPEED_66MHz_PCIX_266 = 0x09,
290 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
291 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
292 AGP_UNKNOWN = 0x0c,
293 AGP_1X = 0x0d,
294 AGP_2X = 0x0e,
295 AGP_4X = 0x0f,
296 AGP_8X = 0x10,
297 PCI_SPEED_66MHz_PCIX_533 = 0x11,
298 PCI_SPEED_100MHz_PCIX_533 = 0x12,
299 PCI_SPEED_133MHz_PCIX_533 = 0x13,
300 PCIE_SPEED_2_5GT = 0x14,
301 PCIE_SPEED_5_0GT = 0x15,
302 PCIE_SPEED_8_0GT = 0x16,
303 PCIE_SPEED_16_0GT = 0x17,
304 PCIE_SPEED_32_0GT = 0x18,
305 PCIE_SPEED_64_0GT = 0x19,
306 PCI_SPEED_UNKNOWN = 0xff,
307 };
308
309 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
310 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
311
312 struct pci_vpd {
313 struct mutex lock;
314 unsigned int len;
315 u8 cap;
316 };
317
318 struct irq_affinity;
319 struct pcie_bwctrl_data;
320 struct pcie_link_state;
321 struct pci_sriov;
322 struct pci_p2pdma;
323 struct rcec_ea;
324
/**
 * struct pci_dev - describes a PCI device
326 *
327 * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at
328 * LSB). 0 when the supported speeds cannot be
329 * determined (e.g., for Root Complex Integrated
330 * Endpoints without the relevant Capability
331 * Registers).
332 * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
333 * Conventional PCI Hot-Plug, ACPI slot).
334 * Such bridges are allocated additional MMIO and bus
335 * number resources to allow for hierarchy expansion.
336 * @is_pciehp: PCIe Hot-Plug Capable bridge.
337 */
338 struct pci_dev {
339 struct list_head bus_list; /* Node in per-bus list */
340 struct pci_bus *bus; /* Bus this device is on */
341 struct pci_bus *subordinate; /* Bus this device bridges to */
342
343 void *sysdata; /* Hook for sys-specific extension */
344 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
345 struct pci_slot *slot; /* Physical slot this device is in */
346
347 unsigned int devfn; /* Encoded device & function index */
348 unsigned short vendor;
349 unsigned short device;
350 unsigned short subsystem_vendor;
351 unsigned short subsystem_device;
352 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
353 u8 revision; /* PCI revision, low byte of class word */
354 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
355 #ifdef CONFIG_PCIEAER
356 u16 aer_cap; /* AER capability offset */
357 struct aer_info *aer_info; /* AER info for this device */
358 #endif
359 #ifdef CONFIG_PCIEPORTBUS
360 struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
361 struct pci_dev *rcec; /* Associated RCEC device */
362 #endif
363 u32 devcap; /* PCIe Device Capabilities */
364 u16 rebar_cap; /* Resizable BAR capability offset */
365 u8 pcie_cap; /* PCIe capability offset */
366 u8 msi_cap; /* MSI capability offset */
367 u8 msix_cap; /* MSI-X capability offset */
368 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
369 u8 rom_base_reg; /* Config register controlling ROM */
370 u8 pin; /* Interrupt pin this device uses */
371 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
372 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
373
374 struct pci_driver *driver; /* Driver bound to this device */
375 u64 dma_mask; /* Mask of the bits of bus address this
376 device implements. Normally this is
377 0xffffffff. You only need to change
378 this if your device has broken DMA
379 or supports 64-bit transfers. */
380
381 struct device_dma_parameters dma_parms;
382
383 pci_power_t current_state; /* Current operating state. In ACPI,
384 this is D0-D3, D0 being fully
385 functional, and D3 being off. */
386 u8 pm_cap; /* PM capability offset */
387 unsigned int pme_support:5; /* Bitmask of states from which PME#
388 can be generated */
389 unsigned int pme_poll:1; /* Poll device's PME status bit */
390 unsigned int pinned:1; /* Whether this dev is pinned */
391 unsigned int config_rrs_sv:1; /* Config RRS software visibility */
392 unsigned int imm_ready:1; /* Supports Immediate Readiness */
393 unsigned int d1_support:1; /* Low power state D1 is supported */
394 unsigned int d2_support:1; /* Low power state D2 is supported */
395 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
396 unsigned int no_d3cold:1; /* D3cold is forbidden */
397 unsigned int bridge_d3:1; /* Allow D3 for bridge */
398 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
399 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
400 decoding during BAR sizing */
401 unsigned int wakeup_prepared:1;
402 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
403 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
404 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
405 controlled exclusively by
406 user sysfs */
407 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
408 bit manually */
409 unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
410 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
411
412 u16 l1ss; /* L1SS Capability pointer */
413 #ifdef CONFIG_PCIEASPM
414 struct pcie_link_state *link_state; /* ASPM link state */
415 unsigned int ltr_path:1; /* Latency Tolerance Reporting
416 supported from root to here */
417 #endif
418 unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
419 unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
420
421 pci_channel_state_t error_state; /* Current connectivity state */
422 struct device dev; /* Generic device interface */
423
424 int cfg_size; /* Size of config space */
425
426 /*
427 * Instead of touching interrupt line and base address registers
428 * directly, use the values stored here. They might be different!
429 */
430 unsigned int irq;
431 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
432 struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
433
434 unsigned int transparent:1; /* Subtractive decode bridge */
435 unsigned int io_window:1; /* Bridge has I/O window */
436 unsigned int pref_window:1; /* Bridge has pref mem window */
437 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
438 unsigned int multifunction:1; /* Multi-function device */
439
440 unsigned int is_busmaster:1; /* Is busmaster */
441 unsigned int no_msi:1; /* May not use MSI */
442 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
443 unsigned int block_cfg_access:1; /* Config space access blocked */
444 unsigned int broken_parity_status:1; /* Generates false positive parity */
445 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
446 unsigned int msi_enabled:1;
447 unsigned int msix_enabled:1;
448 unsigned int ari_enabled:1; /* ARI forwarding */
449 unsigned int ats_enabled:1; /* Address Translation Svc */
450 unsigned int pasid_enabled:1; /* Process Address Space ID */
451 unsigned int pri_enabled:1; /* Page Request Interface */
452 unsigned int tph_enabled:1; /* TLP Processing Hints */
453 unsigned int is_managed:1; /* Managed via devres */
454 unsigned int is_msi_managed:1; /* MSI release via devres installed */
455 unsigned int needs_freset:1; /* Requires fundamental reset */
456 unsigned int state_saved:1;
457 unsigned int is_physfn:1;
458 unsigned int is_virtfn:1;
459 unsigned int is_hotplug_bridge:1;
460 unsigned int is_pciehp:1;
461 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
462 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
463 /*
 * Devices marked as untrusted are those that can potentially execute
 * DMA attacks and similar. They are typically connected through
 * external ports such as Thunderbolt, but not limited to that. When
 * an IOMMU is enabled they should get full mappings to make sure they
 * cannot access arbitrary memory.
469 */
470 unsigned int untrusted:1;
471 /*
472 * Info from the platform, e.g., ACPI or device tree, may mark a
473 * device as "external-facing". An external-facing device is
474 * itself internal but devices downstream from it are external.
475 */
476 unsigned int external_facing:1;
477 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
478 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
479 unsigned int irq_managed:1;
480 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
481 unsigned int is_probed:1; /* Device probing in progress */
482 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
483 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
484 unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
485 unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
486 unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
487 unsigned int non_mappable_bars:1; /* BARs can't be mapped to user-space */
488 pci_dev_flags_t dev_flags;
489 atomic_t enable_cnt; /* pci_enable_device has been called */
490
491 spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
492 u32 saved_config_space[16]; /* Config space saved at suspend time */
493 struct hlist_head saved_cap_space;
494 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
495 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
496
497 #ifdef CONFIG_HOTPLUG_PCI_PCIE
498 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
499 #endif
500 #ifdef CONFIG_PCIE_PTM
501 u16 ptm_cap; /* PTM Capability */
502 unsigned int ptm_root:1;
503 unsigned int ptm_enabled:1;
504 u8 ptm_granularity;
505 #endif
506 #ifdef CONFIG_PCI_MSI
507 void __iomem *msix_base;
508 raw_spinlock_t msi_lock;
509 #endif
510 struct pci_vpd vpd;
511 #ifdef CONFIG_PCIE_DPC
512 u16 dpc_cap;
513 unsigned int dpc_rp_extensions:1;
514 u8 dpc_rp_log_size;
515 #endif
516 struct pcie_bwctrl_data *link_bwctrl;
517 #ifdef CONFIG_PCI_ATS
518 union {
519 struct pci_sriov *sriov; /* PF: SR-IOV info */
520 struct pci_dev *physfn; /* VF: related PF */
521 };
522 u16 ats_cap; /* ATS Capability offset */
523 u8 ats_stu; /* ATS Smallest Translation Unit */
524 #endif
525 #ifdef CONFIG_PCI_PRI
526 u16 pri_cap; /* PRI Capability offset */
527 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
528 unsigned int pasid_required:1; /* PRG Response PASID Required */
529 #endif
530 #ifdef CONFIG_PCI_PASID
531 u16 pasid_cap; /* PASID Capability offset */
532 u16 pasid_features;
533 #endif
534 #ifdef CONFIG_PCI_P2PDMA
535 struct pci_p2pdma __rcu *p2pdma;
536 #endif
537 #ifdef CONFIG_PCI_DOE
538 struct xarray doe_mbs; /* Data Object Exchange mailboxes */
539 #endif
540 #ifdef CONFIG_PCI_NPEM
541 struct npem *npem; /* Native PCIe Enclosure Management */
542 #endif
543 u16 acs_cap; /* ACS Capability offset */
544 u8 supported_speeds; /* Supported Link Speeds Vector */
545 phys_addr_t rom; /* Physical address if not from BAR */
546 size_t romlen; /* Length if not from BAR */
547 /*
548 * Driver name to force a match. Do not set directly, because core
549 * frees it. Use driver_set_override() to set or clear it.
550 */
551 const char *driver_override;
552
553 unsigned long priv_flags; /* Private flags for the PCI driver */
554
555 /* These methods index pci_reset_fn_methods[] */
556 u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
557
558 #ifdef CONFIG_PCIE_TPH
559 u16 tph_cap; /* TPH capability offset */
560 u8 tph_mode; /* TPH mode */
561 u8 tph_req_type; /* TPH requester type */
562 #endif
563 };
564
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
566 {
567 #ifdef CONFIG_PCI_IOV
568 if (dev->is_virtfn)
569 dev = dev->physfn;
570 #endif
571 return dev;
572 }
573
574 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
575
576 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
577 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
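/*
 * Example (illustrative): for_each_pci_dev() visits every PCI device in the
 * system.  pci_get_device() manages the reference counts, so the iterator
 * must start out NULL and must not be used after the loop.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev)
 *		pci_info(pdev, "vendor %04x device %04x\n",
 *			 pdev->vendor, pdev->device);
 */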
578
static inline int pci_channel_offline(struct pci_dev *pdev)
580 {
581 return (pdev->error_state != pci_channel_io_normal);
582 }
583
584 /*
 * In the ACPI spec, the PCI Segment Group number for each PCI host
 * bridge is limited to a 16-bit value, so (int)-1 is not a valid PCI
 * domain number and can be used as a sentinel value indicating that
 * ->domain_nr is not set by the driver (CONFIG_PCI_DOMAINS_GENERIC=y
 * archs will set it with pci_bus_find_domain_nr()).
591 */
592 #define PCI_DOMAIN_NR_NOT_SET (-1)
593
594 struct pci_host_bridge {
595 struct device dev;
596 struct pci_bus *bus; /* Root bus */
597 struct pci_ops *ops;
598 struct pci_ops *child_ops;
599 void *sysdata;
600 int busnr;
601 int domain_nr;
602 struct list_head windows; /* resource_entry */
603 struct list_head dma_ranges; /* dma ranges resource list */
604 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
605 int (*map_irq)(const struct pci_dev *, u8, u8);
606 void (*release_fn)(struct pci_host_bridge *);
607 int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
608 void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
609 void *release_data;
610 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
611 unsigned int no_ext_tags:1; /* No Extended Tags */
612 unsigned int no_inc_mrrs:1; /* No Increase MRRS */
613 unsigned int native_aer:1; /* OS may use PCIe AER */
614 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
615 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
616 unsigned int native_pme:1; /* OS may use PCIe PME */
617 unsigned int native_ltr:1; /* OS may use PCIe LTR */
618 unsigned int native_dpc:1; /* OS may use PCIe DPC */
619 unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
620 unsigned int preserve_config:1; /* Preserve FW resource setup */
621 unsigned int size_windows:1; /* Enable root bus sizing */
622 unsigned int msi_domain:1; /* Bridge wants MSI domain */
623
624 /* Resource alignment requirements */
625 resource_size_t (*align_resource)(struct pci_dev *dev,
626 const struct resource *res,
627 resource_size_t start,
628 resource_size_t size,
629 resource_size_t align);
630 unsigned long private[] ____cacheline_aligned;
631 };
632
633 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
634
static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
636 {
637 return (void *)bridge->private;
638 }
639
static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
641 {
642 return container_of(priv, struct pci_host_bridge, private);
643 }
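/*
 * Example (illustrative): a host bridge driver can request private storage
 * when allocating the bridge and retrieve it later; struct my_host is a
 * made-up driver structure.
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_host *priv;
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv));
 *	if (!bridge)
 *		return -ENOMEM;
 *	priv = pci_host_bridge_priv(bridge);
 */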
644
645 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
646 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
647 size_t priv);
648 void pci_free_host_bridge(struct pci_host_bridge *bridge);
649 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
650
651 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
652 void (*release_fn)(struct pci_host_bridge *),
653 void *release_data);
654
655 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
656
657 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
658
659 struct pci_bus {
660 struct list_head node; /* Node in list of buses */
661 struct pci_bus *parent; /* Parent bus this bridge is on */
662 struct list_head children; /* List of child buses */
663 struct list_head devices; /* List of devices on this bus */
664 struct pci_dev *self; /* Bridge device as seen by parent */
665 struct list_head slots; /* List of slots on this bus;
666 protected by pci_slot_mutex */
667 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
668 struct list_head resources; /* Address space routed to this bus */
669 struct resource busn_res; /* Bus numbers routed to this bus */
670
671 struct pci_ops *ops; /* Configuration access functions */
672 void *sysdata; /* Hook for sys-specific extension */
673 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
674
675 unsigned char number; /* Bus number */
676 unsigned char primary; /* Number of primary bridge */
677 unsigned char max_bus_speed; /* enum pci_bus_speed */
678 unsigned char cur_bus_speed; /* enum pci_bus_speed */
679 #ifdef CONFIG_PCI_DOMAINS_GENERIC
680 int domain_nr;
681 #endif
682
683 char name[48];
684
685 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
686 pci_bus_flags_t bus_flags; /* Inherited by child buses */
687 struct device *bridge;
688 struct device dev;
689 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
690 struct bin_attribute *legacy_mem; /* Legacy mem */
691 unsigned int is_added:1;
692 unsigned int unsafe_warn:1; /* warned about RW1C config write */
693 unsigned int flit_mode:1; /* Link in Flit mode */
694 };
695
696 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
697
static inline u16 pci_dev_id(struct pci_dev *dev)
699 {
700 return PCI_DEVID(dev->bus->number, dev->devfn);
701 }
702
703 /*
704 * Returns true if the PCI bus is root (behind host-PCI bridge),
705 * false otherwise
706 *
707 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
708 * This is incorrect because "virtual" buses added for SR-IOV (via
709 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
710 */
static inline bool pci_is_root_bus(struct pci_bus *pbus)
712 {
713 return !(pbus->parent);
714 }
715
716 /**
717 * pci_is_bridge - check if the PCI device is a bridge
718 * @dev: PCI device
719 *
 * Return true if the PCI device is a bridge, whether or not it has a
 * subordinate bus.
722 */
static inline bool pci_is_bridge(struct pci_dev *dev)
724 {
725 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
726 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
727 }
728
729 /**
730 * pci_is_vga - check if the PCI device is a VGA device
731 * @pdev: PCI device
732 *
733 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
734 * VGA Base Class and Sub-Classes:
735 *
736 * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
737 * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
738 *
739 * Return true if the PCI device is a VGA device and uses the legacy VGA
740 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
741 * aliases).
742 */
static inline bool pci_is_vga(struct pci_dev *pdev)
744 {
745 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
746 return true;
747
748 if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
749 return true;
750
751 return false;
752 }
753
754 /**
755 * pci_is_display - check if the PCI device is a display controller
756 * @pdev: PCI device
757 *
758 * Determine whether the given PCI device corresponds to a display
759 * controller. Display controllers are typically used for graphical output
760 * and are identified based on their class code.
761 *
762 * Return: true if the PCI device is a display controller, false otherwise.
763 */
static inline bool pci_is_display(struct pci_dev *pdev)
765 {
766 return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
767 }
768
769 #define for_each_pci_bridge(dev, bus) \
770 list_for_each_entry(dev, &bus->devices, bus_list) \
771 if (!pci_is_bridge(dev)) {} else
772
static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
774 {
775 dev = pci_physfn(dev);
776 if (pci_is_root_bus(dev->bus))
777 return NULL;
778
779 return dev->bus->self;
780 }
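/*
 * Example (illustrative): walking from a bound device (struct pci_dev *pdev)
 * up towards the Root Port / host bridge; the loop ends when
 * pci_upstream_bridge() returns NULL.
 *
 *	struct pci_dev *bridge;
 *
 *	for (bridge = pci_upstream_bridge(pdev); bridge;
 *	     bridge = pci_upstream_bridge(bridge))
 *		pci_info(bridge, "upstream of %s\n", pci_name(pdev));
 */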
781
782 #ifdef CONFIG_PCI_MSI
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
784 {
785 return pci_dev->msi_enabled || pci_dev->msix_enabled;
786 }
787 #else
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
789 #endif
790
791 /* Error values that may be returned by PCI functions */
792 #define PCIBIOS_SUCCESSFUL 0x00
793 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
794 #define PCIBIOS_BAD_VENDOR_ID 0x83
795 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
796 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
797 #define PCIBIOS_SET_FAILED 0x88
798 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
799
800 /* Translate above to generic errno for passing back through non-PCI code */
static inline int pcibios_err_to_errno(int err)
802 {
803 if (err <= PCIBIOS_SUCCESSFUL)
804 return err; /* Assume already errno */
805
806 switch (err) {
807 case PCIBIOS_FUNC_NOT_SUPPORTED:
808 return -ENOENT;
809 case PCIBIOS_BAD_VENDOR_ID:
810 return -ENOTTY;
811 case PCIBIOS_DEVICE_NOT_FOUND:
812 return -ENODEV;
813 case PCIBIOS_BAD_REGISTER_NUMBER:
814 return -EFAULT;
815 case PCIBIOS_SET_FAILED:
816 return -EIO;
817 case PCIBIOS_BUFFER_TOO_SMALL:
818 return -ENOSPC;
819 }
820
821 return -ERANGE;
822 }
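/*
 * Example (illustrative): converting a PCIBIOS_* return value into a normal
 * errno before handing it to code outside the PCI core.
 *
 *	u16 vendor;
 *	int ret;
 *
 *	ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */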
823
824 /* Low-level architecture-dependent routines */
825
826 struct pci_ops {
827 int (*add_bus)(struct pci_bus *bus);
828 void (*remove_bus)(struct pci_bus *bus);
829 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
830 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
831 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
832 };
833
834 /*
835 * ACPI needs to be able to access PCI config space before we've done a
836 * PCI bus scan and created pci_bus structures.
837 */
838 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
839 int reg, int len, u32 *val);
840 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
841 int reg, int len, u32 val);
842
843 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
844 typedef u64 pci_bus_addr_t;
845 #else
846 typedef u32 pci_bus_addr_t;
847 #endif
848
849 struct pci_bus_region {
850 pci_bus_addr_t start;
851 pci_bus_addr_t end;
852 };
853
854 struct pci_dynids {
855 spinlock_t lock; /* Protects list, index */
856 struct list_head list; /* For IDs added at runtime */
857 };
858
859
860 /*
861 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
862 * a set of callbacks in struct pci_error_handlers, that device driver
863 * will be notified of PCI bus errors, and will be driven to recovery
864 * when an error occurs.
865 */
866
867 typedef unsigned int __bitwise pci_ers_result_t;
868
869 enum pci_ers_result {
870 /* No result/none/not supported in device driver */
871 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
872
873 /* Device driver can recover without slot reset */
874 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
875
876 /* Device driver wants slot to be reset */
877 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
878
879 /* Device has completely failed, is unrecoverable */
880 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
881
882 /* Device driver is fully recovered and operational */
883 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
884
885 /* No AER capabilities registered for the driver */
886 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
887 };
888
889 /* PCI bus error event callbacks */
890 struct pci_error_handlers {
891 /* PCI bus error detected on this device */
892 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
893 pci_channel_state_t error);
894
895 /* MMIO has been re-enabled, but not DMA */
896 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
897
898 /* PCI slot has been reset */
899 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
900
901 /* PCI function reset prepare or completed */
902 void (*reset_prepare)(struct pci_dev *dev);
903 void (*reset_done)(struct pci_dev *dev);
904
905 /* Device driver may resume normal operations */
906 void (*resume)(struct pci_dev *dev);
907
908 /* Allow device driver to record more details of a correctable error */
909 void (*cor_error_detected)(struct pci_dev *dev);
910 };
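/*
 * Example (illustrative): a driver opts into error recovery by filling in
 * only the callbacks it implements; my_error_detected(), my_slot_reset()
 * and my_resume() are hypothetical driver functions.
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 */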
911
912
913 struct module;
914
915 /**
916 * struct pci_driver - PCI driver structure
917 * @name: Driver name.
918 * @id_table: Pointer to table of device IDs the driver is
919 * interested in. Most drivers should export this
920 * table using MODULE_DEVICE_TABLE(pci,...).
921 * @probe: This probing function gets called (during execution
922 * of pci_register_driver() for already existing
923 * devices or later if a new device gets inserted) for
924 * all PCI devices which match the ID table and are not
925 * "owned" by the other drivers yet. This function gets
926 * passed a "struct pci_dev \*" for each device whose
927 * entry in the ID table matches the device. The probe
928 * function returns zero when the driver chooses to
929 * take "ownership" of the device or an error code
930 * (negative number) otherwise.
931 * The probe function always gets called from process
932 * context, so it can sleep.
933 * @remove: The remove() function gets called whenever a device
934 * being handled by this driver is removed (either during
935 * deregistration of the driver or when it's manually
936 * pulled out of a hot-pluggable slot).
937 * The remove function always gets called from process
938 * context, so it can sleep.
939 * @suspend: Put device into low power state.
940 * @resume: Wake device from low power state.
941 * (Please see Documentation/power/pci.rst for descriptions
942 * of PCI Power Management and the related functions.)
943 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
944 * Intended to stop any idling DMA operations.
945 * Useful for enabling wake-on-lan (NIC) or changing
946 * the power state of a device before reboot.
947 * e.g. drivers/net/e100.c.
948 * @sriov_configure: Optional driver callback to allow configuration of
949 * number of VFs to enable via sysfs "sriov_numvfs" file.
950 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
951 * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
952 * This will change MSI-X Table Size in the VF Message Control
953 * registers.
954 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
955 * MSI-X vectors available for distribution to the VFs.
956 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
957 * @groups: Sysfs attribute groups.
958 * @dev_groups: Attributes attached to the device that will be
959 * created once it is bound to the driver.
960 * @driver: Driver model structure.
961 * @dynids: List of dynamically added device IDs.
 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
 *		For most device drivers there is no need to care about this
 *		flag as long as all DMA is handled through the kernel DMA API.
 *		Some special drivers, such as VFIO drivers, know how to manage
 *		the DMA themselves and set this flag so that the IOMMU layer
 *		will allow them to set up and manage their own I/O address
 *		space.
969 */
970 struct pci_driver {
971 const char *name;
972 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
973 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
974 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
975 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
976 int (*resume)(struct pci_dev *dev); /* Device woken up */
977 void (*shutdown)(struct pci_dev *dev);
978 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
979 int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
980 u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
981 const struct pci_error_handlers *err_handler;
982 const struct attribute_group **groups;
983 const struct attribute_group **dev_groups;
984 struct device_driver driver;
985 struct pci_dynids dynids;
986 bool driver_managed_dma;
987 };
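/*
 * Example (illustrative): a minimal driver registration; my_probe(),
 * my_remove() and my_id_table[] are hypothetical.  module_pci_driver() is
 * defined further down in this header.
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_driver);
 */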
988
989 #define to_pci_driver(__drv) \
990 ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
991
992 /**
993 * PCI_DEVICE - macro used to describe a specific PCI device
994 * @vend: the 16 bit PCI Vendor ID
995 * @dev: the 16 bit PCI Device ID
996 *
997 * This macro is used to create a struct pci_device_id that matches a
998 * specific device. The subvendor and subdevice fields will be set to
999 * PCI_ANY_ID.
1000 */
1001 #define PCI_DEVICE(vend,dev) \
1002 .vendor = (vend), .device = (dev), \
1003 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
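/*
 * Example (illustrative): an ID table matching two specific devices; the
 * device IDs are made up.  The table must end with an empty entry.
 *
 *	static const struct pci_device_id my_id_table[] = {
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },
 *		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_id_table);
 */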
1004
1005 /**
1006 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1007 * override_only flags.
1008 * @vend: the 16 bit PCI Vendor ID
1009 * @dev: the 16 bit PCI Device ID
1010 * @driver_override: the 32 bit PCI Device override_only
1011 *
1012 * This macro is used to create a struct pci_device_id that matches only a
1013 * driver_override device. The subvendor and subdevice fields will be set to
1014 * PCI_ANY_ID.
1015 */
1016 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1017 .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1018 .subdevice = PCI_ANY_ID, .override_only = (driver_override)
1019
1020 /**
1021 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1022 * "driver_override" PCI device.
1023 * @vend: the 16 bit PCI Vendor ID
1024 * @dev: the 16 bit PCI Device ID
1025 *
1026 * This macro is used to create a struct pci_device_id that matches a
1027 * specific device. The subvendor and subdevice fields will be set to
1028 * PCI_ANY_ID and the driver_override will be set to
1029 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1030 */
1031 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1032 PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1033
1034 /**
1035 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1036 * @vend: the 16 bit PCI Vendor ID
1037 * @dev: the 16 bit PCI Device ID
1038 * @subvend: the 16 bit PCI Subvendor ID
1039 * @subdev: the 16 bit PCI Subdevice ID
1040 *
1041 * This macro is used to create a struct pci_device_id that matches a
1042 * specific device with subsystem information.
1043 */
1044 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1045 .vendor = (vend), .device = (dev), \
1046 .subvendor = (subvend), .subdevice = (subdev)
1047
1048 /**
1049 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1050 * @dev_class: the class, subclass, prog-if triple for this device
1051 * @dev_class_mask: the class mask for this device
1052 *
1053 * This macro is used to create a struct pci_device_id that matches a
1054 * specific PCI class. The vendor, device, subvendor, and subdevice
1055 * fields will be set to PCI_ANY_ID.
1056 */
1057 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1058 .class = (dev_class), .class_mask = (dev_class_mask), \
1059 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1060 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1061
1062 /**
1063 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1064 * @vend: the vendor name
1065 * @dev: the 16 bit PCI Device ID
1066 *
1067 * This macro is used to create a struct pci_device_id that matches a
1068 * specific PCI device. The subvendor, and subdevice fields will be set
1069 * to PCI_ANY_ID. The macro allows the next field to follow as the device
1070 * private data.
1071 */
1072 #define PCI_VDEVICE(vend, dev) \
1073 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1074 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1075
1076 /**
1077 * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1078 * @vend: the vendor name
1079 * @dev: the 16 bit PCI Device ID
1080 * @subvend: the 16 bit PCI Subvendor ID
1081 * @subdev: the 16 bit PCI Subdevice ID
1082 *
1083 * Generate the pci_device_id struct layout for the specific PCI
1084 * device/subdevice. Private data may follow the output.
1085 */
1086 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1087 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1088 .subvendor = (subvend), .subdevice = (subdev), 0, 0
1089
1090 /**
1091 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1092 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1093 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1094 * @data: the driver data to be filled
1095 *
1096 * This macro is used to create a struct pci_device_id that matches a
1097 * specific PCI device. The subvendor, and subdevice fields will be set
1098 * to PCI_ANY_ID.
1099 */
1100 #define PCI_DEVICE_DATA(vend, dev, data) \
1101 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1102 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1103 .driver_data = (kernel_ulong_t)(data)
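/*
 * Example (illustrative): attaching per-entry driver data that probe() can
 * read back; FOO/BAR and struct foo_info are made-up names, so this assumes
 * PCI_VENDOR_ID_FOO and PCI_DEVICE_ID_FOO_BAR exist.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE_DATA(FOO, BAR, &foo_bar_info) },
 *		{ }
 *	};
 *
 * In probe():
 *
 *	struct foo_info *info = (struct foo_info *)id->driver_data;
 */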
1104
1105 enum {
1106 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
1107 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
1108 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
1109 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
1110 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
1111 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
1112 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
1113 };
1114
1115 #define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
1116 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1117 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1118 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
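/*
 * Example (illustrative): requesting up to 8 vectors for a bound device,
 * preferring MSI-X and falling back to MSI and then INTx.
 * pci_alloc_irq_vectors() is declared further down in this header.
 *
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
 *			PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvecs < 0)
 *		return nvecs;
 */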
1119
1120 /* These external functions are only available when PCI support is enabled */
1121 #ifdef CONFIG_PCI
1122
1123 extern unsigned int pci_flags;
1124
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1129
1130 void pcie_bus_configure_settings(struct pci_bus *bus);
1131
1132 enum pcie_bus_config_types {
1133 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
1134 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
PCIE_BUS_SAFE, /* Use largest MPS that boot-time devices support */
1136 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
1137 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
1138 };
1139
1140 extern enum pcie_bus_config_types pcie_bus_config;
1141
1142 extern const struct bus_type pci_bus_type;
1143
/* Do NOT directly access this variable unless you are arch-specific PCI
 * code, or PCI core code. */
1146 extern struct list_head pci_root_buses; /* List of all known PCI buses */
/* Some device drivers need to know if PCI is initialized */
1148 int no_pci_devices(void);
1149
1150 void pcibios_resource_survey_bus(struct pci_bus *bus);
1151 void pcibios_bus_add_device(struct pci_dev *pdev);
1152 void pcibios_add_bus(struct pci_bus *bus);
1153 void pcibios_remove_bus(struct pci_bus *bus);
1154 void pcibios_fixup_bus(struct pci_bus *);
1155 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1156 /* Architecture-specific versions may override this (weak) */
1157 char *pcibios_setup(char *str);
1158
1159 /* Used only when drivers/pci/setup.c is used */
1160 resource_size_t pcibios_align_resource(void *, const struct resource *,
1161 resource_size_t,
1162 resource_size_t);
1163
1164 /* Generic PCI functions used internally */
1165
1166 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1167 struct resource *res);
1168 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1169 struct pci_bus_region *region);
1170 void pcibios_scan_specific_bus(int busn);
1171 struct pci_bus *pci_find_bus(int domain, int busnr);
1172 void pci_bus_add_devices(const struct pci_bus *bus);
1173 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1174 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1175 struct pci_ops *ops, void *sysdata,
1176 struct list_head *resources);
1177 int pci_host_probe(struct pci_host_bridge *bridge);
1178 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1179 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1180 void pci_bus_release_busn_res(struct pci_bus *b);
1181 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1182 struct pci_ops *ops, void *sysdata,
1183 struct list_head *resources);
1184 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1185 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1186 int busnr);
1187 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1188 const char *name,
1189 struct hotplug_slot *hotplug);
1190 void pci_destroy_slot(struct pci_slot *slot);
1191 #ifdef CONFIG_SYSFS
1192 void pci_dev_assign_slot(struct pci_dev *dev);
1193 #else
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1195 #endif
1196 int pci_scan_slot(struct pci_bus *bus, int devfn);
1197 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1198 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1199 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1200 void pci_bus_add_device(struct pci_dev *dev);
1201 void pci_read_bridge_bases(struct pci_bus *child);
1202 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1203 struct resource *res);
1204 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1205 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1206 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1207 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1208 void pci_dev_put(struct pci_dev *dev);
1209 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
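/*
 * Example (illustrative): the DEFINE_FREE() class above enables scope-based
 * cleanup from <linux/cleanup.h>; pci_dev_put() runs automatically when
 * pdev goes out of scope (the device ID below is made up).
 *
 *	struct pci_dev *pdev __free(pci_dev_put) =
 *		pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);
 *	if (!pdev)
 *		return -ENODEV;
 */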
1210 void pci_remove_bus(struct pci_bus *b);
1211 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1212 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1213 void pci_stop_root_bus(struct pci_bus *bus);
1214 void pci_remove_root_bus(struct pci_bus *bus);
1215 void pci_setup_cardbus(struct pci_bus *bus);
1216 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1217 void pci_sort_breadthfirst(void);
1218 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1219 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1220
1221 /* Generic PCI functions exported to card drivers */
1222
1223 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1224 u8 pci_find_capability(struct pci_dev *dev, int cap);
1225 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1226 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1227 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1228 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1229 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1230 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1231 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1232 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1233
1234 u64 pci_get_dsn(struct pci_dev *dev);
1235
1236 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1237 struct pci_dev *from);
1238 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1239 unsigned int ss_vendor, unsigned int ss_device,
1240 struct pci_dev *from);
1241 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1242 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1243 unsigned int devfn);
1244 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1245 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1246
1247 int pci_dev_present(const struct pci_device_id *ids);
1248
1249 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1250 int where, u8 *val);
1251 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1252 int where, u16 *val);
1253 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1254 int where, u32 *val);
1255 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1256 int where, u8 val);
1257 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1258 int where, u16 val);
1259 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1260 int where, u32 val);
1261
1262 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1263 int where, int size, u32 *val);
1264 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1265 int where, int size, u32 val);
1266 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1267 int where, int size, u32 *val);
1268 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1269 int where, int size, u32 val);
1270
1271 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1272
1273 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1274 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1275 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1276 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1277 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1278 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1279 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1280 u32 clear, u32 set);
1281
1282 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1283 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1284 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1285 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1286 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1287 u16 clear, u16 set);
1288 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1289 u16 clear, u16 set);
1290 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1291 u32 clear, u32 set);
1292
1293 /**
1294 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1295 * @dev: PCI device structure of the PCI Express device
1296 * @pos: PCI Express Capability Register
1297 * @clear: Clear bitmask
1298 * @set: Set bitmask
1299 *
1300 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1301 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1302 * Capability Registers are accessed concurrently in RMW fashion, hence
1303 * require locking which is handled transparently to the caller.
1304 */
static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1306 int pos,
1307 u16 clear, u16 set)
1308 {
1309 switch (pos) {
1310 case PCI_EXP_LNKCTL:
1311 case PCI_EXP_LNKCTL2:
1312 case PCI_EXP_RTCTL:
1313 return pcie_capability_clear_and_set_word_locked(dev, pos,
1314 clear, set);
1315 default:
1316 return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1317 clear, set);
1318 }
1319 }
1320
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1322 u16 set)
1323 {
1324 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1325 }
1326
static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1328 u32 set)
1329 {
1330 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1331 }
1332
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1334 u16 clear)
1335 {
1336 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1337 }
1338
static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1340 u32 clear)
1341 {
1342 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1343 }
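/*
 * Example (illustrative): clearing the ASPM control bits in the Link
 * Control register of a bound device; PCI_EXP_LNKCTL is one of the
 * registers for which the helper takes the internal lock.
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC);
 */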
1344
1345 /* User-space driven config access */
1346 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1347 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1348 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1349 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1350 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1351 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1352
1353 int __must_check pci_enable_device(struct pci_dev *dev);
1354 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1355 int __must_check pci_reenable_device(struct pci_dev *);
1356 int __must_check pcim_enable_device(struct pci_dev *pdev);
1357 void pcim_pin_device(struct pci_dev *pdev);
1358
static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1360 {
1361 /*
1362 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1363 * writable and no quirk has marked the feature broken.
1364 */
1365 return !pdev->broken_intx_masking;
1366 }
1367
static inline int pci_is_enabled(struct pci_dev *pdev)
1369 {
1370 return (atomic_read(&pdev->enable_cnt) > 0);
1371 }
1372
static inline int pci_is_managed(struct pci_dev *pdev)
1374 {
1375 return pdev->is_managed;
1376 }
1377
1378 void pci_disable_device(struct pci_dev *dev);
1379
1380 extern unsigned int pcibios_max_latency;
1381 void pci_set_master(struct pci_dev *dev);
1382 void pci_clear_master(struct pci_dev *dev);
1383
1384 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1385 int pci_set_cacheline_size(struct pci_dev *dev);
1386 int __must_check pci_set_mwi(struct pci_dev *dev);
1387 int __must_check pcim_set_mwi(struct pci_dev *dev);
1388 int pci_try_set_mwi(struct pci_dev *dev);
1389 void pci_clear_mwi(struct pci_dev *dev);
1390 void pci_disable_parity(struct pci_dev *dev);
1391 void pci_intx(struct pci_dev *dev, int enable);
1392 bool pci_check_and_mask_intx(struct pci_dev *dev);
1393 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1394 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1395 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1396 int pcix_get_max_mmrbc(struct pci_dev *dev);
1397 int pcix_get_mmrbc(struct pci_dev *dev);
1398 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1399 int pcie_get_readrq(struct pci_dev *dev);
1400 int pcie_set_readrq(struct pci_dev *dev, int rq);
1401 int pcie_get_mps(struct pci_dev *dev);
1402 int pcie_set_mps(struct pci_dev *dev, int mps);
1403 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1404 enum pci_bus_speed *speed,
1405 enum pcie_link_width *width);
1406 int pcie_link_speed_mbps(struct pci_dev *pdev);
1407 void pcie_print_link_status(struct pci_dev *dev);
1408 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1409 int pcie_flr(struct pci_dev *dev);
1410 int __pci_reset_function_locked(struct pci_dev *dev);
1411 int pci_reset_function(struct pci_dev *dev);
1412 int pci_reset_function_locked(struct pci_dev *dev);
1413 int pci_try_reset_function(struct pci_dev *dev);
1414 int pci_probe_reset_slot(struct pci_slot *slot);
1415 int pci_probe_reset_bus(struct pci_bus *bus);
1416 int pci_reset_bus(struct pci_dev *dev);
1417 void pci_reset_secondary_bus(struct pci_dev *dev);
1418 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1419 void pci_update_resource(struct pci_dev *dev, int resno);
1420 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1421 int pci_release_resource(struct pci_dev *dev, int resno);
1422 static inline int pci_rebar_bytes_to_size(u64 bytes)
1423 {
1424 bytes = roundup_pow_of_two(bytes);
1425
1426 /* Return BAR size as defined in the resizable BAR specification */
1427 return max(ilog2(bytes), 20) - 20;
1428 }
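/*
 * Worked example: 20 is the exponent of the 1 MB minimum BAR size defined by
 * the Resizable BAR capability, so 8 GB rounds to 2^33 and yields 33 - 20 = 13,
 * while anything at or below 1 MB yields 0.
 */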
1429
1430 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1431 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1432 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1433 bool pci_device_is_present(struct pci_dev *pdev);
1434 void pci_ignore_hotplug(struct pci_dev *dev);
1435 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1436 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1437
1438 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1439 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1440 const char *fmt, ...);
1441 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1442
1443 /* ROM control related routines */
1444 int pci_enable_rom(struct pci_dev *pdev);
1445 void pci_disable_rom(struct pci_dev *pdev);
1446 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1447 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1448
1449 /* Power management related routines */
1450 int pci_save_state(struct pci_dev *dev);
1451 void pci_restore_state(struct pci_dev *dev);
1452 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1453 int pci_load_saved_state(struct pci_dev *dev,
1454 struct pci_saved_state *state);
1455 int pci_load_and_free_saved_state(struct pci_dev *dev,
1456 struct pci_saved_state **state);
1457 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1458 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1459 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1460 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1461 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1462 void pci_pme_active(struct pci_dev *dev, bool enable);
1463 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1464 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1465 int pci_prepare_to_sleep(struct pci_dev *dev);
1466 int pci_back_from_sleep(struct pci_dev *dev);
1467 bool pci_dev_run_wake(struct pci_dev *dev);
1468 void pci_d3cold_enable(struct pci_dev *dev);
1469 void pci_d3cold_disable(struct pci_dev *dev);
1470 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1471 void pci_resume_bus(struct pci_bus *bus);
1472 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1473
1474 /* For use by arch with custom probe code */
1475 void set_pcie_port_type(struct pci_dev *pdev);
1476 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1477
1478 /* Functions for PCI Hotplug drivers to use */
1479 unsigned int pci_rescan_bus(struct pci_bus *bus);
1480 void pci_lock_rescan_remove(void);
1481 void pci_unlock_rescan_remove(void);
1482
1483 /* Vital Product Data routines */
1484 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1485 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1486 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1487 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1488
1489 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1490 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1491 void pci_bus_assign_resources(const struct pci_bus *bus);
1492 void pci_bus_claim_resources(struct pci_bus *bus);
1493 void pci_bus_size_bridges(struct pci_bus *bus);
1494 int pci_claim_resource(struct pci_dev *, int);
1495 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1496 void pci_assign_unassigned_resources(void);
1497 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1498 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1499 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1500 int pci_enable_resources(struct pci_dev *, int mask);
1501 void pci_assign_irq(struct pci_dev *dev);
1502 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1503 #define HAVE_PCI_REQ_REGIONS 2
1504 int __must_check pci_request_regions(struct pci_dev *, const char *);
1505 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1506 void pci_release_regions(struct pci_dev *);
1507 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1508 void pci_release_region(struct pci_dev *, int);
1509 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1510 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1511 void pci_release_selected_regions(struct pci_dev *, int);
1512
1513 static inline __must_check struct resource *
1514 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1515 unsigned int len, const char *name)
1516 {
1517 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1518 name, IORESOURCE_EXCLUSIVE);
1519 }
1520
1521 static inline void pci_release_config_region(struct pci_dev *pdev,
1522 unsigned int offset,
1523 unsigned int len)
1524 {
1525 __release_region(&pdev->driver_exclusive_resource, offset, len);
1526 }
1527
1528 /* drivers/pci/bus.c */
1529 void pci_add_resource(struct list_head *resources, struct resource *res);
1530 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1531 resource_size_t offset);
1532 void pci_free_resource_list(struct list_head *resources);
1533 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1534 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1535 void pci_bus_remove_resources(struct pci_bus *bus);
1536 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1537 int devm_request_pci_bus_resources(struct device *dev,
1538 struct list_head *resources);
1539
1540 /* Temporary until new and working PCI SBR API in place */
1541 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1542
1543 #define __pci_bus_for_each_res0(bus, res, ...) \
1544 for (unsigned int __b = 0; \
1545 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1546 __b++)
1547
1548 #define __pci_bus_for_each_res1(bus, res, __b) \
1549 for (__b = 0; \
1550 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1551 __b++)
1552
1553 /**
1554 * pci_bus_for_each_resource - iterate over PCI bus resources
1555 * @bus: the PCI bus
1556 * @res: pointer to the current resource
1557 * @...: optional index of the current resource
1558 *
1559 * Iterate over PCI bus resources. The iteration first walks the PCI bus
1560 * resource array, which has at most %PCI_BRIDGE_RESOURCE_NUM entries, and
1561 * then continues with the separate list of additional resources, if that
1562 * list is not empty. That is why the logical OR is used in the helpers above.
1563 *
1564 * Possible usage:
1565 *
1566 * struct pci_bus *bus = ...;
1567 * struct resource *res;
1568 * unsigned int i;
1569 *
1570 * // With optional index
1571 * pci_bus_for_each_resource(bus, res, i)
1572 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1573 *
1574 * // Without index
1575 * pci_bus_for_each_resource(bus, res)
1576 * _do_something_(res);
1577 */
1578 #define pci_bus_for_each_resource(bus, res, ...) \
1579 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1580 (bus, res, __VA_ARGS__)
1581
1582 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1583 struct resource *res, resource_size_t size,
1584 resource_size_t align, resource_size_t min,
1585 unsigned long type_mask,
1586 resource_alignf alignf,
1587 void *alignf_data);
1588
1589
1590 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1591 resource_size_t size);
1592 unsigned long pci_address_to_pio(phys_addr_t addr);
1593 phys_addr_t pci_pio_to_address(unsigned long pio);
1594 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1595 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1596 phys_addr_t phys_addr);
1597 void pci_unmap_iospace(struct resource *res);
1598 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1599 resource_size_t offset,
1600 resource_size_t size);
1601 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1602 struct resource *res);
1603
1604 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1605 {
1606 struct pci_bus_region region;
1607
1608 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1609 return region.start;
1610 }
1611
1612 /* Proper probing supporting hot-pluggable devices */
1613 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1614 const char *mod_name);
1615
1616 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1617 #define pci_register_driver(driver) \
1618 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1619
1620 void pci_unregister_driver(struct pci_driver *dev);
1621
1622 /**
1623 * module_pci_driver() - Helper macro for registering a PCI driver
1624 * @__pci_driver: pci_driver struct
1625 *
1626 * Helper macro for PCI drivers which do not do anything special in module
1627 * init/exit. This eliminates a lot of boilerplate. Each module may only
1628 * use this macro once, and calling it replaces module_init() and module_exit().
1629 */
1630 #define module_pci_driver(__pci_driver) \
1631 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
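/*
 * Minimal usage sketch; the driver name, ID table and callbacks below are
 * hypothetical and not part of this header:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_pci_driver);
 */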
1632
1633 /**
1634 * builtin_pci_driver() - Helper macro for registering a PCI driver
1635 * @__pci_driver: pci_driver struct
1636 *
1637 * Helper macro for PCI drivers which do not do anything special in their
1638 * init code. This eliminates a lot of boilerplate. Each driver may only
1639 * use this macro once, and calling it replaces device_initcall(...)
1640 */
1641 #define builtin_pci_driver(__pci_driver) \
1642 builtin_driver(__pci_driver, pci_register_driver)
1643
1644 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1645 int pci_add_dynid(struct pci_driver *drv,
1646 unsigned int vendor, unsigned int device,
1647 unsigned int subvendor, unsigned int subdevice,
1648 unsigned int class, unsigned int class_mask,
1649 unsigned long driver_data);
1650 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1651 struct pci_dev *dev);
1652 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1653 int pass);
1654
1655 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1656 void *userdata);
1657 int pci_cfg_space_size(struct pci_dev *dev);
1658 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1659 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1660 unsigned long type);
1661
1662 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1663 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1664
1665 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1666 unsigned int command_bits, u32 flags);
1667
1668 /*
1669 * Virtual interrupts allow for more interrupts to be allocated
1670 * than the device has interrupts for. These are not programmed
1671 * into the device's MSI-X table and must be handled by the driver
1672 * through some other means.
1673 */
1674 #define PCI_IRQ_VIRTUAL (1 << 4)
1675
1676 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1677
1678 #include <linux/dmapool.h>
1679
1680 struct msix_entry {
1681 u32 vector; /* Kernel uses to write allocated vector */
1682 u16 entry; /* Driver uses to specify entry, OS writes */
1683 };
1684
1685 #ifdef CONFIG_PCI_MSI
1686 int pci_msi_vec_count(struct pci_dev *dev);
1687 void pci_disable_msi(struct pci_dev *dev);
1688 int pci_msix_vec_count(struct pci_dev *dev);
1689 void pci_disable_msix(struct pci_dev *dev);
1690 void pci_restore_msi_state(struct pci_dev *dev);
1691 bool pci_msi_enabled(void);
1692 int pci_enable_msi(struct pci_dev *dev);
1693 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1694 int minvec, int maxvec);
1695 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1696 struct msix_entry *entries, int nvec)
1697 {
1698 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1699 if (rc < 0)
1700 return rc;
1701 return 0;
1702 }
1703 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1704 unsigned int max_vecs, unsigned int flags);
1705 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1706 unsigned int max_vecs, unsigned int flags,
1707 struct irq_affinity *affd);
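/*
 * Typical probe-time pattern built on the allocators above (sketch; the
 * handler, name and vector count are hypothetical):
 *
 *	nvecs = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
 *	if (nvecs < 0)
 *		return nvecs;
 *	err = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
 *			  "foo", foo);
 *	...
 *	pci_free_irq_vectors(pdev);
 */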
1708
1709 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1710 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1711 const struct irq_affinity_desc *affdesc);
1712 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1713
1714 void pci_free_irq_vectors(struct pci_dev *dev);
1715 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1716 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1717
1718 #else
1719 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1720 static inline void pci_disable_msi(struct pci_dev *dev) { }
1721 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
1722 static inline void pci_disable_msix(struct pci_dev *dev) { }
1723 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
1724 static inline bool pci_msi_enabled(void) { return false; }
1725 static inline int pci_enable_msi(struct pci_dev *dev)
1726 { return -ENOSYS; }
1727 static inline int pci_enable_msix_range(struct pci_dev *dev,
1728 struct msix_entry *entries, int minvec, int maxvec)
1729 { return -ENOSYS; }
1730 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1731 struct msix_entry *entries, int nvec)
1732 { return -ENOSYS; }
1733
1734 static inline int
1735 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1736 unsigned int max_vecs, unsigned int flags,
1737 struct irq_affinity *aff_desc)
1738 {
1739 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1740 return 1;
1741 return -ENOSPC;
1742 }
1743 static inline int
1744 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1745 unsigned int max_vecs, unsigned int flags)
1746 {
1747 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1748 flags, NULL);
1749 }
1750
1751 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1752 { return false; }
1753 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1754 const struct irq_affinity_desc *affdesc)
1755 {
1756 struct msi_map map = { .index = -ENOSYS, };
1757
1758 return map;
1759 }
1760
1761 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1762 {
1763 }
1764
1765 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1766 {
1767 }
1768
1769 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1770 {
1771 if (WARN_ON_ONCE(nr > 0))
1772 return -EINVAL;
1773 return dev->irq;
1774 }
1775 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1776 int vec)
1777 {
1778 return cpu_possible_mask;
1779 }
1780 #endif
1781
1782 /**
1783 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1784 * @d: the INTx IRQ domain
1785 * @node: the DT node for the device whose interrupt we're translating
1786 * @intspec: the interrupt specifier data from the DT
1787 * @intsize: the number of entries in @intspec
1788 * @out_hwirq: pointer at which to write the hwirq number
1789 * @out_type: pointer at which to write the interrupt type
1790 *
1791 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1792 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1793 * 0-3 suitable for use in a 4-entry IRQ domain. That is, subtract one from the
1794 * INTx value to obtain the hwirq number.
1795 *
1796 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1797 */
1798 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1799 struct device_node *node,
1800 const u32 *intspec,
1801 unsigned int intsize,
1802 unsigned long *out_hwirq,
1803 unsigned int *out_type)
1804 {
1805 const u32 intx = intspec[0];
1806
1807 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1808 return -EINVAL;
1809
1810 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1811 return 0;
1812 }
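/*
 * Sketch: a PCI host controller driver typically plugs this helper in as the
 * ->xlate() callback of its INTx IRQ domain (the ->map() callback shown is
 * hypothetical):
 *
 *	static const struct irq_domain_ops foo_intx_domain_ops = {
 *		.map	= foo_intx_map,
 *		.xlate	= pci_irqd_intx_xlate,
 *	};
 */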
1813
1814 #ifdef CONFIG_PCIEPORTBUS
1815 extern bool pcie_ports_disabled;
1816 extern bool pcie_ports_native;
1817
1818 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1819 bool use_lt);
1820 #else
1821 #define pcie_ports_disabled true
1822 #define pcie_ports_native false
1823
1824 static inline int pcie_set_target_speed(struct pci_dev *port,
1825 enum pci_bus_speed speed_req,
1826 bool use_lt)
1827 {
1828 return -EOPNOTSUPP;
1829 }
1830 #endif
1831
1832 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1833 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
1834 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
1835 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
1836 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
1837 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
1838 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
1839 PCIE_LINK_STATE_L1 |\
1840 PCIE_LINK_STATE_L1_1 |\
1841 PCIE_LINK_STATE_L1_2 |\
1842 PCIE_LINK_STATE_L1_1_PCIPM |\
1843 PCIE_LINK_STATE_L1_2_PCIPM)
1844 #define PCIE_LINK_STATE_CLKPM BIT(7)
1845 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
1846 PCIE_LINK_STATE_CLKPM)
1847
1848 #ifdef CONFIG_PCIEASPM
1849 int pci_disable_link_state(struct pci_dev *pdev, int state);
1850 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1851 int pci_enable_link_state(struct pci_dev *pdev, int state);
1852 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1853 void pcie_no_aspm(void);
1854 bool pcie_aspm_support_enabled(void);
1855 bool pcie_aspm_enabled(struct pci_dev *pdev);
1856 #else
1857 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1858 { return 0; }
1859 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1860 { return 0; }
1861 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1862 { return 0; }
1863 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1864 { return 0; }
1865 static inline void pcie_no_aspm(void) { }
1866 static inline bool pcie_aspm_support_enabled(void) { return false; }
1867 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1868 #endif
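/*
 * Example (sketch): a driver whose device cannot tolerate L0s/L1 exit latency
 * may opt out of those ASPM states during probe:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
 */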
1869
1870 #ifdef CONFIG_HOTPLUG_PCI
1871 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1872 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1873 #else
1874 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
1875 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1876 #endif
1877
1878 #ifdef CONFIG_PCIEAER
1879 bool pci_aer_available(void);
1880 #else
1881 static inline bool pci_aer_available(void) { return false; }
1882 #endif
1883
1884 bool pci_ats_disabled(void);
1885
1886 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1887 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1888
1889 struct pcie_ptm_ops {
1890 int (*check_capability)(void *drvdata);
1891 int (*context_update_write)(void *drvdata, u8 mode);
1892 int (*context_update_read)(void *drvdata, u8 *mode);
1893 int (*context_valid_write)(void *drvdata, bool valid);
1894 int (*context_valid_read)(void *drvdata, bool *valid);
1895 int (*local_clock_read)(void *drvdata, u64 *clock);
1896 int (*master_clock_read)(void *drvdata, u64 *clock);
1897 int (*t1_read)(void *drvdata, u64 *clock);
1898 int (*t2_read)(void *drvdata, u64 *clock);
1899 int (*t3_read)(void *drvdata, u64 *clock);
1900 int (*t4_read)(void *drvdata, u64 *clock);
1901
1902 bool (*context_update_visible)(void *drvdata);
1903 bool (*context_valid_visible)(void *drvdata);
1904 bool (*local_clock_visible)(void *drvdata);
1905 bool (*master_clock_visible)(void *drvdata);
1906 bool (*t1_visible)(void *drvdata);
1907 bool (*t2_visible)(void *drvdata);
1908 bool (*t3_visible)(void *drvdata);
1909 bool (*t4_visible)(void *drvdata);
1910 };
1911
1912 struct pci_ptm_debugfs {
1913 struct dentry *debugfs;
1914 const struct pcie_ptm_ops *ops;
1915 struct mutex lock;
1916 void *pdata;
1917 };
1918
1919 #ifdef CONFIG_PCIE_PTM
1920 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1921 void pci_disable_ptm(struct pci_dev *dev);
1922 bool pcie_ptm_enabled(struct pci_dev *dev);
1923 #else
1924 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1925 { return -EINVAL; }
1926 static inline void pci_disable_ptm(struct pci_dev *dev) { }
1927 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1928 { return false; }
1929 #endif
1930
1931 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1932 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1933 const struct pcie_ptm_ops *ops);
1934 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1935 #else
1936 static inline struct pci_ptm_debugfs
1937 *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1938 const struct pcie_ptm_ops *ops) { return NULL; }
1939 static inline void
1940 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
1941 #endif
1942
1943 void pci_cfg_access_lock(struct pci_dev *dev);
1944 bool pci_cfg_access_trylock(struct pci_dev *dev);
1945 void pci_cfg_access_unlock(struct pci_dev *dev);
1946
1947 void pci_dev_lock(struct pci_dev *dev);
1948 int pci_dev_trylock(struct pci_dev *dev);
1949 void pci_dev_unlock(struct pci_dev *dev);
1950 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1951
1952 /*
1953 * PCI domain support. Sometimes called PCI segment (e.g. by ACPI),
1954 * a PCI domain is defined to be a set of PCI buses which share
1955 * configuration space.
1956 */
1957 #ifdef CONFIG_PCI_DOMAINS
1958 extern int pci_domains_supported;
1959 #else
1960 enum { pci_domains_supported = 0 };
1961 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1962 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1963 #endif /* CONFIG_PCI_DOMAINS */
1964
1965 /*
1966 * Generic implementation for PCI domain support. If your
1967 * architecture does not need custom management of PCI
1968 * domains then this implementation will be used
1969 */
1970 #ifdef CONFIG_PCI_DOMAINS_GENERIC
1971 static inline int pci_domain_nr(struct pci_bus *bus)
1972 {
1973 return bus->domain_nr;
1974 }
1975 #ifdef CONFIG_ACPI
1976 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1977 #else
1978 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1979 { return 0; }
1980 #endif
1981 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1982 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1983 #endif
1984
1985 /* Some architectures require additional setup to direct VGA traffic */
1986 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1987 unsigned int command_bits, u32 flags);
1988 void pci_register_set_vga_state(arch_set_vga_state_t func);
1989
1990 static inline int
1991 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1992 {
1993 return pci_request_selected_regions(pdev,
1994 pci_select_bars(pdev, IORESOURCE_IO), name);
1995 }
1996
1997 static inline void
1998 pci_release_io_regions(struct pci_dev *pdev)
1999 {
2000 return pci_release_selected_regions(pdev,
2001 pci_select_bars(pdev, IORESOURCE_IO));
2002 }
2003
2004 static inline int
2005 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
2006 {
2007 return pci_request_selected_regions(pdev,
2008 pci_select_bars(pdev, IORESOURCE_MEM), name);
2009 }
2010
2011 static inline void
2012 pci_release_mem_regions(struct pci_dev *pdev)
2013 {
2014 return pci_release_selected_regions(pdev,
2015 pci_select_bars(pdev, IORESOURCE_MEM));
2016 }
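/*
 * Typical managed probe sequence built from the helpers above (sketch; the
 * region name string is hypothetical):
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_mem_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */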
2017
2018 #else /* CONFIG_PCI is not enabled */
2019
2020 static inline void pci_set_flags(int flags) { }
2021 static inline void pci_add_flags(int flags) { }
2022 static inline void pci_clear_flags(int flags) { }
2023 static inline int pci_has_flag(int flag) { return 0; }
2024
2025 /*
2026 * If the system does not have PCI, clearly these return errors. Define
2027 * these as simple inline functions to avoid hair in drivers.
2028 */
2029 #define _PCI_NOP(o, s, t) \
2030 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
2031 int where, t val) \
2032 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
2033
2034 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
2035 _PCI_NOP(o, word, u16 x) \
2036 _PCI_NOP(o, dword, u32 x)
2037 _PCI_NOP_ALL(read, *)
2038 _PCI_NOP_ALL(write,)
2039
2040 static inline struct pci_dev *pci_get_device(unsigned int vendor,
2041 unsigned int device,
2042 struct pci_dev *from)
2043 { return NULL; }
2044
2045 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
2046 unsigned int device,
2047 unsigned int ss_vendor,
2048 unsigned int ss_device,
2049 struct pci_dev *from)
2050 { return NULL; }
2051
2052 static inline struct pci_dev *pci_get_class(unsigned int class,
2053 struct pci_dev *from)
2054 { return NULL; }
2055
2056 static inline struct pci_dev *pci_get_base_class(unsigned int class,
2057 struct pci_dev *from)
2058 { return NULL; }
2059
2060 static inline int pci_dev_present(const struct pci_device_id *ids)
2061 { return 0; }
2062
2063 #define no_pci_devices() (1)
2064 #define pci_dev_put(dev) do { } while (0)
2065
2066 static inline void pci_set_master(struct pci_dev *dev) { }
2067 static inline void pci_clear_master(struct pci_dev *dev) { }
2068 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
2069 static inline void pci_disable_device(struct pci_dev *dev) { }
2070 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
2071 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2072 { return -EBUSY; }
2073 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2074 struct module *owner,
2075 const char *mod_name)
2076 { return 0; }
2077 static inline int pci_register_driver(struct pci_driver *drv)
2078 { return 0; }
2079 static inline void pci_unregister_driver(struct pci_driver *drv) { }
2080 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2081 { return 0; }
2082 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2083 { return 0; }
2084 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2085 { return 0; }
2086
2087 static inline u64 pci_get_dsn(struct pci_dev *dev)
2088 { return 0; }
2089
2090 /* Power management related routines */
2091 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
2092 static inline void pci_restore_state(struct pci_dev *dev) { }
2093 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2094 { return 0; }
2095 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2096 { return 0; }
2097 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2098 { return 0; }
2099 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2100 pm_message_t state)
2101 { return PCI_D0; }
2102 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2103 int enable)
2104 { return 0; }
2105
2106 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2107 struct resource *res)
2108 { return NULL; }
2109 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2110 { return -EIO; }
2111 static inline void pci_release_regions(struct pci_dev *dev) { }
2112
2113 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2114 phys_addr_t addr, resource_size_t size)
2115 { return -EINVAL; }
2116
2117 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2118
2119 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2120 { return NULL; }
2121 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2122 unsigned int devfn)
2123 { return NULL; }
2124 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2125 unsigned int bus, unsigned int devfn)
2126 { return NULL; }
2127
2128 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2129 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2130
2131 #define dev_is_pci(d) (false)
2132 #define dev_is_pf(d) (false)
2133 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2134 { return false; }
2135 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2136 struct device_node *node,
2137 const u32 *intspec,
2138 unsigned int intsize,
2139 unsigned long *out_hwirq,
2140 unsigned int *out_type)
2141 { return -EINVAL; }
2142
2143 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2144 struct pci_dev *dev)
2145 { return NULL; }
2146 static inline bool pci_ats_disabled(void) { return true; }
2147
2148 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2149 {
2150 return -EINVAL;
2151 }
2152
2153 static inline int
2154 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2155 unsigned int max_vecs, unsigned int flags,
2156 struct irq_affinity *aff_desc)
2157 {
2158 return -ENOSPC;
2159 }
2160 static inline int
2161 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2162 unsigned int max_vecs, unsigned int flags)
2163 {
2164 return -ENOSPC;
2165 }
2166 #endif /* CONFIG_PCI */
2167
2168 /* Include architecture-dependent settings and functions */
2169
2170 #include <asm/pci.h>
2171
2172 /*
2173 * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2174 * is expected to be an offset within that region.
2175 *
2176 */
2177 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2178 struct vm_area_struct *vma,
2179 enum pci_mmap_state mmap_state, int write_combine);
2180
2181 #ifndef arch_can_pci_mmap_wc
2182 #define arch_can_pci_mmap_wc() 0
2183 #endif
2184
2185 #ifndef arch_can_pci_mmap_io
2186 #define arch_can_pci_mmap_io() 0
2187 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2188 #else
2189 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2190 #endif
2191
2192 #ifndef pci_root_bus_fwnode
2193 #define pci_root_bus_fwnode(bus) NULL
2194 #endif
2195
2196 /*
2197 * These helpers provide future and backwards compatibility
2198 * for accessing popular PCI BAR info
2199 */
2200 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2201 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2202 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2203 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2204 #define pci_resource_len(dev,bar) \
2205 (pci_resource_end((dev), (bar)) ? \
2206 resource_size(pci_resource_n((dev), (bar))) : 0)
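/*
 * Example (sketch): validate and size BAR 0 before mapping it; a zero length
 * means the BAR is not implemented:
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
 *		return -ENODEV;
 *	len = pci_resource_len(pdev, 0);
 */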
2207
2208 #define __pci_dev_for_each_res0(dev, res, ...) \
2209 for (unsigned int __b = 0; \
2210 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2211 __b++)
2212
2213 #define __pci_dev_for_each_res1(dev, res, __b) \
2214 for (__b = 0; \
2215 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2216 __b++)
2217
2218 #define pci_dev_for_each_resource(dev, res, ...) \
2219 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2220 (dev, res, __VA_ARGS__)
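/*
 * Usage mirrors pci_bus_for_each_resource() above (sketch):
 *
 *	struct resource *res;
 *	unsigned int i;
 *
 *	pci_dev_for_each_resource(pdev, res, i)
 *		pr_info("resource[%u]: %pR\n", i, res);
 */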
2221
2222 /*
2223 * Similar to the helpers above, these manipulate per-pci_dev
2224 * driver-specific data. They are really just a wrapper around
2225 * the generic device structure functions of these calls.
2226 */
2227 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2228 {
2229 return dev_get_drvdata(&pdev->dev);
2230 }
2231
2232 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2233 {
2234 dev_set_drvdata(&pdev->dev, data);
2235 }
2236
2237 static inline const char *pci_name(const struct pci_dev *pdev)
2238 {
2239 return dev_name(&pdev->dev);
2240 }
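/*
 * Sketch: stash per-device driver state at probe time and retrieve it later;
 * struct foo_dev and the callbacks are hypothetical:
 *
 *	In foo_probe():    pci_set_drvdata(pdev, foo);
 *	In foo_remove():   struct foo_dev *foo = pci_get_drvdata(pdev);
 */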
2241
2242 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2243 const struct resource *rsrc,
2244 resource_size_t *start, resource_size_t *end);
2245
2246 /*
2247 * The world is not perfect and supplies us with broken PCI devices.
2248 * For at least a part of these bugs we need a work-around, so both
2249 * generic (drivers/pci/quirks.c) and per-architecture code can define
2250 * fixup hooks to be called for particular buggy devices.
2251 */
2252
2253 struct pci_fixup {
2254 u16 vendor; /* Or PCI_ANY_ID */
2255 u16 device; /* Or PCI_ANY_ID */
2256 u32 class; /* Or PCI_ANY_ID */
2257 unsigned int class_shift; /* should be 0, 8, 16 */
2258 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2259 int hook_offset;
2260 #else
2261 void (*hook)(struct pci_dev *dev);
2262 #endif
2263 };
2264
2265 enum pci_fixup_pass {
2266 pci_fixup_early, /* Before probing BARs */
2267 pci_fixup_header, /* After reading configuration header */
2268 pci_fixup_final, /* Final phase of device fixups */
2269 pci_fixup_enable, /* pci_enable_device() time */
2270 pci_fixup_resume, /* pci_device_resume() */
2271 pci_fixup_suspend, /* pci_device_suspend() */
2272 pci_fixup_resume_early, /* pci_device_resume_early() */
2273 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2274 };
2275
2276 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2277 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2278 class_shift, hook) \
2279 __ADDRESSABLE(hook) \
2280 asm(".section " #sec ", \"a\" \n" \
2281 ".balign 16 \n" \
2282 ".short " #vendor ", " #device " \n" \
2283 ".long " #class ", " #class_shift " \n" \
2284 ".long " #hook " - . \n" \
2285 ".previous \n");
2286
2287 /*
2288 * Clang's LTO may rename static functions in C, but has no way to
2289 * handle such renamings when referenced from inline asm. To work
2290 * around this, create global C stubs for these cases.
2291 */
2292 #ifdef CONFIG_LTO_CLANG
2293 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2294 class_shift, hook, stub) \
2295 void stub(struct pci_dev *dev); \
2296 void stub(struct pci_dev *dev) \
2297 { \
2298 hook(dev); \
2299 } \
2300 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2301 class_shift, stub)
2302 #else
2303 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2304 class_shift, hook, stub) \
2305 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2306 class_shift, hook)
2307 #endif
2308
2309 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2310 class_shift, hook) \
2311 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2312 class_shift, hook, __UNIQUE_ID(hook))
2313 #else
2314 /* Anonymous variables would be nice... */
2315 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2316 class_shift, hook) \
2317 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2318 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2319 = { vendor, device, class, class_shift, hook };
2320 #endif
2321
2322 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2323 class_shift, hook) \
2324 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2325 hook, vendor, device, class, class_shift, hook)
2326 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2327 class_shift, hook) \
2328 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2329 hook, vendor, device, class, class_shift, hook)
2330 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2331 class_shift, hook) \
2332 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2333 hook, vendor, device, class, class_shift, hook)
2334 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2335 class_shift, hook) \
2336 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2337 hook, vendor, device, class, class_shift, hook)
2338 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2339 class_shift, hook) \
2340 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2341 resume##hook, vendor, device, class, class_shift, hook)
2342 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2343 class_shift, hook) \
2344 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2345 resume_early##hook, vendor, device, class, class_shift, hook)
2346 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2347 class_shift, hook) \
2348 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2349 suspend##hook, vendor, device, class, class_shift, hook)
2350 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2351 class_shift, hook) \
2352 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2353 suspend_late##hook, vendor, device, class, class_shift, hook)
2354
2355 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2356 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2357 hook, vendor, device, PCI_ANY_ID, 0, hook)
2358 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2359 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2360 hook, vendor, device, PCI_ANY_ID, 0, hook)
2361 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2362 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2363 hook, vendor, device, PCI_ANY_ID, 0, hook)
2364 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2365 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2366 hook, vendor, device, PCI_ANY_ID, 0, hook)
2367 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2368 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2369 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2370 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2371 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2372 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2373 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2374 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2375 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2376 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2377 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2378 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
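/*
 * Sketch of a quirk registration; the vendor/device IDs and the hook are
 * hypothetical (real quirks live in drivers/pci/quirks.c):
 *
 *	static void quirk_foo_intx(struct pci_dev *dev)
 *	{
 *		dev->broken_intx_masking = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FOO, 0x1234, quirk_foo_intx);
 */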
2379
2380 #ifdef CONFIG_PCI_QUIRKS
2381 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2382 #else
2383 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2384 struct pci_dev *dev) { }
2385 #endif
2386
2387 int pcim_intx(struct pci_dev *pdev, int enabled);
2388 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2389 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2390 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2391 const char *name);
2392 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2393 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2394 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2395 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2396 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2397 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2398 unsigned long offset, unsigned long len);
2399
2400 extern int pci_pci_problems;
2401 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2402 #define PCIPCI_TRITON 2
2403 #define PCIPCI_NATOMA 4
2404 #define PCIPCI_VIAETBF 8
2405 #define PCIPCI_VSFX 16
2406 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2407 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2408
2409 extern u8 pci_dfl_cache_line_size;
2410 extern u8 pci_cache_line_size;
2411
2412 /* Architecture-specific versions may override these (weak) */
2413 void pcibios_disable_device(struct pci_dev *dev);
2414 void pcibios_set_master(struct pci_dev *dev);
2415 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2416 enum pcie_reset_state state);
2417 int pcibios_device_add(struct pci_dev *dev);
2418 void pcibios_release_device(struct pci_dev *dev);
2419 #ifdef CONFIG_PCI
2420 void pcibios_penalize_isa_irq(int irq, int active);
2421 #else
2422 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2423 #endif
2424 int pcibios_alloc_irq(struct pci_dev *dev);
2425 void pcibios_free_irq(struct pci_dev *dev);
2426 resource_size_t pcibios_default_alignment(void);
2427
2428 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2429 extern int pci_create_resource_files(struct pci_dev *dev);
2430 extern void pci_remove_resource_files(struct pci_dev *dev);
2431 #endif
2432
2433 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2434 void __init pci_mmcfg_early_init(void);
2435 void __init pci_mmcfg_late_init(void);
2436 #else
2437 static inline void pci_mmcfg_early_init(void) { }
2438 static inline void pci_mmcfg_late_init(void) { }
2439 #endif
2440
2441 int pci_ext_cfg_avail(void);
2442
2443 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2444 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2445
2446 #ifdef CONFIG_PCI_IOV
2447 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2448 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2449 int pci_iov_vf_id(struct pci_dev *dev);
2450 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2451 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2452 void pci_disable_sriov(struct pci_dev *dev);
2453
2454 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2455 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2456 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2457 int pci_num_vf(struct pci_dev *dev);
2458 int pci_vfs_assigned(struct pci_dev *dev);
2459 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2460 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2461 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2462 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2463 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2464 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2465 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2466
2467 /* Arch may override these (weak) */
2468 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2469 int pcibios_sriov_disable(struct pci_dev *pdev);
2470 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2471 #else
2472 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2473 {
2474 return -ENOSYS;
2475 }
2476 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2477 {
2478 return -ENOSYS;
2479 }
2480
2481 static inline int pci_iov_vf_id(struct pci_dev *dev)
2482 {
2483 return -ENOSYS;
2484 }
2485
2486 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2487 struct pci_driver *pf_driver)
2488 {
2489 return ERR_PTR(-EINVAL);
2490 }
2491
2492 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2493 { return -ENODEV; }
2494
2495 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2496 struct pci_dev *virtfn, int id)
2497 {
2498 return -ENODEV;
2499 }
2500 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2501 {
2502 return -ENOSYS;
2503 }
2504 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2505 int id) { }
2506 static inline void pci_disable_sriov(struct pci_dev *dev) { }
2507 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
2508 static inline int pci_vfs_assigned(struct pci_dev *dev)
2509 { return 0; }
2510 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2511 { return 0; }
2512 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2513 { return 0; }
2514 #define pci_sriov_configure_simple NULL
2515 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2516 { return 0; }
2517 static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
2518 { return -ENODEV; }
2519 static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
2520 { return 0; }
2521 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2522 #endif
2523
2524 /**
2525 * pci_pcie_cap - get the saved PCIe capability offset
2526 * @dev: PCI device
2527 *
2528 * PCIe capability offset is calculated at PCI device initialization
2529 * time and saved in the data structure. This function returns the saved
2530 * PCIe capability offset. Using it instead of pci_find_capability()
2531 * avoids an unnecessary search of the PCI configuration space. If you
2532 * need to recompute the PCIe capability offset from the raw device for
2533 * some reason, use pci_find_capability() instead.
2534 */
2535 static inline int pci_pcie_cap(struct pci_dev *dev)
2536 {
2537 return dev->pcie_cap;
2538 }
2539
2540 /**
2541 * pci_is_pcie - check if the PCI device is PCI Express capable
2542 * @dev: PCI device
2543 *
2544 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2545 */
2546 static inline bool pci_is_pcie(struct pci_dev *dev)
2547 {
2548 return pci_pcie_cap(dev);
2549 }
2550
2551 /**
2552 * pcie_caps_reg - get the PCIe Capabilities Register
2553 * @dev: PCI device
2554 */
2555 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2556 {
2557 return dev->pcie_flags_reg;
2558 }
2559
2560 /**
2561 * pci_pcie_type - get the PCIe device/port type
2562 * @dev: PCI device
2563 */
2564 static inline int pci_pcie_type(const struct pci_dev *dev)
2565 {
2566 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2567 }
2568
2569 /**
2570 * pcie_find_root_port - Get the PCIe root port device
2571 * @dev: PCI device
2572 *
2573 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2574 * for a given PCI/PCIe Device.
2575 */
2576 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2577 {
2578 while (dev) {
2579 if (pci_is_pcie(dev) &&
2580 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2581 return dev;
2582 dev = pci_upstream_bridge(dev);
2583 }
2584
2585 return NULL;
2586 }
2587
2588 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2589 {
2590 /*
2591 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2592 * and read w/o common lock. READ_ONCE() ensures the compiler cannot cache
2593 * the value (e.g. inside the loop in pci_dev_wait()).
2594 */
2595 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2596 }

void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags);
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"

/**
 * pci_vpd_alloc - Allocate buffer and read VPD into it
 * @dev: PCI device
 * @size: pointer to field where VPD length is returned
 *
 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
 */
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);

/**
 * pci_vpd_find_id_string - Locate id string in VPD
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @size: Pointer to field where length of id string is returned
 *
 * Returns the index of the id string or -ENOENT if not found.
 */
int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);

/**
 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @kw: The keyword to search for
 * @size: Pointer to field where length of found keyword data is returned
 *
 * Returns the index of the information field keyword data or -ENOENT if
 * not found.
 */
int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
				 const char *kw, unsigned int *size);

/**
 * pci_vpd_check_csum - Check VPD checksum
 * @buf: Pointer to buffered VPD data
 * @len: VPD size
 *
 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
 */
int pci_vpd_check_csum(const void *buf, unsigned int len);
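
/*
 * Example (illustrative sketch, not part of the original header): read a
 * device's serial number keyword from VPD. Error handling is abbreviated;
 * "pdev" is an assumed local.
 *
 *	unsigned int vpd_len, sn_len;
 *	void *vpd = pci_vpd_alloc(pdev, &vpd_len);
 *	int off;
 *
 *	if (IS_ERR(vpd))
 *		return PTR_ERR(vpd);
 *
 *	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *					   PCI_VPD_RO_KEYWORD_SERIALNO,
 *					   &sn_len);
 *	if (off >= 0)
 *		pci_info(pdev, "VPD serial number is %u bytes long\n", sn_len);
 *
 *	kfree(vpd);
 */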

/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
bool pci_host_of_has_msi_map(struct device *dev);

/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);

#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */

static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
	return pdev ? pdev->dev.of_node : NULL;
}

static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
	return bus ? bus->dev.of_node : NULL;
}

#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);

void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif

#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
bool arch_pci_dev_is_removable(struct pci_dev *pdev);
#else
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
#endif

#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
#endif

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
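
/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical pci_for_each_dma_alias() callback that just prints every
 * requester ID the device may use on the bus.
 *
 *	static int print_alias(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		pci_info(pdev, "DMA alias %02x:%02x.%d\n", PCI_BUS_NUM(alias),
 *			 PCI_SLOT(alias & 0xff), PCI_FUNC(alias & 0xff));
 *		return 0;
 *	}
 *
 *	pci_for_each_dma_alias(pdev, print_alias, NULL);
 */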

/* Helpers for operating on the PCI_DEV_FLAGS_ASSIGNED device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}

/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}

/**
 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
 * @pdev: PCI device to check
 *
 * Walk upwards from @pdev and check for each encountered bridge if it's part
 * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
 * Thunderbolt-attached (such a device is usually soldered to the mainboard
 * instead).
 */
static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev;

	if (pdev->is_thunderbolt)
		return true;

	while ((parent = pci_upstream_bridge(parent)))
		if (parent->is_thunderbolt)
			return true;

	return false;
}
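
/*
 * Example (illustrative sketch, not part of the original header): a
 * hypothetical driver that treats Thunderbolt-attached devices as
 * hot-unpluggable and therefore skips a lengthy firmware update.
 *
 *	if (pci_is_thunderbolt_attached(pdev)) {
 *		pci_info(pdev, "externally attached, skipping firmware update\n");
 *		return 0;
 *	}
 */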

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif

#include <linux/dma-mapping.h>

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...)	dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
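
/*
 * Example (illustrative sketch, not part of the original header): the
 * pci_*() logging wrappers prefix messages with the driver name and the
 * device's bus address, so callers only supply the message itself.
 *
 *	pci_err(pdev, "BAR %d: failed to map region\n", bar);
 *	pci_WARN_ONCE(pdev, !pdev->msix_enabled, "MSI-X not enabled\n");
 */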

#endif /* LINUX_PCI_H */