1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * pci.h
4 *
5 * PCI defines and function prototypes
6 * Copyright 1994, Drew Eckhardt
7 * Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8 *
9 * PCI Express ASPM defines and function prototypes
10 * Copyright (c) 2007 Intel Corp.
11 * Zhang Yanmin (yanmin.zhang@intel.com)
12 * Shaohua Li (shaohua.li@intel.com)
13 *
14 * For more information, please consult the following manuals (look at
15 * http://www.pcisig.com/ for how to get them):
16 *
17 * PCI BIOS Specification
18 * PCI Local Bus Specification
19 * PCI to PCI Bridge Specification
20 * PCI Express Specification
21 * PCI System Design Guide
22 */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43
44 #include <linux/pci_ids.h>
45
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
47 PCI_STATUS_SIG_SYSTEM_ERROR | \
48 PCI_STATUS_REC_MASTER_ABORT | \
49 PCI_STATUS_REC_TARGET_ABORT | \
50 PCI_STATUS_SIG_TARGET_ABORT | \
51 PCI_STATUS_PARITY)
52
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55
56 #define PCI_RESET_PROBE true
57 #define PCI_RESET_DO_RESET false
58
59 /*
60 * The PCI interface treats multi-function devices as independent
61 * devices. The slot/function address of each device is encoded
62 * in a single byte as follows:
63 *
64 * 7:3 = slot
65 * 2:0 = function
66 *
67 * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68 * In the interest of not exposing interfaces to user-space unnecessarily,
69 * the following kernel-only defines are being added here.
70 */
71 #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
72 /* return bus from PCI devid = (((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
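
/*
 * Illustrative sketch (not part of this header): composing and decomposing a
 * device ID with the macros above. The bus/slot/function values are arbitrary
 * examples.
 *
 *	u16 devid = PCI_DEVID(0x3a, PCI_DEVFN(0x10, 2)); // bus 0x3a, slot 0x10, fn 2
 *	u8 bus  = PCI_BUS_NUM(devid);                     // 0x3a
 *	u8 slot = PCI_SLOT(devid & 0xff);                 // 0x10
 *	u8 func = PCI_FUNC(devid & 0xff);                 // 2
 */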
74
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 struct pci_bus *bus; /* Bus this slot is on */
78 struct list_head list; /* Node in list of slots */
79 struct hotplug_slot *hotplug; /* Hotplug info (move here) */
80 unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
81 struct kobject kobj;
82 };
83
84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 return kobject_name(&slot->kobj);
87 }
88
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 pci_mmap_io,
92 pci_mmap_mem
93 };
94
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 /* #0-5: standard PCI resources */
98 PCI_STD_RESOURCES,
99 PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100
101 /* #6: expansion ROM resource */
102 PCI_ROM_RESOURCE,
103
104 /* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 PCI_IOV_RESOURCES,
107 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2)
114
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
120
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_BRIDGE_RESOURCE_NUM 4
123
124 /* Resources assigned to buses behind the bridge */
125 PCI_BRIDGE_RESOURCES,
126 PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
127 PCI_BRIDGE_RESOURCE_NUM - 1,
128
129 /* Total resources associated with a PCI device */
130 PCI_NUM_RESOURCES,
131
132 /* Preserve this for compatibility */
133 DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
134 };
135
136 /**
137 * enum pci_interrupt_pin - PCI INTx interrupt values
138 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
139 * @PCI_INTERRUPT_INTA: PCI INTA pin
140 * @PCI_INTERRUPT_INTB: PCI INTB pin
141 * @PCI_INTERRUPT_INTC: PCI INTC pin
142 * @PCI_INTERRUPT_INTD: PCI INTD pin
143 *
144 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
145 * PCI_INTERRUPT_PIN register.
146 */
147 enum pci_interrupt_pin {
148 PCI_INTERRUPT_UNKNOWN,
149 PCI_INTERRUPT_INTA,
150 PCI_INTERRUPT_INTB,
151 PCI_INTERRUPT_INTC,
152 PCI_INTERRUPT_INTD,
153 };
154
155 /* The number of legacy PCI INTx interrupts */
156 #define PCI_NUM_INTX 4
157
158 /*
159 * Reading from a device that doesn't respond typically returns ~0. A
160 * successful read from a device may also return ~0, so you need additional
161 * information to reliably identify errors.
162 */
163 #define PCI_ERROR_RESPONSE (~0ULL)
164 #define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
165 #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
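
/*
 * Illustrative sketch (not part of this header): distinguishing a failed read
 * from a legitimate all-ones value. "my_dev" and the register offset 0x40 are
 * hypothetical.
 *
 *	u32 val;
 *
 *	pci_read_config_dword(my_dev, 0x40, &val);
 *	if (PCI_POSSIBLE_ERROR(val) && !pci_device_is_present(my_dev))
 *		return -ENODEV;	// device likely fell off the bus
 */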
166
167 /*
168 * pci_power_t values must match the bits in the Capabilities PME_Support
169 * and Control/Status PowerState fields in the Power Management capability.
170 */
171 typedef int __bitwise pci_power_t;
172
173 #define PCI_D0 ((pci_power_t __force) 0)
174 #define PCI_D1 ((pci_power_t __force) 1)
175 #define PCI_D2 ((pci_power_t __force) 2)
176 #define PCI_D3hot ((pci_power_t __force) 3)
177 #define PCI_D3cold ((pci_power_t __force) 4)
178 #define PCI_UNKNOWN ((pci_power_t __force) 5)
179 #define PCI_POWER_ERROR ((pci_power_t __force) -1)
180
181 /* Remember to update this when the list above changes! */
182 extern const char *pci_power_names[];
183
184 static inline const char *pci_power_name(pci_power_t state)
185 {
186 return pci_power_names[1 + (__force int) state];
187 }
188
189 /**
190 * typedef pci_channel_state_t
191 *
192 * The pci_channel state describes connectivity between the CPU and
193 * the PCI device. If some PCI bus between here and the PCI device
194 * has crashed or locked up, this info is reflected here.
195 */
196 typedef unsigned int __bitwise pci_channel_state_t;
197
198 enum {
199 /* I/O channel is in normal state */
200 pci_channel_io_normal = (__force pci_channel_state_t) 1,
201
202 /* I/O to channel is blocked */
203 pci_channel_io_frozen = (__force pci_channel_state_t) 2,
204
205 /* PCI card is dead */
206 pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
207 };
208
209 typedef unsigned int __bitwise pcie_reset_state_t;
210
211 enum pcie_reset_state {
212 /* Reset is NOT asserted (Use to deassert reset) */
213 pcie_deassert_reset = (__force pcie_reset_state_t) 1,
214
215 /* Use #PERST to reset PCIe device */
216 pcie_warm_reset = (__force pcie_reset_state_t) 2,
217
218 /* Use PCIe Hot Reset to reset device */
219 pcie_hot_reset = (__force pcie_reset_state_t) 3
220 };
221
222 typedef unsigned short __bitwise pci_dev_flags_t;
223 enum pci_dev_flags {
224 /* INTX_DISABLE in PCI_COMMAND register disables MSI too */
225 PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
226 /* Device configuration is irrevocably lost if disabled into D3 */
227 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
228 /* Provide indication device is assigned by a Virtual Machine Manager */
229 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
230 /* Flag for quirk use to store if quirk-specific ACS is enabled */
231 PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
232 /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
233 PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
234 /* Do not use bus resets for device */
235 PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
236 /* Do not use PM reset even if device advertises NoSoftRst- */
237 PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
238 /* Get VPD from function 0 VPD */
239 PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
240 /* A non-root bridge where translation occurs, stop alias search here */
241 PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
242 /* Do not use FLR even if device advertises PCI_AF_CAP */
243 PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
244 /* Don't use Relaxed Ordering for TLPs directed at this device */
245 PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
246 /* Device does honor MSI masking despite saying otherwise */
247 PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
248 /* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
249 PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
250 };
251
252 enum pci_irq_reroute_variant {
253 INTEL_IRQ_REROUTE_VARIANT = 1,
254 MAX_IRQ_REROUTE_VARIANTS = 3
255 };
256
257 typedef unsigned short __bitwise pci_bus_flags_t;
258 enum pci_bus_flags {
259 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
260 PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
261 PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4,
262 PCI_BUS_FLAGS_NO_EXTCFG = (__force pci_bus_flags_t) 8,
263 };
264
265 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
266 enum pcie_link_width {
267 PCIE_LNK_WIDTH_RESRV = 0x00,
268 PCIE_LNK_X1 = 0x01,
269 PCIE_LNK_X2 = 0x02,
270 PCIE_LNK_X4 = 0x04,
271 PCIE_LNK_X8 = 0x08,
272 PCIE_LNK_X12 = 0x0c,
273 PCIE_LNK_X16 = 0x10,
274 PCIE_LNK_X32 = 0x20,
275 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
276 };
277
278 /* See matching string table in pci_speed_string() */
279 enum pci_bus_speed {
280 PCI_SPEED_33MHz = 0x00,
281 PCI_SPEED_66MHz = 0x01,
282 PCI_SPEED_66MHz_PCIX = 0x02,
283 PCI_SPEED_100MHz_PCIX = 0x03,
284 PCI_SPEED_133MHz_PCIX = 0x04,
285 PCI_SPEED_66MHz_PCIX_ECC = 0x05,
286 PCI_SPEED_100MHz_PCIX_ECC = 0x06,
287 PCI_SPEED_133MHz_PCIX_ECC = 0x07,
288 PCI_SPEED_66MHz_PCIX_266 = 0x09,
289 PCI_SPEED_100MHz_PCIX_266 = 0x0a,
290 PCI_SPEED_133MHz_PCIX_266 = 0x0b,
291 AGP_UNKNOWN = 0x0c,
292 AGP_1X = 0x0d,
293 AGP_2X = 0x0e,
294 AGP_4X = 0x0f,
295 AGP_8X = 0x10,
296 PCI_SPEED_66MHz_PCIX_533 = 0x11,
297 PCI_SPEED_100MHz_PCIX_533 = 0x12,
298 PCI_SPEED_133MHz_PCIX_533 = 0x13,
299 PCIE_SPEED_2_5GT = 0x14,
300 PCIE_SPEED_5_0GT = 0x15,
301 PCIE_SPEED_8_0GT = 0x16,
302 PCIE_SPEED_16_0GT = 0x17,
303 PCIE_SPEED_32_0GT = 0x18,
304 PCIE_SPEED_64_0GT = 0x19,
305 PCI_SPEED_UNKNOWN = 0xff,
306 };
307
308 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
309 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
310
311 struct pci_vpd {
312 struct mutex lock;
313 unsigned int len;
314 u8 cap;
315 };
316
317 struct irq_affinity;
318 struct pcie_bwctrl_data;
319 struct pcie_link_state;
320 struct pci_sriov;
321 struct pci_p2pdma;
322 struct rcec_ea;
323
324 /* struct pci_dev - describes a PCI device
325 *
326 * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at
327 * LSB). 0 when the supported speeds cannot be
328 * determined (e.g., for Root Complex Integrated
329 * Endpoints without the relevant Capability
330 * Registers).
331 * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
332 * Conventional PCI Hot-Plug, ACPI slot).
333 * Such bridges are allocated additional MMIO and bus
334 * number resources to allow for hierarchy expansion.
335 * @is_pciehp: PCIe Hot-Plug Capable bridge.
336 */
337 struct pci_dev {
338 struct list_head bus_list; /* Node in per-bus list */
339 struct pci_bus *bus; /* Bus this device is on */
340 struct pci_bus *subordinate; /* Bus this device bridges to */
341
342 void *sysdata; /* Hook for sys-specific extension */
343 struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */
344 struct pci_slot *slot; /* Physical slot this device is in */
345
346 unsigned int devfn; /* Encoded device & function index */
347 unsigned short vendor;
348 unsigned short device;
349 unsigned short subsystem_vendor;
350 unsigned short subsystem_device;
351 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
352 u8 revision; /* PCI revision, low byte of class word */
353 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
354 #ifdef CONFIG_PCIEAER
355 u16 aer_cap; /* AER capability offset */
356 struct aer_info *aer_info; /* AER info for this device */
357 #endif
358 #ifdef CONFIG_PCIEPORTBUS
359 struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */
360 struct pci_dev *rcec; /* Associated RCEC device */
361 #endif
362 u32 devcap; /* PCIe Device Capabilities */
363 u16 rebar_cap; /* Resizable BAR capability offset */
364 u8 pcie_cap; /* PCIe capability offset */
365 u8 msi_cap; /* MSI capability offset */
366 u8 msix_cap; /* MSI-X capability offset */
367 u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */
368 u8 rom_base_reg; /* Config register controlling ROM */
369 u8 pin; /* Interrupt pin this device uses */
370 u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */
371 unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */
372
373 struct pci_driver *driver; /* Driver bound to this device */
374 u64 dma_mask; /* Mask of the bits of bus address this
375 device implements. Normally this is
376 0xffffffff. You only need to change
377 this if your device has broken DMA
378 or supports 64-bit transfers. */
379
380 struct device_dma_parameters dma_parms;
381
382 pci_power_t current_state; /* Current operating state. In ACPI,
383 this is D0-D3, D0 being fully
384 functional, and D3 being off. */
385 u8 pm_cap; /* PM capability offset */
386 unsigned int pme_support:5; /* Bitmask of states from which PME#
387 can be generated */
388 unsigned int pme_poll:1; /* Poll device's PME status bit */
389 unsigned int pinned:1; /* Whether this dev is pinned */
390 unsigned int config_rrs_sv:1; /* Config RRS software visibility */
391 unsigned int imm_ready:1; /* Supports Immediate Readiness */
392 unsigned int d1_support:1; /* Low power state D1 is supported */
393 unsigned int d2_support:1; /* Low power state D2 is supported */
394 unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
395 unsigned int no_d3cold:1; /* D3cold is forbidden */
396 unsigned int bridge_d3:1; /* Allow D3 for bridge */
397 unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
398 unsigned int mmio_always_on:1; /* Disallow turning off io/mem
399 decoding during BAR sizing */
400 unsigned int wakeup_prepared:1;
401 unsigned int skip_bus_pm:1; /* Internal: Skip bus-level PM */
402 unsigned int ignore_hotplug:1; /* Ignore hotplug events */
403 unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
404 controlled exclusively by
405 user sysfs */
406 unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
407 bit manually */
408 unsigned int d3hot_delay; /* D3hot->D0 transition time in ms */
409 unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
410
411 u16 l1ss; /* L1SS Capability pointer */
412 #ifdef CONFIG_PCIEASPM
413 struct pcie_link_state *link_state; /* ASPM link state */
414 unsigned int ltr_path:1; /* Latency Tolerance Reporting
415 supported from root to here */
416 #endif
417 unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
418 unsigned int eetlp_prefix_max:3; /* Max # of End-End TLP Prefixes, 0=not supported */
419
420 pci_channel_state_t error_state; /* Current connectivity state */
421 struct device dev; /* Generic device interface */
422
423 int cfg_size; /* Size of config space */
424
425 /*
426 * Instead of touching interrupt line and base address registers
427 * directly, use the values stored here. They might be different!
428 */
429 unsigned int irq;
430 struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
431 struct resource driver_exclusive_resource; /* driver exclusive resource ranges */
432
433 unsigned int transparent:1; /* Subtractive decode bridge */
434 unsigned int io_window:1; /* Bridge has I/O window */
435 unsigned int pref_window:1; /* Bridge has pref mem window */
436 unsigned int pref_64_window:1; /* Pref mem window is 64-bit */
437 unsigned int multifunction:1; /* Multi-function device */
438
439 unsigned int is_busmaster:1; /* Is busmaster */
440 unsigned int no_msi:1; /* May not use MSI */
441 unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
442 unsigned int block_cfg_access:1; /* Config space access blocked */
443 unsigned int broken_parity_status:1; /* Generates false positive parity */
444 unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */
445 unsigned int msi_enabled:1;
446 unsigned int msix_enabled:1;
447 unsigned int ari_enabled:1; /* ARI forwarding */
448 unsigned int ats_enabled:1; /* Address Translation Svc */
449 unsigned int pasid_enabled:1; /* Process Address Space ID */
450 unsigned int pri_enabled:1; /* Page Request Interface */
451 unsigned int tph_enabled:1; /* TLP Processing Hints */
452 unsigned int is_managed:1; /* Managed via devres */
453 unsigned int is_msi_managed:1; /* MSI release via devres installed */
454 unsigned int needs_freset:1; /* Requires fundamental reset */
455 unsigned int state_saved:1;
456 unsigned int is_physfn:1;
457 unsigned int is_virtfn:1;
458 unsigned int is_hotplug_bridge:1;
459 unsigned int is_pciehp:1;
460 unsigned int shpc_managed:1; /* SHPC owned by shpchp */
461 unsigned int is_thunderbolt:1; /* Thunderbolt controller */
462 /*
463 * Devices marked being untrusted are the ones that can potentially
464 * execute DMA attacks and similar. They are typically connected
465 * through external ports such as Thunderbolt but not limited to
466 * that. When an IOMMU is enabled they should be getting full
467 * mappings to make sure they cannot access arbitrary memory.
468 */
469 unsigned int untrusted:1;
470 /*
471 * Info from the platform, e.g., ACPI or device tree, may mark a
472 * device as "external-facing". An external-facing device is
473 * itself internal but devices downstream from it are external.
474 */
475 unsigned int external_facing:1;
476 unsigned int broken_intx_masking:1; /* INTx masking can't be used */
477 unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */
478 unsigned int irq_managed:1;
479 unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
480 unsigned int is_probed:1; /* Device probing in progress */
481 unsigned int link_active_reporting:1;/* Device capable of reporting link active */
482 unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
483 unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
484 unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
485 unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
486 unsigned int non_mappable_bars:1; /* BARs can't be mapped to user-space */
487 pci_dev_flags_t dev_flags;
488 atomic_t enable_cnt; /* pci_enable_device has been called */
489
490 spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
491 u32 saved_config_space[16]; /* Config space saved at suspend time */
492 struct hlist_head saved_cap_space;
493 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
494 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
495
496 #ifdef CONFIG_HOTPLUG_PCI_PCIE
497 unsigned int broken_cmd_compl:1; /* No compl for some cmds */
498 #endif
499 #ifdef CONFIG_PCIE_PTM
500 u16 ptm_cap; /* PTM Capability */
501 unsigned int ptm_root:1;
502 unsigned int ptm_enabled:1;
503 u8 ptm_granularity;
504 #endif
505 #ifdef CONFIG_PCI_MSI
506 void __iomem *msix_base;
507 raw_spinlock_t msi_lock;
508 #endif
509 struct pci_vpd vpd;
510 #ifdef CONFIG_PCIE_DPC
511 u16 dpc_cap;
512 unsigned int dpc_rp_extensions:1;
513 u8 dpc_rp_log_size;
514 #endif
515 struct pcie_bwctrl_data *link_bwctrl;
516 #ifdef CONFIG_PCI_ATS
517 union {
518 struct pci_sriov *sriov; /* PF: SR-IOV info */
519 struct pci_dev *physfn; /* VF: related PF */
520 };
521 u16 ats_cap; /* ATS Capability offset */
522 u8 ats_stu; /* ATS Smallest Translation Unit */
523 #endif
524 #ifdef CONFIG_PCI_PRI
525 u16 pri_cap; /* PRI Capability offset */
526 u32 pri_reqs_alloc; /* Number of PRI requests allocated */
527 unsigned int pasid_required:1; /* PRG Response PASID Required */
528 #endif
529 #ifdef CONFIG_PCI_PASID
530 u16 pasid_cap; /* PASID Capability offset */
531 u16 pasid_features;
532 #endif
533 #ifdef CONFIG_PCI_P2PDMA
534 struct pci_p2pdma __rcu *p2pdma;
535 #endif
536 #ifdef CONFIG_PCI_DOE
537 struct xarray doe_mbs; /* Data Object Exchange mailboxes */
538 #endif
539 #ifdef CONFIG_PCI_NPEM
540 struct npem *npem; /* Native PCIe Enclosure Management */
541 #endif
542 u16 acs_cap; /* ACS Capability offset */
543 u8 supported_speeds; /* Supported Link Speeds Vector */
544 phys_addr_t rom; /* Physical address if not from BAR */
545 size_t romlen; /* Length if not from BAR */
546 /*
547 * Driver name to force a match. Do not set directly, because core
548 * frees it. Use driver_set_override() to set or clear it.
549 */
550 const char *driver_override;
551
552 unsigned long priv_flags; /* Private flags for the PCI driver */
553
554 /* These methods index pci_reset_fn_methods[] */
555 u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
556
557 #ifdef CONFIG_PCIE_TPH
558 u16 tph_cap; /* TPH capability offset */
559 u8 tph_mode; /* TPH mode */
560 u8 tph_req_type; /* TPH requester type */
561 #endif
562 };
563
564 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
565 {
566 #ifdef CONFIG_PCI_IOV
567 if (dev->is_virtfn)
568 dev = dev->physfn;
569 #endif
570 return dev;
571 }
572
573 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
574
575 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
576 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
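
/*
 * Illustrative sketch (not part of this header): walking every PCI device in
 * the system. pci_get_device() manages the reference counts as it iterates,
 * so call pci_dev_put() on the current device if breaking out of the loop
 * early.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev)
 *		pci_info(pdev, "vendor %04x device %04x\n",
 *			 pdev->vendor, pdev->device);
 */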
577
578 static inline int pci_channel_offline(struct pci_dev *pdev)
579 {
580 return (pdev->error_state != pci_channel_io_normal);
581 }
582
583 /*
584 * Currently in ACPI spec, for each PCI host bridge, PCI Segment
585 * Group number is limited to a 16-bit value, therefore (int)-1 is
586 * not a valid PCI domain number, and can be used as a sentinel
587 * value indicating ->domain_nr is not set by the driver (and
588 * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
589 * pci_bus_find_domain_nr()).
590 */
591 #define PCI_DOMAIN_NR_NOT_SET (-1)
592
593 struct pci_host_bridge {
594 struct device dev;
595 struct pci_bus *bus; /* Root bus */
596 struct pci_ops *ops;
597 struct pci_ops *child_ops;
598 void *sysdata;
599 int busnr;
600 int domain_nr;
601 struct list_head windows; /* resource_entry */
602 struct list_head dma_ranges; /* dma ranges resource list */
603 u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
604 int (*map_irq)(const struct pci_dev *, u8, u8);
605 void (*release_fn)(struct pci_host_bridge *);
606 int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
607 void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
608 void *release_data;
609 unsigned int ignore_reset_delay:1; /* For entire hierarchy */
610 unsigned int no_ext_tags:1; /* No Extended Tags */
611 unsigned int no_inc_mrrs:1; /* No Increase MRRS */
612 unsigned int native_aer:1; /* OS may use PCIe AER */
613 unsigned int native_pcie_hotplug:1; /* OS may use PCIe hotplug */
614 unsigned int native_shpc_hotplug:1; /* OS may use SHPC hotplug */
615 unsigned int native_pme:1; /* OS may use PCIe PME */
616 unsigned int native_ltr:1; /* OS may use PCIe LTR */
617 unsigned int native_dpc:1; /* OS may use PCIe DPC */
618 unsigned int native_cxl_error:1; /* OS may use CXL RAS/Events */
619 unsigned int preserve_config:1; /* Preserve FW resource setup */
620 unsigned int size_windows:1; /* Enable root bus sizing */
621 unsigned int msi_domain:1; /* Bridge wants MSI domain */
622
623 /* Resource alignment requirements */
624 resource_size_t (*align_resource)(struct pci_dev *dev,
625 const struct resource *res,
626 resource_size_t start,
627 resource_size_t size,
628 resource_size_t align);
629 unsigned long private[] ____cacheline_aligned;
630 };
631
632 #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
633
634 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
635 {
636 return (void *)bridge->private;
637 }
638
639 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
640 {
641 return container_of(priv, struct pci_host_bridge, private);
642 }
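
/*
 * Illustrative sketch (not part of this header): a host controller driver
 * embedding its private state in the bridge allocation. "struct my_pcie",
 * "my_pcie_ops" and the platform device "pdev" are hypothetical.
 *
 *	struct pci_host_bridge *bridge;
 *	struct my_pcie *pcie;
 *
 *	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
 *	if (!bridge)
 *		return -ENOMEM;
 *	pcie = pci_host_bridge_priv(bridge);
 *	bridge->sysdata = pcie;
 *	bridge->ops = &my_pcie_ops;	// driver-defined struct pci_ops
 *	return pci_host_probe(bridge);
 */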
643
644 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
645 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
646 size_t priv);
647 void pci_free_host_bridge(struct pci_host_bridge *bridge);
648 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
649
650 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
651 void (*release_fn)(struct pci_host_bridge *),
652 void *release_data);
653
654 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
655
656 #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
657
658 struct pci_bus {
659 struct list_head node; /* Node in list of buses */
660 struct pci_bus *parent; /* Parent bus this bridge is on */
661 struct list_head children; /* List of child buses */
662 struct list_head devices; /* List of devices on this bus */
663 struct pci_dev *self; /* Bridge device as seen by parent */
664 struct list_head slots; /* List of slots on this bus;
665 protected by pci_slot_mutex */
666 struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
667 struct list_head resources; /* Address space routed to this bus */
668 struct resource busn_res; /* Bus numbers routed to this bus */
669
670 struct pci_ops *ops; /* Configuration access functions */
671 void *sysdata; /* Hook for sys-specific extension */
672 struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */
673
674 unsigned char number; /* Bus number */
675 unsigned char primary; /* Number of primary bridge */
676 unsigned char max_bus_speed; /* enum pci_bus_speed */
677 unsigned char cur_bus_speed; /* enum pci_bus_speed */
678 #ifdef CONFIG_PCI_DOMAINS_GENERIC
679 int domain_nr;
680 #endif
681
682 char name[48];
683
684 unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */
685 pci_bus_flags_t bus_flags; /* Inherited by child buses */
686 struct device *bridge;
687 struct device dev;
688 struct bin_attribute *legacy_io; /* Legacy I/O for this bus */
689 struct bin_attribute *legacy_mem; /* Legacy mem */
690 unsigned int is_added:1;
691 unsigned int unsafe_warn:1; /* warned about RW1C config write */
692 unsigned int flit_mode:1; /* Link in Flit mode */
693 };
694
695 #define to_pci_bus(n) container_of(n, struct pci_bus, dev)
696
697 static inline u16 pci_dev_id(struct pci_dev *dev)
698 {
699 return PCI_DEVID(dev->bus->number, dev->devfn);
700 }
701
702 /*
703 * Returns true if the PCI bus is root (behind host-PCI bridge),
704 * false otherwise
705 *
706 * Some code assumes that "bus->self == NULL" means that bus is a root bus.
707 * This is incorrect because "virtual" buses added for SR-IOV (via
708 * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
709 */
710 static inline bool pci_is_root_bus(struct pci_bus *pbus)
711 {
712 return !(pbus->parent);
713 }
714
715 /**
716 * pci_is_bridge - check if the PCI device is a bridge
717 * @dev: PCI device
718 *
719 * Return true if the PCI device is a bridge, whether or not it has a
720 * subordinate bus.
721 */
722 static inline bool pci_is_bridge(struct pci_dev *dev)
723 {
724 return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
725 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
726 }
727
728 /**
729 * pci_is_vga - check if the PCI device is a VGA device
730 * @pdev: PCI device
731 *
732 * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
733 * VGA Base Class and Sub-Classes:
734 *
735 * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
736 * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
737 *
738 * Return true if the PCI device is a VGA device and uses the legacy VGA
739 * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
740 * aliases).
741 */
742 static inline bool pci_is_vga(struct pci_dev *pdev)
743 {
744 if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
745 return true;
746
747 if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
748 return true;
749
750 return false;
751 }
752
753 /**
754 * pci_is_display - check if the PCI device is a display controller
755 * @pdev: PCI device
756 *
757 * Determine whether the given PCI device corresponds to a display
758 * controller. Display controllers are typically used for graphical output
759 * and are identified based on their class code.
760 *
761 * Return: true if the PCI device is a display controller, false otherwise.
762 */
763 static inline bool pci_is_display(struct pci_dev *pdev)
764 {
765 return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
766 }
767
768 #define for_each_pci_bridge(dev, bus) \
769 list_for_each_entry(dev, &bus->devices, bus_list) \
770 if (!pci_is_bridge(dev)) {} else
771
772 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
773 {
774 dev = pci_physfn(dev);
775 if (pci_is_root_bus(dev->bus))
776 return NULL;
777
778 return dev->bus->self;
779 }
780
781 #ifdef CONFIG_PCI_MSI
782 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
783 {
784 return pci_dev->msi_enabled || pci_dev->msix_enabled;
785 }
786 #else
787 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
788 #endif
789
790 /* Error values that may be returned by PCI functions */
791 #define PCIBIOS_SUCCESSFUL 0x00
792 #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81
793 #define PCIBIOS_BAD_VENDOR_ID 0x83
794 #define PCIBIOS_DEVICE_NOT_FOUND 0x86
795 #define PCIBIOS_BAD_REGISTER_NUMBER 0x87
796 #define PCIBIOS_SET_FAILED 0x88
797 #define PCIBIOS_BUFFER_TOO_SMALL 0x89
798
799 /* Translate above to generic errno for passing back through non-PCI code */
800 static inline int pcibios_err_to_errno(int err)
801 {
802 if (err <= PCIBIOS_SUCCESSFUL)
803 return err; /* Assume already errno */
804
805 switch (err) {
806 case PCIBIOS_FUNC_NOT_SUPPORTED:
807 return -ENOENT;
808 case PCIBIOS_BAD_VENDOR_ID:
809 return -ENOTTY;
810 case PCIBIOS_DEVICE_NOT_FOUND:
811 return -ENODEV;
812 case PCIBIOS_BAD_REGISTER_NUMBER:
813 return -EFAULT;
814 case PCIBIOS_SET_FAILED:
815 return -EIO;
816 case PCIBIOS_BUFFER_TOO_SMALL:
817 return -ENOSPC;
818 }
819
820 return -ERANGE;
821 }
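
/*
 * Illustrative sketch (not part of this header): converting a PCIBIOS_* style
 * return value from a config accessor into a normal errno before propagating
 * it out of a driver. "dev" and "pos" are hypothetical.
 *
 *	u16 ctrl;
 *	int ret;
 *
 *	ret = pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &ctrl);
 *	if (ret)
 *		return pcibios_err_to_errno(ret);
 */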
822
823 /* Low-level architecture-dependent routines */
824
825 struct pci_ops {
826 int (*add_bus)(struct pci_bus *bus);
827 void (*remove_bus)(struct pci_bus *bus);
828 void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
829 int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
830 int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
831 };
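
/*
 * Illustrative sketch (not part of this header): an ECAM-style controller can
 * often implement only map_bus() and reuse pci_generic_config_read()/write()
 * declared below. "struct my_pcie" and my_ecam_map_bus() are hypothetical;
 * PCIE_ECAM_OFFSET() comes from <linux/pci-ecam.h>.
 *
 *	static void __iomem *my_ecam_map_bus(struct pci_bus *bus,
 *					     unsigned int devfn, int where)
 *	{
 *		struct my_pcie *pcie = bus->sysdata;
 *
 *		return pcie->ecam_base +
 *		       PCIE_ECAM_OFFSET(bus->number, devfn, where);
 *	}
 *
 *	static struct pci_ops my_ecam_ops = {
 *		.map_bus = my_ecam_map_bus,
 *		.read    = pci_generic_config_read,
 *		.write   = pci_generic_config_write,
 *	};
 */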
832
833 /*
834 * ACPI needs to be able to access PCI config space before we've done a
835 * PCI bus scan and created pci_bus structures.
836 */
837 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
838 int reg, int len, u32 *val);
839 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
840 int reg, int len, u32 val);
841
842 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
843 typedef u64 pci_bus_addr_t;
844 #else
845 typedef u32 pci_bus_addr_t;
846 #endif
847
848 struct pci_bus_region {
849 pci_bus_addr_t start;
850 pci_bus_addr_t end;
851 };
852
853 struct pci_dynids {
854 spinlock_t lock; /* Protects list, index */
855 struct list_head list; /* For IDs added at runtime */
856 };
857
858
859 /*
860 * PCI Error Recovery System (PCI-ERS). If a PCI device driver provides
861 * a set of callbacks in struct pci_error_handlers, that device driver
862 * will be notified of PCI bus errors, and will be driven to recovery
863 * when an error occurs.
864 */
865
866 typedef unsigned int __bitwise pci_ers_result_t;
867
868 enum pci_ers_result {
869 /* No result/none/not supported in device driver */
870 PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
871
872 /* Device driver can recover without slot reset */
873 PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
874
875 /* Device driver wants slot to be reset */
876 PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
877
878 /* Device has completely failed, is unrecoverable */
879 PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
880
881 /* Device driver is fully recovered and operational */
882 PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
883
884 /* No AER capabilities registered for the driver */
885 PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
886 };
887
888 /* PCI bus error event callbacks */
889 struct pci_error_handlers {
890 /* PCI bus error detected on this device */
891 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
892 pci_channel_state_t error);
893
894 /* MMIO has been re-enabled, but not DMA */
895 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
896
897 /* PCI slot has been reset */
898 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
899
900 /* PCI function reset prepare or completed */
901 void (*reset_prepare)(struct pci_dev *dev);
902 void (*reset_done)(struct pci_dev *dev);
903
904 /* Device driver may resume normal operations */
905 void (*resume)(struct pci_dev *dev);
906
907 /* Allow device driver to record more details of a correctable error */
908 void (*cor_error_detected)(struct pci_dev *dev);
909 };
910
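/*
 * Illustrative sketch (not part of this header): a minimal set of error
 * handlers. The my_*() helpers are hypothetical driver functions.
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  pci_channel_state_t state)
 *	{
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		my_stop_io(pdev);	// quiesce the device
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
 *	{
 *		pci_restore_state(pdev);
 *		return PCI_ERS_RESULT_RECOVERED;
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume_io,	// hypothetical
 *	};
 */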
911
912 struct module;
913
914 /**
915 * struct pci_driver - PCI driver structure
916 * @name: Driver name.
917 * @id_table: Pointer to table of device IDs the driver is
918 * interested in. Most drivers should export this
919 * table using MODULE_DEVICE_TABLE(pci,...).
920 * @probe: This probing function gets called (during execution
921 * of pci_register_driver() for already existing
922 * devices or later if a new device gets inserted) for
923 * all PCI devices which match the ID table and are not
924 * "owned" by the other drivers yet. This function gets
925 * passed a "struct pci_dev \*" for each device whose
926 * entry in the ID table matches the device. The probe
927 * function returns zero when the driver chooses to
928 * take "ownership" of the device or an error code
929 * (negative number) otherwise.
930 * The probe function always gets called from process
931 * context, so it can sleep.
932 * @remove: The remove() function gets called whenever a device
933 * being handled by this driver is removed (either during
934 * deregistration of the driver or when it's manually
935 * pulled out of a hot-pluggable slot).
936 * The remove function always gets called from process
937 * context, so it can sleep.
938 * @suspend: Put device into low power state.
939 * @resume: Wake device from low power state.
940 * (Please see Documentation/power/pci.rst for descriptions
941 * of PCI Power Management and the related functions.)
942 * @shutdown: Hook into reboot_notifier_list (kernel/sys.c).
943 * Intended to stop any idling DMA operations.
944 * Useful for enabling wake-on-lan (NIC) or changing
945 * the power state of a device before reboot.
946 * e.g. drivers/net/e100.c.
947 * @sriov_configure: Optional driver callback to allow configuration of
948 * number of VFs to enable via sysfs "sriov_numvfs" file.
949 * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
950 * vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
951 * This will change MSI-X Table Size in the VF Message Control
952 * registers.
953 * @sriov_get_vf_total_msix: PF driver callback to get the total number of
954 * MSI-X vectors available for distribution to the VFs.
955 * @err_handler: See Documentation/PCI/pci-error-recovery.rst
956 * @groups: Sysfs attribute groups.
957 * @dev_groups: Attributes attached to the device that will be
958 * created once it is bound to the driver.
959 * @driver: Driver model structure.
960 * @dynids: List of dynamically added device IDs.
961 * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
962 * For most device drivers, no need to care about this flag
963 * as long as all DMAs are handled through the kernel DMA API.
964 * For some special ones, for example VFIO drivers, they know
965 * how to manage the DMA themselves and set this flag so that
966 * the IOMMU layer will allow them to setup and manage their
967 * own I/O address space.
968 */
969 struct pci_driver {
970 const char *name;
971 const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */
972 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
973 void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
974 int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
975 int (*resume)(struct pci_dev *dev); /* Device woken up */
976 void (*shutdown)(struct pci_dev *dev);
977 int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
978 int (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
979 u32 (*sriov_get_vf_total_msix)(struct pci_dev *pf);
980 const struct pci_error_handlers *err_handler;
981 const struct attribute_group **groups;
982 const struct attribute_group **dev_groups;
983 struct device_driver driver;
984 struct pci_dynids dynids;
985 bool driver_managed_dma;
986 };
987
988 #define to_pci_driver(__drv) \
989 ( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
990
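/*
 * Illustrative sketch (not part of this header): the usual driver skeleton.
 * my_probe(), my_remove() and my_id_table are hypothetical; see the ID macros
 * below for building the table.
 *
 *	static struct pci_driver my_driver = {
 *		.name		= "my_driver",
 *		.id_table	= my_id_table,
 *		.probe		= my_probe,
 *		.remove		= my_remove,
 *	};
 *	module_pci_driver(my_driver);
 */
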
991 /**
992 * PCI_DEVICE - macro used to describe a specific PCI device
993 * @vend: the 16 bit PCI Vendor ID
994 * @dev: the 16 bit PCI Device ID
995 *
996 * This macro is used to create a struct pci_device_id that matches a
997 * specific device. The subvendor and subdevice fields will be set to
998 * PCI_ANY_ID.
999 */
1000 #define PCI_DEVICE(vend,dev) \
1001 .vendor = (vend), .device = (dev), \
1002 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1003
1004 /**
1005 * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1006 * override_only flags.
1007 * @vend: the 16 bit PCI Vendor ID
1008 * @dev: the 16 bit PCI Device ID
1009 * @driver_override: the 32 bit PCI Device override_only
1010 *
1011 * This macro is used to create a struct pci_device_id that matches only a
1012 * driver_override device. The subvendor and subdevice fields will be set to
1013 * PCI_ANY_ID.
1014 */
1015 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1016 .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1017 .subdevice = PCI_ANY_ID, .override_only = (driver_override)
1018
1019 /**
1020 * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1021 * "driver_override" PCI device.
1022 * @vend: the 16 bit PCI Vendor ID
1023 * @dev: the 16 bit PCI Device ID
1024 *
1025 * This macro is used to create a struct pci_device_id that matches a
1026 * specific device. The subvendor and subdevice fields will be set to
1027 * PCI_ANY_ID and the driver_override will be set to
1028 * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1029 */
1030 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1031 PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1032
1033 /**
1034 * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1035 * @vend: the 16 bit PCI Vendor ID
1036 * @dev: the 16 bit PCI Device ID
1037 * @subvend: the 16 bit PCI Subvendor ID
1038 * @subdev: the 16 bit PCI Subdevice ID
1039 *
1040 * This macro is used to create a struct pci_device_id that matches a
1041 * specific device with subsystem information.
1042 */
1043 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1044 .vendor = (vend), .device = (dev), \
1045 .subvendor = (subvend), .subdevice = (subdev)
1046
1047 /**
1048 * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1049 * @dev_class: the class, subclass, prog-if triple for this device
1050 * @dev_class_mask: the class mask for this device
1051 *
1052 * This macro is used to create a struct pci_device_id that matches a
1053 * specific PCI class. The vendor, device, subvendor, and subdevice
1054 * fields will be set to PCI_ANY_ID.
1055 */
1056 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1057 .class = (dev_class), .class_mask = (dev_class_mask), \
1058 .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1059 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1060
1061 /**
1062 * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1063 * @vend: the vendor name
1064 * @dev: the 16 bit PCI Device ID
1065 *
1066 * This macro is used to create a struct pci_device_id that matches a
1067 * specific PCI device. The subvendor, and subdevice fields will be set
1068 * to PCI_ANY_ID. The macro allows the next field to follow as the device
1069 * private data.
1070 */
1071 #define PCI_VDEVICE(vend, dev) \
1072 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1073 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1074
1075 /**
1076 * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1077 * @vend: the vendor name
1078 * @dev: the 16 bit PCI Device ID
1079 * @subvend: the 16 bit PCI Subvendor ID
1080 * @subdev: the 16 bit PCI Subdevice ID
1081 *
1082 * Generate the pci_device_id struct layout for the specific PCI
1083 * device/subdevice. Private data may follow the output.
1084 */
1085 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1086 .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1087 .subvendor = (subvend), .subdevice = (subdev), 0, 0
1088
1089 /**
1090 * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1091 * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1092 * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1093 * @data: the driver data to be filled
1094 *
1095 * This macro is used to create a struct pci_device_id that matches a
1096 * specific PCI device. The subvendor, and subdevice fields will be set
1097 * to PCI_ANY_ID.
1098 */
1099 #define PCI_DEVICE_DATA(vend, dev, data) \
1100 .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1101 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1102 .driver_data = (kernel_ulong_t)(data)
1103
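/*
 * Illustrative sketch (not part of this header): an ID table built with the
 * macros above. 0x1af4/0x1000 are arbitrary example IDs; PCI_VENDOR_ID_MYVEND
 * and PCI_DEVICE_ID_MYVEND_MYDEV are hypothetical.
 *
 *	static const struct pci_device_id my_id_table[] = {
 *		{ PCI_DEVICE(0x1af4, 0x1000) },
 *		{ PCI_DEVICE_DATA(MYVEND, MYDEV, &my_driver_data) },
 *		{ }	// terminating all-zero entry
 *	};
 *	MODULE_DEVICE_TABLE(pci, my_id_table);
 */
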
1104 enum {
1105 PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
1106 PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
1107 PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */
1108 PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */
1109 PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */
1110 PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
1111 PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */
1112 };
1113
1114 #define PCI_IRQ_INTX (1 << 0) /* Allow INTx interrupts */
1115 #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */
1116 #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */
1117 #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */
1118
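/*
 * Illustrative sketch (not part of this header): requesting vectors with a
 * preference for MSI-X, falling back to MSI and then INTx, using
 * pci_alloc_irq_vectors() declared later in this header. "pdev" and the
 * vector counts are hypothetical.
 *
 *	int nvec, irq;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_INTX);
 *	if (nvec < 0)
 *		return nvec;
 *	irq = pci_irq_vector(pdev, 0);	// Linux IRQ number for vector 0
 */
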
1119 /* These external functions are only available when PCI support is enabled */
1120 #ifdef CONFIG_PCI
1121
1122 extern unsigned int pci_flags;
1123
1124 static inline void pci_set_flags(int flags) { pci_flags = flags; }
1125 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
1126 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
1127 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1128
1129 void pcie_bus_configure_settings(struct pci_bus *bus);
1130
1131 enum pcie_bus_config_types {
1132 PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */
1133 PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */
1134 PCIE_BUS_SAFE, /* Use largest MPS that boot-time devices support */
1135 PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */
1136 PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */
1137 };
1138
1139 extern enum pcie_bus_config_types pcie_bus_config;
1140
1141 extern const struct bus_type pci_bus_type;
1142
1143 /* Do NOT directly access this variable, unless you are arch-specific PCI
1144 * code, or PCI core code. */
1145 extern struct list_head pci_root_buses; /* List of all known PCI buses */
1146 /* Some device drivers need to know if PCI is initialized */
1147 int no_pci_devices(void);
1148
1149 void pcibios_resource_survey_bus(struct pci_bus *bus);
1150 void pcibios_bus_add_device(struct pci_dev *pdev);
1151 void pcibios_add_bus(struct pci_bus *bus);
1152 void pcibios_remove_bus(struct pci_bus *bus);
1153 void pcibios_fixup_bus(struct pci_bus *);
1154 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1155 /* Architecture-specific versions may override this (weak) */
1156 char *pcibios_setup(char *str);
1157
1158 /* Used only when drivers/pci/setup.c is used */
1159 resource_size_t pcibios_align_resource(void *, const struct resource *,
1160 resource_size_t,
1161 resource_size_t);
1162
1163 /* Generic PCI functions used internally */
1164
1165 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1166 struct resource *res);
1167 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1168 struct pci_bus_region *region);
1169 void pcibios_scan_specific_bus(int busn);
1170 struct pci_bus *pci_find_bus(int domain, int busnr);
1171 void pci_bus_add_devices(const struct pci_bus *bus);
1172 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1173 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1174 struct pci_ops *ops, void *sysdata,
1175 struct list_head *resources);
1176 int pci_host_probe(struct pci_host_bridge *bridge);
1177 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1178 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1179 void pci_bus_release_busn_res(struct pci_bus *b);
1180 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1181 struct pci_ops *ops, void *sysdata,
1182 struct list_head *resources);
1183 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1184 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1185 int busnr);
1186 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1187 const char *name,
1188 struct hotplug_slot *hotplug);
1189 void pci_destroy_slot(struct pci_slot *slot);
1190 #ifdef CONFIG_SYSFS
1191 void pci_dev_assign_slot(struct pci_dev *dev);
1192 #else
1193 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1194 #endif
1195 int pci_scan_slot(struct pci_bus *bus, int devfn);
1196 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1197 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1198 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1199 void pci_bus_add_device(struct pci_dev *dev);
1200 void pci_read_bridge_bases(struct pci_bus *child);
1201 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1202 struct resource *res);
1203 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1204 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1205 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1206 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1207 void pci_dev_put(struct pci_dev *dev);
1208 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
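
/*
 * Illustrative sketch (not part of this header): the cleanup class defined
 * above pairs with __free() from <linux/cleanup.h> so the device reference is
 * dropped automatically when the pointer leaves scope. The device ID 0x1234
 * is an arbitrary example.
 *
 *	struct pci_dev *pdev __free(pci_dev_put) =
 *		pci_get_device(PCI_VENDOR_ID_INTEL, 0x1234, NULL);
 *	if (!pdev)
 *		return -ENODEV;
 *	// use pdev; pci_dev_put() runs on every return path
 */
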
1209 void pci_remove_bus(struct pci_bus *b);
1210 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1211 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1212 void pci_stop_root_bus(struct pci_bus *bus);
1213 void pci_remove_root_bus(struct pci_bus *bus);
1214 void pci_setup_cardbus(struct pci_bus *bus);
1215 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1216 void pci_sort_breadthfirst(void);
1217 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1218 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1219
1220 /* Generic PCI functions exported to card drivers */
1221
1222 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1223 u8 pci_find_capability(struct pci_dev *dev, int cap);
1224 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1225 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1226 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1227 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1228 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1229 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1230 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1231 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1232
1233 u64 pci_get_dsn(struct pci_dev *dev);
1234
1235 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1236 struct pci_dev *from);
1237 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1238 unsigned int ss_vendor, unsigned int ss_device,
1239 struct pci_dev *from);
1240 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1241 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1242 unsigned int devfn);
1243 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1244 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1245
1246 int pci_dev_present(const struct pci_device_id *ids);
1247
1248 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1249 int where, u8 *val);
1250 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1251 int where, u16 *val);
1252 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1253 int where, u32 *val);
1254 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1255 int where, u8 val);
1256 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1257 int where, u16 val);
1258 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1259 int where, u32 val);
1260
1261 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1262 int where, int size, u32 *val);
1263 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1264 int where, int size, u32 val);
1265 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1266 int where, int size, u32 *val);
1267 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1268 int where, int size, u32 val);
1269
1270 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1271
1272 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1273 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1274 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1275 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1276 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1277 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1278 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1279 u32 clear, u32 set);
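
/*
 * Illustrative sketch (not part of this header): a read-modify-write of the
 * Command register using the accessors above. "pdev" is hypothetical; drivers
 * normally rely on pci_enable_device()/pci_set_master() instead of poking
 * PCI_COMMAND directly.
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	cmd |= PCI_COMMAND_MEMORY;
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */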
1280
1281 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1282 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1283 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1284 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1285 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1286 u16 clear, u16 set);
1287 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1288 u16 clear, u16 set);
1289 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1290 u32 clear, u32 set);
1291
1292 /**
1293 * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1294 * @dev: PCI device structure of the PCI Express device
1295 * @pos: PCI Express Capability Register
1296 * @clear: Clear bitmask
1297 * @set: Set bitmask
1298 *
1299 * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1300 * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1301 * Capability Registers are accessed concurrently in RMW fashion, hence
1302 * require locking which is handled transparently to the caller.
1303 */
1304 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1305 int pos,
1306 u16 clear, u16 set)
1307 {
1308 switch (pos) {
1309 case PCI_EXP_LNKCTL:
1310 case PCI_EXP_LNKCTL2:
1311 case PCI_EXP_RTCTL:
1312 return pcie_capability_clear_and_set_word_locked(dev, pos,
1313 clear, set);
1314 default:
1315 return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1316 clear, set);
1317 }
1318 }
1319
1320 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1321 u16 set)
1322 {
1323 return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1324 }
1325
1326 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1327 u32 set)
1328 {
1329 return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1330 }
1331
1332 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1333 u16 clear)
1334 {
1335 return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1336 }
1337
1338 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1339 u32 clear)
1340 {
1341 return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1342 }
1343
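/*
 * Illustrative sketch (not part of this header): selecting ASPM L0s in the
 * Link Control register through the locked RMW wrapper above (PCI_EXP_LNKCTL
 * is one of the registers it serializes). "pdev" is hypothetical.
 *
 *	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
 *					   PCI_EXP_LNKCTL_ASPMC,
 *					   PCI_EXP_LNKCTL_ASPM_L0S);
 */
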
1344 /* User-space driven config access */
1345 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1346 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1347 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1348 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1349 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1350 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1351
1352 int __must_check pci_enable_device(struct pci_dev *dev);
1353 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1354 int __must_check pci_reenable_device(struct pci_dev *);
1355 int __must_check pcim_enable_device(struct pci_dev *pdev);
1356 void pcim_pin_device(struct pci_dev *pdev);
1357
1358 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1359 {
1360 /*
1361 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1362 * writable and no quirk has marked the feature broken.
1363 */
1364 return !pdev->broken_intx_masking;
1365 }
1366
1367 static inline int pci_is_enabled(struct pci_dev *pdev)
1368 {
1369 return (atomic_read(&pdev->enable_cnt) > 0);
1370 }
1371
1372 static inline int pci_is_managed(struct pci_dev *pdev)
1373 {
1374 return pdev->is_managed;
1375 }
1376
1377 void pci_disable_device(struct pci_dev *dev);
1378
1379 extern unsigned int pcibios_max_latency;
1380 void pci_set_master(struct pci_dev *dev);
1381 void pci_clear_master(struct pci_dev *dev);
1382
1383 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1384 int pci_set_cacheline_size(struct pci_dev *dev);
1385 int __must_check pci_set_mwi(struct pci_dev *dev);
1386 int __must_check pcim_set_mwi(struct pci_dev *dev);
1387 int pci_try_set_mwi(struct pci_dev *dev);
1388 void pci_clear_mwi(struct pci_dev *dev);
1389 void pci_disable_parity(struct pci_dev *dev);
1390 void pci_intx(struct pci_dev *dev, int enable);
1391 bool pci_check_and_mask_intx(struct pci_dev *dev);
1392 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1393 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1394 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1395 int pcix_get_max_mmrbc(struct pci_dev *dev);
1396 int pcix_get_mmrbc(struct pci_dev *dev);
1397 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1398 int pcie_get_readrq(struct pci_dev *dev);
1399 int pcie_set_readrq(struct pci_dev *dev, int rq);
1400 int pcie_get_mps(struct pci_dev *dev);
1401 int pcie_set_mps(struct pci_dev *dev, int mps);
1402 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1403 enum pci_bus_speed *speed,
1404 enum pcie_link_width *width);
1405 int pcie_link_speed_mbps(struct pci_dev *pdev);
1406 void pcie_print_link_status(struct pci_dev *dev);
1407 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1408 int pcie_flr(struct pci_dev *dev);
1409 int __pci_reset_function_locked(struct pci_dev *dev);
1410 int pci_reset_function(struct pci_dev *dev);
1411 int pci_reset_function_locked(struct pci_dev *dev);
1412 int pci_try_reset_function(struct pci_dev *dev);
1413 int pci_probe_reset_slot(struct pci_slot *slot);
1414 int pci_probe_reset_bus(struct pci_bus *bus);
1415 int pci_reset_bus(struct pci_dev *dev);
1416 void pci_reset_secondary_bus(struct pci_dev *dev);
1417 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1418 void pci_update_resource(struct pci_dev *dev, int resno);
1419 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1420 void pci_release_resource(struct pci_dev *dev, int resno);
pci_rebar_bytes_to_size(u64 bytes)1421 static inline int pci_rebar_bytes_to_size(u64 bytes)
1422 {
1423 bytes = roundup_pow_of_two(bytes);
1424
1425 /* Return BAR size as defined in the resizable BAR specification */
1426 return max(ilog2(bytes), 20) - 20;
1427 }
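/*
 * Worked examples (informative only): 1 MB yields size 0, 256 MB yields 8,
 * and a request of, say, 300 MB is first rounded up to 512 MB and yields 9.
 * Requests smaller than 1 MB clamp to 0.
 */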
1428
1429 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1430 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1431 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1432 bool pci_device_is_present(struct pci_dev *pdev);
1433 void pci_ignore_hotplug(struct pci_dev *dev);
1434 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1435 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1436
1437 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1438 irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1439 const char *fmt, ...);
1440 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1441
1442 /* ROM control related routines */
1443 int pci_enable_rom(struct pci_dev *pdev);
1444 void pci_disable_rom(struct pci_dev *pdev);
1445 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1446 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1447
1448 /* Power management related routines */
1449 int pci_save_state(struct pci_dev *dev);
1450 void pci_restore_state(struct pci_dev *dev);
1451 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1452 int pci_load_saved_state(struct pci_dev *dev,
1453 struct pci_saved_state *state);
1454 int pci_load_and_free_saved_state(struct pci_dev *dev,
1455 struct pci_saved_state **state);
1456 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1457 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1458 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1459 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1460 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1461 void pci_pme_active(struct pci_dev *dev, bool enable);
1462 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1463 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1464 int pci_prepare_to_sleep(struct pci_dev *dev);
1465 int pci_back_from_sleep(struct pci_dev *dev);
1466 bool pci_dev_run_wake(struct pci_dev *dev);
1467 void pci_d3cold_enable(struct pci_dev *dev);
1468 void pci_d3cold_disable(struct pci_dev *dev);
1469 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1470 void pci_resume_bus(struct pci_bus *bus);
1471 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1472
1473 /* For use by arch with custom probe code */
1474 void set_pcie_port_type(struct pci_dev *pdev);
1475 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1476
1477 /* Functions for PCI Hotplug drivers to use */
1478 unsigned int pci_rescan_bus(struct pci_bus *bus);
1479 void pci_lock_rescan_remove(void);
1480 void pci_unlock_rescan_remove(void);
1481
1482 /* Vital Product Data routines */
1483 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1484 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1485 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1486 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1487
1488 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1489 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1490 void pci_bus_assign_resources(const struct pci_bus *bus);
1491 void pci_bus_claim_resources(struct pci_bus *bus);
1492 void pci_bus_size_bridges(struct pci_bus *bus);
1493 int pci_claim_resource(struct pci_dev *, int);
1494 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1495 void pci_assign_unassigned_resources(void);
1496 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1497 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1498 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1499 int pci_enable_resources(struct pci_dev *, int mask);
1500 void pci_assign_irq(struct pci_dev *dev);
1501 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1502 #define HAVE_PCI_REQ_REGIONS 2
1503 int __must_check pci_request_regions(struct pci_dev *, const char *);
1504 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1505 void pci_release_regions(struct pci_dev *);
1506 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1507 void pci_release_region(struct pci_dev *, int);
1508 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1509 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1510 void pci_release_selected_regions(struct pci_dev *, int);
1511
1512 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1513 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1514 unsigned int len, const char *name)
1515 {
1516 return __request_region(&pdev->driver_exclusive_resource, offset, len,
1517 name, IORESOURCE_EXCLUSIVE);
1518 }
1519
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1520 static inline void pci_release_config_region(struct pci_dev *pdev,
1521 unsigned int offset,
1522 unsigned int len)
1523 {
1524 __release_region(&pdev->driver_exclusive_resource, offset, len);
1525 }
1526
1527 /* drivers/pci/bus.c */
1528 void pci_add_resource(struct list_head *resources, struct resource *res);
1529 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1530 resource_size_t offset);
1531 void pci_free_resource_list(struct list_head *resources);
1532 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1533 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1534 void pci_bus_remove_resources(struct pci_bus *bus);
1535 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1536 int devm_request_pci_bus_resources(struct device *dev,
1537 struct list_head *resources);
1538
1539 /* Temporary until new and working PCI SBR API in place */
1540 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1541
1542 #define __pci_bus_for_each_res0(bus, res, ...) \
1543 for (unsigned int __b = 0; \
1544 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1545 __b++)
1546
1547 #define __pci_bus_for_each_res1(bus, res, __b) \
1548 for (__b = 0; \
1549 (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1550 __b++)
1551
1552 /**
1553 * pci_bus_for_each_resource - iterate over PCI bus resources
1554 * @bus: the PCI bus
1555 * @res: pointer to the current resource
1556 * @...: optional index of the current resource
1557 *
1558 * Iterate over PCI bus resources. The iteration first walks the PCI bus
1559 * resource array, which holds at most %PCI_BRIDGE_RESOURCE_NUM entries,
1560 * and then continues with the separate list of additional resources, if
1561 * that list is not empty. That is why the logical OR is used in the loop.
1562 *
1563 * Possible usage:
1564 *
1565 * struct pci_bus *bus = ...;
1566 * struct resource *res;
1567 * unsigned int i;
1568 *
1569 * // With optional index
1570 * pci_bus_for_each_resource(bus, res, i)
1571 * pr_info("PCI bus resource[%u]: %pR\n", i, res);
1572 *
1573 * // Without index
1574 * pci_bus_for_each_resource(bus, res)
1575 * _do_something_(res);
1576 */
1577 #define pci_bus_for_each_resource(bus, res, ...) \
1578 CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
1579 (bus, res, __VA_ARGS__)
1580
1581 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1582 struct resource *res, resource_size_t size,
1583 resource_size_t align, resource_size_t min,
1584 unsigned long type_mask,
1585 resource_alignf alignf,
1586 void *alignf_data);
1587
1588
1589 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1590 resource_size_t size);
1591 unsigned long pci_address_to_pio(phys_addr_t addr);
1592 phys_addr_t pci_pio_to_address(unsigned long pio);
1593 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1594 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1595 phys_addr_t phys_addr);
1596 void pci_unmap_iospace(struct resource *res);
1597 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1598 resource_size_t offset,
1599 resource_size_t size);
1600 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1601 struct resource *res);
1602
pci_bus_address(struct pci_dev * pdev,int bar)1603 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1604 {
1605 struct pci_bus_region region;
1606
1607 pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1608 return region.start;
1609 }
1610
1611 /* Proper probing supporting hot-pluggable devices */
1612 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1613 const char *mod_name);
1614
1615 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1616 #define pci_register_driver(driver) \
1617 __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1618
1619 void pci_unregister_driver(struct pci_driver *dev);
1620
1621 /**
1622 * module_pci_driver() - Helper macro for registering a PCI driver
1623 * @__pci_driver: pci_driver struct
1624 *
1625 * Helper macro for PCI drivers which do not do anything special in module
1626 * init/exit. This eliminates a lot of boilerplate. Each module may only
1627 * use this macro once, and calling it replaces module_init() and module_exit().
1628 */
1629 #define module_pci_driver(__pci_driver) \
1630 module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
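/*
 * Minimal usage sketch; the "foo" driver, its ID table, and its callbacks are
 * hypothetical:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *	module_pci_driver(foo_pci_driver);
 */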
1631
1632 /**
1633 * builtin_pci_driver() - Helper macro for registering a PCI driver
1634 * @__pci_driver: pci_driver struct
1635 *
1636 * Helper macro for PCI drivers which do not do anything special in their
1637 * init code. This eliminates a lot of boilerplate. Each driver may only
1638 * use this macro once, and calling it replaces device_initcall(...).
1639 */
1640 #define builtin_pci_driver(__pci_driver) \
1641 builtin_driver(__pci_driver, pci_register_driver)
1642
1643 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1644 int pci_add_dynid(struct pci_driver *drv,
1645 unsigned int vendor, unsigned int device,
1646 unsigned int subvendor, unsigned int subdevice,
1647 unsigned int class, unsigned int class_mask,
1648 unsigned long driver_data);
1649 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1650 struct pci_dev *dev);
1651 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1652 int pass);
1653
1654 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1655 void *userdata);
1656 int pci_cfg_space_size(struct pci_dev *dev);
1657 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1658 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1659 unsigned long type);
1660
1661 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1662 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1663
1664 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1665 unsigned int command_bits, u32 flags);
1666
1667 /*
1668 * Virtual interrupts allow more interrupts to be allocated
1669 * than the device has interrupts for. These are not programmed
1670 * into the device's MSI-X table and must be handled by the
1671 * driver through some other means.
1672 */
1673 #define PCI_IRQ_VIRTUAL (1 << 4)
1674
1675 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1676
1677 #include <linux/dmapool.h>
1678
1679 struct msix_entry {
1680 u32 vector; /* Kernel uses to write allocated vector */
1681 u16 entry; /* Driver uses to specify entry, OS writes */
1682 };
1683
1684 #ifdef CONFIG_PCI_MSI
1685 int pci_msi_vec_count(struct pci_dev *dev);
1686 void pci_disable_msi(struct pci_dev *dev);
1687 int pci_msix_vec_count(struct pci_dev *dev);
1688 void pci_disable_msix(struct pci_dev *dev);
1689 void pci_restore_msi_state(struct pci_dev *dev);
1690 bool pci_msi_enabled(void);
1691 int pci_enable_msi(struct pci_dev *dev);
1692 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1693 int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1694 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1695 struct msix_entry *entries, int nvec)
1696 {
1697 int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1698 if (rc < 0)
1699 return rc;
1700 return 0;
1701 }
1702 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1703 unsigned int max_vecs, unsigned int flags);
1704 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1705 unsigned int max_vecs, unsigned int flags,
1706 struct irq_affinity *affd);
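/*
 * Typical allocation sketch ("foo" names are hypothetical): request up to
 * eight vectors of any supported type, then look up the Linux IRQ number of
 * vector 0 with pci_irq_vector(). pci_free_irq_vectors() releases them again.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
 *	if (nvec < 0)
 *		return nvec;
 *	err = request_irq(pci_irq_vector(pdev, 0), foo_irq_handler, 0,
 *			  "foo", foo);
 */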
1707
1708 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1709 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1710 const struct irq_affinity_desc *affdesc);
1711 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1712
1713 void pci_free_irq_vectors(struct pci_dev *dev);
1714 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1715 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1716
1717 #else
pci_msi_vec_count(struct pci_dev * dev)1718 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1719 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1720 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1721 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1722 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1723 static inline bool pci_msi_enabled(void) { return false; }
pci_enable_msi(struct pci_dev * dev)1724 static inline int pci_enable_msi(struct pci_dev *dev)
1725 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1726 static inline int pci_enable_msix_range(struct pci_dev *dev,
1727 struct msix_entry *entries, int minvec, int maxvec)
1728 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1729 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1730 struct msix_entry *entries, int nvec)
1731 { return -ENOSYS; }
1732
1733 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1734 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1735 unsigned int max_vecs, unsigned int flags,
1736 struct irq_affinity *aff_desc)
1737 {
1738 if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1739 return 1;
1740 return -ENOSPC;
1741 }
1742 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1743 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1744 unsigned int max_vecs, unsigned int flags)
1745 {
1746 return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1747 flags, NULL);
1748 }
1749
pci_msix_can_alloc_dyn(struct pci_dev * dev)1750 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1751 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1752 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1753 const struct irq_affinity_desc *affdesc)
1754 {
1755 struct msi_map map = { .index = -ENOSYS, };
1756
1757 return map;
1758 }
1759
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1760 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1761 {
1762 }
1763
pci_free_irq_vectors(struct pci_dev * dev)1764 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1765 {
1766 }
1767
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1768 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1769 {
1770 if (WARN_ON_ONCE(nr > 0))
1771 return -EINVAL;
1772 return dev->irq;
1773 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1774 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1775 int vec)
1776 {
1777 return cpu_possible_mask;
1778 }
1779 #endif
1780
1781 /**
1782 * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1783 * @d: the INTx IRQ domain
1784 * @node: the DT node for the device whose interrupt we're translating
1785 * @intspec: the interrupt specifier data from the DT
1786 * @intsize: the number of entries in @intspec
1787 * @out_hwirq: pointer at which to write the hwirq number
1788 * @out_type: pointer at which to write the interrupt type
1789 *
1790 * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1791 * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1792 * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1793 * INTx value to obtain the hwirq number.
1794 *
1795 * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1796 */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1797 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1798 struct device_node *node,
1799 const u32 *intspec,
1800 unsigned int intsize,
1801 unsigned long *out_hwirq,
1802 unsigned int *out_type)
1803 {
1804 const u32 intx = intspec[0];
1805
1806 if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1807 return -EINVAL;
1808
1809 *out_hwirq = intx - PCI_INTERRUPT_INTA;
1810 return 0;
1811 }
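/*
 * For example, a device tree interrupt specifier of <2> (INTB, i.e.
 * PCI_INTERRUPT_INTB) translates to a hwirq of 1.
 */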
1812
1813 #ifdef CONFIG_PCIEPORTBUS
1814 extern bool pcie_ports_disabled;
1815 extern bool pcie_ports_native;
1816
1817 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1818 bool use_lt);
1819 #else
1820 #define pcie_ports_disabled true
1821 #define pcie_ports_native false
1822
pcie_set_target_speed(struct pci_dev * port,enum pci_bus_speed speed_req,bool use_lt)1823 static inline int pcie_set_target_speed(struct pci_dev *port,
1824 enum pci_bus_speed speed_req,
1825 bool use_lt)
1826 {
1827 return -EOPNOTSUPP;
1828 }
1829 #endif
1830
1831 #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1832 #define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */
1833 #define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */
1834 #define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */
1835 #define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */
1836 #define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */
1837 #define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\
1838 PCIE_LINK_STATE_L1 |\
1839 PCIE_LINK_STATE_L1_1 |\
1840 PCIE_LINK_STATE_L1_2 |\
1841 PCIE_LINK_STATE_L1_1_PCIPM |\
1842 PCIE_LINK_STATE_L1_2_PCIPM)
1843 #define PCIE_LINK_STATE_CLKPM BIT(7)
1844 #define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\
1845 PCIE_LINK_STATE_CLKPM)
1846
1847 #ifdef CONFIG_PCIEASPM
1848 int pci_disable_link_state(struct pci_dev *pdev, int state);
1849 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1850 int pci_enable_link_state(struct pci_dev *pdev, int state);
1851 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1852 void pcie_no_aspm(void);
1853 bool pcie_aspm_support_enabled(void);
1854 bool pcie_aspm_enabled(struct pci_dev *pdev);
1855 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1856 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1857 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1858 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1859 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1860 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1861 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1862 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1863 { return 0; }
pcie_no_aspm(void)1864 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1865 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1866 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1867 #endif
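/*
 * Usage sketch: a driver whose device cannot tolerate the exit latency of
 * ASPM L0s/L1 on its link may ask the core to keep those states disabled:
 *
 *	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
 */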
1868
1869 #ifdef CONFIG_HOTPLUG_PCI
1870 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1871 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1872 #else
pci_hp_ignore_link_change(struct pci_dev * pdev)1873 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
pci_hp_unignore_link_change(struct pci_dev * pdev)1874 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1875 #endif
1876
1877 #ifdef CONFIG_PCIEAER
1878 bool pci_aer_available(void);
1879 #else
pci_aer_available(void)1880 static inline bool pci_aer_available(void) { return false; }
1881 #endif
1882
1883 bool pci_ats_disabled(void);
1884
1885 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1886 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1887
1888 struct pcie_ptm_ops {
1889 int (*check_capability)(void *drvdata);
1890 int (*context_update_write)(void *drvdata, u8 mode);
1891 int (*context_update_read)(void *drvdata, u8 *mode);
1892 int (*context_valid_write)(void *drvdata, bool valid);
1893 int (*context_valid_read)(void *drvdata, bool *valid);
1894 int (*local_clock_read)(void *drvdata, u64 *clock);
1895 int (*master_clock_read)(void *drvdata, u64 *clock);
1896 int (*t1_read)(void *drvdata, u64 *clock);
1897 int (*t2_read)(void *drvdata, u64 *clock);
1898 int (*t3_read)(void *drvdata, u64 *clock);
1899 int (*t4_read)(void *drvdata, u64 *clock);
1900
1901 bool (*context_update_visible)(void *drvdata);
1902 bool (*context_valid_visible)(void *drvdata);
1903 bool (*local_clock_visible)(void *drvdata);
1904 bool (*master_clock_visible)(void *drvdata);
1905 bool (*t1_visible)(void *drvdata);
1906 bool (*t2_visible)(void *drvdata);
1907 bool (*t3_visible)(void *drvdata);
1908 bool (*t4_visible)(void *drvdata);
1909 };
1910
1911 struct pci_ptm_debugfs {
1912 struct dentry *debugfs;
1913 const struct pcie_ptm_ops *ops;
1914 struct mutex lock;
1915 void *pdata;
1916 };
1917
1918 #ifdef CONFIG_PCIE_PTM
1919 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1920 void pci_disable_ptm(struct pci_dev *dev);
1921 bool pcie_ptm_enabled(struct pci_dev *dev);
1922 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1923 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1924 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1925 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1926 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1927 { return false; }
1928 #endif
1929
1930 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1931 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1932 const struct pcie_ptm_ops *ops);
1933 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1934 #else
1935 static inline struct pci_ptm_debugfs
pcie_ptm_create_debugfs(struct device * dev,void * pdata,const struct pcie_ptm_ops * ops)1936 *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1937 const struct pcie_ptm_ops *ops) { return NULL; }
1938 static inline void
pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs * ptm_debugfs)1939 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
1940 #endif
1941
1942 void pci_cfg_access_lock(struct pci_dev *dev);
1943 bool pci_cfg_access_trylock(struct pci_dev *dev);
1944 void pci_cfg_access_unlock(struct pci_dev *dev);
1945
1946 void pci_dev_lock(struct pci_dev *dev);
1947 int pci_dev_trylock(struct pci_dev *dev);
1948 void pci_dev_unlock(struct pci_dev *dev);
1949 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1950
1951 /*
1952 * PCI domain support. Sometimes called a PCI segment (e.g. by ACPI),
1953 * a PCI domain is defined to be a set of PCI buses which share
1954 * configuration space.
1955 */
1956 #ifdef CONFIG_PCI_DOMAINS
1957 extern int pci_domains_supported;
1958 #else
1959 enum { pci_domains_supported = 0 };
1960 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1961 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1962 #endif /* CONFIG_PCI_DOMAINS */
1963
1964 /*
1965 * Generic implementation for PCI domain support. If your
1966 * architecture does not need custom management of PCI
1967 * domains, then this implementation will be used.
1968 */
1969 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1970 static inline int pci_domain_nr(struct pci_bus *bus)
1971 {
1972 return bus->domain_nr;
1973 }
1974 #ifdef CONFIG_ACPI
1975 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1976 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1977 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1978 { return 0; }
1979 #endif
1980 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1981 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1982 #endif
1983
1984 /* Some architectures require additional setup to direct VGA traffic */
1985 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1986 unsigned int command_bits, u32 flags);
1987 void pci_register_set_vga_state(arch_set_vga_state_t func);
1988
1989 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1990 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1991 {
1992 return pci_request_selected_regions(pdev,
1993 pci_select_bars(pdev, IORESOURCE_IO), name);
1994 }
1995
1996 static inline void
pci_release_io_regions(struct pci_dev * pdev)1997 pci_release_io_regions(struct pci_dev *pdev)
1998 {
1999 return pci_release_selected_regions(pdev,
2000 pci_select_bars(pdev, IORESOURCE_IO));
2001 }
2002
2003 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)2004 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
2005 {
2006 return pci_request_selected_regions(pdev,
2007 pci_select_bars(pdev, IORESOURCE_MEM), name);
2008 }
2009
2010 static inline void
pci_release_mem_regions(struct pci_dev * pdev)2011 pci_release_mem_regions(struct pci_dev *pdev)
2012 {
2013 return pci_release_selected_regions(pdev,
2014 pci_select_bars(pdev, IORESOURCE_MEM));
2015 }
2016
2017 #else /* CONFIG_PCI is not enabled */
2018
pci_set_flags(int flags)2019 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)2020 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)2021 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)2022 static inline int pci_has_flag(int flag) { return 0; }
2023
2024 /*
2025 * If the system does not have PCI, clearly these return errors. Define
2026 * these as simple inline functions to avoid hair in drivers.
2027 */
2028 #define _PCI_NOP(o, s, t) \
2029 static inline int pci_##o##_config_##s(struct pci_dev *dev, \
2030 int where, t val) \
2031 { return PCIBIOS_FUNC_NOT_SUPPORTED; }
2032
2033 #define _PCI_NOP_ALL(o, x) _PCI_NOP(o, byte, u8 x) \
2034 _PCI_NOP(o, word, u16 x) \
2035 _PCI_NOP(o, dword, u32 x)
2036 _PCI_NOP_ALL(read, *)
2037 _PCI_NOP_ALL(write,)
2038
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)2039 static inline struct pci_dev *pci_get_device(unsigned int vendor,
2040 unsigned int device,
2041 struct pci_dev *from)
2042 { return NULL; }
2043
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)2044 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
2045 unsigned int device,
2046 unsigned int ss_vendor,
2047 unsigned int ss_device,
2048 struct pci_dev *from)
2049 { return NULL; }
2050
pci_get_class(unsigned int class,struct pci_dev * from)2051 static inline struct pci_dev *pci_get_class(unsigned int class,
2052 struct pci_dev *from)
2053 { return NULL; }
2054
pci_get_base_class(unsigned int class,struct pci_dev * from)2055 static inline struct pci_dev *pci_get_base_class(unsigned int class,
2056 struct pci_dev *from)
2057 { return NULL; }
2058
pci_dev_present(const struct pci_device_id * ids)2059 static inline int pci_dev_present(const struct pci_device_id *ids)
2060 { return 0; }
2061
2062 #define no_pci_devices() (1)
2063 #define pci_dev_put(dev) do { } while (0)
2064
pci_set_master(struct pci_dev * dev)2065 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)2066 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)2067 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)2068 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2069 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2070 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2071 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2072 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2073 struct module *owner,
2074 const char *mod_name)
2075 { return 0; }
pci_register_driver(struct pci_driver * drv)2076 static inline int pci_register_driver(struct pci_driver *drv)
2077 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2078 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2079 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2080 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2081 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2082 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2083 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2084 { return 0; }
2085
pci_get_dsn(struct pci_dev * dev)2086 static inline u64 pci_get_dsn(struct pci_dev *dev)
2087 { return 0; }
2088
2089 /* Power management related routines */
pci_save_state(struct pci_dev * dev)2090 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)2091 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)2092 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2093 { return 0; }
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)2094 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2095 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)2096 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2097 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)2098 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2099 pm_message_t state)
2100 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)2101 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2102 int enable)
2103 { return 0; }
2104
pci_find_resource(struct pci_dev * dev,struct resource * res)2105 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2106 struct resource *res)
2107 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2108 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2109 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2110 static inline void pci_release_regions(struct pci_dev *dev) { }
2111
pci_register_io_range(const struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2112 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2113 phys_addr_t addr, resource_size_t size)
2114 { return -EINVAL; }
2115
pci_address_to_pio(phys_addr_t addr)2116 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2117
pci_find_next_bus(const struct pci_bus * from)2118 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2119 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2120 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2121 unsigned int devfn)
2122 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2123 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2124 unsigned int bus, unsigned int devfn)
2125 { return NULL; }
2126
pci_domain_nr(struct pci_bus * bus)2127 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2128 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2129
2130 #define dev_is_pci(d) (false)
2131 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2132 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2133 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2134 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2135 struct device_node *node,
2136 const u32 *intspec,
2137 unsigned int intsize,
2138 unsigned long *out_hwirq,
2139 unsigned int *out_type)
2140 { return -EINVAL; }
2141
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2142 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2143 struct pci_dev *dev)
2144 { return NULL; }
pci_ats_disabled(void)2145 static inline bool pci_ats_disabled(void) { return true; }
2146
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2147 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2148 {
2149 return -EINVAL;
2150 }
2151
2152 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2153 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2154 unsigned int max_vecs, unsigned int flags,
2155 struct irq_affinity *aff_desc)
2156 {
2157 return -ENOSPC;
2158 }
2159 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2160 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2161 unsigned int max_vecs, unsigned int flags)
2162 {
2163 return -ENOSPC;
2164 }
2165 #endif /* CONFIG_PCI */
2166
2167 /* Include architecture-dependent settings and functions */
2168
2169 #include <asm/pci.h>
2170
2171 /*
2172 * pci_mmap_resource_range() maps a specific BAR, and vma->vm_pgoff
2173 * is expected to be an offset within that region.
2175 */
2176 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2177 struct vm_area_struct *vma,
2178 enum pci_mmap_state mmap_state, int write_combine);
2179
2180 #ifndef arch_can_pci_mmap_wc
2181 #define arch_can_pci_mmap_wc() 0
2182 #endif
2183
2184 #ifndef arch_can_pci_mmap_io
2185 #define arch_can_pci_mmap_io() 0
2186 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2187 #else
2188 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2189 #endif
2190
2191 #ifndef pci_root_bus_fwnode
2192 #define pci_root_bus_fwnode(bus) NULL
2193 #endif
2194
2195 /*
2196 * These helpers provide future and backwards compatibility
2197 * for accessing commonly used PCI BAR info.
2198 */
2199 #define pci_resource_n(dev, bar) (&(dev)->resource[(bar)])
2200 #define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start)
2201 #define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end)
2202 #define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags)
2203 #define pci_resource_len(dev,bar) \
2204 (pci_resource_end((dev), (bar)) ? \
2205 resource_size(pci_resource_n((dev), (bar))) : 0)
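/*
 * Usage sketch (FOO_BAR0_MIN_LEN and "regs" are hypothetical driver names):
 * check that BAR 0 is a memory BAR of the expected size before mapping it.
 *
 *	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
 *	    pci_resource_len(pdev, 0) < FOO_BAR0_MIN_LEN)
 *		return -ENODEV;
 *	regs = pci_ioremap_bar(pdev, 0);
 */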
2206
2207 #define __pci_dev_for_each_res0(dev, res, ...) \
2208 for (unsigned int __b = 0; \
2209 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2210 __b++)
2211
2212 #define __pci_dev_for_each_res1(dev, res, __b) \
2213 for (__b = 0; \
2214 __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2215 __b++)
2216
2217 #define pci_dev_for_each_resource(dev, res, ...) \
2218 CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) \
2219 (dev, res, __VA_ARGS__)
2220
2221 /*
2222 * Similar to the helpers above, these manipulate per-pci_dev
2223 * driver-specific data. They are really just thin wrappers around
2224 * the corresponding generic struct device functions.
2225 */
pci_get_drvdata(struct pci_dev * pdev)2226 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2227 {
2228 return dev_get_drvdata(&pdev->dev);
2229 }
2230
pci_set_drvdata(struct pci_dev * pdev,void * data)2231 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2232 {
2233 dev_set_drvdata(&pdev->dev, data);
2234 }
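/*
 * Typical pattern (sketch; "struct foo" is a hypothetical driver-private
 * type): allocate private state in probe() and fetch it again later, e.g.
 * in remove().
 *
 *	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *
 *	pci_set_drvdata(pdev, foo);
 *	...
 *	foo = pci_get_drvdata(pdev);
 */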
2235
pci_name(const struct pci_dev * pdev)2236 static inline const char *pci_name(const struct pci_dev *pdev)
2237 {
2238 return dev_name(&pdev->dev);
2239 }
2240
2241 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2242 const struct resource *rsrc,
2243 resource_size_t *start, resource_size_t *end);
2244
2245 /*
2246 * The world is not perfect and supplies us with broken PCI devices.
2247 * At least some of these bugs need a workaround, so both
2248 * generic (drivers/pci/quirks.c) and per-architecture code can define
2249 * fixup hooks to be called for particular buggy devices.
2250 */
2251
2252 struct pci_fixup {
2253 u16 vendor; /* Or PCI_ANY_ID */
2254 u16 device; /* Or PCI_ANY_ID */
2255 u32 class; /* Or PCI_ANY_ID */
2256 unsigned int class_shift; /* should be 0, 8, 16 */
2257 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2258 int hook_offset;
2259 #else
2260 void (*hook)(struct pci_dev *dev);
2261 #endif
2262 };
2263
2264 enum pci_fixup_pass {
2265 pci_fixup_early, /* Before probing BARs */
2266 pci_fixup_header, /* After reading configuration header */
2267 pci_fixup_final, /* Final phase of device fixups */
2268 pci_fixup_enable, /* pci_enable_device() time */
2269 pci_fixup_resume, /* pci_device_resume() */
2270 pci_fixup_suspend, /* pci_device_suspend() */
2271 pci_fixup_resume_early, /* pci_device_resume_early() */
2272 pci_fixup_suspend_late, /* pci_device_suspend_late() */
2273 };
2274
2275 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2276 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2277 class_shift, hook) \
2278 __ADDRESSABLE(hook) \
2279 asm(".section " #sec ", \"a\" \n" \
2280 ".balign 16 \n" \
2281 ".short " #vendor ", " #device " \n" \
2282 ".long " #class ", " #class_shift " \n" \
2283 ".long " #hook " - . \n" \
2284 ".previous \n");
2285
2286 /*
2287 * Clang's LTO may rename static functions in C, but has no way to
2288 * handle such renamings when referenced from inline asm. To work
2289 * around this, create global C stubs for these cases.
2290 */
2291 #ifdef CONFIG_LTO_CLANG
2292 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2293 class_shift, hook, stub) \
2294 void stub(struct pci_dev *dev); \
2295 void stub(struct pci_dev *dev) \
2296 { \
2297 hook(dev); \
2298 } \
2299 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2300 class_shift, stub)
2301 #else
2302 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2303 class_shift, hook, stub) \
2304 ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2305 class_shift, hook)
2306 #endif
2307
2308 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2309 class_shift, hook) \
2310 __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
2311 class_shift, hook, __UNIQUE_ID(hook))
2312 #else
2313 /* Anonymous variables would be nice... */
2314 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
2315 class_shift, hook) \
2316 static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
2317 __attribute__((__section__(#section), aligned((sizeof(void *))))) \
2318 = { vendor, device, class, class_shift, hook };
2319 #endif
2320
2321 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
2322 class_shift, hook) \
2323 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2324 hook, vendor, device, class, class_shift, hook)
2325 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
2326 class_shift, hook) \
2327 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2328 hook, vendor, device, class, class_shift, hook)
2329 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
2330 class_shift, hook) \
2331 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2332 hook, vendor, device, class, class_shift, hook)
2333 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
2334 class_shift, hook) \
2335 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2336 hook, vendor, device, class, class_shift, hook)
2337 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
2338 class_shift, hook) \
2339 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2340 resume##hook, vendor, device, class, class_shift, hook)
2341 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
2342 class_shift, hook) \
2343 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2344 resume_early##hook, vendor, device, class, class_shift, hook)
2345 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
2346 class_shift, hook) \
2347 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2348 suspend##hook, vendor, device, class, class_shift, hook)
2349 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \
2350 class_shift, hook) \
2351 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2352 suspend_late##hook, vendor, device, class, class_shift, hook)
2353
2354 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
2355 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
2356 hook, vendor, device, PCI_ANY_ID, 0, hook)
2357 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
2358 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
2359 hook, vendor, device, PCI_ANY_ID, 0, hook)
2360 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
2361 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
2362 hook, vendor, device, PCI_ANY_ID, 0, hook)
2363 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
2364 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
2365 hook, vendor, device, PCI_ANY_ID, 0, hook)
2366 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
2367 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
2368 resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2369 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
2370 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
2371 resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2372 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
2373 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
2374 suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2375 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \
2376 DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \
2377 suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
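/*
 * Usage sketch (the vendor/device IDs and the quirk itself are hypothetical):
 * run a fixup for one specific device during the final fixup phase.
 *
 *	static void quirk_foo_no_msi(struct pci_dev *dev)
 *	{
 *		dev->no_msi = 1;
 *	}
 *	DECLARE_PCI_FIXUP_FINAL(0xabcd, 0x1234, quirk_foo_no_msi);
 */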
2378
2379 #ifdef CONFIG_PCI_QUIRKS
2380 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2381 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2382 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2383 struct pci_dev *dev) { }
2384 #endif
2385
2386 int pcim_intx(struct pci_dev *pdev, int enabled);
2387 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2388 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2389 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2390 const char *name);
2391 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2392 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2393 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2394 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2395 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2396 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2397 unsigned long offset, unsigned long len);
2398
2399 extern int pci_pci_problems;
2400 #define PCIPCI_FAIL 1 /* No PCI PCI DMA */
2401 #define PCIPCI_TRITON 2
2402 #define PCIPCI_NATOMA 4
2403 #define PCIPCI_VIAETBF 8
2404 #define PCIPCI_VSFX 16
2405 #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */
2406 #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */
2407
2408 extern u8 pci_dfl_cache_line_size;
2409 extern u8 pci_cache_line_size;
2410
2411 /* Architecture-specific versions may override these (weak) */
2412 void pcibios_disable_device(struct pci_dev *dev);
2413 void pcibios_set_master(struct pci_dev *dev);
2414 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2415 enum pcie_reset_state state);
2416 int pcibios_device_add(struct pci_dev *dev);
2417 void pcibios_release_device(struct pci_dev *dev);
2418 #ifdef CONFIG_PCI
2419 void pcibios_penalize_isa_irq(int irq, int active);
2420 #else
pcibios_penalize_isa_irq(int irq,int active)2421 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2422 #endif
2423 int pcibios_alloc_irq(struct pci_dev *dev);
2424 void pcibios_free_irq(struct pci_dev *dev);
2425 resource_size_t pcibios_default_alignment(void);
2426
2427 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2428 extern int pci_create_resource_files(struct pci_dev *dev);
2429 extern void pci_remove_resource_files(struct pci_dev *dev);
2430 #endif
2431
2432 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2433 void __init pci_mmcfg_early_init(void);
2434 void __init pci_mmcfg_late_init(void);
2435 #else
pci_mmcfg_early_init(void)2436 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2437 static inline void pci_mmcfg_late_init(void) { }
2438 #endif
2439
2440 int pci_ext_cfg_avail(void);
2441
2442 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2443 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2444
2445 #ifdef CONFIG_PCI_IOV
2446 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2447 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2448 int pci_iov_vf_id(struct pci_dev *dev);
2449 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2450 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2451 void pci_disable_sriov(struct pci_dev *dev);
2452
2453 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2454 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2455 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2456 int pci_num_vf(struct pci_dev *dev);
2457 int pci_vfs_assigned(struct pci_dev *dev);
2458 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2459 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2460 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2461 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2462 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2463 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2464 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2465
2466 /* Arch may override these (weak) */
2467 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2468 int pcibios_sriov_disable(struct pci_dev *pdev);
2469 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2470 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2471 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2472 {
2473 return -ENOSYS;
2474 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2475 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2476 {
2477 return -ENOSYS;
2478 }
2479
pci_iov_vf_id(struct pci_dev * dev)2480 static inline int pci_iov_vf_id(struct pci_dev *dev)
2481 {
2482 return -ENOSYS;
2483 }
2484
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2485 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2486 struct pci_driver *pf_driver)
2487 {
2488 return ERR_PTR(-EINVAL);
2489 }
2490
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2491 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2492 { return -ENODEV; }
2493
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2494 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2495 struct pci_dev *virtfn, int id)
2496 {
2497 return -ENODEV;
2498 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2499 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2500 {
2501 return -ENOSYS;
2502 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2503 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2504 int id) { }
pci_disable_sriov(struct pci_dev * dev)2505 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2506 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2507 static inline int pci_vfs_assigned(struct pci_dev *dev)
2508 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2509 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2510 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2511 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2512 { return 0; }
2513 #define pci_sriov_configure_simple NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2514 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2515 { return 0; }
pci_iov_vf_bar_set_size(struct pci_dev * dev,int resno,int size)2516 static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
2517 { return -ENODEV; }
pci_iov_vf_bar_get_sizes(struct pci_dev * dev,int resno,int num_vfs)2518 static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
2519 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2520 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2521 #endif
2522
2523 /**
2524 * pci_pcie_cap - get the saved PCIe capability offset
2525 * @dev: PCI device
2526 *
2527 * The PCIe capability offset is calculated at PCI device initialization
2528 * time and saved in the data structure. This function returns the saved
2529 * PCIe capability offset. Using this instead of pci_find_capability()
2530 * avoids an unnecessary search of the PCI configuration space. If you
2531 * need to calculate the PCIe capability offset from the raw device for
2532 * some reason, please use pci_find_capability() instead.
2533 */
pci_pcie_cap(struct pci_dev * dev)2534 static inline int pci_pcie_cap(struct pci_dev *dev)
2535 {
2536 return dev->pcie_cap;
2537 }
2538
2539 /**
2540 * pci_is_pcie - check if the PCI device is PCI Express capable
2541 * @dev: PCI device
2542 *
2543 * Returns: true if the PCI device is PCI Express capable, false otherwise.
2544 */
pci_is_pcie(struct pci_dev * dev)2545 static inline bool pci_is_pcie(struct pci_dev *dev)
2546 {
2547 return pci_pcie_cap(dev);
2548 }
2549
2550 /**
2551 * pcie_caps_reg - get the PCIe Capabilities Register
2552 * @dev: PCI device
2553 */
pcie_caps_reg(const struct pci_dev * dev)2554 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2555 {
2556 return dev->pcie_flags_reg;
2557 }
2558
2559 /**
2560 * pci_pcie_type - get the PCIe device/port type
2561 * @dev: PCI device
2562 */
pci_pcie_type(const struct pci_dev * dev)2563 static inline int pci_pcie_type(const struct pci_dev *dev)
2564 {
2565 return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2566 }
2567
2568 /**
2569 * pcie_find_root_port - Get the PCIe root port device
2570 * @dev: PCI device
2571 *
2572 * Traverse up the parent chain and return the PCIe Root Port PCI Device
2573 * for a given PCI/PCIe Device.
2574 */
pcie_find_root_port(struct pci_dev * dev)2575 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2576 {
2577 while (dev) {
2578 if (pci_is_pcie(dev) &&
2579 pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2580 return dev;
2581 dev = pci_upstream_bridge(dev);
2582 }
2583
2584 return NULL;
2585 }
2586
pci_dev_is_disconnected(const struct pci_dev * dev)2587 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2588 {
2589 /*
2590 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2591 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2592 * the value (e.g. inside the loop in pci_dev_wait()).
2593 */
2594 return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2595 }

void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
			  struct pci_dev *end, u16 acs_flags);
int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA	PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"

/**
 * pci_vpd_alloc - Allocate buffer and read VPD into it
 * @dev: PCI device
 * @size: pointer to field where VPD length is returned
 *
 * Returns pointer to allocated buffer or an ERR_PTR in case of failure
 */
void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);

/**
 * pci_vpd_find_id_string - Locate id string in VPD
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @size: Pointer to field where length of id string is returned
 *
 * Returns the index of the id string or -ENOENT if not found.
 */
int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);

/**
 * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
 * @buf: Pointer to buffered VPD data
 * @len: The length of the buffer area in which to search
 * @kw: The keyword to search for
 * @size: Pointer to field where length of found keyword data is returned
 *
 * Returns the index of the information field keyword data or -ENOENT if
 * not found.
 */
int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
				 const char *kw, unsigned int *size);

/**
 * pci_vpd_check_csum - Check VPD checksum
 * @buf: Pointer to buffered VPD data
 * @len: VPD size
 *
 * Returns 1 if VPD has no checksum, otherwise 0 or an errno
 */
int pci_vpd_check_csum(const void *buf, unsigned int len);
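
/*
 * Example (sketch, not part of the API): a driver that wants a device's
 * serial number from VPD can combine pci_vpd_alloc() and
 * pci_vpd_find_ro_info_keyword().  my_read_vpd_sn() and its fixed-size
 * output buffer are illustrative assumptions.
 *
 *	static int my_read_vpd_sn(struct pci_dev *pdev, char *sn, size_t sn_len)
 *	{
 *		unsigned int vpd_len, kw_len;
 *		void *vpd;
 *		int start;
 *
 *		vpd = pci_vpd_alloc(pdev, &vpd_len);
 *		if (IS_ERR(vpd))
 *			return PTR_ERR(vpd);
 *
 *		start = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
 *						     PCI_VPD_RO_KEYWORD_SERIALNO,
 *						     &kw_len);
 *		if (start < 0) {
 *			kfree(vpd);
 *			return start;
 *		}
 *
 *		snprintf(sn, sn_len, "%.*s", (int)kw_len, (char *)vpd + start);
 *		kfree(vpd);
 *		return 0;
 *	}
 */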

/* PCI <-> OF binding helpers */
#ifdef CONFIG_OF
struct device_node;
struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
bool pci_host_of_has_msi_map(struct device *dev);

/* Arch may override this (weak) */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);

#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
#endif /* CONFIG_OF */

static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev *pdev)
{
	return pdev ? pdev->dev.of_node : NULL;
}

static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
{
	return bus ? bus->dev.of_node : NULL;
}

#ifdef CONFIG_ACPI
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);

void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif

#if defined(CONFIG_X86) && defined(CONFIG_ACPI)
bool arch_pci_dev_is_removable(struct pci_dev *pdev);
#else
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
#endif

#ifdef CONFIG_EEH
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
#endif

void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
			   int (*fn)(struct pci_dev *pdev,
				     u16 alias, void *data), void *data);
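
/*
 * Example (sketch): pci_for_each_dma_alias() invokes @fn for the device
 * itself and for every requester ID it may alias to.  An IOMMU-style
 * consumer could collect those IDs as below; my_add_rid_to_table() and
 * struct my_ctx are hypothetical.
 *
 *	static int my_add_rid(struct pci_dev *pdev, u16 alias, void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		return my_add_rid_to_table(ctx, alias);
 *	}
 *
 * A nonzero return from the callback stops the walk and is propagated by
 * pci_for_each_dma_alias():
 *
 *	err = pci_for_each_dma_alias(pdev, my_add_rid, &ctx);
 */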

/* Helpers for operating on the PCI_DEV_FLAGS_ASSIGNED device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}

/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}

/**
 * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
 * @pdev: PCI device to check
 *
 * Walk upwards from @pdev and check for each encountered bridge if it's part
 * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
 * Thunderbolt-attached (it is usually soldered to the mainboard instead).
 */
static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev;

	if (pdev->is_thunderbolt)
		return true;

	while ((parent = pci_upstream_bridge(parent)))
		if (parent->is_thunderbolt)
			return true;

	return false;
}

#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif

#include <linux/dma-mapping.h>

#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...)	dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
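
/*
 * Example (illustrative): the pci_*() logging wrappers prefix messages
 * with the driver name and device address, so context does not need to
 * be hand-rolled.  The error path and names below are hypothetical.
 *
 *	err = pci_enable_device(pdev);
 *	if (err) {
 *		pci_err(pdev, "failed to enable device: %d\n", err);
 *		return err;
 *	}
 *
 *	pci_WARN_ONCE(pdev, bar_len < MY_MIN_BAR_LEN,
 *		      "BAR 0 smaller than expected (%zu bytes)\n", bar_len);
 */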

#endif /* LINUX_PCI_H */