xref: /linux/include/linux/pci.h (revision d3b402c5a2d47f51eb0581da1a7b142f82cb10d1)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
71 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
72 /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
74 
/* pci_slot represents a physical slot */
struct pci_slot {
	struct pci_bus		*bus;		/* Bus this slot is on */
	struct list_head	list;		/* Node in list of slots */
	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
	struct kobject		kobj;		/* Sysfs object; holds the slot name (see pci_slot_name()) */
};
83 
pci_slot_name(const struct pci_slot * slot)84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 	return kobject_name(&slot->kobj);
87 }
88 
/* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state {
	pci_mmap_io,	/* Mapping targets I/O port space */
	pci_mmap_mem	/* Mapping targets memory space */
};
94 
/*
 * For PCI devices, the region numbers are assigned this way.  The order
 * of the enumerators fixes the layout of pci_dev.resource[]; do not
 * reorder.  The #defines interleaved below are placed inside the enum
 * purely for readability — they expand only where used, so referencing
 * PCI_BRIDGE_RESOURCES before its enumerator appears is fine.
 */
enum {
	/* #0-5: standard PCI resources (BARs) */
	PCI_STD_RESOURCES,
	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,

	/* #6: expansion ROM resource */
	PCI_ROM_RESOURCE,

	/* Device-specific resources */
#ifdef CONFIG_PCI_IOV
	PCI_IOV_RESOURCES,
	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
#endif

/* PCI-to-PCI (P2P) bridge windows */
#define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
#define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
#define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)

/* CardBus bridge windows */
#define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
#define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
#define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
#define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)

/* Total number of bridge resources for P2P and CardBus */
#define PCI_P2P_BRIDGE_RESOURCE_NUM	3
#define PCI_BRIDGE_RESOURCE_NUM		4

	/* Resources assigned to buses behind the bridge */
	PCI_BRIDGE_RESOURCES,
	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
				  PCI_BRIDGE_RESOURCE_NUM - 1,

	/* Total resources associated with a PCI device */
	PCI_NUM_RESOURCES,

	/* Preserve this for compatibility */
	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
};
136 
/**
 * enum pci_interrupt_pin - PCI INTx interrupt values
 * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
 * @PCI_INTERRUPT_INTA: PCI INTA pin
 * @PCI_INTERRUPT_INTB: PCI INTB pin
 * @PCI_INTERRUPT_INTC: PCI INTC pin
 * @PCI_INTERRUPT_INTD: PCI INTD pin
 *
 * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
 * PCI_INTERRUPT_PIN register.  The enumerator order matters: the values
 * 0-4 match the register encoding exactly.
 */
enum pci_interrupt_pin {
	PCI_INTERRUPT_UNKNOWN,
	PCI_INTERRUPT_INTA,
	PCI_INTERRUPT_INTB,
	PCI_INTERRUPT_INTC,
	PCI_INTERRUPT_INTD,
};
155 
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX	4
158 
159 /*
160  * Reading from a device that doesn't respond typically returns ~0.  A
161  * successful read from a device may also return ~0, so you need additional
162  * information to reliably identify errors.
163  */
164 #define PCI_ERROR_RESPONSE		(~0ULL)
165 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
166 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
167 
168 /*
169  * pci_power_t values must match the bits in the Capabilities PME_Support
170  * and Control/Status PowerState fields in the Power Management capability.
171  */
172 typedef int __bitwise pci_power_t;
173 
174 #define PCI_D0		((pci_power_t __force) 0)
175 #define PCI_D1		((pci_power_t __force) 1)
176 #define PCI_D2		((pci_power_t __force) 2)
177 #define PCI_D3hot	((pci_power_t __force) 3)
178 #define PCI_D3cold	((pci_power_t __force) 4)
179 #define PCI_UNKNOWN	((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
181 
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184 
static inline const char *pci_power_name(pci_power_t state)
{
	/*
	 * Index 0 of pci_power_names[] is the name for PCI_POWER_ERROR
	 * (-1); the "+ 1" shifts D0..PCI_UNKNOWN (0..5) past it.
	 */
	return pci_power_names[1 + (__force int) state];
}
189 
/**
 * typedef pci_channel_state_t
 *
 * The pci_channel state describes connectivity between the CPU and
 * the PCI device.  If some PCI bus between here and the PCI device
 * has crashed or locked up, this info is reflected here.
 */
typedef unsigned int __bitwise pci_channel_state_t;

enum {
	/* I/O channel is in normal state */
	pci_channel_io_normal = (__force pci_channel_state_t) 1,

	/* I/O to channel is blocked (e.g. frozen by error recovery) */
	pci_channel_io_frozen = (__force pci_channel_state_t) 2,

	/* PCI card is dead; connectivity is permanently lost */
	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
};
209 
/* Reset states accepted by pci_set_pcie_reset_state() users; see enum below */
typedef unsigned int __bitwise pcie_reset_state_t;

enum pcie_reset_state {
	/* Reset is NOT asserted (Use to deassert reset) */
	pcie_deassert_reset = (__force pcie_reset_state_t) 1,

	/* Use #PERST to reset PCIe device */
	pcie_warm_reset = (__force pcie_reset_state_t) 2,

	/* Use PCIe Hot Reset to reset device */
	pcie_hot_reset = (__force pcie_reset_state_t) 3
};
222 
/* Per-device quirk flags, stored in pci_dev.dev_flags */
typedef unsigned short __bitwise pci_dev_flags_t;
enum pci_dev_flags {
	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
	/* Device configuration is irrevocably lost if disabled into D3 */
	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
	/* Provide indication device is assigned by a Virtual Machine Manager */
	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
	/* Flag for quirk use to store if quirk-specific ACS is enabled */
	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
	/* Bit 4 is currently unused */
	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
	/* Do not use bus resets for device */
	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
	/* Do not use PM reset even if device advertises NoSoftRst- */
	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
	/* Get VPD from function 0 VPD */
	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
	/* A non-root bridge where translation occurs, stop alias search here */
	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
	/* Do not use FLR even if device advertises PCI_AF_CAP */
	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
	/* Don't use Relaxed Ordering for TLPs directed at this device */
	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
	/* Device does honor MSI masking despite saying otherwise */
	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
	/*
	 * PCIe to PCI bridge does not create RID aliases because the bridge is
	 * integrated with the downstream devices and doesn't use real PCI.
	 */
	PCI_DEV_FLAGS_PCI_BRIDGE_NO_ALIAS = (__force pci_dev_flags_t) (1 << 14),
};
257 
/* IRQ rerouting quirk variants, stored in pci_dev.irq_reroute_variant (2 bits) */
enum pci_irq_reroute_variant {
	INTEL_IRQ_REROUTE_VARIANT = 1,
	MAX_IRQ_REROUTE_VARIANTS = 3
};
262 
/* Per-bus flags; inherited by child buses (see pci_bus.bus_flags) */
typedef unsigned short __bitwise pci_bus_flags_t;
enum pci_bus_flags {
	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,	/* MSI unusable on this bus */
	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,	/* Don't touch PCI-X MMRBC */
	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,	/* No AER source ID */
	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,	/* No extended config space */
};
270 
/* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
enum pcie_link_width {
	PCIE_LNK_WIDTH_RESRV	= 0x00,
	PCIE_LNK_X1		= 0x01,
	PCIE_LNK_X2		= 0x02,
	PCIE_LNK_X4		= 0x04,
	PCIE_LNK_X8		= 0x08,
	PCIE_LNK_X12		= 0x0c,
	PCIE_LNK_X16		= 0x10,
	PCIE_LNK_X32		= 0x20,
	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,	/* Sentinel: width not readable */
};
283 
/* See matching string table in pci_speed_string() */
enum pci_bus_speed {
	PCI_SPEED_33MHz			= 0x00,
	PCI_SPEED_66MHz			= 0x01,
	PCI_SPEED_66MHz_PCIX		= 0x02,
	PCI_SPEED_100MHz_PCIX		= 0x03,
	PCI_SPEED_133MHz_PCIX		= 0x04,
	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
	/* 0x08 intentionally unused */
	PCI_SPEED_66MHz_PCIX_266	= 0x09,
	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
	AGP_UNKNOWN			= 0x0c,
	AGP_1X				= 0x0d,
	AGP_2X				= 0x0e,
	AGP_4X				= 0x0f,
	AGP_8X				= 0x10,
	PCI_SPEED_66MHz_PCIX_533	= 0x11,
	PCI_SPEED_100MHz_PCIX_533	= 0x12,
	PCI_SPEED_133MHz_PCIX_533	= 0x13,
	PCIE_SPEED_2_5GT		= 0x14,
	PCIE_SPEED_5_0GT		= 0x15,
	PCIE_SPEED_8_0GT		= 0x16,
	PCIE_SPEED_16_0GT		= 0x17,
	PCIE_SPEED_32_0GT		= 0x18,
	PCIE_SPEED_64_0GT		= 0x19,
	PCI_SPEED_UNKNOWN		= 0xff,
};
313 
314 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
315 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
316 
/* Vital Product Data state for a device */
struct pci_vpd {
	struct mutex	lock;		/* Serializes VPD accesses (NOTE: confirm in drivers/pci/vpd.c) */
	unsigned int	len;		/* Size of the VPD area in bytes */
	u8		cap;		/* VPD capability offset */
};
322 
323 struct irq_affinity;
324 struct pcie_bwctrl_data;
325 struct pcie_link_state;
326 struct pci_sriov;
327 struct pci_p2pdma;
328 struct rcec_ea;
329 
/* struct pci_dev - describes a PCI device
 *
 * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
 *			LSB). 0 when the supported speeds cannot be
 *			determined (e.g., for Root Complex Integrated
 *			Endpoints without the relevant Capability
 *			Registers).
 * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
 *			Conventional PCI Hot-Plug, ACPI slot).
 *			Such bridges are allocated additional MMIO and bus
 *			number resources to allow for hierarchy expansion.
 * @is_pciehp:		PCIe Hot-Plug Capable bridge.
 */
struct pci_dev {
	struct list_head bus_list;	/* Node in per-bus list */
	struct pci_bus	*bus;		/* Bus this device is on */
	struct pci_bus	*subordinate;	/* Bus this device bridges to */

	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
	struct pci_slot	*slot;		/* Physical slot this device is in */

	unsigned int	devfn;		/* Encoded device & function index */
	unsigned short	vendor;		/* Vendor ID from config space */
	unsigned short	device;		/* Device ID from config space */
	unsigned short	subsystem_vendor;
	unsigned short	subsystem_device;
	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
	u8		revision;	/* PCI revision, low byte of class word */
	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
	u16		aer_cap;	/* AER capability offset */
	struct aer_info	*aer_info;	/* AER info for this device */
#endif
#ifdef CONFIG_PCIEPORTBUS
	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
	struct pci_dev  *rcec;          /* Associated RCEC device */
#endif
	u32		devcap;		/* PCIe Device Capabilities */
	u16		rebar_cap;	/* Resizable BAR capability offset */
	u8		pcie_cap;	/* PCIe capability offset */
	u8		msi_cap;	/* MSI capability offset */
	u8		msix_cap;	/* MSI-X capability offset */
	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
	u8		rom_base_reg;	/* Config register controlling ROM */
	u8		pin;		/* Interrupt pin this device uses */
	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */

	struct pci_driver *driver;	/* Driver bound to this device */
	u64		dma_mask;	/* Mask of the bits of bus address this
					   device implements.  Normally this is
					   0xffffffff.  You only need to change
					   this if your device has broken DMA
					   or supports 64-bit transfers.  */
	u64		msi_addr_mask;	/* Mask of the bits of bus address for
					   MSI that this device implements.
					   Normally set based on device
					   capabilities. You only need to
					   change this if your device claims
					   to support 64-bit MSI but implements
					   fewer than 64 address bits. */

	struct device_dma_parameters dma_parms;	/* Device DMA parameters */

	pci_power_t	current_state;	/* Current operating state. In ACPI,
					   this is D0-D3, D0 being fully
					   functional, and D3 being off. */
	u8		pm_cap;		/* PM capability offset */
	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
					   can be generated */
	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
	unsigned int	pinned:1;	/* Whether this dev is pinned */
	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
	unsigned int	d1_support:1;	/* Low power state D1 is supported */
	unsigned int	d2_support:1;	/* Low power state D2 is supported */
	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
						   decoding during BAR sizing */
	unsigned int	wakeup_prepared:1;	/* Internal: wakeup already configured */
	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
						      controlled exclusively by
						      user sysfs */
	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
						   bit manually */
	unsigned int	no_bw_notif:1;	/* BW notifications may cause issues */
	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */

	u16		l1ss;		/* L1SS Capability pointer */
#ifdef CONFIG_PCIEASPM
	struct pcie_link_state	*link_state;	/* ASPM link state */
	unsigned int	aspm_l0s_support:1;	/* ASPM L0s support */
	unsigned int	aspm_l1_support:1;	/* ASPM L1 support */
	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
					   supported from root to here */
#endif
	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */

	pci_channel_state_t error_state;	/* Current connectivity state */
	struct device	dev;			/* Generic device interface */

	int		cfg_size;		/* Size of config space */

	/*
	 * Instead of touching interrupt line and base address registers
	 * directly, use the values stored here. They might be different!
	 */
	unsigned int	irq;
	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */

	unsigned int	transparent:1;		/* Subtractive decode bridge */
	unsigned int	io_window:1;		/* Bridge has I/O window */
	unsigned int	pref_window:1;		/* Bridge has pref mem window */
	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
	unsigned int	multifunction:1;	/* Multi-function device */

	unsigned int	is_busmaster:1;		/* Is busmaster */
	unsigned int	no_msi:1;		/* May not use MSI */
	unsigned int	block_cfg_access:1;	/* Config space access blocked */
	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
	unsigned int	msi_enabled:1;		/* MSI currently enabled */
	unsigned int	msix_enabled:1;		/* MSI-X currently enabled */
	unsigned int	ari_enabled:1;		/* ARI forwarding */
	unsigned int	ats_enabled:1;		/* Address Translation Svc */
	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
	unsigned int	pri_enabled:1;		/* Page Request Interface */
	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
	unsigned int	fm_enabled:1;		/* Flit Mode (segment captured) */
	unsigned int	is_managed:1;		/* Managed via devres */
	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
	unsigned int	needs_freset:1;		/* Requires fundamental reset */
	unsigned int	state_saved:1;		/* Config state saved (see saved_config_space) */
	unsigned int	is_physfn:1;		/* SR-IOV: device is a PF */
	unsigned int	is_virtfn:1;		/* SR-IOV: device is a VF */
	unsigned int	is_hotplug_bridge:1;
	unsigned int	is_pciehp:1;
	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
	unsigned int	is_cxl:1;               /* Compute Express Link (CXL) */
	/*
	 * Devices marked being untrusted are the ones that can potentially
	 * execute DMA attacks and similar. They are typically connected
	 * through external ports such as Thunderbolt but not limited to
	 * that. When an IOMMU is enabled they should be getting full
	 * mappings to make sure they cannot access arbitrary memory.
	 */
	unsigned int	untrusted:1;
	/*
	 * Info from the platform, e.g., ACPI or device tree, may mark a
	 * device as "external-facing".  An external-facing device is
	 * itself internal but devices downstream from it are external.
	 */
	unsigned int	external_facing:1;
	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
	unsigned int	irq_managed:1;
	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
	unsigned int	is_probed:1;		/* Device probing in progress */
	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space  */
	pci_dev_flags_t dev_flags;	/* Quirk bits; see enum pci_dev_flags */
	atomic_t	enable_cnt;	/* pci_enable_device has been called */

	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
	u32		saved_config_space[16]; /* Config space saved at suspend time */
	struct hlist_head saved_cap_space;	/* Saved capability-space state */
	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */

#ifdef CONFIG_HOTPLUG_PCI_PCIE
	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
#endif
#ifdef CONFIG_PCIE_PTM
	u16		ptm_cap;		/* PTM Capability */
	unsigned int	ptm_root:1;
	unsigned int	ptm_responder:1;
	unsigned int	ptm_requester:1;
	unsigned int	ptm_enabled:1;
	u8		ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
	void __iomem	*msix_base;	/* MSI-X table (__iomem mapping) */
	raw_spinlock_t	msi_lock;
#endif
	struct pci_vpd	vpd;		/* Vital Product Data state */
#ifdef CONFIG_PCIE_DPC
	u16		dpc_cap;	/* DPC capability offset */
	unsigned int	dpc_rp_extensions:1;
	u8		dpc_rp_log_size;
#endif
	struct pcie_bwctrl_data		*link_bwctrl;	/* Link bandwidth controller data */
#ifdef CONFIG_PCI_ATS
	union {
		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
		struct pci_dev		*physfn;	/* VF: related PF */
	};
	u16		ats_cap;	/* ATS Capability offset */
	u8		ats_stu;	/* ATS Smallest Translation Unit */
#endif
#ifdef CONFIG_PCI_PRI
	u16		pri_cap;	/* PRI Capability offset */
	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
	unsigned int	pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
	u16		pasid_cap;	/* PASID Capability offset */
	u16		pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
	struct pci_p2pdma __rcu *p2pdma;	/* Peer-to-peer DMA state */
#endif
#ifdef CONFIG_PCI_DOE
	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
#endif
#ifdef CONFIG_PCI_NPEM
	struct npem	*npem;		/* Native PCIe Enclosure Management */
#endif
#ifdef CONFIG_PCI_IDE
	u16		ide_cap;	/* Link Integrity & Data Encryption */
	u8		nr_ide_mem;	/* Address association resources for streams */
	u8		nr_link_ide;	/* Link Stream count (Selective Stream offset) */
	u16		nr_sel_ide;	/* Selective Stream count (register block allocator) */
	struct ida	ide_stream_ida;
	unsigned int	ide_cfg:1;	/* Config cycles over IDE */
	unsigned int	ide_tee_limit:1; /* Disallow T=0 traffic over IDE */
#endif
#ifdef CONFIG_PCI_TSM
	struct pci_tsm *tsm;		/* TSM operation state */
#endif
	u16		acs_cap;	/* ACS Capability offset */
	u16		acs_capabilities; /* ACS Capabilities */
	u8		supported_speeds; /* Supported Link Speeds Vector */
	phys_addr_t	rom;		/* Physical address if not from BAR */
	size_t		romlen;		/* Length if not from BAR */
	/*
	 * Driver name to force a match.  Do not set directly, because core
	 * frees it.  Use driver_set_override() to set or clear it.
	 */
	const char	*driver_override;

	unsigned long	priv_flags;	/* Private flags for the PCI driver */

	/* These methods index pci_reset_fn_methods[] */
	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */

#ifdef CONFIG_PCIE_TPH
	u16		tph_cap;	/* TPH capability offset */
	u8		tph_mode;	/* TPH mode */
	u8		tph_req_type;	/* TPH requester type */
#endif
};
595 
/*
 * For an SR-IOV virtual function, return the associated physical
 * function; otherwise (or when SR-IOV support is compiled out) return
 * @dev unchanged.
 */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_IOV
	if (dev->is_virtfn)
		return dev->physfn;
#endif
	return dev;
}
604 
605 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
606 
607 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
608 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
609 #define for_each_pci_dev_reverse(d) \
610 	while ((d = pci_get_device_reverse(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
611 
pci_channel_offline(struct pci_dev * pdev)612 static inline int pci_channel_offline(struct pci_dev *pdev)
613 {
614 	return (pdev->error_state != pci_channel_io_normal);
615 }
616 
617 /*
618  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
619  * Group number is limited to a 16-bit value, therefore (int)-1 is
620  * not a valid PCI domain number, and can be used as a sentinel
621  * value indicating ->domain_nr is not set by the driver (and
622  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
623  * pci_bus_find_domain_nr()).
624  */
625 #define PCI_DOMAIN_NR_NOT_SET (-1)
626 
struct pci_host_bridge {
	struct device	dev;		/* Generic device interface */
	struct pci_bus	*bus;		/* Root bus */
	struct pci_ops	*ops;		/* Config access ops for the root bus */
	struct pci_ops	*child_ops;	/* Config access ops for child buses */
	void		*sysdata;	/* Hook for sys-specific extension */
	int		busnr;		/* Root bus number */
	int		domain_nr;	/* PCI domain (segment) number, or
					   PCI_DOMAIN_NR_NOT_SET */
	struct list_head windows;	/* resource_entry */
	struct list_head dma_ranges;	/* dma ranges resource list */
#ifdef CONFIG_PCI_IDE
	u16 nr_ide_streams; /* Max streams possibly active in @ide_stream_ida */
	struct ida ide_stream_ida;
	struct ida ide_stream_ids_ida; /* track unique ids per domain */
#endif
	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
	int (*map_irq)(const struct pci_dev *, u8, u8);	/* Platform IRQ mapping */
	void (*release_fn)(struct pci_host_bridge *);	/* Called at teardown */
	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
	void		*release_data;	/* Passed to release_fn() */
	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
	unsigned int	no_ext_tags:1;		/* No Extended Tags */
	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
	unsigned int	native_aer:1;		/* OS may use PCIe AER */
	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
	unsigned int	native_pme:1;		/* OS may use PCIe PME */
	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
	unsigned int	size_windows:1;		/* Enable root bus sizing */
	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */

	/* Resource alignment requirements */
	resource_size_t (*align_resource)(struct pci_dev *dev,
			const struct resource *res,
			resource_size_t start,
			resource_size_t size,
			resource_size_t align);
	unsigned long	private[] ____cacheline_aligned; /* Caller-private area; see pci_host_bridge_priv() */
};
670 
671 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
672 
/* Return the caller-private area allocated after the bridge struct */
static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
{
	return (void *)bridge->private;
}
677 
/* Recover the bridge that embeds @priv (inverse of pci_host_bridge_priv()) */
static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
{
	return container_of(priv, struct pci_host_bridge, private);
}
682 
683 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
684 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
685 						   size_t priv);
686 void pci_free_host_bridge(struct pci_host_bridge *bridge);
687 struct device *pci_get_host_bridge_device(struct pci_dev *dev);
688 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
689 
690 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
691 				 void (*release_fn)(struct pci_host_bridge *),
692 				 void *release_data);
693 
694 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
695 
696 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
697 
struct pci_bus {
	struct list_head node;		/* Node in list of buses */
	struct pci_bus	*parent;	/* Parent bus this bridge is on */
	struct list_head children;	/* List of child buses */
	struct list_head devices;	/* List of devices on this bus */
	struct pci_dev	*self;		/* Bridge device as seen by parent */
	struct list_head slots;		/* List of slots on this bus;
					   protected by pci_slot_mutex */
	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];	/* Bridge windows */
	struct list_head resources;	/* Address space routed to this bus */
	struct resource busn_res;	/* Bus numbers routed to this bus */

	struct pci_ops	*ops;		/* Configuration access functions */
	void		*sysdata;	/* Hook for sys-specific extension */
	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */

	unsigned char	number;		/* Bus number */
	unsigned char	primary;	/* Number of primary bridge */
	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	int		domain_nr;	/* PCI domain (segment) number */
#endif

	char		name[48];

	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
	struct device		*bridge;	/* Associated bridge device, if any */
	struct device		dev;		/* Generic device interface */
	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
	struct bin_attribute	*legacy_mem;	/* Legacy mem */
	unsigned int		is_added:1;
	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
	unsigned int		flit_mode:1;	/* Link in Flit mode */
};
734 
735 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
736 
/* Return the 16-bit device ID: bus number in the high byte, devfn in the low */
static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}
741 
742 /*
743  * Returns true if the PCI bus is root (behind host-PCI bridge),
744  * false otherwise
745  *
746  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
747  * This is incorrect because "virtual" buses added for SR-IOV (via
748  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
749  */
pci_is_root_bus(struct pci_bus * pbus)750 static inline bool pci_is_root_bus(struct pci_bus *pbus)
751 {
752 	return !(pbus->parent);
753 }
754 
755 /**
756  * pci_is_bridge - check if the PCI device is a bridge
757  * @dev: PCI device
758  *
759  * Return true if the PCI device is bridge whether it has subordinate
760  * or not.
761  */
pci_is_bridge(struct pci_dev * dev)762 static inline bool pci_is_bridge(struct pci_dev *dev)
763 {
764 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
765 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
766 }
767 
768 /**
769  * pci_is_vga - check if the PCI device is a VGA device
770  * @pdev: PCI device
771  *
772  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
773  * VGA Base Class and Sub-Classes:
774  *
775  *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
776  *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
777  *
778  * Return true if the PCI device is a VGA device and uses the legacy VGA
779  * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
780  * aliases).
781  */
pci_is_vga(struct pci_dev * pdev)782 static inline bool pci_is_vga(struct pci_dev *pdev)
783 {
784 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
785 		return true;
786 
787 	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
788 		return true;
789 
790 	return false;
791 }
792 
793 /**
794  * pci_is_display - check if the PCI device is a display controller
795  * @pdev: PCI device
796  *
797  * Determine whether the given PCI device corresponds to a display
798  * controller. Display controllers are typically used for graphical output
799  * and are identified based on their class code.
800  *
801  * Return: true if the PCI device is a display controller, false otherwise.
802  */
pci_is_display(struct pci_dev * pdev)803 static inline bool pci_is_display(struct pci_dev *pdev)
804 {
805 	return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
806 }
807 
/* pcie_is_cxl - report whether @pci_dev was flagged as a CXL device
 * (dev->is_cxl; presumably set during enumeration -- confirm with setter) */
static inline bool pcie_is_cxl(struct pci_dev *pci_dev)
{
	return pci_dev->is_cxl;
}
812 
/*
 * Iterate over the bridge devices on @bus.  The "if (!...) {} else" form
 * filters out non-bridges while remaining safe for an unbraced loop body
 * (no dangling-else ambiguity at the call site).
 */
#define for_each_pci_bridge(dev, bus)				\
	list_for_each_entry(dev, &bus->devices, bus_list)	\
		if (!pci_is_bridge(dev)) {} else
816 
pci_upstream_bridge(struct pci_dev * dev)817 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
818 {
819 	dev = pci_physfn(dev);
820 	if (pci_is_root_bus(dev->bus))
821 		return NULL;
822 
823 	return dev->bus->self;
824 }
825 
#ifdef CONFIG_PCI_MSI
/* True if either MSI or MSI-X is currently enabled for @pci_dev */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
{
	return pci_dev->msi_enabled || pci_dev->msix_enabled;
}
#else
/* Without CONFIG_PCI_MSI, MSI/MSI-X can never be enabled */
static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
#endif
834 
/* Error values that may be returned by PCI functions */
#define PCIBIOS_SUCCESSFUL		0x00
#define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
#define PCIBIOS_BAD_VENDOR_ID		0x83
#define PCIBIOS_DEVICE_NOT_FOUND	0x86
#define PCIBIOS_BAD_REGISTER_NUMBER	0x87
#define PCIBIOS_SET_FAILED		0x88
#define PCIBIOS_BUFFER_TOO_SMALL	0x89

/*
 * Translate above to generic errno for passing back through non-PCI code.
 * Anything at or below PCIBIOS_SUCCESSFUL (0) passes through unchanged,
 * on the assumption that it already is a (negative) errno value.
 */
static inline int pcibios_err_to_errno(int err)
{
	if (err <= PCIBIOS_SUCCESSFUL)
		return err; /* Assume already errno */

	if (err == PCIBIOS_FUNC_NOT_SUPPORTED)
		return -ENOENT;
	if (err == PCIBIOS_BAD_VENDOR_ID)
		return -ENOTTY;
	if (err == PCIBIOS_DEVICE_NOT_FOUND)
		return -ENODEV;
	if (err == PCIBIOS_BAD_REGISTER_NUMBER)
		return -EFAULT;
	if (err == PCIBIOS_SET_FAILED)
		return -EIO;
	if (err == PCIBIOS_BUFFER_TOO_SMALL)
		return -ENOSPC;

	return -ERANGE;
}
867 
/* Low-level architecture-dependent routines */

struct pci_ops {
	int (*add_bus)(struct pci_bus *bus);		/* Bus added */
	void (*remove_bus)(struct pci_bus *bus);	/* Bus removed */
	/* Map config space of @devfn at offset @where to a CPU address */
	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
	/* Config-space accessors; @size is the access width (1/2/4 presumably bytes -- confirm) */
	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
};
877 
/*
 * ACPI needs to be able to access PCI config space before we've done a
 * PCI bus scan and created pci_bus structures.
 */
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *val);
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 val);

/* Bus addresses are 64-bit only when the arch has 64-bit DMA addresses */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 pci_bus_addr_t;
#else
typedef u32 pci_bus_addr_t;
#endif

/* An address range in bus address space; both bounds are inclusive */
struct pci_bus_region {
	pci_bus_addr_t	start;
	pci_bus_addr_t	end;
};
897 
pci_bus_region_size(const struct pci_bus_region * region)898 static inline pci_bus_addr_t pci_bus_region_size(const struct pci_bus_region *region)
899 {
900 	return region->end - region->start + 1;
901 }
902 
/* Device IDs a driver gained at runtime, beyond its static id_table */
struct pci_dynids {
	spinlock_t		lock;	/* Protects list, index */
	struct list_head	list;	/* For IDs added at runtime */
};
907 
908 
/*
 * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
 * a set of callbacks in struct pci_error_handlers, that device driver
 * will be notified of PCI bus errors, and will be driven to recovery
 * when an error occurs.
 */

/* __bitwise lets sparse catch mixing these values with plain integers */
typedef unsigned int __bitwise pci_ers_result_t;

enum pci_ers_result {
	/* No result/none/not supported in device driver */
	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,

	/* Device driver can recover without slot reset */
	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,

	/* Device driver wants slot to be reset */
	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,

	/* Device has completely failed, is unrecoverable */
	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,

	/* Device driver is fully recovered and operational */
	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,

	/* No AER capabilities registered for the driver */
	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
};
937 
/* PCI bus error event callbacks */
/* The pci_ers_result_t returned by a callback steers the recovery sequence */
struct pci_error_handlers {
	/* PCI bus error detected on this device */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
					   pci_channel_state_t error);

	/* MMIO has been re-enabled, but not DMA */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);

	/* PCI slot has been reset */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);

	/* PCI function reset prepare or completed */
	void (*reset_prepare)(struct pci_dev *dev);
	void (*reset_done)(struct pci_dev *dev);

	/* Device driver may resume normal operations */
	void (*resume)(struct pci_dev *dev);

	/* Allow device driver to record more details of a correctable error */
	void (*cor_error_detected)(struct pci_dev *dev);
};
960 
961 
962 struct module;
963 
964 /**
965  * struct pci_driver - PCI driver structure
966  * @name:	Driver name.
967  * @id_table:	Pointer to table of device IDs the driver is
968  *		interested in.  Most drivers should export this
969  *		table using MODULE_DEVICE_TABLE(pci,...).
970  * @probe:	This probing function gets called (during execution
971  *		of pci_register_driver() for already existing
972  *		devices or later if a new device gets inserted) for
973  *		all PCI devices which match the ID table and are not
974  *		"owned" by the other drivers yet. This function gets
975  *		passed a "struct pci_dev \*" for each device whose
976  *		entry in the ID table matches the device. The probe
977  *		function returns zero when the driver chooses to
978  *		take "ownership" of the device or an error code
979  *		(negative number) otherwise.
980  *		The probe function always gets called from process
981  *		context, so it can sleep.
982  * @remove:	The remove() function gets called whenever a device
983  *		being handled by this driver is removed (either during
984  *		deregistration of the driver or when it's manually
985  *		pulled out of a hot-pluggable slot).
986  *		The remove function always gets called from process
987  *		context, so it can sleep.
988  * @suspend:	Put device into low power state.
989  * @resume:	Wake device from low power state.
990  *		(Please see Documentation/power/pci.rst for descriptions
991  *		of PCI Power Management and the related functions.)
992  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
993  *		Intended to stop any idling DMA operations.
994  *		Useful for enabling wake-on-lan (NIC) or changing
995  *		the power state of a device before reboot.
996  *		e.g. drivers/net/e100.c.
997  * @sriov_configure: Optional driver callback to allow configuration of
998  *		number of VFs to enable via sysfs "sriov_numvfs" file.
999  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
1000  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
1001  *              This will change MSI-X Table Size in the VF Message Control
1002  *              registers.
1003  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
1004  *              MSI-X vectors available for distribution to the VFs.
1005  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
1006  * @groups:	Sysfs attribute groups.
1007  * @dev_groups: Attributes attached to the device that will be
1008  *              created once it is bound to the driver.
1009  * @driver:	Driver model structure.
1010  * @dynids:	List of dynamically added device IDs.
1011  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
1012  *		For most device drivers, no need to care about this flag
1013  *		as long as all DMAs are handled through the kernel DMA API.
1014  *		For some special ones, for example VFIO drivers, they know
1015  *		how to manage the DMA themselves and set this flag so that
1016  *		the IOMMU layer will allow them to setup and manage their
1017  *		own I/O address space.
1018  */
struct pci_driver {
	const char		*name;
	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
	void (*shutdown)(struct pci_dev *dev);	/* Reboot/shutdown hook (see kernel-doc above) */
	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);	/* On PF */
	const struct pci_error_handlers *err_handler;	/* See Documentation/PCI/pci-error-recovery.rst */
	const struct attribute_group **groups;		/* Sysfs attribute groups */
	const struct attribute_group **dev_groups;	/* Attrs created once bound to a device */
	struct device_driver	driver;			/* Embedded driver-model structure */
	struct pci_dynids	dynids;			/* Dynamically added device IDs */
	bool driver_managed_dma;			/* Driver manages DMA itself (e.g. VFIO) */
};
1037 
/* Convert an embedded struct device_driver to its struct pci_driver; NULL-safe */
#define to_pci_driver(__drv)	\
	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
1040 
1041 /**
1042  * PCI_DEVICE - macro used to describe a specific PCI device
1043  * @vend: the 16 bit PCI Vendor ID
1044  * @dev: the 16 bit PCI Device ID
1045  *
1046  * This macro is used to create a struct pci_device_id that matches a
1047  * specific device.  The subvendor and subdevice fields will be set to
1048  * PCI_ANY_ID.
1049  */
1050 #define PCI_DEVICE(vend,dev) \
1051 	.vendor = (vend), .device = (dev), \
1052 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1053 
1054 /**
1055  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1056  *                              override_only flags.
1057  * @vend: the 16 bit PCI Vendor ID
1058  * @dev: the 16 bit PCI Device ID
1059  * @driver_override: the 32 bit PCI Device override_only
1060  *
1061  * This macro is used to create a struct pci_device_id that matches only a
1062  * driver_override device. The subvendor and subdevice fields will be set to
1063  * PCI_ANY_ID.
1064  */
1065 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1066 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1067 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
1068 
1069 /**
1070  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1071  *                                   "driver_override" PCI device.
1072  * @vend: the 16 bit PCI Vendor ID
1073  * @dev: the 16 bit PCI Device ID
1074  *
1075  * This macro is used to create a struct pci_device_id that matches a
1076  * specific device. The subvendor and subdevice fields will be set to
1077  * PCI_ANY_ID and the driver_override will be set to
1078  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1079  */
1080 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1081 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1082 
1083 /**
1084  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1085  * @vend: the 16 bit PCI Vendor ID
1086  * @dev: the 16 bit PCI Device ID
1087  * @subvend: the 16 bit PCI Subvendor ID
1088  * @subdev: the 16 bit PCI Subdevice ID
1089  *
1090  * This macro is used to create a struct pci_device_id that matches a
1091  * specific device with subsystem information.
1092  */
1093 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1094 	.vendor = (vend), .device = (dev), \
1095 	.subvendor = (subvend), .subdevice = (subdev)
1096 
1097 /**
1098  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1099  * @dev_class: the class, subclass, prog-if triple for this device
1100  * @dev_class_mask: the class mask for this device
1101  *
1102  * This macro is used to create a struct pci_device_id that matches a
1103  * specific PCI class.  The vendor, device, subvendor, and subdevice
1104  * fields will be set to PCI_ANY_ID.
1105  */
1106 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1107 	.class = (dev_class), .class_mask = (dev_class_mask), \
1108 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1109 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1110 
1111 /**
1112  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1113  * @vend: the vendor name
1114  * @dev: the 16 bit PCI Device ID
1115  *
1116  * This macro is used to create a struct pci_device_id that matches a
1117  * specific PCI device.  The subvendor, and subdevice fields will be set
1118  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1119  * private data.
1120  */
#define PCI_VDEVICE(vend, dev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 /* class, class_mask */
1124 
1125 /**
1126  * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1127  * @vend: the vendor name
1128  * @dev: the 16 bit PCI Device ID
1129  * @subvend: the 16 bit PCI Subvendor ID
1130  * @subdev: the 16 bit PCI Subdevice ID
1131  *
1132  * Generate the pci_device_id struct layout for the specific PCI
1133  * device/subdevice. Private data may follow the output.
1134  */
#define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
	.subvendor = (subvend), .subdevice = (subdev), 0, 0 /* class, class_mask */
1138 
1139 /**
1140  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1141  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1142  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1143  * @data: the driver data to be filled
1144  *
1145  * This macro is used to create a struct pci_device_id that matches a
1146  * specific PCI device.  The subvendor, and subdevice fields will be set
1147  * to PCI_ANY_ID.
1148  */
#define PCI_DEVICE_DATA(vend, dev, data) \
	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, /* class, class_mask */ \
	.driver_data = (kernel_ulong_t)(data)
1153 
/* Flag bits for the global pci_flags word (see accessors below) */
enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
};
1163 
/* Interrupt-type selection bits */
#define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
#define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
#define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
#define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
1168 
1169 /* These external functions are only available when PCI support is enabled */
1170 #ifdef CONFIG_PCI
1171 
extern unsigned int pci_flags;

/* Accessors for the global pci_flags word (PCI_REASSIGN_ALL_RSRC etc.) */
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1178 
1179 void pcie_bus_configure_settings(struct pci_bus *bus);
1180 
/* Strategies for configuring PCIe Max Payload Size (MPS) across a bus */
enum pcie_bus_config_types {
	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
};
1188 
1189 extern enum pcie_bus_config_types pcie_bus_config;
1190 
1191 extern const struct bus_type pci_bus_type;
1192 
1193 /* Do NOT directly access these two variables, unless you are arch-specific PCI
1194  * code, or PCI core code. */
1195 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1196 /* Some device drivers need know if PCI is initiated */
1197 int no_pci_devices(void);
1198 
1199 void pcibios_resource_survey_bus(struct pci_bus *bus);
1200 void pcibios_bus_add_device(struct pci_dev *pdev);
1201 void pcibios_add_bus(struct pci_bus *bus);
1202 void pcibios_remove_bus(struct pci_bus *bus);
1203 void pcibios_fixup_bus(struct pci_bus *);
1204 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1205 /* Architecture-specific versions may override this (weak) */
1206 char *pcibios_setup(char *str);
1207 
1208 /* Used only when drivers/pci/setup.c is used */
1209 resource_size_t pcibios_align_resource(void *, const struct resource *,
1210 				resource_size_t,
1211 				resource_size_t);
1212 
1213 /* Generic PCI functions used internally */
1214 
1215 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1216 			     struct resource *res);
1217 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1218 			     struct pci_bus_region *region);
1219 void pcibios_scan_specific_bus(int busn);
1220 struct pci_bus *pci_find_bus(int domain, int busnr);
1221 void pci_bus_add_devices(const struct pci_bus *bus);
1222 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1223 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1224 				    struct pci_ops *ops, void *sysdata,
1225 				    struct list_head *resources);
1226 int pci_host_probe(struct pci_host_bridge *bridge);
1227 void pci_probe_flush_workqueue(void);
1228 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1229 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1230 void pci_bus_release_busn_res(struct pci_bus *b);
1231 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1232 				  struct pci_ops *ops, void *sysdata,
1233 				  struct list_head *resources);
1234 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1235 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1236 				int busnr);
1237 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1238 				 const char *name,
1239 				 struct hotplug_slot *hotplug);
1240 void pci_destroy_slot(struct pci_slot *slot);
#ifdef CONFIG_SYSFS
void pci_dev_assign_slot(struct pci_dev *dev);
#else
/* Without sysfs, slot assignment is a no-op */
static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
#endif
1246 int pci_scan_slot(struct pci_bus *bus, int devfn);
1247 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1248 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1249 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1250 void pci_bus_add_device(struct pci_dev *dev);
1251 void pci_read_bridge_bases(struct pci_bus *child);
1252 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1253 					  struct resource *res);
1254 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1255 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1256 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1257 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1258 void pci_dev_put(struct pci_dev *dev);
1259 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1260 void pci_remove_bus(struct pci_bus *b);
1261 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1262 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1263 void pci_stop_root_bus(struct pci_bus *bus);
1264 void pci_remove_root_bus(struct pci_bus *bus);
#ifdef CONFIG_CARDBUS
void pci_setup_cardbus_bridge(struct pci_bus *bus);
#else
/* Without CardBus support, bridge setup is a no-op */
static inline void pci_setup_cardbus_bridge(struct pci_bus *bus) { }
#endif
1270 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1271 void pci_sort_breadthfirst(void);
/* Is this struct device a PCI device? */
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
/* Is it an SR-IOV physical function?  (false for non-PCI devices) */
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1274 
1275 /* Generic PCI functions exported to card drivers */
1276 
1277 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1278 u8 pci_find_capability(struct pci_dev *dev, int cap);
1279 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1280 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1281 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1282 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1283 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1284 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1285 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1286 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1287 
1288 u64 pci_get_dsn(struct pci_dev *dev);
1289 
1290 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1291 			       struct pci_dev *from);
1292 struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device,
1293 				       struct pci_dev *from);
1294 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1295 			       unsigned int ss_vendor, unsigned int ss_device,
1296 			       struct pci_dev *from);
1297 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1298 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1299 					    unsigned int devfn);
1300 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1301 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1302 
1303 int pci_dev_present(const struct pci_device_id *ids);
1304 
1305 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1306 			     int where, u8 *val);
1307 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1308 			     int where, u16 *val);
1309 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1310 			      int where, u32 *val);
1311 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1312 			      int where, u8 val);
1313 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1314 			      int where, u16 val);
1315 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1316 			       int where, u32 val);
1317 
1318 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1319 			    int where, int size, u32 *val);
1320 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1321 			    int where, int size, u32 val);
1322 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1323 			      int where, int size, u32 *val);
1324 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1325 			       int where, int size, u32 val);
1326 
1327 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1328 
1329 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1330 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1331 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1332 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1333 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1334 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1335 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1336 				    u32 clear, u32 set);
1337 
1338 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1339 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1340 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1341 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1342 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1343 						u16 clear, u16 set);
1344 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1345 					      u16 clear, u16 set);
1346 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1347 					u32 clear, u32 set);
1348 
1349 /**
1350  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1351  * @dev:	PCI device structure of the PCI Express device
1352  * @pos:	PCI Express Capability Register
1353  * @clear:	Clear bitmask
1354  * @set:	Set bitmask
1355  *
1356  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1357  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1358  * Capability Registers are accessed concurrently in RMW fashion, hence
1359  * require locking which is handled transparently to the caller.
1360  */
pcie_capability_clear_and_set_word(struct pci_dev * dev,int pos,u16 clear,u16 set)1361 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1362 						     int pos,
1363 						     u16 clear, u16 set)
1364 {
1365 	switch (pos) {
1366 	case PCI_EXP_LNKCTL:
1367 	case PCI_EXP_LNKCTL2:
1368 	case PCI_EXP_RTCTL:
1369 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1370 								 clear, set);
1371 	default:
1372 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1373 								   clear, set);
1374 	}
1375 }
1376 
/* Set @set bits in the PCI Express Capability word register at @pos */
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
					   u16 set)
{
	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
}

/* Set @set bits in the PCI Express Capability dword register at @pos */
static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
					    u32 set)
{
	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
}

/* Clear @clear bits in the PCI Express Capability word register at @pos */
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}

/* Clear @clear bits in the PCI Express Capability dword register at @pos */
static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
					      u32 clear)
{
	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
}
1400 
1401 /* User-space driven config access */
1402 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1403 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1404 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1405 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1406 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1407 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1408 
1409 int __must_check pci_enable_device(struct pci_dev *dev);
1410 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1411 int __must_check pci_reenable_device(struct pci_dev *);
1412 int __must_check pcim_enable_device(struct pci_dev *pdev);
1413 void pcim_pin_device(struct pci_dev *pdev);
1414 
pci_intx_mask_supported(struct pci_dev * pdev)1415 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1416 {
1417 	/*
1418 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1419 	 * writable and no quirk has marked the feature broken.
1420 	 */
1421 	return !pdev->broken_intx_masking;
1422 }
1423 
pci_is_enabled(struct pci_dev * pdev)1424 static inline int pci_is_enabled(struct pci_dev *pdev)
1425 {
1426 	return (atomic_read(&pdev->enable_cnt) > 0);
1427 }
1428 
/* Device uses managed (devres) resources; presumably set by
 * pcim_enable_device() -- confirm against the setter */
static inline int pci_is_managed(struct pci_dev *pdev)
{
	return pdev->is_managed;
}
1433 
1434 void pci_disable_device(struct pci_dev *dev);
1435 
1436 extern unsigned int pcibios_max_latency;
1437 void pci_set_master(struct pci_dev *dev);
1438 void pci_clear_master(struct pci_dev *dev);
1439 
1440 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1441 int pci_set_cacheline_size(struct pci_dev *dev);
1442 int __must_check pci_set_mwi(struct pci_dev *dev);
1443 int __must_check pcim_set_mwi(struct pci_dev *dev);
1444 int pci_try_set_mwi(struct pci_dev *dev);
1445 void pci_clear_mwi(struct pci_dev *dev);
1446 void pci_disable_parity(struct pci_dev *dev);
1447 void pci_intx(struct pci_dev *dev, int enable);
1448 bool pci_check_and_mask_intx(struct pci_dev *dev);
1449 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1450 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1451 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1452 int pcix_get_max_mmrbc(struct pci_dev *dev);
1453 int pcix_get_mmrbc(struct pci_dev *dev);
1454 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1455 int pcie_get_readrq(struct pci_dev *dev);
1456 int pcie_set_readrq(struct pci_dev *dev, int rq);
1457 int pcie_get_mps(struct pci_dev *dev);
1458 int pcie_set_mps(struct pci_dev *dev, int mps);
1459 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1460 			     enum pci_bus_speed *speed,
1461 			     enum pcie_link_width *width);
1462 int pcie_link_speed_mbps(struct pci_dev *pdev);
1463 void pcie_print_link_status(struct pci_dev *dev);
1464 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1465 int pcie_flr(struct pci_dev *dev);
1466 int __pci_reset_function_locked(struct pci_dev *dev);
1467 int pci_reset_function(struct pci_dev *dev);
1468 int pci_reset_function_locked(struct pci_dev *dev);
1469 int pci_try_reset_function(struct pci_dev *dev);
1470 int pci_probe_reset_slot(struct pci_slot *slot);
1471 int pci_probe_reset_bus(struct pci_bus *bus);
1472 int pci_reset_bus(struct pci_dev *dev);
1473 void pci_reset_secondary_bus(struct pci_dev *dev);
1474 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1475 void pci_update_resource(struct pci_dev *dev, int resno);
1476 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1477 int pci_release_resource(struct pci_dev *dev, int resno);
1478 
1479 /* Resizable BAR related routines */
1480 int pci_rebar_bytes_to_size(u64 bytes);
1481 resource_size_t pci_rebar_size_to_bytes(int size);
1482 u64 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1483 bool pci_rebar_size_supported(struct pci_dev *pdev, int bar, int size);
1484 int pci_rebar_get_max_size(struct pci_dev *pdev, int bar);
1485 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size,
1486 				     int exclude_bars);
1487 
1488 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1489 bool pci_device_is_present(struct pci_dev *pdev);
1490 void pci_ignore_hotplug(struct pci_dev *dev);
1491 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1492 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1493 
1494 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1495 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1496 		const char *fmt, ...);
1497 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1498 
1499 /* ROM control related routines */
1500 int pci_enable_rom(struct pci_dev *pdev);
1501 void pci_disable_rom(struct pci_dev *pdev);
1502 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1503 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1504 
1505 /* Power management related routines */
1506 int pci_save_state(struct pci_dev *dev);
1507 void pci_restore_state(struct pci_dev *dev);
1508 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1509 int pci_load_saved_state(struct pci_dev *dev,
1510 			 struct pci_saved_state *state);
1511 int pci_load_and_free_saved_state(struct pci_dev *dev,
1512 				  struct pci_saved_state **state);
1513 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1514 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1515 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1516 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1517 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1518 void pci_pme_active(struct pci_dev *dev, bool enable);
1519 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1520 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1521 int pci_prepare_to_sleep(struct pci_dev *dev);
1522 int pci_back_from_sleep(struct pci_dev *dev);
1523 bool pci_dev_run_wake(struct pci_dev *dev);
1524 void pci_d3cold_enable(struct pci_dev *dev);
1525 void pci_d3cold_disable(struct pci_dev *dev);
1526 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1527 void pci_resume_bus(struct pci_bus *bus);
1528 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1529 
1530 /* For use by arch with custom probe code */
1531 void set_pcie_port_type(struct pci_dev *pdev);
1532 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1533 
1534 /* Functions for PCI Hotplug drivers to use */
1535 unsigned int pci_rescan_bus(struct pci_bus *bus);
1536 void pci_lock_rescan_remove(void);
1537 void pci_unlock_rescan_remove(void);
1538 
1539 /* Vital Product Data routines */
1540 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1541 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1542 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1543 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1544 
1545 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1546 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1547 void pci_bus_assign_resources(const struct pci_bus *bus);
1548 void pci_bus_claim_resources(struct pci_bus *bus);
1549 void pci_bus_size_bridges(struct pci_bus *bus);
1550 int pci_claim_resource(struct pci_dev *, int);
1551 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1552 void pci_assign_unassigned_resources(void);
1553 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1554 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1555 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1556 int pci_enable_resources(struct pci_dev *, int mask);
1557 void pci_assign_irq(struct pci_dev *dev);
1558 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1559 #define HAVE_PCI_REQ_REGIONS	2
1560 int __must_check pci_request_regions(struct pci_dev *, const char *);
1561 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1562 void pci_release_regions(struct pci_dev *);
1563 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1564 void pci_release_region(struct pci_dev *, int);
1565 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1566 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1567 void pci_release_selected_regions(struct pci_dev *, int);
1568 
/**
 * pci_request_config_region_exclusive - request exclusive config space region
 * @pdev: PCI device the region belongs to
 * @offset: byte offset of the region within config space
 * @len: length of the region in bytes
 * @name: name to associate with the region
 *
 * Claim [@offset, @offset + @len) of @pdev's driver_exclusive_resource with
 * IORESOURCE_EXCLUSIVE set, so that no overlapping range can be claimed by
 * anyone else.  Returns the new resource on success, NULL on conflict.
 */
static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
				    unsigned int len, const char *name)
{
	return __request_region(&pdev->driver_exclusive_resource, offset, len,
				name, IORESOURCE_EXCLUSIVE);
}
1576 
/**
 * pci_release_config_region - release a config space region
 * @pdev: PCI device the region belongs to
 * @offset: byte offset that was passed to pci_request_config_region_exclusive()
 * @len: length that was passed to pci_request_config_region_exclusive()
 */
static inline void pci_release_config_region(struct pci_dev *pdev,
					     unsigned int offset,
					     unsigned int len)
{
	__release_region(&pdev->driver_exclusive_resource, offset, len);
}
1583 
1584 /* drivers/pci/bus.c */
1585 void pci_add_resource(struct list_head *resources, struct resource *res);
1586 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1587 			     resource_size_t offset);
1588 void pci_free_resource_list(struct list_head *resources);
1589 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1590 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1591 void pci_bus_remove_resources(struct pci_bus *bus);
1592 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1593 int devm_request_pci_bus_resources(struct device *dev,
1594 				   struct list_head *resources);
1595 
1596 /* Temporary until new and working PCI SBR API in place */
1597 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1598 
/*
 * Implementation helpers for pci_bus_for_each_resource().  The
 * "|| __b < PCI_BRIDGE_RESOURCE_NUM" keeps the loop running across NULL
 * slots in the fixed bus resource array before the additional-resource
 * list takes over, so @res may be NULL inside the loop body.
 */
#define __pci_bus_for_each_res0(bus, res, ...)				\
	for (unsigned int __b = 0;					\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)

/* Variant used when the caller supplies its own index variable @__b. */
#define __pci_bus_for_each_res1(bus, res, __b)				\
	for (__b = 0;							\
	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
	     __b++)
1608 
1609 /**
1610  * pci_bus_for_each_resource - iterate over PCI bus resources
1611  * @bus: the PCI bus
1612  * @res: pointer to the current resource
1613  * @...: optional index of the current resource
1614  *
1615  * Iterate over PCI bus resources. The first part is to go over PCI bus
1616  * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries.
1617  * After that continue with the separate list of the additional resources,
1618  * if not empty. That's why the Logical OR is being used.
1619  *
1620  * Possible usage:
1621  *
1622  *	struct pci_bus *bus = ...;
1623  *	struct resource *res;
1624  *	unsigned int i;
1625  *
1626  * 	// With optional index
1627  * 	pci_bus_for_each_resource(bus, res, i)
1628  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1629  *
1630  * 	// Without index
1631  * 	pci_bus_for_each_resource(bus, res)
1632  * 		_do_something_(res);
1633  */
1634 #define pci_bus_for_each_resource(bus, res, ...)			\
1635 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1636 		    (bus, res, __VA_ARGS__)
1637 
1638 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1639 			struct resource *res, resource_size_t size,
1640 			resource_size_t align, resource_size_t min,
1641 			unsigned long type_mask,
1642 			resource_alignf alignf,
1643 			void *alignf_data);
1644 
1645 
1646 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1647 			resource_size_t size);
1648 unsigned long pci_address_to_pio(phys_addr_t addr);
1649 phys_addr_t pci_pio_to_address(unsigned long pio);
1650 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1651 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1652 			   phys_addr_t phys_addr);
1653 void pci_unmap_iospace(struct resource *res);
1654 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1655 				      resource_size_t offset,
1656 				      resource_size_t size);
1657 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1658 					  struct resource *res);
1659 
/**
 * pci_bus_address - translate a BAR's host resource into a PCI bus address
 * @pdev: PCI device
 * @bar: index into pdev->resource[]
 *
 * Convert the CPU resource backing @bar with pcibios_resource_to_bus() and
 * return the start of the resulting bus region.
 */
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
	struct pci_bus_region region;

	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
	return region.start;
}
1667 
1668 /* Proper probing supporting hot-pluggable devices */
1669 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1670 				       const char *mod_name);
1671 
1672 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1673 #define pci_register_driver(driver)		\
1674 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1675 
1676 void pci_unregister_driver(struct pci_driver *dev);
1677 
1678 /**
1679  * module_pci_driver() - Helper macro for registering a PCI driver
1680  * @__pci_driver: pci_driver struct
1681  *
1682  * Helper macro for PCI drivers which do not do anything special in module
1683  * init/exit. This eliminates a lot of boilerplate. Each module may only
1684  * use this macro once, and calling it replaces module_init() and module_exit()
1685  */
1686 #define module_pci_driver(__pci_driver) \
1687 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1688 
1689 /**
1690  * builtin_pci_driver() - Helper macro for registering a PCI driver
1691  * @__pci_driver: pci_driver struct
1692  *
1693  * Helper macro for PCI drivers which do not do anything special in their
1694  * init code. This eliminates a lot of boilerplate. Each driver may only
1695  * use this macro once, and calling it replaces device_initcall(...)
1696  */
1697 #define builtin_pci_driver(__pci_driver) \
1698 	builtin_driver(__pci_driver, pci_register_driver)
1699 
1700 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1701 int pci_add_dynid(struct pci_driver *drv,
1702 		  unsigned int vendor, unsigned int device,
1703 		  unsigned int subvendor, unsigned int subdevice,
1704 		  unsigned int class, unsigned int class_mask,
1705 		  unsigned long driver_data);
1706 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1707 					 struct pci_dev *dev);
1708 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1709 		    int pass);
1710 
1711 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1712 		  void *userdata);
1713 void pci_walk_bus_reverse(struct pci_bus *top,
1714 			  int (*cb)(struct pci_dev *, void *), void *userdata);
1715 int pci_cfg_space_size(struct pci_dev *dev);
1716 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1717 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1718 					 unsigned long type);
1719 
1720 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1721 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1722 
1723 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1724 		      unsigned int command_bits, u32 flags);
1725 
1726 /*
1727  * Virtual interrupts allow for more interrupts to be allocated
1728  * than the device has interrupts for. These are not programmed
1729  * into the device's MSI-X table and must be handled by some
1730  * other driver means.
1731  */
1732 #define PCI_IRQ_VIRTUAL		(1 << 4)
1733 
1734 #define PCI_IRQ_ALL_TYPES	(PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1735 
1736 #include <linux/dmapool.h>
1737 
1738 struct msix_entry {
1739 	u32	vector;	/* Kernel uses to write allocated vector */
1740 	u16	entry;	/* Driver uses to specify entry, OS writes */
1741 };
1742 
1743 #ifdef CONFIG_PCI_MSI
1744 int pci_msi_vec_count(struct pci_dev *dev);
1745 void pci_disable_msi(struct pci_dev *dev);
1746 int pci_msix_vec_count(struct pci_dev *dev);
1747 void pci_disable_msix(struct pci_dev *dev);
1748 void pci_restore_msi_state(struct pci_dev *dev);
1749 bool pci_msi_enabled(void);
1750 int pci_enable_msi(struct pci_dev *dev);
1751 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1752 			  int minvec, int maxvec);
/**
 * pci_enable_msix_exact - configure exactly @nvec MSI-X interrupts
 * @dev: PCI device to operate on
 * @entries: array of MSI-X entries to fill in
 * @nvec: number of vectors required (no more, no fewer)
 *
 * All-or-nothing wrapper around pci_enable_msix_range(): succeed only if
 * exactly @nvec vectors can be allocated.  Returns 0 on success, a
 * negative error code otherwise.
 */
static inline int pci_enable_msix_exact(struct pci_dev *dev,
					struct msix_entry *entries, int nvec)
{
	int ret = pci_enable_msix_range(dev, entries, nvec, nvec);

	return ret < 0 ? ret : 0;
}
1761 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1762 			  unsigned int max_vecs, unsigned int flags);
1763 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1764 				   unsigned int max_vecs, unsigned int flags,
1765 				   struct irq_affinity *affd);
1766 
1767 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1768 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1769 				     const struct irq_affinity_desc *affdesc);
1770 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1771 
1772 void pci_free_irq_vectors(struct pci_dev *dev);
1773 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1774 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1775 
1776 #else
pci_msi_vec_count(struct pci_dev * dev)1777 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1778 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1779 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1780 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1781 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1782 static inline bool pci_msi_enabled(void) { return false; }
pci_enable_msi(struct pci_dev * dev)1783 static inline int pci_enable_msi(struct pci_dev *dev)
1784 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1785 static inline int pci_enable_msix_range(struct pci_dev *dev,
1786 			struct msix_entry *entries, int minvec, int maxvec)
1787 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1788 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1789 			struct msix_entry *entries, int nvec)
1790 { return -ENOSYS; }
1791 
/*
 * Stub for !CONFIG_PCI_MSI: only the legacy INTx line can be "allocated".
 * Succeed with one vector when INTx is acceptable, a single vector is
 * enough, and the device actually has an IRQ; otherwise fail with -ENOSPC.
 */
static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
		return 1;
	return -ENOSPC;
}
/* Stub for !CONFIG_PCI_MSI: defer to the affinity variant with no affinity. */
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
					      flags, NULL);
}
1808 
/* Stubs for !CONFIG_PCI_MSI: dynamic MSI-X allocation is unsupported. */
static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
{ return false; }
static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
						   const struct irq_affinity_desc *affdesc)
{
	struct msi_map map = { .index = -ENOSYS, };

	return map;
}

static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
{
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}

/*
 * Without MSI support only the single legacy INTx line exists, so any
 * nonzero vector number is a caller bug.
 */
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	if (WARN_ON_ONCE(nr > 0))
		return -EINVAL;
	return dev->irq;
}
/* Legacy INTx has no per-vector affinity; any possible CPU may service it. */
static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
		int vec)
{
	return cpu_possible_mask;
}
1838 #endif
1839 
1840 /**
1841  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1842  * @d: the INTx IRQ domain
1843  * @node: the DT node for the device whose interrupt we're translating
1844  * @intspec: the interrupt specifier data from the DT
1845  * @intsize: the number of entries in @intspec
1846  * @out_hwirq: pointer at which to write the hwirq number
1847  * @out_type: pointer at which to write the interrupt type
1848  *
1849  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1850  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1851  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1852  * INTx value to obtain the hwirq number.
1853  *
1854  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1855  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1856 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1857 				      struct device_node *node,
1858 				      const u32 *intspec,
1859 				      unsigned int intsize,
1860 				      unsigned long *out_hwirq,
1861 				      unsigned int *out_type)
1862 {
1863 	const u32 intx = intspec[0];
1864 
1865 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1866 		return -EINVAL;
1867 
1868 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1869 	return 0;
1870 }
1871 
1872 #ifdef CONFIG_PCIEPORTBUS
1873 extern bool pcie_ports_disabled;
1874 extern bool pcie_ports_native;
1875 
1876 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1877 			  bool use_lt);
1878 #else
1879 #define pcie_ports_disabled	true
1880 #define pcie_ports_native	false
1881 
pcie_set_target_speed(struct pci_dev * port,enum pci_bus_speed speed_req,bool use_lt)1882 static inline int pcie_set_target_speed(struct pci_dev *port,
1883 					enum pci_bus_speed speed_req,
1884 					bool use_lt)
1885 {
1886 	return -EOPNOTSUPP;
1887 }
1888 #endif
1889 
1890 #define PCIE_LINK_STATE_L0S		(BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1891 #define PCIE_LINK_STATE_L1		BIT(2)	/* L1 state */
1892 #define PCIE_LINK_STATE_L1_1		BIT(3)	/* ASPM L1.1 state */
1893 #define PCIE_LINK_STATE_L1_2		BIT(4)	/* ASPM L1.2 state */
1894 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)	/* PCI-PM L1.1 state */
1895 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)	/* PCI-PM L1.2 state */
1896 #define PCIE_LINK_STATE_ASPM_ALL	(PCIE_LINK_STATE_L0S		|\
1897 					 PCIE_LINK_STATE_L1		|\
1898 					 PCIE_LINK_STATE_L1_1		|\
1899 					 PCIE_LINK_STATE_L1_2		|\
1900 					 PCIE_LINK_STATE_L1_1_PCIPM	|\
1901 					 PCIE_LINK_STATE_L1_2_PCIPM)
1902 #define PCIE_LINK_STATE_CLKPM		BIT(7)
1903 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_ASPM_ALL	|\
1904 					 PCIE_LINK_STATE_CLKPM)
1905 
1906 #ifdef CONFIG_PCIEASPM
1907 int pci_disable_link_state(struct pci_dev *pdev, int state);
1908 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1909 int pci_enable_link_state(struct pci_dev *pdev, int state);
1910 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1911 void pcie_no_aspm(void);
1912 bool pcie_aspm_support_enabled(void);
1913 bool pcie_aspm_enabled(struct pci_dev *pdev);
1914 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1915 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1916 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1917 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1918 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1919 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1920 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1921 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1922 { return 0; }
pcie_no_aspm(void)1923 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1924 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1925 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1926 #endif
1927 
1928 #ifdef CONFIG_HOTPLUG_PCI
1929 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1930 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1931 #else
pci_hp_ignore_link_change(struct pci_dev * pdev)1932 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
pci_hp_unignore_link_change(struct pci_dev * pdev)1933 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1934 #endif
1935 
1936 #ifdef CONFIG_PCIEAER
1937 bool pci_aer_available(void);
1938 #else
pci_aer_available(void)1939 static inline bool pci_aer_available(void) { return false; }
1940 #endif
1941 
1942 bool pci_ats_disabled(void);
1943 
1944 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1945 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1946 
1947 struct pcie_ptm_ops {
1948 	int (*check_capability)(void *drvdata);
1949 	int (*context_update_write)(void *drvdata, u8 mode);
1950 	int (*context_update_read)(void *drvdata, u8 *mode);
1951 	int (*context_valid_write)(void *drvdata, bool valid);
1952 	int (*context_valid_read)(void *drvdata, bool *valid);
1953 	int (*local_clock_read)(void *drvdata, u64 *clock);
1954 	int (*master_clock_read)(void *drvdata, u64 *clock);
1955 	int (*t1_read)(void *drvdata, u64 *clock);
1956 	int (*t2_read)(void *drvdata, u64 *clock);
1957 	int (*t3_read)(void *drvdata, u64 *clock);
1958 	int (*t4_read)(void *drvdata, u64 *clock);
1959 
1960 	bool (*context_update_visible)(void *drvdata);
1961 	bool (*context_valid_visible)(void *drvdata);
1962 	bool (*local_clock_visible)(void *drvdata);
1963 	bool (*master_clock_visible)(void *drvdata);
1964 	bool (*t1_visible)(void *drvdata);
1965 	bool (*t2_visible)(void *drvdata);
1966 	bool (*t3_visible)(void *drvdata);
1967 	bool (*t4_visible)(void *drvdata);
1968 };
1969 
1970 struct pci_ptm_debugfs {
1971 	struct dentry *debugfs;
1972 	const struct pcie_ptm_ops *ops;
1973 	struct mutex lock;
1974 	void *pdata;
1975 };
1976 
1977 #ifdef CONFIG_PCIE_PTM
1978 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1979 void pci_disable_ptm(struct pci_dev *dev);
1980 bool pcie_ptm_enabled(struct pci_dev *dev);
1981 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1982 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1983 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1984 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1985 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1986 { return false; }
1987 #endif
1988 
1989 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1990 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1991 						const struct pcie_ptm_ops *ops);
1992 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1993 #else
1994 static inline struct pci_ptm_debugfs
pcie_ptm_create_debugfs(struct device * dev,void * pdata,const struct pcie_ptm_ops * ops)1995 *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1996 			 const struct pcie_ptm_ops *ops) { return NULL; }
1997 static inline void
pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs * ptm_debugfs)1998 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
1999 #endif
2000 
2001 void pci_cfg_access_lock(struct pci_dev *dev);
2002 bool pci_cfg_access_trylock(struct pci_dev *dev);
2003 void pci_cfg_access_unlock(struct pci_dev *dev);
2004 
2005 void pci_dev_lock(struct pci_dev *dev);
2006 int pci_dev_trylock(struct pci_dev *dev);
2007 void pci_dev_unlock(struct pci_dev *dev);
2008 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
2009 
2010 /*
2011  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
2012  * a PCI domain is defined to be a set of PCI buses which share
2013  * configuration space.
2014  */
2015 #ifdef CONFIG_PCI_DOMAINS
2016 extern int pci_domains_supported;
2017 int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max);
2018 void pci_bus_release_emul_domain_nr(int domain_nr);
2019 #else
2020 enum { pci_domains_supported = 0 };
2021 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
2022 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
2023 static inline int pci_bus_find_emul_domain_nr(u32 hint, u32 min, u32 max)
2024 {
2025 	return 0;
2026 }
2027 static inline void pci_bus_release_emul_domain_nr(int domain_nr) { }
2028 #endif /* CONFIG_PCI_DOMAINS */
2029 
2030 /*
2031  * Generic implementation for PCI domain support. If your
2032  * architecture does not need custom management of PCI
2033  * domains then this implementation will be used
2034  */
2035 #ifdef CONFIG_PCI_DOMAINS_GENERIC
/* Generic PCI domain support: the domain number is stored on the bus. */
static inline int pci_domain_nr(struct pci_bus *bus)
{
	return bus->domain_nr;
}
2040 #ifdef CONFIG_ACPI
2041 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
2042 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)2043 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
2044 { return 0; }
2045 #endif
2046 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
2047 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
2048 #endif
2049 
2050 /* Some architectures require additional setup to direct VGA traffic */
2051 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
2052 				    unsigned int command_bits, u32 flags);
2053 void pci_register_set_vga_state(arch_set_vga_state_t func);
2054 
/* Request all I/O port BARs of @pdev under the name @name. */
static inline int
pci_request_io_regions(struct pci_dev *pdev, const char *name)
{
	return pci_request_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_IO), name);
}
2061 
2062 static inline void
pci_release_io_regions(struct pci_dev * pdev)2063 pci_release_io_regions(struct pci_dev *pdev)
2064 {
2065 	return pci_release_selected_regions(pdev,
2066 			    pci_select_bars(pdev, IORESOURCE_IO));
2067 }
2068 
/* Request all memory BARs of @pdev under the name @name. */
static inline int
pci_request_mem_regions(struct pci_dev *pdev, const char *name)
{
	return pci_request_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_MEM), name);
}
2075 
2076 static inline void
pci_release_mem_regions(struct pci_dev * pdev)2077 pci_release_mem_regions(struct pci_dev *pdev)
2078 {
2079 	return pci_release_selected_regions(pdev,
2080 			    pci_select_bars(pdev, IORESOURCE_MEM));
2081 }
2082 
2083 #else /* CONFIG_PCI is not enabled */
2084 
pci_set_flags(int flags)2085 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)2086 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)2087 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)2088 static inline int pci_has_flag(int flag) { return 0; }
2089 
2090 /*
2091  * If the system does not have PCI, clearly these return errors.  Define
2092  * these as simple inline functions to avoid hair in drivers.
2093  */
2094 #define _PCI_NOP(o, s, t) \
2095 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
2096 						int where, t val) \
2097 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
2098 
2099 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
2100 				_PCI_NOP(o, word, u16 x) \
2101 				_PCI_NOP(o, dword, u32 x)
2102 _PCI_NOP_ALL(read, *)
2103 _PCI_NOP_ALL(write,)
2104 
pci_probe_flush_workqueue(void)2105 static inline void pci_probe_flush_workqueue(void) { }
2106 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)2107 static inline struct pci_dev *pci_get_device(unsigned int vendor,
2108 					     unsigned int device,
2109 					     struct pci_dev *from)
2110 { return NULL; }
2111 
pci_get_device_reverse(unsigned int vendor,unsigned int device,struct pci_dev * from)2112 static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor,
2113 						     unsigned int device,
2114 						     struct pci_dev *from)
2115 { return NULL; }
2116 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)2117 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
2118 					     unsigned int device,
2119 					     unsigned int ss_vendor,
2120 					     unsigned int ss_device,
2121 					     struct pci_dev *from)
2122 { return NULL; }
2123 
pci_get_class(unsigned int class,struct pci_dev * from)2124 static inline struct pci_dev *pci_get_class(unsigned int class,
2125 					    struct pci_dev *from)
2126 { return NULL; }
2127 
pci_get_base_class(unsigned int class,struct pci_dev * from)2128 static inline struct pci_dev *pci_get_base_class(unsigned int class,
2129 						 struct pci_dev *from)
2130 { return NULL; }
2131 
pci_dev_present(const struct pci_device_id * ids)2132 static inline int pci_dev_present(const struct pci_device_id *ids)
2133 { return 0; }
2134 
2135 #define no_pci_devices()	(1)
2136 #define pci_dev_put(dev)	do { } while (0)
2137 
pci_set_master(struct pci_dev * dev)2138 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)2139 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)2140 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)2141 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2142 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2143 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2144 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2145 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2146 						     struct module *owner,
2147 						     const char *mod_name)
2148 { return 0; }
pci_register_driver(struct pci_driver * drv)2149 static inline int pci_register_driver(struct pci_driver *drv)
2150 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2151 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2152 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2153 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2154 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2155 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2156 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2157 { return 0; }
2158 
pci_get_dsn(struct pci_dev * dev)2159 static inline u64 pci_get_dsn(struct pci_dev *dev)
2160 { return 0; }
2161 
/* Power management related routines (stubs: all succeed as no-ops) */
static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
/* Always reports D0 (fully on) since there is no device to suspend. */
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
					   pm_message_t state)
{ return PCI_D0; }
static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
				  int enable)
{ return 0; }
2177 
/* Resource and bus lookup stubs: lookups find nothing, requests fail. */
static inline struct resource *pci_find_resource(struct pci_dev *dev,
						 struct resource *res)
{ return NULL; }
static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
{ return -EIO; }
static inline void pci_release_regions(struct pci_dev *dev) { }

static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
					phys_addr_t addr, resource_size_t size)
{ return -EINVAL; }

/* Returns (unsigned long)-1, the "no PIO mapping" sentinel. */
static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }

static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
{ return NULL; }
static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
						unsigned int devfn)
{ return NULL; }
static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
					unsigned int bus, unsigned int devfn)
{ return NULL; }

static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2202 
/* Without PCI, no struct device is a PCI device or a physical function. */
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{ return false; }
static inline int pci_irqd_intx_xlate(struct irq_domain *d,
				      struct device_node *node,
				      const u32 *intspec,
				      unsigned int intsize,
				      unsigned long *out_hwirq,
				      unsigned int *out_type)
{ return -EINVAL; }
2214 
static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
							 struct pci_dev *dev)
{ return NULL; }
static inline bool pci_ats_disabled(void) { return true; }

/* IRQ vector stubs: no vectors can be looked up or allocated. */
static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	return -EINVAL;
}

static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
			       unsigned int max_vecs, unsigned int flags,
			       struct irq_affinity *aff_desc)
{
	return -ENOSPC;
}
static inline int
pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		      unsigned int max_vecs, unsigned int flags)
{
	return -ENOSPC;
}

static inline void pci_free_irq_vectors(struct pci_dev *dev)
{
}
2242 #endif /* CONFIG_PCI */
2243 
2244 /* Include architecture-dependent settings and functions */
2245 
2246 #include <asm/pci.h>
2247 
2248 /*
2249  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2250  * is expected to be an offset within that region.
2251  *
2252  */
2253 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2254 			    struct vm_area_struct *vma,
2255 			    enum pci_mmap_state mmap_state, int write_combine);
2256 
2257 #ifndef arch_can_pci_mmap_wc
2258 #define arch_can_pci_mmap_wc()		0
2259 #endif
2260 
2261 #ifndef arch_can_pci_mmap_io
2262 #define arch_can_pci_mmap_io()		0
2263 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2264 #else
2265 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2266 #endif
2267 
2268 #ifndef pci_root_bus_fwnode
2269 #define pci_root_bus_fwnode(bus)	NULL
2270 #endif
2271 
2272 /*
2273  * These helpers provide future and backwards compatibility
2274  * for accessing popular PCI BAR info
2275  */
#define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
#define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
#define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
#define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
/* Treat an empty resource (end == 0) as length 0. */
#define pci_resource_len(dev,bar)					\
	(pci_resource_end((dev), (bar)) ? 				\
	 resource_size(pci_resource_n((dev), (bar))) : 0)

/* Variant used when the caller does not supply an index variable. */
#define __pci_dev_for_each_res0(dev, res, ...)				  \
	for (unsigned int __b = 0;					  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

/* Variant used when the caller supplies the index variable @__b. */
#define __pci_dev_for_each_res1(dev, res, __b)				  \
	for (__b = 0;							  \
	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
	     __b++)

/* Dispatches to res0/res1 depending on whether an index arg is given. */
#define pci_dev_for_each_resource(dev, res, ...)			\
	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
		    (dev, res, __VA_ARGS__)
2297 
2298 /*
2299  * Similar to the helpers above, these manipulate per-pci_dev
2300  * driver-specific data.  They are really just a wrapper around
2301  * the generic device structure functions of these calls.
2302  */
pci_get_drvdata(struct pci_dev * pdev)2303 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2304 {
2305 	return dev_get_drvdata(&pdev->dev);
2306 }
2307 
pci_set_drvdata(struct pci_dev * pdev,void * data)2308 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2309 {
2310 	dev_set_drvdata(&pdev->dev, data);
2311 }
2312 
pci_name(const struct pci_dev * pdev)2313 static inline const char *pci_name(const struct pci_dev *pdev)
2314 {
2315 	return dev_name(&pdev->dev);
2316 }
2317 
2318 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2319 			  const struct resource *rsrc,
2320 			  resource_size_t *start, resource_size_t *end);
2321 
2322 /*
2323  * The world is not perfect and supplies us with broken PCI devices.
2324  * For at least a part of these bugs we need a work-around, so both
2325  * generic (drivers/pci/quirks.c) and per-architecture code can define
2326  * fixup hooks to be called for particular buggy devices.
2327  */
2328 
/* One quirk-table entry: which device(s) it matches and the hook to run. */
struct pci_fixup {
	u16 vendor;			/* Or PCI_ANY_ID */
	u16 device;			/* Or PCI_ANY_ID */
	u32 class;			/* Or PCI_ANY_ID */
	unsigned int class_shift;	/* should be 0, 8, 16 */
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	/* PC-relative offset to the hook (emitted as ".long hook - ." below) */
	int hook_offset;
#else
	void (*hook)(struct pci_dev *dev);
#endif
};

/* Points in a device's lifetime at which fixups can be applied */
enum pci_fixup_pass {
	pci_fixup_early,	/* Before probing BARs */
	pci_fixup_header,	/* After reading configuration header */
	pci_fixup_final,	/* Final phase of device fixups */
	pci_fixup_enable,	/* pci_enable_device() time */
	pci_fixup_resume,	/* pci_device_resume() */
	pci_fixup_suspend,	/* pci_device_suspend() */
	pci_fixup_resume_early, /* pci_device_resume_early() */
	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
};
2351 
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
/*
 * Emit the fixup table entry directly from inline asm as a 16-byte-aligned
 * record (vendor, device, class, class_shift, PC-relative hook offset),
 * matching the layout of struct pci_fixup with hook_offset.
 */
#define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				    class_shift, hook)			\
	__ADDRESSABLE(hook)						\
	asm(".section "	#sec ", \"a\"				\n"	\
	    ".balign	16					\n"	\
	    ".short "	#vendor ", " #device "			\n"	\
	    ".long "	#class ", " #class_shift "		\n"	\
	    ".long "	#hook " - .				\n"	\
	    ".previous						\n");

/*
 * Clang's LTO may rename static functions in C, but has no way to
 * handle such renamings when referenced from inline asm. To work
 * around this, create global C stubs for these cases.
 */
#ifdef CONFIG_LTO_CLANG
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	void stub(struct pci_dev *dev);					\
	void stub(struct pci_dev *dev)					\
	{ 								\
		hook(dev); 						\
	}								\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, stub)
#else
#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, stub)		\
	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)
#endif

#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook)			\
	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
				  class_shift, hook, __UNIQUE_ID(hook))
#else
/* Anonymous variables would be nice... */
/* Non-prel32 fallback: a plain struct pci_fixup placed in the section. */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
				  class_shift, hook)			\
	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
		= { vendor, device, class, class_shift, hook };
#endif
2397 
/* Per-pass declaration helpers that also match on device class */
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
					 class_shift, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, class, class_shift, hook)

/* Per-pass declaration helpers matching any device class (PCI_ANY_ID) */
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
		hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2455 
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
#else
/* Without CONFIG_PCI_QUIRKS, running fixups is a no-op. */
static inline void pci_fixup_device(enum pci_fixup_pass pass,
				    struct pci_dev *dev) { }
#endif
2462 
2463 int pcim_intx(struct pci_dev *pdev, int enabled);
2464 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2465 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2466 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2467 				const char *name);
2468 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2469 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2470 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2471 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2472 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2473 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2474 				unsigned long offset, unsigned long len);
2475 
/* Bitmask of known host-bridge/chipset problems; each value is one bit. */
extern int pci_pci_problems;
#define PCIPCI_FAIL		1	/* No PCI PCI DMA */
#define PCIPCI_TRITON		2
#define PCIPCI_NATOMA		4
#define PCIPCI_VIAETBF		8
#define PCIPCI_VSFX		16
#define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
#define PCIAGP_FAIL		64	/* No PCI to AGP DMA */

extern u8 pci_dfl_cache_line_size;
extern u8 pci_cache_line_size;
2487 
2488 /* Architecture-specific versions may override these (weak) */
2489 void pcibios_disable_device(struct pci_dev *dev);
2490 void pcibios_set_master(struct pci_dev *dev);
2491 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2492 				 enum pcie_reset_state state);
2493 int pcibios_device_add(struct pci_dev *dev);
2494 void pcibios_release_device(struct pci_dev *dev);
2495 #ifdef CONFIG_PCI
2496 void pcibios_penalize_isa_irq(int irq, int active);
2497 #else
pcibios_penalize_isa_irq(int irq,int active)2498 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2499 #endif
2500 int pcibios_alloc_irq(struct pci_dev *dev);
2501 void pcibios_free_irq(struct pci_dev *dev);
2502 resource_size_t pcibios_default_alignment(void);
2503 
2504 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2505 extern int pci_create_resource_files(struct pci_dev *dev);
2506 extern void pci_remove_resource_files(struct pci_dev *dev);
2507 #endif
2508 
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
#else
/* No-ops when neither MMCONFIG nor ACPI MCFG support is built in. */
static inline void pci_mmcfg_early_init(void) { }
static inline void pci_mmcfg_late_init(void) { }
#endif
2516 
2517 int pci_ext_cfg_avail(void);
2518 
2519 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2520 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2521 
2522 #ifdef CONFIG_PCI_IOV
2523 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2524 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2525 int pci_iov_vf_id(struct pci_dev *dev);
2526 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2527 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2528 void pci_disable_sriov(struct pci_dev *dev);
2529 
2530 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2531 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2532 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2533 int pci_num_vf(struct pci_dev *dev);
2534 int pci_vfs_assigned(struct pci_dev *dev);
2535 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2536 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2537 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2538 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2539 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2540 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2541 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2542 
2543 /* Arch may override these (weak) */
2544 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2545 int pcibios_sriov_disable(struct pci_dev *pdev);
2546 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2547 #else
/* SR-IOV stubs used when CONFIG_PCI_IOV is disabled. */
static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}

static inline int pci_iov_vf_id(struct pci_dev *dev)
{
	return -ENOSYS;
}

static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
					   struct pci_driver *pf_driver)
{
	return ERR_PTR(-EINVAL);
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }

static inline int pci_iov_sysfs_link(struct pci_dev *dev,
				     struct pci_dev *virtfn, int id)
{
	return -ENODEV;
}
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
					 int id) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
/* With no VF support, counts and sizes are all zero. */
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
{ return 0; }
static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{ return 0; }
static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
{ return 0; }
#define pci_sriov_configure_simple	NULL
static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
{ return 0; }
static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
{ return -ENODEV; }
static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
{ return 0; }
static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2598 #endif
2599 
/**
 * pci_pcie_cap - get the saved PCIe capability offset
 * @dev: PCI device
 *
 * PCIe capability offset is calculated at PCI device initialization
 * time and saved in the data structure. This function returns saved
 * PCIe capability offset. Using this instead of pci_find_capability()
 * reduces unnecessary search in the PCI configuration space. If you
 * need to calculate PCIe capability offset from raw device for some
 * reasons, please use pci_find_capability() instead.
 *
 * Returns: the cached capability offset, 0 if the device is not PCIe.
 */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return dev->pcie_cap;
}
2615 
2616 /**
2617  * pci_is_pcie - check if the PCI device is PCI Express capable
2618  * @dev: PCI device
2619  *
2620  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2621  */
pci_is_pcie(struct pci_dev * dev)2622 static inline bool pci_is_pcie(struct pci_dev *dev)
2623 {
2624 	return pci_pcie_cap(dev);
2625 }
2626 
/**
 * pcie_caps_reg - get the PCIe Capabilities Register
 * @dev: PCI device
 *
 * Returns: the cached PCI Express Capabilities Register value.
 */
static inline u16 pcie_caps_reg(const struct pci_dev *dev)
{
	return dev->pcie_flags_reg;
}

/**
 * pci_pcie_type - get the PCIe device/port type
 * @dev: PCI device
 *
 * Returns: the PCI_EXP_TYPE_* value; the shift aligns the
 * PCI_EXP_FLAGS_TYPE field of the capabilities register to bit 0.
 */
static inline int pci_pcie_type(const struct pci_dev *dev)
{
	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
}
2644 
2645 /**
2646  * pcie_find_root_port - Get the PCIe root port device
2647  * @dev: PCI device
2648  *
2649  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2650  * for a given PCI/PCIe Device.
2651  */
pcie_find_root_port(struct pci_dev * dev)2652 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2653 {
2654 	while (dev) {
2655 		if (pci_is_pcie(dev) &&
2656 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2657 			return dev;
2658 		dev = pci_upstream_bridge(dev);
2659 	}
2660 
2661 	return NULL;
2662 }
2663 
/* Return true if the device is in the permanent-failure I/O error state. */
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
	/*
	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
	 * the value (e.g. inside the loop in pci_dev_wait()).
	 */
	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
}
2673 
2674 void pci_request_acs(void);
2675 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2676 bool pci_acs_path_enabled(struct pci_dev *start,
2677 			  struct pci_dev *end, u16 acs_flags);
2678 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2679 
#define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)

/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
#define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
#define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */

#define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
#define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
#define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)

/* Info field keywords found in the VPD read-only section */
#define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
#define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
#define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
#define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
#define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2697 
2698 /**
2699  * pci_vpd_alloc - Allocate buffer and read VPD into it
2700  * @dev: PCI device
2701  * @size: pointer to field where VPD length is returned
2702  *
2703  * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2704  */
2705 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2706 
2707 /**
2708  * pci_vpd_find_id_string - Locate id string in VPD
2709  * @buf: Pointer to buffered VPD data
2710  * @len: The length of the buffer area in which to search
2711  * @size: Pointer to field where length of id string is returned
2712  *
2713  * Returns the index of the id string or -ENOENT if not found.
2714  */
2715 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2716 
2717 /**
2718  * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2719  * @buf: Pointer to buffered VPD data
2720  * @len: The length of the buffer area in which to search
2721  * @kw: The keyword to search for
2722  * @size: Pointer to field where length of found keyword data is returned
2723  *
2724  * Returns the index of the information field keyword data or -ENOENT if
2725  * not found.
2726  */
2727 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2728 				 const char *kw, unsigned int *size);
2729 
2730 /**
2731  * pci_vpd_check_csum - Check VPD checksum
2732  * @buf: Pointer to buffered VPD data
2733  * @len: VPD size
2734  *
2735  * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2736  */
2737 int pci_vpd_check_csum(const void *buf, unsigned int len);
2738 
2739 /* PCI <-> OF binding helpers */
2740 #ifdef CONFIG_OF
2741 struct device_node;
2742 struct irq_domain;
2743 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2744 bool pci_host_of_has_msi_map(struct device *dev);
2745 
2746 /* Arch may override this (weak) */
2747 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2748 
2749 #else	/* CONFIG_OF */
/* Stubs for !CONFIG_OF: no OF-provided MSI domain or msi-map. */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2753 #endif  /* CONFIG_OF */
2754 
2755 static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev * pdev)2756 pci_device_to_OF_node(const struct pci_dev *pdev)
2757 {
2758 	return pdev ? pdev->dev.of_node : NULL;
2759 }
2760 
pci_bus_to_OF_node(struct pci_bus * bus)2761 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2762 {
2763 	return bus ? bus->dev.of_node : NULL;
2764 }
2765 
2766 #ifdef CONFIG_ACPI
2767 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2768 
2769 void
2770 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2771 bool pci_pr3_present(struct pci_dev *pdev);
2772 #else
/* Stubs for !CONFIG_ACPI: no ACPI MSI domain, no _PR3 information. */
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2776 #endif
2777 
2778 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2779 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2780 #else
/* Stub when CONFIG_X86 && CONFIG_ACPI are not both set: never removable. */
static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2782 #endif
2783 
2784 #ifdef CONFIG_EEH
/* Look up the eeh_dev attached to @pdev via arch-specific device data. */
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
{
	return pdev->dev.archdata.edev;
}
2789 #endif
2790 
2791 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2792 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2793 int pci_for_each_dma_alias(struct pci_dev *pdev,
2794 			   int (*fn)(struct pci_dev *pdev,
2795 				     u16 alias, void *data), void *data);
2796 
2797 /* Helper functions for operation of device flag */
static inline void pci_set_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
}
static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
{
	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
}
static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
{
	/* Compare against the full mask, not just any bit of it. */
	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
}
2810 
/**
 * pci_ari_enabled - query ARI forwarding status
 * @bus: the PCI bus
 *
 * Returns true if ARI forwarding is enabled.  When @bus has no upstream
 * bridge (bus->self is NULL) this is always false.
 */
static inline bool pci_ari_enabled(struct pci_bus *bus)
{
	return bus->self && bus->self->ari_enabled;
}
2821 
2822 /**
2823  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2824  * @pdev: PCI device to check
2825  *
2826  * Walk upwards from @pdev and check for each encountered bridge if it's part
2827  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2828  * Thunderbolt-attached.  (But rather soldered to the mainboard usually.)
2829  */
pci_is_thunderbolt_attached(struct pci_dev * pdev)2830 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2831 {
2832 	struct pci_dev *parent = pdev;
2833 
2834 	if (pdev->is_thunderbolt)
2835 		return true;
2836 
2837 	while ((parent = pci_upstream_bridge(parent)))
2838 		if (parent->is_thunderbolt)
2839 			return true;
2840 
2841 	return false;
2842 }
2843 
2844 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
2845 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
2846 #endif
2847 
2848 #include <linux/dma-mapping.h>
2849 
/* Per-severity logging wrappers: forward to dev_*() on the embedded device. */
#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
#define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)

#define pci_notice_ratelimited(pdev, fmt, arg...) \
	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)

#define pci_info_ratelimited(pdev, fmt, arg...) \
	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)

/* WARN variants that prefix driver name and device name to the message. */
#define pci_WARN(pdev, condition, fmt, arg...) \
	WARN(condition, "%s %s: " fmt, \
	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)

#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
	WARN_ONCE(condition, "%s %s: " fmt, \
		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2873 
2874 #endif /* LINUX_PCI_H */
2875