xref: /linux/include/linux/pci.h (revision 7a0892d2836e12cc61b6823f888629a3eb64e268)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  *	pci.h
4  *
5  *	PCI defines and function prototypes
6  *	Copyright 1994, Drew Eckhardt
7  *	Copyright 1997--1999 Martin Mares <mj@ucw.cz>
8  *
9  *	PCI Express ASPM defines and function prototypes
10  *	Copyright (c) 2007 Intel Corp.
11  *		Zhang Yanmin (yanmin.zhang@intel.com)
12  *		Shaohua Li (shaohua.li@intel.com)
13  *
14  *	For more information, please consult the following manuals (look at
15  *	http://www.pcisig.com/ for how to get them):
16  *
17  *	PCI BIOS Specification
18  *	PCI Local Bus Specification
19  *	PCI to PCI Bridge Specification
20  *	PCI Express Specification
21  *	PCI System Design Guide
22  */
23 #ifndef LINUX_PCI_H
24 #define LINUX_PCI_H
25 
26 #include <linux/args.h>
27 #include <linux/mod_devicetable.h>
28 
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/ioport.h>
32 #include <linux/list.h>
33 #include <linux/compiler.h>
34 #include <linux/errno.h>
35 #include <linux/kobject.h>
36 #include <linux/atomic.h>
37 #include <linux/device.h>
38 #include <linux/interrupt.h>
39 #include <linux/io.h>
40 #include <linux/resource_ext.h>
41 #include <linux/msi_api.h>
42 #include <uapi/linux/pci.h>
43 
44 #include <linux/pci_ids.h>
45 
46 #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY  | \
47 			       PCI_STATUS_SIG_SYSTEM_ERROR | \
48 			       PCI_STATUS_REC_MASTER_ABORT | \
49 			       PCI_STATUS_REC_TARGET_ABORT | \
50 			       PCI_STATUS_SIG_TARGET_ABORT | \
51 			       PCI_STATUS_PARITY)
52 
53 /* Number of reset methods used in pci_reset_fn_methods array in pci.c */
54 #define PCI_NUM_RESET_METHODS 8
55 
56 #define PCI_RESET_PROBE		true
57 #define PCI_RESET_DO_RESET	false
58 
59 /*
60  * The PCI interface treats multi-function devices as independent
61  * devices.  The slot/function address of each device is encoded
62  * in a single byte as follows:
63  *
64  *	7:3 = slot
65  *	2:0 = function
66  *
67  * PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
68  * In the interest of not exposing interfaces to user-space unnecessarily,
69  * the following kernel-only defines are being added here.
70  */
71 #define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))
72 /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
73 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
74 
75 /* pci_slot represents a physical slot */
76 struct pci_slot {
77 	struct pci_bus		*bus;		/* Bus this slot is on */
78 	struct list_head	list;		/* Node in list of slots */
79 	struct hotplug_slot	*hotplug;	/* Hotplug info (move here) */
80 	unsigned char		number;		/* PCI_SLOT(pci_dev->devfn) */
81 	struct kobject		kobj;
82 };
83 
pci_slot_name(const struct pci_slot * slot)84 static inline const char *pci_slot_name(const struct pci_slot *slot)
85 {
86 	return kobject_name(&slot->kobj);
87 }
88 
89 /* File state for mmap()s on /proc/bus/pci/X/Y */
90 enum pci_mmap_state {
91 	pci_mmap_io,
92 	pci_mmap_mem
93 };
94 
95 /* For PCI devices, the region numbers are assigned this way: */
96 enum {
97 	/* #0-5: standard PCI resources */
98 	PCI_STD_RESOURCES,
99 	PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
100 
101 	/* #6: expansion ROM resource */
102 	PCI_ROM_RESOURCE,
103 
104 	/* Device-specific resources */
105 #ifdef CONFIG_PCI_IOV
106 	PCI_IOV_RESOURCES,
107 	PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
108 #endif
109 
110 /* PCI-to-PCI (P2P) bridge windows */
111 #define PCI_BRIDGE_IO_WINDOW		(PCI_BRIDGE_RESOURCES + 0)
112 #define PCI_BRIDGE_MEM_WINDOW		(PCI_BRIDGE_RESOURCES + 1)
113 #define PCI_BRIDGE_PREF_MEM_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
114 
115 /* CardBus bridge windows */
116 #define PCI_CB_BRIDGE_IO_0_WINDOW	(PCI_BRIDGE_RESOURCES + 0)
117 #define PCI_CB_BRIDGE_IO_1_WINDOW	(PCI_BRIDGE_RESOURCES + 1)
118 #define PCI_CB_BRIDGE_MEM_0_WINDOW	(PCI_BRIDGE_RESOURCES + 2)
119 #define PCI_CB_BRIDGE_MEM_1_WINDOW	(PCI_BRIDGE_RESOURCES + 3)
120 
121 /* Total number of bridge resources for P2P and CardBus */
122 #define PCI_P2P_BRIDGE_RESOURCE_NUM	3
123 #define PCI_BRIDGE_RESOURCE_NUM		4
124 
125 	/* Resources assigned to buses behind the bridge */
126 	PCI_BRIDGE_RESOURCES,
127 	PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES +
128 				  PCI_BRIDGE_RESOURCE_NUM - 1,
129 
130 	/* Total resources associated with a PCI device */
131 	PCI_NUM_RESOURCES,
132 
133 	/* Preserve this for compatibility */
134 	DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES,
135 };
136 
137 /**
138  * enum pci_interrupt_pin - PCI INTx interrupt values
139  * @PCI_INTERRUPT_UNKNOWN: Unknown or unassigned interrupt
140  * @PCI_INTERRUPT_INTA: PCI INTA pin
141  * @PCI_INTERRUPT_INTB: PCI INTB pin
142  * @PCI_INTERRUPT_INTC: PCI INTC pin
143  * @PCI_INTERRUPT_INTD: PCI INTD pin
144  *
145  * Corresponds to values for legacy PCI INTx interrupts, as can be found in the
146  * PCI_INTERRUPT_PIN register.
147  */
148 enum pci_interrupt_pin {
149 	PCI_INTERRUPT_UNKNOWN,
150 	PCI_INTERRUPT_INTA,
151 	PCI_INTERRUPT_INTB,
152 	PCI_INTERRUPT_INTC,
153 	PCI_INTERRUPT_INTD,
154 };
155 
156 /* The number of legacy PCI INTx interrupts */
157 #define PCI_NUM_INTX	4
158 
159 /*
160  * Reading from a device that doesn't respond typically returns ~0.  A
161  * successful read from a device may also return ~0, so you need additional
162  * information to reliably identify errors.
163  */
164 #define PCI_ERROR_RESPONSE		(~0ULL)
165 #define PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
166 #define PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
167 
168 /*
169  * pci_power_t values must match the bits in the Capabilities PME_Support
170  * and Control/Status PowerState fields in the Power Management capability.
171  */
172 typedef int __bitwise pci_power_t;
173 
174 #define PCI_D0		((pci_power_t __force) 0)
175 #define PCI_D1		((pci_power_t __force) 1)
176 #define PCI_D2		((pci_power_t __force) 2)
177 #define PCI_D3hot	((pci_power_t __force) 3)
178 #define PCI_D3cold	((pci_power_t __force) 4)
179 #define PCI_UNKNOWN	((pci_power_t __force) 5)
180 #define PCI_POWER_ERROR	((pci_power_t __force) -1)
181 
182 /* Remember to update this when the list above changes! */
183 extern const char *pci_power_names[];
184 
pci_power_name(pci_power_t state)185 static inline const char *pci_power_name(pci_power_t state)
186 {
187 	return pci_power_names[1 + (__force int) state];
188 }
189 
190 /**
191  * typedef pci_channel_state_t
192  *
193  * The pci_channel state describes connectivity between the CPU and
194  * the PCI device.  If some PCI bus between here and the PCI device
195  * has crashed or locked up, this info is reflected here.
196  */
197 typedef unsigned int __bitwise pci_channel_state_t;
198 
199 enum {
200 	/* I/O channel is in normal state */
201 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
202 
203 	/* I/O to channel is blocked */
204 	pci_channel_io_frozen = (__force pci_channel_state_t) 2,
205 
206 	/* PCI card is dead */
207 	pci_channel_io_perm_failure = (__force pci_channel_state_t) 3,
208 };
209 
210 typedef unsigned int __bitwise pcie_reset_state_t;
211 
212 enum pcie_reset_state {
213 	/* Reset is NOT asserted (Use to deassert reset) */
214 	pcie_deassert_reset = (__force pcie_reset_state_t) 1,
215 
216 	/* Use #PERST to reset PCIe device */
217 	pcie_warm_reset = (__force pcie_reset_state_t) 2,
218 
219 	/* Use PCIe Hot Reset to reset device */
220 	pcie_hot_reset = (__force pcie_reset_state_t) 3
221 };
222 
223 typedef unsigned short __bitwise pci_dev_flags_t;
224 enum pci_dev_flags {
225 	/* INTX_DISABLE in PCI_COMMAND register disables MSI too */
226 	PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0),
227 	/* Device configuration is irrevocably lost if disabled into D3 */
228 	PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1),
229 	/* Provide indication device is assigned by a Virtual Machine Manager */
230 	PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2),
231 	/* Flag for quirk use to store if quirk-specific ACS is enabled */
232 	PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3),
233 	/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
234 	PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
235 	/* Do not use bus resets for device */
236 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
237 	/* Do not use PM reset even if device advertises NoSoftRst- */
238 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
239 	/* Get VPD from function 0 VPD */
240 	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
241 	/* A non-root bridge where translation occurs, stop alias search here */
242 	PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
243 	/* Do not use FLR even if device advertises PCI_AF_CAP */
244 	PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
245 	/* Don't use Relaxed Ordering for TLPs directed at this device */
246 	PCI_DEV_FLAGS_NO_RELAXED_ORDERING = (__force pci_dev_flags_t) (1 << 11),
247 	/* Device does honor MSI masking despite saying otherwise */
248 	PCI_DEV_FLAGS_HAS_MSI_MASKING = (__force pci_dev_flags_t) (1 << 12),
249 	/* Device requires write to PCI_MSIX_ENTRY_DATA before any MSIX reads */
250 	PCI_DEV_FLAGS_MSIX_TOUCH_ENTRY_DATA_FIRST = (__force pci_dev_flags_t) (1 << 13),
251 };
252 
253 enum pci_irq_reroute_variant {
254 	INTEL_IRQ_REROUTE_VARIANT = 1,
255 	MAX_IRQ_REROUTE_VARIANTS = 3
256 };
257 
258 typedef unsigned short __bitwise pci_bus_flags_t;
259 enum pci_bus_flags {
260 	PCI_BUS_FLAGS_NO_MSI	= (__force pci_bus_flags_t) 1,
261 	PCI_BUS_FLAGS_NO_MMRBC	= (__force pci_bus_flags_t) 2,
262 	PCI_BUS_FLAGS_NO_AERSID	= (__force pci_bus_flags_t) 4,
263 	PCI_BUS_FLAGS_NO_EXTCFG	= (__force pci_bus_flags_t) 8,
264 };
265 
266 /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */
267 enum pcie_link_width {
268 	PCIE_LNK_WIDTH_RESRV	= 0x00,
269 	PCIE_LNK_X1		= 0x01,
270 	PCIE_LNK_X2		= 0x02,
271 	PCIE_LNK_X4		= 0x04,
272 	PCIE_LNK_X8		= 0x08,
273 	PCIE_LNK_X12		= 0x0c,
274 	PCIE_LNK_X16		= 0x10,
275 	PCIE_LNK_X32		= 0x20,
276 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
277 };
278 
279 /* See matching string table in pci_speed_string() */
280 enum pci_bus_speed {
281 	PCI_SPEED_33MHz			= 0x00,
282 	PCI_SPEED_66MHz			= 0x01,
283 	PCI_SPEED_66MHz_PCIX		= 0x02,
284 	PCI_SPEED_100MHz_PCIX		= 0x03,
285 	PCI_SPEED_133MHz_PCIX		= 0x04,
286 	PCI_SPEED_66MHz_PCIX_ECC	= 0x05,
287 	PCI_SPEED_100MHz_PCIX_ECC	= 0x06,
288 	PCI_SPEED_133MHz_PCIX_ECC	= 0x07,
289 	PCI_SPEED_66MHz_PCIX_266	= 0x09,
290 	PCI_SPEED_100MHz_PCIX_266	= 0x0a,
291 	PCI_SPEED_133MHz_PCIX_266	= 0x0b,
292 	AGP_UNKNOWN			= 0x0c,
293 	AGP_1X				= 0x0d,
294 	AGP_2X				= 0x0e,
295 	AGP_4X				= 0x0f,
296 	AGP_8X				= 0x10,
297 	PCI_SPEED_66MHz_PCIX_533	= 0x11,
298 	PCI_SPEED_100MHz_PCIX_533	= 0x12,
299 	PCI_SPEED_133MHz_PCIX_533	= 0x13,
300 	PCIE_SPEED_2_5GT		= 0x14,
301 	PCIE_SPEED_5_0GT		= 0x15,
302 	PCIE_SPEED_8_0GT		= 0x16,
303 	PCIE_SPEED_16_0GT		= 0x17,
304 	PCIE_SPEED_32_0GT		= 0x18,
305 	PCIE_SPEED_64_0GT		= 0x19,
306 	PCI_SPEED_UNKNOWN		= 0xff,
307 };
308 
309 enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
310 enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
311 
312 struct pci_vpd {
313 	struct mutex	lock;
314 	unsigned int	len;
315 	u8		cap;
316 };
317 
318 struct irq_affinity;
319 struct pcie_bwctrl_data;
320 struct pcie_link_state;
321 struct pci_sriov;
322 struct pci_p2pdma;
323 struct rcec_ea;
324 
325 /* struct pci_dev - describes a PCI device
326  *
327  * @supported_speeds:	PCIe Supported Link Speeds Vector (+ reserved 0 at
328  *			LSB). 0 when the supported speeds cannot be
329  *			determined (e.g., for Root Complex Integrated
330  *			Endpoints without the relevant Capability
331  *			Registers).
332  * @is_hotplug_bridge:	Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable,
333  *			Conventional PCI Hot-Plug, ACPI slot).
334  *			Such bridges are allocated additional MMIO and bus
335  *			number resources to allow for hierarchy expansion.
336  * @is_pciehp:		PCIe Hot-Plug Capable bridge.
337  */
338 struct pci_dev {
339 	struct list_head bus_list;	/* Node in per-bus list */
340 	struct pci_bus	*bus;		/* Bus this device is on */
341 	struct pci_bus	*subordinate;	/* Bus this device bridges to */
342 
343 	void		*sysdata;	/* Hook for sys-specific extension */
344 	struct proc_dir_entry *procent;	/* Device entry in /proc/bus/pci */
345 	struct pci_slot	*slot;		/* Physical slot this device is in */
346 
347 	unsigned int	devfn;		/* Encoded device & function index */
348 	unsigned short	vendor;
349 	unsigned short	device;
350 	unsigned short	subsystem_vendor;
351 	unsigned short	subsystem_device;
352 	unsigned int	class;		/* 3 bytes: (base,sub,prog-if) */
353 	u8		revision;	/* PCI revision, low byte of class word */
354 	u8		hdr_type;	/* PCI header type (`multi' flag masked out) */
355 #ifdef CONFIG_PCIEAER
356 	u16		aer_cap;	/* AER capability offset */
357 	struct aer_info	*aer_info;	/* AER info for this device */
358 #endif
359 #ifdef CONFIG_PCIEPORTBUS
360 	struct rcec_ea	*rcec_ea;	/* RCEC cached endpoint association */
361 	struct pci_dev  *rcec;          /* Associated RCEC device */
362 #endif
363 	u32		devcap;		/* PCIe Device Capabilities */
364 	u16		rebar_cap;	/* Resizable BAR capability offset */
365 	u8		pcie_cap;	/* PCIe capability offset */
366 	u8		msi_cap;	/* MSI capability offset */
367 	u8		msix_cap;	/* MSI-X capability offset */
368 	u8		pcie_mpss:3;	/* PCIe Max Payload Size Supported */
369 	u8		rom_base_reg;	/* Config register controlling ROM */
370 	u8		pin;		/* Interrupt pin this device uses */
371 	u16		pcie_flags_reg;	/* Cached PCIe Capabilities Register */
372 	unsigned long	*dma_alias_mask;/* Mask of enabled devfn aliases */
373 
374 	struct pci_driver *driver;	/* Driver bound to this device */
375 	u64		dma_mask;	/* Mask of the bits of bus address this
376 					   device implements.  Normally this is
377 					   0xffffffff.  You only need to change
378 					   this if your device has broken DMA
379 					   or supports 64-bit transfers.  */
380 
381 	struct device_dma_parameters dma_parms;
382 
383 	pci_power_t	current_state;	/* Current operating state. In ACPI,
384 					   this is D0-D3, D0 being fully
385 					   functional, and D3 being off. */
386 	u8		pm_cap;		/* PM capability offset */
387 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
388 					   can be generated */
389 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
390 	unsigned int	pinned:1;	/* Whether this dev is pinned */
391 	unsigned int	config_rrs_sv:1; /* Config RRS software visibility */
392 	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
393 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
394 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
395 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
396 	unsigned int	no_d3cold:1;	/* D3cold is forbidden */
397 	unsigned int	bridge_d3:1;	/* Allow D3 for bridge */
398 	unsigned int	d3cold_allowed:1;	/* D3cold is allowed by user */
399 	unsigned int	mmio_always_on:1;	/* Disallow turning off io/mem
400 						   decoding during BAR sizing */
401 	unsigned int	wakeup_prepared:1;
402 	unsigned int	skip_bus_pm:1;	/* Internal: Skip bus-level PM */
403 	unsigned int	ignore_hotplug:1;	/* Ignore hotplug events */
404 	unsigned int	hotplug_user_indicators:1; /* SlotCtl indicators
405 						      controlled exclusively by
406 						      user sysfs */
407 	unsigned int	clear_retrain_link:1;	/* Need to clear Retrain Link
408 						   bit manually */
409 	unsigned int	d3hot_delay;	/* D3hot->D0 transition time in ms */
410 	unsigned int	d3cold_delay;	/* D3cold->D0 transition time in ms */
411 
412 	u16		l1ss;		/* L1SS Capability pointer */
413 #ifdef CONFIG_PCIEASPM
414 	struct pcie_link_state	*link_state;	/* ASPM link state */
415 	unsigned int	aspm_l0s_support:1;	/* ASPM L0s support */
416 	unsigned int	aspm_l1_support:1;	/* ASPM L1 support */
417 	unsigned int	ltr_path:1;	/* Latency Tolerance Reporting
418 					   supported from root to here */
419 #endif
420 	unsigned int	pasid_no_tlp:1;		/* PASID works without TLP Prefix */
421 	unsigned int	eetlp_prefix_max:3;	/* Max # of End-End TLP Prefixes, 0=not supported */
422 
423 	pci_channel_state_t error_state;	/* Current connectivity state */
424 	struct device	dev;			/* Generic device interface */
425 
426 	int		cfg_size;		/* Size of config space */
427 
428 	/*
429 	 * Instead of touching interrupt line and base address registers
430 	 * directly, use the values stored here. They might be different!
431 	 */
432 	unsigned int	irq;
433 	struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
434 	struct resource driver_exclusive_resource;	 /* driver exclusive resource ranges */
435 
436 	unsigned int	transparent:1;		/* Subtractive decode bridge */
437 	unsigned int	io_window:1;		/* Bridge has I/O window */
438 	unsigned int	pref_window:1;		/* Bridge has pref mem window */
439 	unsigned int	pref_64_window:1;	/* Pref mem window is 64-bit */
440 	unsigned int	multifunction:1;	/* Multi-function device */
441 
442 	unsigned int	is_busmaster:1;		/* Is busmaster */
443 	unsigned int	no_msi:1;		/* May not use MSI */
444 	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
445 	unsigned int	block_cfg_access:1;	/* Config space access blocked */
446 	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
447 	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
448 	unsigned int	msi_enabled:1;
449 	unsigned int	msix_enabled:1;
450 	unsigned int	ari_enabled:1;		/* ARI forwarding */
451 	unsigned int	ats_enabled:1;		/* Address Translation Svc */
452 	unsigned int	pasid_enabled:1;	/* Process Address Space ID */
453 	unsigned int	pri_enabled:1;		/* Page Request Interface */
454 	unsigned int	tph_enabled:1;		/* TLP Processing Hints */
455 	unsigned int	is_managed:1;		/* Managed via devres */
456 	unsigned int	is_msi_managed:1;	/* MSI release via devres installed */
457 	unsigned int	needs_freset:1;		/* Requires fundamental reset */
458 	unsigned int	state_saved:1;
459 	unsigned int	is_physfn:1;
460 	unsigned int	is_virtfn:1;
461 	unsigned int	is_hotplug_bridge:1;
462 	unsigned int	is_pciehp:1;
463 	unsigned int	shpc_managed:1;		/* SHPC owned by shpchp */
464 	unsigned int	is_thunderbolt:1;	/* Thunderbolt controller */
465 	/*
466 	 * Devices marked being untrusted are the ones that can potentially
467 	 * execute DMA attacks and similar. They are typically connected
468 	 * through external ports such as Thunderbolt but not limited to
469 	 * that. When an IOMMU is enabled they should be getting full
470 	 * mappings to make sure they cannot access arbitrary memory.
471 	 */
472 	unsigned int	untrusted:1;
473 	/*
474 	 * Info from the platform, e.g., ACPI or device tree, may mark a
475 	 * device as "external-facing".  An external-facing device is
476 	 * itself internal but devices downstream from it are external.
477 	 */
478 	unsigned int	external_facing:1;
479 	unsigned int	broken_intx_masking:1;	/* INTx masking can't be used */
480 	unsigned int	io_window_1k:1;		/* Intel bridge 1K I/O windows */
481 	unsigned int	irq_managed:1;
482 	unsigned int	non_compliant_bars:1;	/* Broken BARs; ignore them */
483 	unsigned int	is_probed:1;		/* Device probing in progress */
484 	unsigned int	link_active_reporting:1;/* Device capable of reporting link active */
485 	unsigned int	no_vf_scan:1;		/* Don't scan for VFs after IOV enablement */
486 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
487 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
488 	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
489 	unsigned int	non_mappable_bars:1;	/* BARs can't be mapped to user-space  */
490 	pci_dev_flags_t dev_flags;
491 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
492 
493 	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
494 	u32		saved_config_space[16]; /* Config space saved at suspend time */
495 	struct hlist_head saved_cap_space;
496 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
497 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
498 
499 #ifdef CONFIG_HOTPLUG_PCI_PCIE
500 	unsigned int	broken_cmd_compl:1;	/* No compl for some cmds */
501 #endif
502 #ifdef CONFIG_PCIE_PTM
503 	u16		ptm_cap;		/* PTM Capability */
504 	unsigned int	ptm_root:1;
505 	unsigned int	ptm_enabled:1;
506 	u8		ptm_granularity;
507 #endif
508 #ifdef CONFIG_PCI_MSI
509 	void __iomem	*msix_base;
510 	raw_spinlock_t	msi_lock;
511 #endif
512 	struct pci_vpd	vpd;
513 #ifdef CONFIG_PCIE_DPC
514 	u16		dpc_cap;
515 	unsigned int	dpc_rp_extensions:1;
516 	u8		dpc_rp_log_size;
517 #endif
518 	struct pcie_bwctrl_data		*link_bwctrl;
519 #ifdef CONFIG_PCI_ATS
520 	union {
521 		struct pci_sriov	*sriov;		/* PF: SR-IOV info */
522 		struct pci_dev		*physfn;	/* VF: related PF */
523 	};
524 	u16		ats_cap;	/* ATS Capability offset */
525 	u8		ats_stu;	/* ATS Smallest Translation Unit */
526 #endif
527 #ifdef CONFIG_PCI_PRI
528 	u16		pri_cap;	/* PRI Capability offset */
529 	u32		pri_reqs_alloc; /* Number of PRI requests allocated */
530 	unsigned int	pasid_required:1; /* PRG Response PASID Required */
531 #endif
532 #ifdef CONFIG_PCI_PASID
533 	u16		pasid_cap;	/* PASID Capability offset */
534 	u16		pasid_features;
535 #endif
536 #ifdef CONFIG_PCI_P2PDMA
537 	struct pci_p2pdma __rcu *p2pdma;
538 #endif
539 #ifdef CONFIG_PCI_DOE
540 	struct xarray	doe_mbs;	/* Data Object Exchange mailboxes */
541 #endif
542 #ifdef CONFIG_PCI_NPEM
543 	struct npem	*npem;		/* Native PCIe Enclosure Management */
544 #endif
545 	u16		acs_cap;	/* ACS Capability offset */
546 	u8		supported_speeds; /* Supported Link Speeds Vector */
547 	phys_addr_t	rom;		/* Physical address if not from BAR */
548 	size_t		romlen;		/* Length if not from BAR */
549 	/*
550 	 * Driver name to force a match.  Do not set directly, because core
551 	 * frees it.  Use driver_set_override() to set or clear it.
552 	 */
553 	const char	*driver_override;
554 
555 	unsigned long	priv_flags;	/* Private flags for the PCI driver */
556 
557 	/* These methods index pci_reset_fn_methods[] */
558 	u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */
559 
560 #ifdef CONFIG_PCIE_TPH
561 	u16		tph_cap;	/* TPH capability offset */
562 	u8		tph_mode;	/* TPH mode */
563 	u8		tph_req_type;	/* TPH requester type */
564 #endif
565 };
566 
pci_physfn(struct pci_dev * dev)567 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
568 {
569 #ifdef CONFIG_PCI_IOV
570 	if (dev->is_virtfn)
571 		dev = dev->physfn;
572 #endif
573 	return dev;
574 }
575 
576 struct pci_dev *pci_alloc_dev(struct pci_bus *bus);
577 
578 #define	to_pci_dev(n) container_of(n, struct pci_dev, dev)
579 #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
580 
pci_channel_offline(struct pci_dev * pdev)581 static inline int pci_channel_offline(struct pci_dev *pdev)
582 {
583 	return (pdev->error_state != pci_channel_io_normal);
584 }
585 
586 /*
587  * Currently in ACPI spec, for each PCI host bridge, PCI Segment
588  * Group number is limited to a 16-bit value, therefore (int)-1 is
589  * not a valid PCI domain number, and can be used as a sentinel
590  * value indicating ->domain_nr is not set by the driver (and
591  * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with
592  * pci_bus_find_domain_nr()).
593  */
594 #define PCI_DOMAIN_NR_NOT_SET (-1)
595 
596 struct pci_host_bridge {
597 	struct device	dev;
598 	struct pci_bus	*bus;		/* Root bus */
599 	struct pci_ops	*ops;
600 	struct pci_ops	*child_ops;
601 	void		*sysdata;
602 	int		busnr;
603 	int		domain_nr;
604 	struct list_head windows;	/* resource_entry */
605 	struct list_head dma_ranges;	/* dma ranges resource list */
606 	u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
607 	int (*map_irq)(const struct pci_dev *, u8, u8);
608 	void (*release_fn)(struct pci_host_bridge *);
609 	int (*enable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
610 	void (*disable_device)(struct pci_host_bridge *bridge, struct pci_dev *dev);
611 	void		*release_data;
612 	unsigned int	ignore_reset_delay:1;	/* For entire hierarchy */
613 	unsigned int	no_ext_tags:1;		/* No Extended Tags */
614 	unsigned int	no_inc_mrrs:1;		/* No Increase MRRS */
615 	unsigned int	native_aer:1;		/* OS may use PCIe AER */
616 	unsigned int	native_pcie_hotplug:1;	/* OS may use PCIe hotplug */
617 	unsigned int	native_shpc_hotplug:1;	/* OS may use SHPC hotplug */
618 	unsigned int	native_pme:1;		/* OS may use PCIe PME */
619 	unsigned int	native_ltr:1;		/* OS may use PCIe LTR */
620 	unsigned int	native_dpc:1;		/* OS may use PCIe DPC */
621 	unsigned int	native_cxl_error:1;	/* OS may use CXL RAS/Events */
622 	unsigned int	preserve_config:1;	/* Preserve FW resource setup */
623 	unsigned int	size_windows:1;		/* Enable root bus sizing */
624 	unsigned int	msi_domain:1;		/* Bridge wants MSI domain */
625 
626 	/* Resource alignment requirements */
627 	resource_size_t (*align_resource)(struct pci_dev *dev,
628 			const struct resource *res,
629 			resource_size_t start,
630 			resource_size_t size,
631 			resource_size_t align);
632 	unsigned long	private[] ____cacheline_aligned;
633 };
634 
635 #define	to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev)
636 
pci_host_bridge_priv(struct pci_host_bridge * bridge)637 static inline void *pci_host_bridge_priv(struct pci_host_bridge *bridge)
638 {
639 	return (void *)bridge->private;
640 }
641 
pci_host_bridge_from_priv(void * priv)642 static inline struct pci_host_bridge *pci_host_bridge_from_priv(void *priv)
643 {
644 	return container_of(priv, struct pci_host_bridge, private);
645 }
646 
647 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv);
648 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
649 						   size_t priv);
650 void pci_free_host_bridge(struct pci_host_bridge *bridge);
651 struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus);
652 
653 void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
654 				 void (*release_fn)(struct pci_host_bridge *),
655 				 void *release_data);
656 
657 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge);
658 
659 #define PCI_REGION_FLAG_MASK	0x0fU	/* These bits of resource flags tell us the PCI region flags */
660 
661 struct pci_bus {
662 	struct list_head node;		/* Node in list of buses */
663 	struct pci_bus	*parent;	/* Parent bus this bridge is on */
664 	struct list_head children;	/* List of child buses */
665 	struct list_head devices;	/* List of devices on this bus */
666 	struct pci_dev	*self;		/* Bridge device as seen by parent */
667 	struct list_head slots;		/* List of slots on this bus;
668 					   protected by pci_slot_mutex */
669 	struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
670 	struct list_head resources;	/* Address space routed to this bus */
671 	struct resource busn_res;	/* Bus numbers routed to this bus */
672 
673 	struct pci_ops	*ops;		/* Configuration access functions */
674 	void		*sysdata;	/* Hook for sys-specific extension */
675 	struct proc_dir_entry *procdir;	/* Directory entry in /proc/bus/pci */
676 
677 	unsigned char	number;		/* Bus number */
678 	unsigned char	primary;	/* Number of primary bridge */
679 	unsigned char	max_bus_speed;	/* enum pci_bus_speed */
680 	unsigned char	cur_bus_speed;	/* enum pci_bus_speed */
681 #ifdef CONFIG_PCI_DOMAINS_GENERIC
682 	int		domain_nr;
683 #endif
684 
685 	char		name[48];
686 
687 	unsigned short	bridge_ctl;	/* Manage NO_ISA/FBB/et al behaviors */
688 	pci_bus_flags_t bus_flags;	/* Inherited by child buses */
689 	struct device		*bridge;
690 	struct device		dev;
691 	struct bin_attribute	*legacy_io;	/* Legacy I/O for this bus */
692 	struct bin_attribute	*legacy_mem;	/* Legacy mem */
693 	unsigned int		is_added:1;
694 	unsigned int		unsafe_warn:1;	/* warned about RW1C config write */
695 	unsigned int		flit_mode:1;	/* Link in Flit mode */
696 };
697 
698 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
699 
pci_dev_id(struct pci_dev * dev)700 static inline u16 pci_dev_id(struct pci_dev *dev)
701 {
702 	return PCI_DEVID(dev->bus->number, dev->devfn);
703 }
704 
705 /*
706  * Returns true if the PCI bus is root (behind host-PCI bridge),
707  * false otherwise
708  *
709  * Some code assumes that "bus->self == NULL" means that bus is a root bus.
710  * This is incorrect because "virtual" buses added for SR-IOV (via
711  * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
712  */
pci_is_root_bus(struct pci_bus * pbus)713 static inline bool pci_is_root_bus(struct pci_bus *pbus)
714 {
715 	return !(pbus->parent);
716 }
717 
718 /**
719  * pci_is_bridge - check if the PCI device is a bridge
720  * @dev: PCI device
721  *
722  * Return true if the PCI device is bridge whether it has subordinate
723  * or not.
724  */
pci_is_bridge(struct pci_dev * dev)725 static inline bool pci_is_bridge(struct pci_dev *dev)
726 {
727 	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
728 		dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
729 }
730 
731 /**
732  * pci_is_vga - check if the PCI device is a VGA device
733  * @pdev: PCI device
734  *
735  * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
736  * VGA Base Class and Sub-Classes:
737  *
738  *   03 00  PCI_CLASS_DISPLAY_VGA      VGA-compatible or 8514-compatible
739  *   00 01  PCI_CLASS_NOT_DEFINED_VGA  VGA-compatible (before Class Code)
740  *
741  * Return true if the PCI device is a VGA device and uses the legacy VGA
742  * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
743  * aliases).
744  */
pci_is_vga(struct pci_dev * pdev)745 static inline bool pci_is_vga(struct pci_dev *pdev)
746 {
747 	if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
748 		return true;
749 
750 	if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
751 		return true;
752 
753 	return false;
754 }
755 
756 /**
757  * pci_is_display - check if the PCI device is a display controller
758  * @pdev: PCI device
759  *
760  * Determine whether the given PCI device corresponds to a display
761  * controller. Display controllers are typically used for graphical output
762  * and are identified based on their class code.
763  *
764  * Return: true if the PCI device is a display controller, false otherwise.
765  */
pci_is_display(struct pci_dev * pdev)766 static inline bool pci_is_display(struct pci_dev *pdev)
767 {
768 	return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY;
769 }
770 
771 #define for_each_pci_bridge(dev, bus)				\
772 	list_for_each_entry(dev, &bus->devices, bus_list)	\
773 		if (!pci_is_bridge(dev)) {} else
774 
pci_upstream_bridge(struct pci_dev * dev)775 static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev)
776 {
777 	dev = pci_physfn(dev);
778 	if (pci_is_root_bus(dev->bus))
779 		return NULL;
780 
781 	return dev->bus->self;
782 }
783 
784 #ifdef CONFIG_PCI_MSI
pci_dev_msi_enabled(struct pci_dev * pci_dev)785 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
786 {
787 	return pci_dev->msi_enabled || pci_dev->msix_enabled;
788 }
789 #else
pci_dev_msi_enabled(struct pci_dev * pci_dev)790 static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; }
791 #endif
792 
793 /* Error values that may be returned by PCI functions */
794 #define PCIBIOS_SUCCESSFUL		0x00
795 #define PCIBIOS_FUNC_NOT_SUPPORTED	0x81
796 #define PCIBIOS_BAD_VENDOR_ID		0x83
797 #define PCIBIOS_DEVICE_NOT_FOUND	0x86
798 #define PCIBIOS_BAD_REGISTER_NUMBER	0x87
799 #define PCIBIOS_SET_FAILED		0x88
800 #define PCIBIOS_BUFFER_TOO_SMALL	0x89
801 
802 /* Translate above to generic errno for passing back through non-PCI code */
pcibios_err_to_errno(int err)803 static inline int pcibios_err_to_errno(int err)
804 {
805 	if (err <= PCIBIOS_SUCCESSFUL)
806 		return err; /* Assume already errno */
807 
808 	switch (err) {
809 	case PCIBIOS_FUNC_NOT_SUPPORTED:
810 		return -ENOENT;
811 	case PCIBIOS_BAD_VENDOR_ID:
812 		return -ENOTTY;
813 	case PCIBIOS_DEVICE_NOT_FOUND:
814 		return -ENODEV;
815 	case PCIBIOS_BAD_REGISTER_NUMBER:
816 		return -EFAULT;
817 	case PCIBIOS_SET_FAILED:
818 		return -EIO;
819 	case PCIBIOS_BUFFER_TOO_SMALL:
820 		return -ENOSPC;
821 	}
822 
823 	return -ERANGE;
824 }
825 
826 /* Low-level architecture-dependent routines */
827 
828 struct pci_ops {
829 	int (*add_bus)(struct pci_bus *bus);
830 	void (*remove_bus)(struct pci_bus *bus);
831 	void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where);
832 	int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
833 	int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
834 };
835 
836 /*
837  * ACPI needs to be able to access PCI config space before we've done a
838  * PCI bus scan and created pci_bus structures.
839  */
840 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
841 		 int reg, int len, u32 *val);
842 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
843 		  int reg, int len, u32 val);
844 
845 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
846 typedef u64 pci_bus_addr_t;
847 #else
848 typedef u32 pci_bus_addr_t;
849 #endif
850 
851 struct pci_bus_region {
852 	pci_bus_addr_t	start;
853 	pci_bus_addr_t	end;
854 };
855 
856 struct pci_dynids {
857 	spinlock_t		lock;	/* Protects list, index */
858 	struct list_head	list;	/* For IDs added at runtime */
859 };
860 
861 
862 /*
863  * PCI Error Recovery System (PCI-ERS).  If a PCI device driver provides
864  * a set of callbacks in struct pci_error_handlers, that device driver
865  * will be notified of PCI bus errors, and will be driven to recovery
866  * when an error occurs.
867  */
868 
869 typedef unsigned int __bitwise pci_ers_result_t;
870 
871 enum pci_ers_result {
872 	/* No result/none/not supported in device driver */
873 	PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1,
874 
875 	/* Device driver can recover without slot reset */
876 	PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2,
877 
878 	/* Device driver wants slot to be reset */
879 	PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3,
880 
881 	/* Device has completely failed, is unrecoverable */
882 	PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4,
883 
884 	/* Device driver is fully recovered and operational */
885 	PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5,
886 
887 	/* No AER capabilities registered for the driver */
888 	PCI_ERS_RESULT_NO_AER_DRIVER = (__force pci_ers_result_t) 6,
889 };
890 
891 /* PCI bus error event callbacks */
892 struct pci_error_handlers {
893 	/* PCI bus error detected on this device */
894 	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
895 					   pci_channel_state_t error);
896 
897 	/* MMIO has been re-enabled, but not DMA */
898 	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
899 
900 	/* PCI slot has been reset */
901 	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
902 
903 	/* PCI function reset prepare or completed */
904 	void (*reset_prepare)(struct pci_dev *dev);
905 	void (*reset_done)(struct pci_dev *dev);
906 
907 	/* Device driver may resume normal operations */
908 	void (*resume)(struct pci_dev *dev);
909 
910 	/* Allow device driver to record more details of a correctable error */
911 	void (*cor_error_detected)(struct pci_dev *dev);
912 };
913 
914 
915 struct module;
916 
917 /**
918  * struct pci_driver - PCI driver structure
919  * @name:	Driver name.
920  * @id_table:	Pointer to table of device IDs the driver is
921  *		interested in.  Most drivers should export this
922  *		table using MODULE_DEVICE_TABLE(pci,...).
923  * @probe:	This probing function gets called (during execution
924  *		of pci_register_driver() for already existing
925  *		devices or later if a new device gets inserted) for
926  *		all PCI devices which match the ID table and are not
927  *		"owned" by the other drivers yet. This function gets
928  *		passed a "struct pci_dev \*" for each device whose
929  *		entry in the ID table matches the device. The probe
930  *		function returns zero when the driver chooses to
931  *		take "ownership" of the device or an error code
932  *		(negative number) otherwise.
933  *		The probe function always gets called from process
934  *		context, so it can sleep.
935  * @remove:	The remove() function gets called whenever a device
936  *		being handled by this driver is removed (either during
937  *		deregistration of the driver or when it's manually
938  *		pulled out of a hot-pluggable slot).
939  *		The remove function always gets called from process
940  *		context, so it can sleep.
941  * @suspend:	Put device into low power state.
942  * @resume:	Wake device from low power state.
943  *		(Please see Documentation/power/pci.rst for descriptions
944  *		of PCI Power Management and the related functions.)
945  * @shutdown:	Hook into reboot_notifier_list (kernel/sys.c).
946  *		Intended to stop any idling DMA operations.
947  *		Useful for enabling wake-on-lan (NIC) or changing
948  *		the power state of a device before reboot.
949  *		e.g. drivers/net/e100.c.
950  * @sriov_configure: Optional driver callback to allow configuration of
951  *		number of VFs to enable via sysfs "sriov_numvfs" file.
952  * @sriov_set_msix_vec_count: PF Driver callback to change number of MSI-X
953  *              vectors on a VF. Triggered via sysfs "sriov_vf_msix_count".
954  *              This will change MSI-X Table Size in the VF Message Control
955  *              registers.
956  * @sriov_get_vf_total_msix: PF driver callback to get the total number of
957  *              MSI-X vectors available for distribution to the VFs.
958  * @err_handler: See Documentation/PCI/pci-error-recovery.rst
959  * @groups:	Sysfs attribute groups.
960  * @dev_groups: Attributes attached to the device that will be
961  *              created once it is bound to the driver.
962  * @driver:	Driver model structure.
963  * @dynids:	List of dynamically added device IDs.
964  * @driver_managed_dma: Device driver doesn't use kernel DMA API for DMA.
965  *		For most device drivers, no need to care about this flag
966  *		as long as all DMAs are handled through the kernel DMA API.
967  *		For some special ones, for example VFIO drivers, they know
968  *		how to manage the DMA themselves and set this flag so that
969  *		the IOMMU layer will allow them to setup and manage their
970  *		own I/O address space.
971  */
972 struct pci_driver {
973 	const char		*name;
974 	const struct pci_device_id *id_table;	/* Must be non-NULL for probe to be called */
975 	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
976 	void (*remove)(struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
977 	int  (*suspend)(struct pci_dev *dev, pm_message_t state);	/* Device suspended */
978 	int  (*resume)(struct pci_dev *dev);	/* Device woken up */
979 	void (*shutdown)(struct pci_dev *dev);
980 	int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
981 	int  (*sriov_set_msix_vec_count)(struct pci_dev *vf, int msix_vec_count); /* On PF */
982 	u32  (*sriov_get_vf_total_msix)(struct pci_dev *pf);
983 	const struct pci_error_handlers *err_handler;
984 	const struct attribute_group **groups;
985 	const struct attribute_group **dev_groups;
986 	struct device_driver	driver;
987 	struct pci_dynids	dynids;
988 	bool driver_managed_dma;
989 };
990 
991 #define to_pci_driver(__drv)	\
992 	( __drv ? container_of_const(__drv, struct pci_driver, driver) : NULL )
993 
994 /**
995  * PCI_DEVICE - macro used to describe a specific PCI device
996  * @vend: the 16 bit PCI Vendor ID
997  * @dev: the 16 bit PCI Device ID
998  *
999  * This macro is used to create a struct pci_device_id that matches a
1000  * specific device.  The subvendor and subdevice fields will be set to
1001  * PCI_ANY_ID.
1002  */
1003 #define PCI_DEVICE(vend,dev) \
1004 	.vendor = (vend), .device = (dev), \
1005 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1006 
1007 /**
1008  * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
1009  *                              override_only flags.
1010  * @vend: the 16 bit PCI Vendor ID
1011  * @dev: the 16 bit PCI Device ID
1012  * @driver_override: the 32 bit PCI Device override_only
1013  *
1014  * This macro is used to create a struct pci_device_id that matches only a
1015  * driver_override device. The subvendor and subdevice fields will be set to
1016  * PCI_ANY_ID.
1017  */
1018 #define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
1019 	.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
1020 	.subdevice = PCI_ANY_ID, .override_only = (driver_override)
1021 
1022 /**
1023  * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
1024  *                                   "driver_override" PCI device.
1025  * @vend: the 16 bit PCI Vendor ID
1026  * @dev: the 16 bit PCI Device ID
1027  *
1028  * This macro is used to create a struct pci_device_id that matches a
1029  * specific device. The subvendor and subdevice fields will be set to
1030  * PCI_ANY_ID and the driver_override will be set to
1031  * PCI_ID_F_VFIO_DRIVER_OVERRIDE.
1032  */
1033 #define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
1034 	PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)
1035 
1036 /**
1037  * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
1038  * @vend: the 16 bit PCI Vendor ID
1039  * @dev: the 16 bit PCI Device ID
1040  * @subvend: the 16 bit PCI Subvendor ID
1041  * @subdev: the 16 bit PCI Subdevice ID
1042  *
1043  * This macro is used to create a struct pci_device_id that matches a
1044  * specific device with subsystem information.
1045  */
1046 #define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \
1047 	.vendor = (vend), .device = (dev), \
1048 	.subvendor = (subvend), .subdevice = (subdev)
1049 
1050 /**
1051  * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class
1052  * @dev_class: the class, subclass, prog-if triple for this device
1053  * @dev_class_mask: the class mask for this device
1054  *
1055  * This macro is used to create a struct pci_device_id that matches a
1056  * specific PCI class.  The vendor, device, subvendor, and subdevice
1057  * fields will be set to PCI_ANY_ID.
1058  */
1059 #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \
1060 	.class = (dev_class), .class_mask = (dev_class_mask), \
1061 	.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
1062 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
1063 
1064 /**
1065  * PCI_VDEVICE - macro used to describe a specific PCI device in short form
1066  * @vend: the vendor name
1067  * @dev: the 16 bit PCI Device ID
1068  *
1069  * This macro is used to create a struct pci_device_id that matches a
1070  * specific PCI device.  The subvendor, and subdevice fields will be set
1071  * to PCI_ANY_ID. The macro allows the next field to follow as the device
1072  * private data.
1073  */
1074 #define PCI_VDEVICE(vend, dev) \
1075 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1076 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
1077 
1078 /**
1079  * PCI_VDEVICE_SUB - describe a specific PCI device/subdevice in a short form
1080  * @vend: the vendor name
1081  * @dev: the 16 bit PCI Device ID
1082  * @subvend: the 16 bit PCI Subvendor ID
1083  * @subdev: the 16 bit PCI Subdevice ID
1084  *
1085  * Generate the pci_device_id struct layout for the specific PCI
1086  * device/subdevice. Private data may follow the output.
1087  */
1088 #define PCI_VDEVICE_SUB(vend, dev, subvend, subdev) \
1089 	.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
1090 	.subvendor = (subvend), .subdevice = (subdev), 0, 0
1091 
1092 /**
1093  * PCI_DEVICE_DATA - macro used to describe a specific PCI device in very short form
1094  * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
1095  * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
1096  * @data: the driver data to be filled
1097  *
1098  * This macro is used to create a struct pci_device_id that matches a
1099  * specific PCI device.  The subvendor, and subdevice fields will be set
1100  * to PCI_ANY_ID.
1101  */
1102 #define PCI_DEVICE_DATA(vend, dev, data) \
1103 	.vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
1104 	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
1105 	.driver_data = (kernel_ulong_t)(data)
1106 
1107 enum {
1108 	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* Ignore firmware setup */
1109 	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* Reassign all bus numbers */
1110 	PCI_PROBE_ONLY		= 0x00000004,	/* Use existing setup */
1111 	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* Don't do ISA alignment */
1112 	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* Enable domains in /proc */
1113 	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
1114 	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* Scan all, not just dev 0 */
1115 };
1116 
1117 #define PCI_IRQ_INTX		(1 << 0) /* Allow INTx interrupts */
1118 #define PCI_IRQ_MSI		(1 << 1) /* Allow MSI interrupts */
1119 #define PCI_IRQ_MSIX		(1 << 2) /* Allow MSI-X interrupts */
1120 #define PCI_IRQ_AFFINITY	(1 << 3) /* Auto-assign affinity */
1121 
1122 /* These external functions are only available when PCI support is enabled */
1123 #ifdef CONFIG_PCI
1124 
1125 extern unsigned int pci_flags;
1126 
pci_set_flags(int flags)1127 static inline void pci_set_flags(int flags) { pci_flags = flags; }
pci_add_flags(int flags)1128 static inline void pci_add_flags(int flags) { pci_flags |= flags; }
pci_clear_flags(int flags)1129 static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
pci_has_flag(int flag)1130 static inline int pci_has_flag(int flag) { return pci_flags & flag; }
1131 
1132 void pcie_bus_configure_settings(struct pci_bus *bus);
1133 
1134 enum pcie_bus_config_types {
1135 	PCIE_BUS_TUNE_OFF,	/* Don't touch MPS at all */
1136 	PCIE_BUS_DEFAULT,	/* Ensure MPS matches upstream bridge */
1137 	PCIE_BUS_SAFE,		/* Use largest MPS boot-time devices support */
1138 	PCIE_BUS_PERFORMANCE,	/* Use MPS and MRRS for best performance */
1139 	PCIE_BUS_PEER2PEER,	/* Set MPS = 128 for all devices */
1140 };
1141 
1142 extern enum pcie_bus_config_types pcie_bus_config;
1143 
1144 extern const struct bus_type pci_bus_type;
1145 
1146 /* Do NOT directly access these two variables, unless you are arch-specific PCI
1147  * code, or PCI core code. */
1148 extern struct list_head pci_root_buses;	/* List of all known PCI buses */
1149 /* Some device drivers need know if PCI is initiated */
1150 int no_pci_devices(void);
1151 
1152 void pcibios_resource_survey_bus(struct pci_bus *bus);
1153 void pcibios_bus_add_device(struct pci_dev *pdev);
1154 void pcibios_add_bus(struct pci_bus *bus);
1155 void pcibios_remove_bus(struct pci_bus *bus);
1156 void pcibios_fixup_bus(struct pci_bus *);
1157 int __must_check pcibios_enable_device(struct pci_dev *, int mask);
1158 /* Architecture-specific versions may override this (weak) */
1159 char *pcibios_setup(char *str);
1160 
1161 /* Used only when drivers/pci/setup.c is used */
1162 resource_size_t pcibios_align_resource(void *, const struct resource *,
1163 				resource_size_t,
1164 				resource_size_t);
1165 
1166 /* Generic PCI functions used internally */
1167 
1168 void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
1169 			     struct resource *res);
1170 void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
1171 			     struct pci_bus_region *region);
1172 void pcibios_scan_specific_bus(int busn);
1173 struct pci_bus *pci_find_bus(int domain, int busnr);
1174 void pci_bus_add_devices(const struct pci_bus *bus);
1175 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
1176 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1177 				    struct pci_ops *ops, void *sysdata,
1178 				    struct list_head *resources);
1179 int pci_host_probe(struct pci_host_bridge *bridge);
1180 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
1181 int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
1182 void pci_bus_release_busn_res(struct pci_bus *b);
1183 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1184 				  struct pci_ops *ops, void *sysdata,
1185 				  struct list_head *resources);
1186 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge);
1187 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
1188 				int busnr);
1189 struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
1190 				 const char *name,
1191 				 struct hotplug_slot *hotplug);
1192 void pci_destroy_slot(struct pci_slot *slot);
1193 #ifdef CONFIG_SYSFS
1194 void pci_dev_assign_slot(struct pci_dev *dev);
1195 #else
pci_dev_assign_slot(struct pci_dev * dev)1196 static inline void pci_dev_assign_slot(struct pci_dev *dev) { }
1197 #endif
1198 int pci_scan_slot(struct pci_bus *bus, int devfn);
1199 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
1200 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
1201 unsigned int pci_scan_child_bus(struct pci_bus *bus);
1202 void pci_bus_add_device(struct pci_dev *dev);
1203 void pci_read_bridge_bases(struct pci_bus *child);
1204 struct resource *pci_find_parent_resource(const struct pci_dev *dev,
1205 					  struct resource *res);
1206 u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
1207 int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
1208 u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
1209 struct pci_dev *pci_dev_get(struct pci_dev *dev);
1210 void pci_dev_put(struct pci_dev *dev);
1211 DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
1212 void pci_remove_bus(struct pci_bus *b);
1213 void pci_stop_and_remove_bus_device(struct pci_dev *dev);
1214 void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
1215 void pci_stop_root_bus(struct pci_bus *bus);
1216 void pci_remove_root_bus(struct pci_bus *bus);
1217 void pci_setup_cardbus(struct pci_bus *bus);
1218 void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
1219 void pci_sort_breadthfirst(void);
1220 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
1221 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
1222 
1223 /* Generic PCI functions exported to card drivers */
1224 
1225 u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
1226 u8 pci_find_capability(struct pci_dev *dev, int cap);
1227 u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
1228 u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
1229 u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap);
1230 u16 pci_find_ext_capability(struct pci_dev *dev, int cap);
1231 u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap);
1232 struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
1233 u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap);
1234 u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec);
1235 
1236 u64 pci_get_dsn(struct pci_dev *dev);
1237 
1238 struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device,
1239 			       struct pci_dev *from);
1240 struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
1241 			       unsigned int ss_vendor, unsigned int ss_device,
1242 			       struct pci_dev *from);
1243 struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
1244 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
1245 					    unsigned int devfn);
1246 struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
1247 struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
1248 
1249 int pci_dev_present(const struct pci_device_id *ids);
1250 
1251 int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
1252 			     int where, u8 *val);
1253 int pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn,
1254 			     int where, u16 *val);
1255 int pci_bus_read_config_dword(struct pci_bus *bus, unsigned int devfn,
1256 			      int where, u32 *val);
1257 int pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn,
1258 			      int where, u8 val);
1259 int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
1260 			      int where, u16 val);
1261 int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
1262 			       int where, u32 val);
1263 
1264 int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
1265 			    int where, int size, u32 *val);
1266 int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
1267 			    int where, int size, u32 val);
1268 int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
1269 			      int where, int size, u32 *val);
1270 int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
1271 			       int where, int size, u32 val);
1272 
1273 struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
1274 
1275 int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
1276 int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
1277 int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
1278 int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
1279 int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
1280 int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
1281 void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos,
1282 				    u32 clear, u32 set);
1283 
1284 int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
1285 int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
1286 int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
1287 int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
1288 int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
1289 						u16 clear, u16 set);
1290 int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
1291 					      u16 clear, u16 set);
1292 int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
1293 					u32 clear, u32 set);
1294 
1295 /**
1296  * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
1297  * @dev:	PCI device structure of the PCI Express device
1298  * @pos:	PCI Express Capability Register
1299  * @clear:	Clear bitmask
1300  * @set:	Set bitmask
1301  *
1302  * Perform a Read-Modify-Write (RMW) operation using @clear and @set
1303  * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
1304  * Capability Registers are accessed concurrently in RMW fashion, hence
1305  * require locking which is handled transparently to the caller.
1306  */
pcie_capability_clear_and_set_word(struct pci_dev * dev,int pos,u16 clear,u16 set)1307 static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
1308 						     int pos,
1309 						     u16 clear, u16 set)
1310 {
1311 	switch (pos) {
1312 	case PCI_EXP_LNKCTL:
1313 	case PCI_EXP_LNKCTL2:
1314 	case PCI_EXP_RTCTL:
1315 		return pcie_capability_clear_and_set_word_locked(dev, pos,
1316 								 clear, set);
1317 	default:
1318 		return pcie_capability_clear_and_set_word_unlocked(dev, pos,
1319 								   clear, set);
1320 	}
1321 }
1322 
pcie_capability_set_word(struct pci_dev * dev,int pos,u16 set)1323 static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
1324 					   u16 set)
1325 {
1326 	return pcie_capability_clear_and_set_word(dev, pos, 0, set);
1327 }
1328 
pcie_capability_set_dword(struct pci_dev * dev,int pos,u32 set)1329 static inline int pcie_capability_set_dword(struct pci_dev *dev, int pos,
1330 					    u32 set)
1331 {
1332 	return pcie_capability_clear_and_set_dword(dev, pos, 0, set);
1333 }
1334 
pcie_capability_clear_word(struct pci_dev * dev,int pos,u16 clear)1335 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
1336 					     u16 clear)
1337 {
1338 	return pcie_capability_clear_and_set_word(dev, pos, clear, 0);
1339 }
1340 
pcie_capability_clear_dword(struct pci_dev * dev,int pos,u32 clear)1341 static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos,
1342 					      u32 clear)
1343 {
1344 	return pcie_capability_clear_and_set_dword(dev, pos, clear, 0);
1345 }
1346 
1347 /* User-space driven config access */
1348 int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
1349 int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
1350 int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
1351 int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
1352 int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
1353 int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
1354 
1355 int __must_check pci_enable_device(struct pci_dev *dev);
1356 int __must_check pci_enable_device_mem(struct pci_dev *dev);
1357 int __must_check pci_reenable_device(struct pci_dev *);
1358 int __must_check pcim_enable_device(struct pci_dev *pdev);
1359 void pcim_pin_device(struct pci_dev *pdev);
1360 
pci_intx_mask_supported(struct pci_dev * pdev)1361 static inline bool pci_intx_mask_supported(struct pci_dev *pdev)
1362 {
1363 	/*
1364 	 * INTx masking is supported if PCI_COMMAND_INTX_DISABLE is
1365 	 * writable and no quirk has marked the feature broken.
1366 	 */
1367 	return !pdev->broken_intx_masking;
1368 }
1369 
pci_is_enabled(struct pci_dev * pdev)1370 static inline int pci_is_enabled(struct pci_dev *pdev)
1371 {
1372 	return (atomic_read(&pdev->enable_cnt) > 0);
1373 }
1374 
pci_is_managed(struct pci_dev * pdev)1375 static inline int pci_is_managed(struct pci_dev *pdev)
1376 {
1377 	return pdev->is_managed;
1378 }
1379 
1380 void pci_disable_device(struct pci_dev *dev);
1381 
1382 extern unsigned int pcibios_max_latency;
1383 void pci_set_master(struct pci_dev *dev);
1384 void pci_clear_master(struct pci_dev *dev);
1385 
1386 int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
1387 int pci_set_cacheline_size(struct pci_dev *dev);
1388 int __must_check pci_set_mwi(struct pci_dev *dev);
1389 int __must_check pcim_set_mwi(struct pci_dev *dev);
1390 int pci_try_set_mwi(struct pci_dev *dev);
1391 void pci_clear_mwi(struct pci_dev *dev);
1392 void pci_disable_parity(struct pci_dev *dev);
1393 void pci_intx(struct pci_dev *dev, int enable);
1394 bool pci_check_and_mask_intx(struct pci_dev *dev);
1395 bool pci_check_and_unmask_intx(struct pci_dev *dev);
1396 int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
1397 int pci_wait_for_pending_transaction(struct pci_dev *dev);
1398 int pcix_get_max_mmrbc(struct pci_dev *dev);
1399 int pcix_get_mmrbc(struct pci_dev *dev);
1400 int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
1401 int pcie_get_readrq(struct pci_dev *dev);
1402 int pcie_set_readrq(struct pci_dev *dev, int rq);
1403 int pcie_get_mps(struct pci_dev *dev);
1404 int pcie_set_mps(struct pci_dev *dev, int mps);
1405 u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
1406 			     enum pci_bus_speed *speed,
1407 			     enum pcie_link_width *width);
1408 int pcie_link_speed_mbps(struct pci_dev *pdev);
1409 void pcie_print_link_status(struct pci_dev *dev);
1410 int pcie_reset_flr(struct pci_dev *dev, bool probe);
1411 int pcie_flr(struct pci_dev *dev);
1412 int __pci_reset_function_locked(struct pci_dev *dev);
1413 int pci_reset_function(struct pci_dev *dev);
1414 int pci_reset_function_locked(struct pci_dev *dev);
1415 int pci_try_reset_function(struct pci_dev *dev);
1416 int pci_probe_reset_slot(struct pci_slot *slot);
1417 int pci_probe_reset_bus(struct pci_bus *bus);
1418 int pci_reset_bus(struct pci_dev *dev);
1419 void pci_reset_secondary_bus(struct pci_dev *dev);
1420 void pcibios_reset_secondary_bus(struct pci_dev *dev);
1421 void pci_update_resource(struct pci_dev *dev, int resno);
1422 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
1423 int pci_release_resource(struct pci_dev *dev, int resno);
pci_rebar_bytes_to_size(u64 bytes)1424 static inline int pci_rebar_bytes_to_size(u64 bytes)
1425 {
1426 	bytes = roundup_pow_of_two(bytes);
1427 
1428 	/* Return BAR size as defined in the resizable BAR specification */
1429 	return max(ilog2(bytes), 20) - 20;
1430 }
1431 
1432 u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
1433 int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
1434 int pci_select_bars(struct pci_dev *dev, unsigned long flags);
1435 bool pci_device_is_present(struct pci_dev *pdev);
1436 void pci_ignore_hotplug(struct pci_dev *dev);
1437 struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
1438 int pci_status_get_and_clear_errors(struct pci_dev *pdev);
1439 
1440 int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
1441 		irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
1442 		const char *fmt, ...);
1443 void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
1444 
1445 /* ROM control related routines */
1446 int pci_enable_rom(struct pci_dev *pdev);
1447 void pci_disable_rom(struct pci_dev *pdev);
1448 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
1449 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
1450 
1451 /* Power management related routines */
1452 int pci_save_state(struct pci_dev *dev);
1453 void pci_restore_state(struct pci_dev *dev);
1454 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1455 int pci_load_saved_state(struct pci_dev *dev,
1456 			 struct pci_saved_state *state);
1457 int pci_load_and_free_saved_state(struct pci_dev *dev,
1458 				  struct pci_saved_state **state);
1459 int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
1460 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
1461 int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
1462 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
1463 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
1464 void pci_pme_active(struct pci_dev *dev, bool enable);
1465 int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
1466 int pci_wake_from_d3(struct pci_dev *dev, bool enable);
1467 int pci_prepare_to_sleep(struct pci_dev *dev);
1468 int pci_back_from_sleep(struct pci_dev *dev);
1469 bool pci_dev_run_wake(struct pci_dev *dev);
1470 void pci_d3cold_enable(struct pci_dev *dev);
1471 void pci_d3cold_disable(struct pci_dev *dev);
1472 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
1473 void pci_resume_bus(struct pci_bus *bus);
1474 void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
1475 
1476 /* For use by arch with custom probe code */
1477 void set_pcie_port_type(struct pci_dev *pdev);
1478 void set_pcie_hotplug_bridge(struct pci_dev *pdev);
1479 
1480 /* Functions for PCI Hotplug drivers to use */
1481 unsigned int pci_rescan_bus(struct pci_bus *bus);
1482 void pci_lock_rescan_remove(void);
1483 void pci_unlock_rescan_remove(void);
1484 
1485 /* Vital Product Data routines */
1486 ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1487 ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1488 ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
1489 ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
1490 
1491 /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
1492 resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1493 void pci_bus_assign_resources(const struct pci_bus *bus);
1494 void pci_bus_claim_resources(struct pci_bus *bus);
1495 void pci_bus_size_bridges(struct pci_bus *bus);
1496 int pci_claim_resource(struct pci_dev *, int);
1497 int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
1498 void pci_assign_unassigned_resources(void);
1499 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
1500 void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
1501 void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
1502 int pci_enable_resources(struct pci_dev *, int mask);
1503 void pci_assign_irq(struct pci_dev *dev);
1504 struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
1505 #define HAVE_PCI_REQ_REGIONS	2
1506 int __must_check pci_request_regions(struct pci_dev *, const char *);
1507 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
1508 void pci_release_regions(struct pci_dev *);
1509 int __must_check pci_request_region(struct pci_dev *, int, const char *);
1510 void pci_release_region(struct pci_dev *, int);
1511 int pci_request_selected_regions(struct pci_dev *, int, const char *);
1512 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
1513 void pci_release_selected_regions(struct pci_dev *, int);
1514 
1515 static inline __must_check struct resource *
pci_request_config_region_exclusive(struct pci_dev * pdev,unsigned int offset,unsigned int len,const char * name)1516 pci_request_config_region_exclusive(struct pci_dev *pdev, unsigned int offset,
1517 				    unsigned int len, const char *name)
1518 {
1519 	return __request_region(&pdev->driver_exclusive_resource, offset, len,
1520 				name, IORESOURCE_EXCLUSIVE);
1521 }
1522 
pci_release_config_region(struct pci_dev * pdev,unsigned int offset,unsigned int len)1523 static inline void pci_release_config_region(struct pci_dev *pdev,
1524 					     unsigned int offset,
1525 					     unsigned int len)
1526 {
1527 	__release_region(&pdev->driver_exclusive_resource, offset, len);
1528 }
1529 
1530 /* drivers/pci/bus.c */
1531 void pci_add_resource(struct list_head *resources, struct resource *res);
1532 void pci_add_resource_offset(struct list_head *resources, struct resource *res,
1533 			     resource_size_t offset);
1534 void pci_free_resource_list(struct list_head *resources);
1535 void pci_bus_add_resource(struct pci_bus *bus, struct resource *res);
1536 struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n);
1537 void pci_bus_remove_resources(struct pci_bus *bus);
1538 void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res);
1539 int devm_request_pci_bus_resources(struct device *dev,
1540 				   struct list_head *resources);
1541 
1542 /* Temporary until new and working PCI SBR API in place */
1543 int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
1544 
1545 #define __pci_bus_for_each_res0(bus, res, ...)				\
1546 	for (unsigned int __b = 0;					\
1547 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1548 	     __b++)
1549 
1550 #define __pci_bus_for_each_res1(bus, res, __b)				\
1551 	for (__b = 0;							\
1552 	     (res = pci_bus_resource_n(bus, __b)) || __b < PCI_BRIDGE_RESOURCE_NUM; \
1553 	     __b++)
1554 
1555 /**
1556  * pci_bus_for_each_resource - iterate over PCI bus resources
1557  * @bus: the PCI bus
1558  * @res: pointer to the current resource
1559  * @...: optional index of the current resource
1560  *
1561  * Iterate over PCI bus resources. The first part is to go over PCI bus
1562  * resource array, which has at most the %PCI_BRIDGE_RESOURCE_NUM entries.
1563  * After that continue with the separate list of the additional resources,
1564  * if not empty. That's why the Logical OR is being used.
1565  *
1566  * Possible usage:
1567  *
1568  *	struct pci_bus *bus = ...;
1569  *	struct resource *res;
1570  *	unsigned int i;
1571  *
1572  * 	// With optional index
1573  * 	pci_bus_for_each_resource(bus, res, i)
1574  * 		pr_info("PCI bus resource[%u]: %pR\n", i, res);
1575  *
1576  * 	// Without index
1577  * 	pci_bus_for_each_resource(bus, res)
1578  * 		_do_something_(res);
1579  */
1580 #define pci_bus_for_each_resource(bus, res, ...)			\
1581 	CONCATENATE(__pci_bus_for_each_res, COUNT_ARGS(__VA_ARGS__))	\
1582 		    (bus, res, __VA_ARGS__)
1583 
1584 int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1585 			struct resource *res, resource_size_t size,
1586 			resource_size_t align, resource_size_t min,
1587 			unsigned long type_mask,
1588 			resource_alignf alignf,
1589 			void *alignf_data);
1590 
1591 
1592 int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr,
1593 			resource_size_t size);
1594 unsigned long pci_address_to_pio(phys_addr_t addr);
1595 phys_addr_t pci_pio_to_address(unsigned long pio);
1596 int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1597 int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
1598 			   phys_addr_t phys_addr);
1599 void pci_unmap_iospace(struct resource *res);
1600 void __iomem *devm_pci_remap_cfgspace(struct device *dev,
1601 				      resource_size_t offset,
1602 				      resource_size_t size);
1603 void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
1604 					  struct resource *res);
1605 
pci_bus_address(struct pci_dev * pdev,int bar)1606 static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1607 {
1608 	struct pci_bus_region region;
1609 
1610 	pcibios_resource_to_bus(pdev->bus, &region, &pdev->resource[bar]);
1611 	return region.start;
1612 }
1613 
1614 /* Proper probing supporting hot-pluggable devices */
1615 int __must_check __pci_register_driver(struct pci_driver *, struct module *,
1616 				       const char *mod_name);
1617 
1618 /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */
1619 #define pci_register_driver(driver)		\
1620 	__pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
1621 
1622 void pci_unregister_driver(struct pci_driver *dev);
1623 
1624 /**
1625  * module_pci_driver() - Helper macro for registering a PCI driver
1626  * @__pci_driver: pci_driver struct
1627  *
1628  * Helper macro for PCI drivers which do not do anything special in module
1629  * init/exit. This eliminates a lot of boilerplate. Each module may only
1630  * use this macro once, and calling it replaces module_init() and module_exit()
1631  */
1632 #define module_pci_driver(__pci_driver) \
1633 	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)
1634 
1635 /**
1636  * builtin_pci_driver() - Helper macro for registering a PCI driver
1637  * @__pci_driver: pci_driver struct
1638  *
1639  * Helper macro for PCI drivers which do not do anything special in their
1640  * init code. This eliminates a lot of boilerplate. Each driver may only
1641  * use this macro once, and calling it replaces device_initcall(...)
1642  */
1643 #define builtin_pci_driver(__pci_driver) \
1644 	builtin_driver(__pci_driver, pci_register_driver)
1645 
1646 struct pci_driver *pci_dev_driver(const struct pci_dev *dev);
1647 int pci_add_dynid(struct pci_driver *drv,
1648 		  unsigned int vendor, unsigned int device,
1649 		  unsigned int subvendor, unsigned int subdevice,
1650 		  unsigned int class, unsigned int class_mask,
1651 		  unsigned long driver_data);
1652 const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
1653 					 struct pci_dev *dev);
1654 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
1655 		    int pass);
1656 
1657 void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
1658 		  void *userdata);
1659 int pci_cfg_space_size(struct pci_dev *dev);
1660 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
1661 resource_size_t pcibios_window_alignment(struct pci_bus *bus,
1662 					 unsigned long type);
1663 
1664 #define PCI_VGA_STATE_CHANGE_BRIDGE (1 << 0)
1665 #define PCI_VGA_STATE_CHANGE_DECODES (1 << 1)
1666 
1667 int pci_set_vga_state(struct pci_dev *pdev, bool decode,
1668 		      unsigned int command_bits, u32 flags);
1669 
1670 /*
1671  * Virtual interrupts allow for more interrupts to be allocated
1672  * than the device has interrupts for. These are not programmed
1673  * into the device's MSI-X table and must be handled by some
1674  * other driver means.
1675  */
1676 #define PCI_IRQ_VIRTUAL		(1 << 4)
1677 
1678 #define PCI_IRQ_ALL_TYPES	(PCI_IRQ_INTX | PCI_IRQ_MSI | PCI_IRQ_MSIX)
1679 
1680 #include <linux/dmapool.h>
1681 
1682 struct msix_entry {
1683 	u32	vector;	/* Kernel uses to write allocated vector */
1684 	u16	entry;	/* Driver uses to specify entry, OS writes */
1685 };
1686 
1687 #ifdef CONFIG_PCI_MSI
1688 int pci_msi_vec_count(struct pci_dev *dev);
1689 void pci_disable_msi(struct pci_dev *dev);
1690 int pci_msix_vec_count(struct pci_dev *dev);
1691 void pci_disable_msix(struct pci_dev *dev);
1692 void pci_restore_msi_state(struct pci_dev *dev);
1693 bool pci_msi_enabled(void);
1694 int pci_enable_msi(struct pci_dev *dev);
1695 int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
1696 			  int minvec, int maxvec);
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1697 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1698 					struct msix_entry *entries, int nvec)
1699 {
1700 	int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
1701 	if (rc < 0)
1702 		return rc;
1703 	return 0;
1704 }
1705 int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1706 			  unsigned int max_vecs, unsigned int flags);
1707 int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1708 				   unsigned int max_vecs, unsigned int flags,
1709 				   struct irq_affinity *affd);
1710 
1711 bool pci_msix_can_alloc_dyn(struct pci_dev *dev);
1712 struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1713 				     const struct irq_affinity_desc *affdesc);
1714 void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
1715 
1716 void pci_free_irq_vectors(struct pci_dev *dev);
1717 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1718 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1719 
1720 #else
pci_msi_vec_count(struct pci_dev * dev)1721 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msi(struct pci_dev * dev)1722 static inline void pci_disable_msi(struct pci_dev *dev) { }
pci_msix_vec_count(struct pci_dev * dev)1723 static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
pci_disable_msix(struct pci_dev * dev)1724 static inline void pci_disable_msix(struct pci_dev *dev) { }
pci_restore_msi_state(struct pci_dev * dev)1725 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
pci_msi_enabled(void)1726 static inline bool pci_msi_enabled(void) { return false; }
pci_enable_msi(struct pci_dev * dev)1727 static inline int pci_enable_msi(struct pci_dev *dev)
1728 { return -ENOSYS; }
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)1729 static inline int pci_enable_msix_range(struct pci_dev *dev,
1730 			struct msix_entry *entries, int minvec, int maxvec)
1731 { return -ENOSYS; }
pci_enable_msix_exact(struct pci_dev * dev,struct msix_entry * entries,int nvec)1732 static inline int pci_enable_msix_exact(struct pci_dev *dev,
1733 			struct msix_entry *entries, int nvec)
1734 { return -ENOSYS; }
1735 
1736 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)1737 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
1738 			       unsigned int max_vecs, unsigned int flags,
1739 			       struct irq_affinity *aff_desc)
1740 {
1741 	if ((flags & PCI_IRQ_INTX) && min_vecs == 1 && dev->irq)
1742 		return 1;
1743 	return -ENOSPC;
1744 }
1745 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)1746 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1747 		      unsigned int max_vecs, unsigned int flags)
1748 {
1749 	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
1750 					      flags, NULL);
1751 }
1752 
pci_msix_can_alloc_dyn(struct pci_dev * dev)1753 static inline bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
1754 { return false; }
pci_msix_alloc_irq_at(struct pci_dev * dev,unsigned int index,const struct irq_affinity_desc * affdesc)1755 static inline struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
1756 						   const struct irq_affinity_desc *affdesc)
1757 {
1758 	struct msi_map map = { .index = -ENOSYS, };
1759 
1760 	return map;
1761 }
1762 
pci_msix_free_irq(struct pci_dev * pdev,struct msi_map map)1763 static inline void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map)
1764 {
1765 }
1766 
pci_free_irq_vectors(struct pci_dev * dev)1767 static inline void pci_free_irq_vectors(struct pci_dev *dev)
1768 {
1769 }
1770 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)1771 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1772 {
1773 	if (WARN_ON_ONCE(nr > 0))
1774 		return -EINVAL;
1775 	return dev->irq;
1776 }
pci_irq_get_affinity(struct pci_dev * pdev,int vec)1777 static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1778 		int vec)
1779 {
1780 	return cpu_possible_mask;
1781 }
1782 #endif
1783 
1784 /**
1785  * pci_irqd_intx_xlate() - Translate PCI INTx value to an IRQ domain hwirq
1786  * @d: the INTx IRQ domain
1787  * @node: the DT node for the device whose interrupt we're translating
1788  * @intspec: the interrupt specifier data from the DT
1789  * @intsize: the number of entries in @intspec
1790  * @out_hwirq: pointer at which to write the hwirq number
1791  * @out_type: pointer at which to write the interrupt type
1792  *
1793  * Translate a PCI INTx interrupt number from device tree in the range 1-4, as
1794  * stored in the standard PCI_INTERRUPT_PIN register, to a value in the range
1795  * 0-3 suitable for use in a 4 entry IRQ domain. That is, subtract one from the
1796  * INTx value to obtain the hwirq number.
1797  *
1798  * Returns 0 on success, or -EINVAL if the interrupt specifier is out of range.
1799  */
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)1800 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
1801 				      struct device_node *node,
1802 				      const u32 *intspec,
1803 				      unsigned int intsize,
1804 				      unsigned long *out_hwirq,
1805 				      unsigned int *out_type)
1806 {
1807 	const u32 intx = intspec[0];
1808 
1809 	if (intx < PCI_INTERRUPT_INTA || intx > PCI_INTERRUPT_INTD)
1810 		return -EINVAL;
1811 
1812 	*out_hwirq = intx - PCI_INTERRUPT_INTA;
1813 	return 0;
1814 }
1815 
1816 #ifdef CONFIG_PCIEPORTBUS
1817 extern bool pcie_ports_disabled;
1818 extern bool pcie_ports_native;
1819 
1820 int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req,
1821 			  bool use_lt);
1822 #else
1823 #define pcie_ports_disabled	true
1824 #define pcie_ports_native	false
1825 
pcie_set_target_speed(struct pci_dev * port,enum pci_bus_speed speed_req,bool use_lt)1826 static inline int pcie_set_target_speed(struct pci_dev *port,
1827 					enum pci_bus_speed speed_req,
1828 					bool use_lt)
1829 {
1830 	return -EOPNOTSUPP;
1831 }
1832 #endif
1833 
1834 #define PCIE_LINK_STATE_L0S		(BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */
1835 #define PCIE_LINK_STATE_L1		BIT(2)	/* L1 state */
1836 #define PCIE_LINK_STATE_L1_1		BIT(3)	/* ASPM L1.1 state */
1837 #define PCIE_LINK_STATE_L1_2		BIT(4)	/* ASPM L1.2 state */
1838 #define PCIE_LINK_STATE_L1_1_PCIPM	BIT(5)	/* PCI-PM L1.1 state */
1839 #define PCIE_LINK_STATE_L1_2_PCIPM	BIT(6)	/* PCI-PM L1.2 state */
1840 #define PCIE_LINK_STATE_ASPM_ALL	(PCIE_LINK_STATE_L0S		|\
1841 					 PCIE_LINK_STATE_L1		|\
1842 					 PCIE_LINK_STATE_L1_1		|\
1843 					 PCIE_LINK_STATE_L1_2		|\
1844 					 PCIE_LINK_STATE_L1_1_PCIPM	|\
1845 					 PCIE_LINK_STATE_L1_2_PCIPM)
1846 #define PCIE_LINK_STATE_CLKPM		BIT(7)
1847 #define PCIE_LINK_STATE_ALL		(PCIE_LINK_STATE_ASPM_ALL	|\
1848 					 PCIE_LINK_STATE_CLKPM)
1849 
1850 #ifdef CONFIG_PCIEASPM
1851 int pci_disable_link_state(struct pci_dev *pdev, int state);
1852 int pci_disable_link_state_locked(struct pci_dev *pdev, int state);
1853 int pci_enable_link_state(struct pci_dev *pdev, int state);
1854 int pci_enable_link_state_locked(struct pci_dev *pdev, int state);
1855 void pcie_no_aspm(void);
1856 bool pcie_aspm_support_enabled(void);
1857 bool pcie_aspm_enabled(struct pci_dev *pdev);
1858 #else
pci_disable_link_state(struct pci_dev * pdev,int state)1859 static inline int pci_disable_link_state(struct pci_dev *pdev, int state)
1860 { return 0; }
pci_disable_link_state_locked(struct pci_dev * pdev,int state)1861 static inline int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
1862 { return 0; }
pci_enable_link_state(struct pci_dev * pdev,int state)1863 static inline int pci_enable_link_state(struct pci_dev *pdev, int state)
1864 { return 0; }
pci_enable_link_state_locked(struct pci_dev * pdev,int state)1865 static inline int pci_enable_link_state_locked(struct pci_dev *pdev, int state)
1866 { return 0; }
pcie_no_aspm(void)1867 static inline void pcie_no_aspm(void) { }
pcie_aspm_support_enabled(void)1868 static inline bool pcie_aspm_support_enabled(void) { return false; }
pcie_aspm_enabled(struct pci_dev * pdev)1869 static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
1870 #endif
1871 
1872 #ifdef CONFIG_HOTPLUG_PCI
1873 void pci_hp_ignore_link_change(struct pci_dev *pdev);
1874 void pci_hp_unignore_link_change(struct pci_dev *pdev);
1875 #else
pci_hp_ignore_link_change(struct pci_dev * pdev)1876 static inline void pci_hp_ignore_link_change(struct pci_dev *pdev) { }
pci_hp_unignore_link_change(struct pci_dev * pdev)1877 static inline void pci_hp_unignore_link_change(struct pci_dev *pdev) { }
1878 #endif
1879 
1880 #ifdef CONFIG_PCIEAER
1881 bool pci_aer_available(void);
1882 #else
pci_aer_available(void)1883 static inline bool pci_aer_available(void) { return false; }
1884 #endif
1885 
1886 bool pci_ats_disabled(void);
1887 
1888 #define PCIE_PTM_CONTEXT_UPDATE_AUTO 0
1889 #define PCIE_PTM_CONTEXT_UPDATE_MANUAL 1
1890 
1891 struct pcie_ptm_ops {
1892 	int (*check_capability)(void *drvdata);
1893 	int (*context_update_write)(void *drvdata, u8 mode);
1894 	int (*context_update_read)(void *drvdata, u8 *mode);
1895 	int (*context_valid_write)(void *drvdata, bool valid);
1896 	int (*context_valid_read)(void *drvdata, bool *valid);
1897 	int (*local_clock_read)(void *drvdata, u64 *clock);
1898 	int (*master_clock_read)(void *drvdata, u64 *clock);
1899 	int (*t1_read)(void *drvdata, u64 *clock);
1900 	int (*t2_read)(void *drvdata, u64 *clock);
1901 	int (*t3_read)(void *drvdata, u64 *clock);
1902 	int (*t4_read)(void *drvdata, u64 *clock);
1903 
1904 	bool (*context_update_visible)(void *drvdata);
1905 	bool (*context_valid_visible)(void *drvdata);
1906 	bool (*local_clock_visible)(void *drvdata);
1907 	bool (*master_clock_visible)(void *drvdata);
1908 	bool (*t1_visible)(void *drvdata);
1909 	bool (*t2_visible)(void *drvdata);
1910 	bool (*t3_visible)(void *drvdata);
1911 	bool (*t4_visible)(void *drvdata);
1912 };
1913 
1914 struct pci_ptm_debugfs {
1915 	struct dentry *debugfs;
1916 	const struct pcie_ptm_ops *ops;
1917 	struct mutex lock;
1918 	void *pdata;
1919 };
1920 
1921 #ifdef CONFIG_PCIE_PTM
1922 int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
1923 void pci_disable_ptm(struct pci_dev *dev);
1924 bool pcie_ptm_enabled(struct pci_dev *dev);
1925 #else
pci_enable_ptm(struct pci_dev * dev,u8 * granularity)1926 static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
1927 { return -EINVAL; }
pci_disable_ptm(struct pci_dev * dev)1928 static inline void pci_disable_ptm(struct pci_dev *dev) { }
pcie_ptm_enabled(struct pci_dev * dev)1929 static inline bool pcie_ptm_enabled(struct pci_dev *dev)
1930 { return false; }
1931 #endif
1932 
1933 #if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_PCIE_PTM)
1934 struct pci_ptm_debugfs *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1935 						const struct pcie_ptm_ops *ops);
1936 void pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs);
1937 #else
1938 static inline struct pci_ptm_debugfs
pcie_ptm_create_debugfs(struct device * dev,void * pdata,const struct pcie_ptm_ops * ops)1939 *pcie_ptm_create_debugfs(struct device *dev, void *pdata,
1940 			 const struct pcie_ptm_ops *ops) { return NULL; }
1941 static inline void
pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs * ptm_debugfs)1942 pcie_ptm_destroy_debugfs(struct pci_ptm_debugfs *ptm_debugfs) { }
1943 #endif
1944 
1945 void pci_cfg_access_lock(struct pci_dev *dev);
1946 bool pci_cfg_access_trylock(struct pci_dev *dev);
1947 void pci_cfg_access_unlock(struct pci_dev *dev);
1948 
1949 void pci_dev_lock(struct pci_dev *dev);
1950 int pci_dev_trylock(struct pci_dev *dev);
1951 void pci_dev_unlock(struct pci_dev *dev);
1952 DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
1953 
1954 /*
1955  * PCI domain support.  Sometimes called PCI segment (eg by ACPI),
1956  * a PCI domain is defined to be a set of PCI buses which share
1957  * configuration space.
1958  */
1959 #ifdef CONFIG_PCI_DOMAINS
1960 extern int pci_domains_supported;
1961 #else
1962 enum { pci_domains_supported = 0 };
1963 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1964 static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1965 #endif /* CONFIG_PCI_DOMAINS */
1966 
1967 /*
1968  * Generic implementation for PCI domain support. If your
1969  * architecture does not need custom management of PCI
1970  * domains then this implementation will be used
1971  */
1972 #ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_domain_nr(struct pci_bus * bus)1973 static inline int pci_domain_nr(struct pci_bus *bus)
1974 {
1975 	return bus->domain_nr;
1976 }
1977 #ifdef CONFIG_ACPI
1978 int acpi_pci_bus_find_domain_nr(struct pci_bus *bus);
1979 #else
acpi_pci_bus_find_domain_nr(struct pci_bus * bus)1980 static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
1981 { return 0; }
1982 #endif
1983 int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent);
1984 void pci_bus_release_domain_nr(struct device *parent, int domain_nr);
1985 #endif
1986 
1987 /* Some architectures require additional setup to direct VGA traffic */
1988 typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1989 				    unsigned int command_bits, u32 flags);
1990 void pci_register_set_vga_state(arch_set_vga_state_t func);
1991 
1992 static inline int
pci_request_io_regions(struct pci_dev * pdev,const char * name)1993 pci_request_io_regions(struct pci_dev *pdev, const char *name)
1994 {
1995 	return pci_request_selected_regions(pdev,
1996 			    pci_select_bars(pdev, IORESOURCE_IO), name);
1997 }
1998 
1999 static inline void
pci_release_io_regions(struct pci_dev * pdev)2000 pci_release_io_regions(struct pci_dev *pdev)
2001 {
2002 	return pci_release_selected_regions(pdev,
2003 			    pci_select_bars(pdev, IORESOURCE_IO));
2004 }
2005 
2006 static inline int
pci_request_mem_regions(struct pci_dev * pdev,const char * name)2007 pci_request_mem_regions(struct pci_dev *pdev, const char *name)
2008 {
2009 	return pci_request_selected_regions(pdev,
2010 			    pci_select_bars(pdev, IORESOURCE_MEM), name);
2011 }
2012 
2013 static inline void
pci_release_mem_regions(struct pci_dev * pdev)2014 pci_release_mem_regions(struct pci_dev *pdev)
2015 {
2016 	return pci_release_selected_regions(pdev,
2017 			    pci_select_bars(pdev, IORESOURCE_MEM));
2018 }
2019 
2020 #else /* CONFIG_PCI is not enabled */
2021 
pci_set_flags(int flags)2022 static inline void pci_set_flags(int flags) { }
pci_add_flags(int flags)2023 static inline void pci_add_flags(int flags) { }
pci_clear_flags(int flags)2024 static inline void pci_clear_flags(int flags) { }
pci_has_flag(int flag)2025 static inline int pci_has_flag(int flag) { return 0; }
2026 
2027 /*
2028  * If the system does not have PCI, clearly these return errors.  Define
2029  * these as simple inline functions to avoid hair in drivers.
2030  */
2031 #define _PCI_NOP(o, s, t) \
2032 	static inline int pci_##o##_config_##s(struct pci_dev *dev, \
2033 						int where, t val) \
2034 		{ return PCIBIOS_FUNC_NOT_SUPPORTED; }
2035 
2036 #define _PCI_NOP_ALL(o, x)	_PCI_NOP(o, byte, u8 x) \
2037 				_PCI_NOP(o, word, u16 x) \
2038 				_PCI_NOP(o, dword, u32 x)
2039 _PCI_NOP_ALL(read, *)
2040 _PCI_NOP_ALL(write,)
2041 
pci_get_device(unsigned int vendor,unsigned int device,struct pci_dev * from)2042 static inline struct pci_dev *pci_get_device(unsigned int vendor,
2043 					     unsigned int device,
2044 					     struct pci_dev *from)
2045 { return NULL; }
2046 
pci_get_subsys(unsigned int vendor,unsigned int device,unsigned int ss_vendor,unsigned int ss_device,struct pci_dev * from)2047 static inline struct pci_dev *pci_get_subsys(unsigned int vendor,
2048 					     unsigned int device,
2049 					     unsigned int ss_vendor,
2050 					     unsigned int ss_device,
2051 					     struct pci_dev *from)
2052 { return NULL; }
2053 
pci_get_class(unsigned int class,struct pci_dev * from)2054 static inline struct pci_dev *pci_get_class(unsigned int class,
2055 					    struct pci_dev *from)
2056 { return NULL; }
2057 
pci_get_base_class(unsigned int class,struct pci_dev * from)2058 static inline struct pci_dev *pci_get_base_class(unsigned int class,
2059 						 struct pci_dev *from)
2060 { return NULL; }
2061 
pci_dev_present(const struct pci_device_id * ids)2062 static inline int pci_dev_present(const struct pci_device_id *ids)
2063 { return 0; }
2064 
2065 #define no_pci_devices()	(1)
2066 #define pci_dev_put(dev)	do { } while (0)
2067 
pci_set_master(struct pci_dev * dev)2068 static inline void pci_set_master(struct pci_dev *dev) { }
pci_clear_master(struct pci_dev * dev)2069 static inline void pci_clear_master(struct pci_dev *dev) { }
pci_enable_device(struct pci_dev * dev)2070 static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
pci_disable_device(struct pci_dev * dev)2071 static inline void pci_disable_device(struct pci_dev *dev) { }
pcim_enable_device(struct pci_dev * pdev)2072 static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
pci_assign_resource(struct pci_dev * dev,int i)2073 static inline int pci_assign_resource(struct pci_dev *dev, int i)
2074 { return -EBUSY; }
__pci_register_driver(struct pci_driver * drv,struct module * owner,const char * mod_name)2075 static inline int __must_check __pci_register_driver(struct pci_driver *drv,
2076 						     struct module *owner,
2077 						     const char *mod_name)
2078 { return 0; }
pci_register_driver(struct pci_driver * drv)2079 static inline int pci_register_driver(struct pci_driver *drv)
2080 { return 0; }
pci_unregister_driver(struct pci_driver * drv)2081 static inline void pci_unregister_driver(struct pci_driver *drv) { }
pci_find_capability(struct pci_dev * dev,int cap)2082 static inline u8 pci_find_capability(struct pci_dev *dev, int cap)
2083 { return 0; }
pci_find_next_capability(struct pci_dev * dev,u8 post,int cap)2084 static inline u8 pci_find_next_capability(struct pci_dev *dev, u8 post, int cap)
2085 { return 0; }
pci_find_ext_capability(struct pci_dev * dev,int cap)2086 static inline u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
2087 { return 0; }
2088 
pci_get_dsn(struct pci_dev * dev)2089 static inline u64 pci_get_dsn(struct pci_dev *dev)
2090 { return 0; }
2091 
2092 /* Power management related routines */
pci_save_state(struct pci_dev * dev)2093 static inline int pci_save_state(struct pci_dev *dev) { return 0; }
pci_restore_state(struct pci_dev * dev)2094 static inline void pci_restore_state(struct pci_dev *dev) { }
pci_set_power_state(struct pci_dev * dev,pci_power_t state)2095 static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
2096 { return 0; }
pci_set_power_state_locked(struct pci_dev * dev,pci_power_t state)2097 static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
2098 { return 0; }
pci_wake_from_d3(struct pci_dev * dev,bool enable)2099 static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2100 { return 0; }
pci_choose_state(struct pci_dev * dev,pm_message_t state)2101 static inline pci_power_t pci_choose_state(struct pci_dev *dev,
2102 					   pm_message_t state)
2103 { return PCI_D0; }
pci_enable_wake(struct pci_dev * dev,pci_power_t state,int enable)2104 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
2105 				  int enable)
2106 { return 0; }
2107 
pci_find_resource(struct pci_dev * dev,struct resource * res)2108 static inline struct resource *pci_find_resource(struct pci_dev *dev,
2109 						 struct resource *res)
2110 { return NULL; }
pci_request_regions(struct pci_dev * dev,const char * res_name)2111 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
2112 { return -EIO; }
pci_release_regions(struct pci_dev * dev)2113 static inline void pci_release_regions(struct pci_dev *dev) { }
2114 
pci_register_io_range(const struct fwnode_handle * fwnode,phys_addr_t addr,resource_size_t size)2115 static inline int pci_register_io_range(const struct fwnode_handle *fwnode,
2116 					phys_addr_t addr, resource_size_t size)
2117 { return -EINVAL; }
2118 
pci_address_to_pio(phys_addr_t addr)2119 static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
2120 
pci_find_next_bus(const struct pci_bus * from)2121 static inline struct pci_bus *pci_find_next_bus(const struct pci_bus *from)
2122 { return NULL; }
pci_get_slot(struct pci_bus * bus,unsigned int devfn)2123 static inline struct pci_dev *pci_get_slot(struct pci_bus *bus,
2124 						unsigned int devfn)
2125 { return NULL; }
pci_get_domain_bus_and_slot(int domain,unsigned int bus,unsigned int devfn)2126 static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain,
2127 					unsigned int bus, unsigned int devfn)
2128 { return NULL; }
2129 
pci_domain_nr(struct pci_bus * bus)2130 static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
pci_dev_get(struct pci_dev * dev)2131 static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
2132 
2133 #define dev_is_pci(d) (false)
2134 #define dev_is_pf(d) (false)
pci_acs_enabled(struct pci_dev * pdev,u16 acs_flags)2135 static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2136 { return false; }
pci_irqd_intx_xlate(struct irq_domain * d,struct device_node * node,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)2137 static inline int pci_irqd_intx_xlate(struct irq_domain *d,
2138 				      struct device_node *node,
2139 				      const u32 *intspec,
2140 				      unsigned int intsize,
2141 				      unsigned long *out_hwirq,
2142 				      unsigned int *out_type)
2143 { return -EINVAL; }
2144 
pci_match_id(const struct pci_device_id * ids,struct pci_dev * dev)2145 static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
2146 							 struct pci_dev *dev)
2147 { return NULL; }
pci_ats_disabled(void)2148 static inline bool pci_ats_disabled(void) { return true; }
2149 
pci_irq_vector(struct pci_dev * dev,unsigned int nr)2150 static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
2151 {
2152 	return -EINVAL;
2153 }
2154 
2155 static inline int
pci_alloc_irq_vectors_affinity(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags,struct irq_affinity * aff_desc)2156 pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
2157 			       unsigned int max_vecs, unsigned int flags,
2158 			       struct irq_affinity *aff_desc)
2159 {
2160 	return -ENOSPC;
2161 }
2162 static inline int
pci_alloc_irq_vectors(struct pci_dev * dev,unsigned int min_vecs,unsigned int max_vecs,unsigned int flags)2163 pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
2164 		      unsigned int max_vecs, unsigned int flags)
2165 {
2166 	return -ENOSPC;
2167 }
2168 #endif /* CONFIG_PCI */
2169 
2170 /* Include architecture-dependent settings and functions */
2171 
2172 #include <asm/pci.h>
2173 
2174 /*
2175  * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
2176  * is expected to be an offset within that region.
2177  *
2178  */
2179 int pci_mmap_resource_range(struct pci_dev *dev, int bar,
2180 			    struct vm_area_struct *vma,
2181 			    enum pci_mmap_state mmap_state, int write_combine);
2182 
2183 #ifndef arch_can_pci_mmap_wc
2184 #define arch_can_pci_mmap_wc()		0
2185 #endif
2186 
2187 #ifndef arch_can_pci_mmap_io
2188 #define arch_can_pci_mmap_io()		0
2189 #define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
2190 #else
2191 int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
2192 #endif
2193 
2194 #ifndef pci_root_bus_fwnode
2195 #define pci_root_bus_fwnode(bus)	NULL
2196 #endif
2197 
2198 /*
2199  * These helpers provide future and backwards compatibility
2200  * for accessing popular PCI BAR info
2201  */
2202 #define pci_resource_n(dev, bar)	(&(dev)->resource[(bar)])
2203 #define pci_resource_start(dev, bar)	(pci_resource_n(dev, bar)->start)
2204 #define pci_resource_end(dev, bar)	(pci_resource_n(dev, bar)->end)
2205 #define pci_resource_flags(dev, bar)	(pci_resource_n(dev, bar)->flags)
2206 #define pci_resource_len(dev,bar)					\
2207 	(pci_resource_end((dev), (bar)) ? 				\
2208 	 resource_size(pci_resource_n((dev), (bar))) : 0)
2209 
2210 #define __pci_dev_for_each_res0(dev, res, ...)				  \
2211 	for (unsigned int __b = 0;					  \
2212 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2213 	     __b++)
2214 
2215 #define __pci_dev_for_each_res1(dev, res, __b)				  \
2216 	for (__b = 0;							  \
2217 	     __b < PCI_NUM_RESOURCES && (res = pci_resource_n(dev, __b)); \
2218 	     __b++)
2219 
2220 #define pci_dev_for_each_resource(dev, res, ...)			\
2221 	CONCATENATE(__pci_dev_for_each_res, COUNT_ARGS(__VA_ARGS__)) 	\
2222 		    (dev, res, __VA_ARGS__)
2223 
2224 /*
2225  * Similar to the helpers above, these manipulate per-pci_dev
2226  * driver-specific data.  They are really just a wrapper around
2227  * the generic device structure functions of these calls.
2228  */
pci_get_drvdata(struct pci_dev * pdev)2229 static inline void *pci_get_drvdata(struct pci_dev *pdev)
2230 {
2231 	return dev_get_drvdata(&pdev->dev);
2232 }
2233 
pci_set_drvdata(struct pci_dev * pdev,void * data)2234 static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
2235 {
2236 	dev_set_drvdata(&pdev->dev, data);
2237 }
2238 
pci_name(const struct pci_dev * pdev)2239 static inline const char *pci_name(const struct pci_dev *pdev)
2240 {
2241 	return dev_name(&pdev->dev);
2242 }
2243 
2244 void pci_resource_to_user(const struct pci_dev *dev, int bar,
2245 			  const struct resource *rsrc,
2246 			  resource_size_t *start, resource_size_t *end);
2247 
2248 /*
2249  * The world is not perfect and supplies us with broken PCI devices.
2250  * For at least a part of these bugs we need a work-around, so both
2251  * generic (drivers/pci/quirks.c) and per-architecture code can define
2252  * fixup hooks to be called for particular buggy devices.
2253  */
2254 
2255 struct pci_fixup {
2256 	u16 vendor;			/* Or PCI_ANY_ID */
2257 	u16 device;			/* Or PCI_ANY_ID */
2258 	u32 class;			/* Or PCI_ANY_ID */
2259 	unsigned int class_shift;	/* should be 0, 8, 16 */
2260 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2261 	int hook_offset;
2262 #else
2263 	void (*hook)(struct pci_dev *dev);
2264 #endif
2265 };
2266 
2267 enum pci_fixup_pass {
2268 	pci_fixup_early,	/* Before probing BARs */
2269 	pci_fixup_header,	/* After reading configuration header */
2270 	pci_fixup_final,	/* Final phase of device fixups */
2271 	pci_fixup_enable,	/* pci_enable_device() time */
2272 	pci_fixup_resume,	/* pci_device_resume() */
2273 	pci_fixup_suspend,	/* pci_device_suspend() */
2274 	pci_fixup_resume_early, /* pci_device_resume_early() */
2275 	pci_fixup_suspend_late,	/* pci_device_suspend_late() */
2276 };
2277 
2278 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
2279 #define ___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2280 				    class_shift, hook)			\
2281 	__ADDRESSABLE(hook)						\
2282 	asm(".section "	#sec ", \"a\"				\n"	\
2283 	    ".balign	16					\n"	\
2284 	    ".short "	#vendor ", " #device "			\n"	\
2285 	    ".long "	#class ", " #class_shift "		\n"	\
2286 	    ".long "	#hook " - .				\n"	\
2287 	    ".previous						\n");
2288 
2289 /*
2290  * Clang's LTO may rename static functions in C, but has no way to
2291  * handle such renamings when referenced from inline asm. To work
2292  * around this, create global C stubs for these cases.
2293  */
2294 #ifdef CONFIG_LTO_CLANG
2295 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2296 				  class_shift, hook, stub)		\
2297 	void stub(struct pci_dev *dev);					\
2298 	void stub(struct pci_dev *dev)					\
2299 	{ 								\
2300 		hook(dev); 						\
2301 	}								\
2302 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2303 				  class_shift, stub)
2304 #else
2305 #define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2306 				  class_shift, hook, stub)		\
2307 	___DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2308 				  class_shift, hook)
2309 #endif
2310 
2311 #define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2312 				  class_shift, hook)			\
2313 	__DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class,	\
2314 				  class_shift, hook, __UNIQUE_ID(hook))
2315 #else
2316 /* Anonymous variables would be nice... */
2317 #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\
2318 				  class_shift, hook)			\
2319 	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\
2320 	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \
2321 		= { vendor, device, class, class_shift, hook };
2322 #endif
2323 
2324 #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\
2325 					 class_shift, hook)		\
2326 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2327 		hook, vendor, device, class, class_shift, hook)
2328 #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\
2329 					 class_shift, hook)		\
2330 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2331 		hook, vendor, device, class, class_shift, hook)
2332 #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\
2333 					 class_shift, hook)		\
2334 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2335 		hook, vendor, device, class, class_shift, hook)
2336 #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\
2337 					 class_shift, hook)		\
2338 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2339 		hook, vendor, device, class, class_shift, hook)
2340 #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\
2341 					 class_shift, hook)		\
2342 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2343 		resume##hook, vendor, device, class, class_shift, hook)
2344 #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\
2345 					 class_shift, hook)		\
2346 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2347 		resume_early##hook, vendor, device, class, class_shift, hook)
2348 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\
2349 					 class_shift, hook)		\
2350 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2351 		suspend##hook, vendor, device, class, class_shift, hook)
2352 #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class,	\
2353 					 class_shift, hook)		\
2354 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2355 		suspend_late##hook, vendor, device, class, class_shift, hook)
2356 
2357 #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\
2358 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\
2359 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2360 #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\
2361 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\
2362 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2363 #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\
2364 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\
2365 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2366 #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\
2367 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\
2368 		hook, vendor, device, PCI_ANY_ID, 0, hook)
2369 #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\
2370 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\
2371 		resume##hook, vendor, device, PCI_ANY_ID, 0, hook)
2372 #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\
2373 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\
2374 		resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook)
2375 #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\
2376 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\
2377 		suspend##hook, vendor, device, PCI_ANY_ID, 0, hook)
2378 #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook)		\
2379 	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late,		\
2380 		suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook)
2381 
2382 #ifdef CONFIG_PCI_QUIRKS
2383 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
2384 #else
pci_fixup_device(enum pci_fixup_pass pass,struct pci_dev * dev)2385 static inline void pci_fixup_device(enum pci_fixup_pass pass,
2386 				    struct pci_dev *dev) { }
2387 #endif
2388 
2389 int pcim_intx(struct pci_dev *pdev, int enabled);
2390 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
2391 void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
2392 void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar,
2393 				const char *name);
2394 void pcim_iounmap_region(struct pci_dev *pdev, int bar);
2395 void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
2396 void __iomem * const *pcim_iomap_table(struct pci_dev *pdev);
2397 int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
2398 int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
2399 void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
2400 				unsigned long offset, unsigned long len);
2401 
2402 extern int pci_pci_problems;
2403 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
2404 #define PCIPCI_TRITON		2
2405 #define PCIPCI_NATOMA		4
2406 #define PCIPCI_VIAETBF		8
2407 #define PCIPCI_VSFX		16
2408 #define PCIPCI_ALIMAGIK		32	/* Need low latency setting */
2409 #define PCIAGP_FAIL		64	/* No PCI to AGP DMA */
2410 
2411 extern u8 pci_dfl_cache_line_size;
2412 extern u8 pci_cache_line_size;
2413 
2414 /* Architecture-specific versions may override these (weak) */
2415 void pcibios_disable_device(struct pci_dev *dev);
2416 void pcibios_set_master(struct pci_dev *dev);
2417 int pcibios_set_pcie_reset_state(struct pci_dev *dev,
2418 				 enum pcie_reset_state state);
2419 int pcibios_device_add(struct pci_dev *dev);
2420 void pcibios_release_device(struct pci_dev *dev);
2421 #ifdef CONFIG_PCI
2422 void pcibios_penalize_isa_irq(int irq, int active);
2423 #else
pcibios_penalize_isa_irq(int irq,int active)2424 static inline void pcibios_penalize_isa_irq(int irq, int active) {}
2425 #endif
2426 int pcibios_alloc_irq(struct pci_dev *dev);
2427 void pcibios_free_irq(struct pci_dev *dev);
2428 resource_size_t pcibios_default_alignment(void);
2429 
2430 #if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
2431 extern int pci_create_resource_files(struct pci_dev *dev);
2432 extern void pci_remove_resource_files(struct pci_dev *dev);
2433 #endif
2434 
2435 #if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
2436 void __init pci_mmcfg_early_init(void);
2437 void __init pci_mmcfg_late_init(void);
2438 #else
pci_mmcfg_early_init(void)2439 static inline void pci_mmcfg_early_init(void) { }
pci_mmcfg_late_init(void)2440 static inline void pci_mmcfg_late_init(void) { }
2441 #endif
2442 
2443 int pci_ext_cfg_avail(void);
2444 
2445 void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
2446 void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
2447 
2448 #ifdef CONFIG_PCI_IOV
2449 int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
2450 int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);
2451 int pci_iov_vf_id(struct pci_dev *dev);
2452 void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver);
2453 int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
2454 void pci_disable_sriov(struct pci_dev *dev);
2455 
2456 int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id);
2457 int pci_iov_add_virtfn(struct pci_dev *dev, int id);
2458 void pci_iov_remove_virtfn(struct pci_dev *dev, int id);
2459 int pci_num_vf(struct pci_dev *dev);
2460 int pci_vfs_assigned(struct pci_dev *dev);
2461 int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
2462 int pci_sriov_get_totalvfs(struct pci_dev *dev);
2463 int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn);
2464 resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno);
2465 int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size);
2466 u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs);
2467 void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe);
2468 
2469 /* Arch may override these (weak) */
2470 int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
2471 int pcibios_sriov_disable(struct pci_dev *pdev);
2472 resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno);
2473 #else
pci_iov_virtfn_bus(struct pci_dev * dev,int id)2474 static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id)
2475 {
2476 	return -ENOSYS;
2477 }
pci_iov_virtfn_devfn(struct pci_dev * dev,int id)2478 static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
2479 {
2480 	return -ENOSYS;
2481 }
2482 
pci_iov_vf_id(struct pci_dev * dev)2483 static inline int pci_iov_vf_id(struct pci_dev *dev)
2484 {
2485 	return -ENOSYS;
2486 }
2487 
pci_iov_get_pf_drvdata(struct pci_dev * dev,struct pci_driver * pf_driver)2488 static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev,
2489 					   struct pci_driver *pf_driver)
2490 {
2491 	return ERR_PTR(-EINVAL);
2492 }
2493 
pci_enable_sriov(struct pci_dev * dev,int nr_virtfn)2494 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
2495 { return -ENODEV; }
2496 
pci_iov_sysfs_link(struct pci_dev * dev,struct pci_dev * virtfn,int id)2497 static inline int pci_iov_sysfs_link(struct pci_dev *dev,
2498 				     struct pci_dev *virtfn, int id)
2499 {
2500 	return -ENODEV;
2501 }
pci_iov_add_virtfn(struct pci_dev * dev,int id)2502 static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id)
2503 {
2504 	return -ENOSYS;
2505 }
pci_iov_remove_virtfn(struct pci_dev * dev,int id)2506 static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
2507 					 int id) { }
pci_disable_sriov(struct pci_dev * dev)2508 static inline void pci_disable_sriov(struct pci_dev *dev) { }
pci_num_vf(struct pci_dev * dev)2509 static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
pci_vfs_assigned(struct pci_dev * dev)2510 static inline int pci_vfs_assigned(struct pci_dev *dev)
2511 { return 0; }
pci_sriov_set_totalvfs(struct pci_dev * dev,u16 numvfs)2512 static inline int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
2513 { return 0; }
pci_sriov_get_totalvfs(struct pci_dev * dev)2514 static inline int pci_sriov_get_totalvfs(struct pci_dev *dev)
2515 { return 0; }
2516 #define pci_sriov_configure_simple	NULL
pci_iov_resource_size(struct pci_dev * dev,int resno)2517 static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno)
2518 { return 0; }
pci_iov_vf_bar_set_size(struct pci_dev * dev,int resno,int size)2519 static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size)
2520 { return -ENODEV; }
pci_iov_vf_bar_get_sizes(struct pci_dev * dev,int resno,int num_vfs)2521 static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs)
2522 { return 0; }
pci_vf_drivers_autoprobe(struct pci_dev * dev,bool probe)2523 static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { }
2524 #endif
2525 
2526 /**
2527  * pci_pcie_cap - get the saved PCIe capability offset
2528  * @dev: PCI device
2529  *
2530  * PCIe capability offset is calculated at PCI device initialization
2531  * time and saved in the data structure. This function returns saved
2532  * PCIe capability offset. Using this instead of pci_find_capability()
2533  * reduces unnecessary search in the PCI configuration space. If you
2534  * need to calculate PCIe capability offset from raw device for some
2535  * reasons, please use pci_find_capability() instead.
2536  */
pci_pcie_cap(struct pci_dev * dev)2537 static inline int pci_pcie_cap(struct pci_dev *dev)
2538 {
2539 	return dev->pcie_cap;
2540 }
2541 
2542 /**
2543  * pci_is_pcie - check if the PCI device is PCI Express capable
2544  * @dev: PCI device
2545  *
2546  * Returns: true if the PCI device is PCI Express capable, false otherwise.
2547  */
pci_is_pcie(struct pci_dev * dev)2548 static inline bool pci_is_pcie(struct pci_dev *dev)
2549 {
2550 	return pci_pcie_cap(dev);
2551 }
2552 
2553 /**
2554  * pcie_caps_reg - get the PCIe Capabilities Register
2555  * @dev: PCI device
2556  */
pcie_caps_reg(const struct pci_dev * dev)2557 static inline u16 pcie_caps_reg(const struct pci_dev *dev)
2558 {
2559 	return dev->pcie_flags_reg;
2560 }
2561 
2562 /**
2563  * pci_pcie_type - get the PCIe device/port type
2564  * @dev: PCI device
2565  */
pci_pcie_type(const struct pci_dev * dev)2566 static inline int pci_pcie_type(const struct pci_dev *dev)
2567 {
2568 	return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
2569 }
2570 
2571 /**
2572  * pcie_find_root_port - Get the PCIe root port device
2573  * @dev: PCI device
2574  *
2575  * Traverse up the parent chain and return the PCIe Root Port PCI Device
2576  * for a given PCI/PCIe Device.
2577  */
pcie_find_root_port(struct pci_dev * dev)2578 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
2579 {
2580 	while (dev) {
2581 		if (pci_is_pcie(dev) &&
2582 		    pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2583 			return dev;
2584 		dev = pci_upstream_bridge(dev);
2585 	}
2586 
2587 	return NULL;
2588 }
2589 
pci_dev_is_disconnected(const struct pci_dev * dev)2590 static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
2591 {
2592 	/*
2593 	 * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
2594 	 * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
2595 	 * the value (e.g. inside the loop in pci_dev_wait()).
2596 	 */
2597 	return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
2598 }
2599 
2600 void pci_request_acs(void);
2601 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
2602 bool pci_acs_path_enabled(struct pci_dev *start,
2603 			  struct pci_dev *end, u16 acs_flags);
2604 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
2605 
2606 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
2607 #define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
2608 
2609 /* Large Resource Data Type Tag Item Names */
2610 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
2611 #define PCI_VPD_LTIN_RO_DATA		0x10	/* Read-Only Data */
2612 #define PCI_VPD_LTIN_RW_DATA		0x11	/* Read-Write Data */
2613 
2614 #define PCI_VPD_LRDT_ID_STRING		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_ID_STRING)
2615 #define PCI_VPD_LRDT_RO_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA)
2616 #define PCI_VPD_LRDT_RW_DATA		PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
2617 
2618 #define PCI_VPD_RO_KEYWORD_PARTNO	"PN"
2619 #define PCI_VPD_RO_KEYWORD_SERIALNO	"SN"
2620 #define PCI_VPD_RO_KEYWORD_MFR_ID	"MN"
2621 #define PCI_VPD_RO_KEYWORD_VENDOR0	"V0"
2622 #define PCI_VPD_RO_KEYWORD_CHKSUM	"RV"
2623 
2624 /**
2625  * pci_vpd_alloc - Allocate buffer and read VPD into it
2626  * @dev: PCI device
2627  * @size: pointer to field where VPD length is returned
2628  *
2629  * Returns pointer to allocated buffer or an ERR_PTR in case of failure
2630  */
2631 void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size);
2632 
2633 /**
2634  * pci_vpd_find_id_string - Locate id string in VPD
2635  * @buf: Pointer to buffered VPD data
2636  * @len: The length of the buffer area in which to search
2637  * @size: Pointer to field where length of id string is returned
2638  *
2639  * Returns the index of the id string or -ENOENT if not found.
2640  */
2641 int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size);
2642 
2643 /**
2644  * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section
2645  * @buf: Pointer to buffered VPD data
2646  * @len: The length of the buffer area in which to search
2647  * @kw: The keyword to search for
2648  * @size: Pointer to field where length of found keyword data is returned
2649  *
2650  * Returns the index of the information field keyword data or -ENOENT if
2651  * not found.
2652  */
2653 int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len,
2654 				 const char *kw, unsigned int *size);
2655 
2656 /**
2657  * pci_vpd_check_csum - Check VPD checksum
2658  * @buf: Pointer to buffered VPD data
2659  * @len: VPD size
2660  *
2661  * Returns 1 if VPD has no checksum, otherwise 0 or an errno
2662  */
2663 int pci_vpd_check_csum(const void *buf, unsigned int len);
2664 
2665 /* PCI <-> OF binding helpers */
2666 #ifdef CONFIG_OF
2667 struct device_node;
2668 struct irq_domain;
2669 struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
2670 bool pci_host_of_has_msi_map(struct device *dev);
2671 
2672 /* Arch may override this (weak) */
2673 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
2674 
2675 #else	/* CONFIG_OF */
2676 static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus * bus)2677 pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
pci_host_of_has_msi_map(struct device * dev)2678 static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; }
2679 #endif  /* CONFIG_OF */
2680 
2681 static inline struct device_node *
pci_device_to_OF_node(const struct pci_dev * pdev)2682 pci_device_to_OF_node(const struct pci_dev *pdev)
2683 {
2684 	return pdev ? pdev->dev.of_node : NULL;
2685 }
2686 
pci_bus_to_OF_node(struct pci_bus * bus)2687 static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
2688 {
2689 	return bus ? bus->dev.of_node : NULL;
2690 }
2691 
2692 #ifdef CONFIG_ACPI
2693 struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
2694 
2695 void
2696 pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
2697 bool pci_pr3_present(struct pci_dev *pdev);
2698 #else
2699 static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus * bus)2700 pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
pci_pr3_present(struct pci_dev * pdev)2701 static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
2702 #endif
2703 
2704 #if defined(CONFIG_X86) && defined(CONFIG_ACPI)
2705 bool arch_pci_dev_is_removable(struct pci_dev *pdev);
2706 #else
arch_pci_dev_is_removable(struct pci_dev * pdev)2707 static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; }
2708 #endif
2709 
2710 #ifdef CONFIG_EEH
pci_dev_to_eeh_dev(struct pci_dev * pdev)2711 static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
2712 {
2713 	return pdev->dev.archdata.edev;
2714 }
2715 #endif
2716 
2717 void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
2718 bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
2719 int pci_for_each_dma_alias(struct pci_dev *pdev,
2720 			   int (*fn)(struct pci_dev *pdev,
2721 				     u16 alias, void *data), void *data);
2722 
2723 /* Helper functions for operation of device flag */
pci_set_dev_assigned(struct pci_dev * pdev)2724 static inline void pci_set_dev_assigned(struct pci_dev *pdev)
2725 {
2726 	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
2727 }
pci_clear_dev_assigned(struct pci_dev * pdev)2728 static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
2729 {
2730 	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
2731 }
pci_is_dev_assigned(struct pci_dev * pdev)2732 static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
2733 {
2734 	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
2735 }
2736 
2737 /**
2738  * pci_ari_enabled - query ARI forwarding status
2739  * @bus: the PCI bus
2740  *
2741  * Returns true if ARI forwarding is enabled.
2742  */
pci_ari_enabled(struct pci_bus * bus)2743 static inline bool pci_ari_enabled(struct pci_bus *bus)
2744 {
2745 	return bus->self && bus->self->ari_enabled;
2746 }
2747 
2748 /**
2749  * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
2750  * @pdev: PCI device to check
2751  *
2752  * Walk upwards from @pdev and check for each encountered bridge if it's part
2753  * of a Thunderbolt controller.  Reaching the host bridge means @pdev is not
2754  * Thunderbolt-attached.  (But rather soldered to the mainboard usually.)
2755  */
pci_is_thunderbolt_attached(struct pci_dev * pdev)2756 static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
2757 {
2758 	struct pci_dev *parent = pdev;
2759 
2760 	if (pdev->is_thunderbolt)
2761 		return true;
2762 
2763 	while ((parent = pci_upstream_bridge(parent)))
2764 		if (parent->is_thunderbolt)
2765 			return true;
2766 
2767 	return false;
2768 }
2769 
2770 #if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
2771 void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
2772 #endif
2773 
2774 #include <linux/dma-mapping.h>
2775 
2776 #define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
2777 #define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
2778 #define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
2779 #define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
2780 #define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
2781 #define pci_warn_once(pdev, fmt, arg...) dev_warn_once(&(pdev)->dev, fmt, ##arg)
2782 #define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
2783 #define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
2784 #define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
2785 
2786 #define pci_notice_ratelimited(pdev, fmt, arg...) \
2787 	dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
2788 
2789 #define pci_info_ratelimited(pdev, fmt, arg...) \
2790 	dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
2791 
2792 #define pci_WARN(pdev, condition, fmt, arg...) \
2793 	WARN(condition, "%s %s: " fmt, \
2794 	     dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2795 
2796 #define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
2797 	WARN_ONCE(condition, "%s %s: " fmt, \
2798 		  dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
2799 
2800 #endif /* LINUX_PCI_H */
2801