xref: /freebsd/sys/compat/linuxkpi/common/include/linux/pci.h (revision 91e54839545f6cb91bb1a5f768ddd6c1e2420b48)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  * Copyright (c) 2020-2025 The FreeBSD Foundation
8  *
9  * Portions of this software were developed by Björn Zeeb
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice unmodified, this list of conditions, and the following
17  *    disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 #ifndef	_LINUXKPI_LINUX_PCI_H_
34 #define	_LINUXKPI_LINUX_PCI_H_
35 
36 #define	CONFIG_PCI_MSI
37 
38 #include <linux/types.h>
39 #include <linux/device/driver.h>
40 
41 #include <sys/param.h>
42 #include <sys/bus.h>
43 #include <sys/module.h>
44 #include <sys/nv.h>
45 #include <sys/pciio.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pcireg.h>
48 #include <dev/pci/pci_private.h>
49 
50 #include <machine/resource.h>
51 
52 #include <linux/list.h>
53 #include <linux/dmapool.h>
54 #include <linux/dma-mapping.h>
55 #include <linux/compiler.h>
56 #include <linux/errno.h>
57 #include <asm/atomic.h>
58 #include <asm/memtype.h>
59 #include <linux/device.h>
60 #include <linux/pci_ids.h>
61 #include <linux/pm.h>
62 
63 /*
64  * <linux/ioport.h> should be included here, like Linux, but we can't have that
65  * because Linux `struct resource` definition would conflict with FreeBSD
66  * native definition.
67  *
68  * At least the amdgpu DRM driver (amdgpu_isp.c at the time of this writing)
69  * relies on this indirect include to get the definition of Linux `struct
70  * resource`. As a workaround, we include <linux/ioport.h> from
71  * <linux/mfd/core.h>.
72  */
73 
74 #include <linux/kernel.h>	/* pr_debug */
75 
/*
 * PCI device matching entry, mirroring Linux `struct pci_device_id`.
 * Drivers provide arrays of these; PCI_ANY_ID acts as a wildcard.
 */
struct pci_device_id {
	uint32_t	vendor;		/* Vendor ID to match (or PCI_ANY_ID). */
	uint32_t	device;		/* Device ID to match (or PCI_ANY_ID). */
	uint32_t	subvendor;	/* Subsystem vendor ID (or PCI_ANY_ID). */
	uint32_t	subdevice;	/* Subsystem device ID (or PCI_ANY_ID). */
	uint32_t	class;		/* Device class to match. */
	uint32_t	class_mask;	/* Which bits of `class` to compare. */
	uintptr_t	driver_data;	/* Opaque per-entry value for the driver. */
};
85 
86 #define	MODULE_DEVICE_TABLE_BUS_pci(_bus, _table)			\
87 MODULE_PNP_INFO("U32:vendor;U32:device;V32:subvendor;V32:subdevice",	\
88     _bus, lkpi_ ## _table, _table, nitems(_table) - 1)
89 
90 #define	PCI_ANY_ID			-1U
91 
92 #define PCI_DEVFN(slot, func)   ((((slot) & 0x1f) << 3) | ((func) & 0x07))
93 #define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
94 #define PCI_FUNC(devfn)		((devfn) & 0x07)
95 #define	PCI_BUS_NUM(devfn)	(((devfn) >> 8) & 0xff)
96 #define	PCI_DEVID(bus, devfn)	((((uint16_t)(bus)) << 8) | (devfn))
97 
98 #define PCI_VDEVICE(_vendor, _device)					\
99 	    .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device),	\
100 	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
101 #define	PCI_DEVICE(_vendor, _device)					\
102 	    .vendor = (_vendor), .device = (_device),			\
103 	    .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
104 
105 #define	to_pci_dev(n)	container_of(n, struct pci_dev, dev)
106 
107 #define	PCI_STD_NUM_BARS	6
108 #define	PCI_BASE_ADDRESS_0	PCIR_BARS
109 #define	PCI_BASE_ADDRESS_MEM_TYPE_64	PCIM_BAR_MEM_64
110 #define	PCI_VENDOR_ID		PCIR_VENDOR
111 #define	PCI_DEVICE_ID		PCIR_DEVICE
112 #define	PCI_COMMAND		PCIR_COMMAND
113 #define	PCI_COMMAND_INTX_DISABLE	PCIM_CMD_INTxDIS
114 #define	PCI_COMMAND_MEMORY	PCIM_CMD_MEMEN
115 #define	PCI_PRIMARY_BUS		PCIR_PRIBUS_1
116 #define	PCI_SECONDARY_BUS	PCIR_SECBUS_1
117 #define	PCI_SUBORDINATE_BUS	PCIR_SUBBUS_1
118 #define	PCI_SEC_LATENCY_TIMER	PCIR_SECLAT_1
119 #define	PCI_EXP_DEVCTL		PCIER_DEVICE_CTL		/* Device Control */
120 #define	PCI_EXP_LNKCTL		PCIER_LINK_CTL			/* Link Control */
121 #define	PCI_EXP_LNKCTL_ASPM_L0S	PCIEM_LINK_CTL_ASPMC_L0S
122 #define	PCI_EXP_LNKCTL_ASPM_L1	PCIEM_LINK_CTL_ASPMC_L1
123 #define PCI_EXP_LNKCTL_ASPMC	PCIEM_LINK_CTL_ASPMC
124 #define	PCI_EXP_LNKCTL_CLKREQ_EN PCIEM_LINK_CTL_ECPM		/* Enable clock PM */
125 #define PCI_EXP_LNKCTL_HAWD	PCIEM_LINK_CTL_HAWD
126 #define	PCI_EXP_FLAGS_TYPE	PCIEM_FLAGS_TYPE		/* Device/Port type */
127 #define	PCI_EXP_DEVCAP		PCIER_DEVICE_CAP		/* Device capabilities */
128 #define	PCI_EXP_DEVSTA		PCIER_DEVICE_STA		/* Device Status */
129 #define	PCI_EXP_LNKCAP		PCIER_LINK_CAP			/* Link Capabilities */
130 #define	PCI_EXP_LNKSTA		PCIER_LINK_STA			/* Link Status */
131 #define	PCI_EXP_SLTCAP		PCIER_SLOT_CAP			/* Slot Capabilities */
132 #define	PCI_EXP_SLTCTL		PCIER_SLOT_CTL			/* Slot Control */
133 #define	PCI_EXP_SLTSTA		PCIER_SLOT_STA			/* Slot Status */
134 #define	PCI_EXP_RTCTL		PCIER_ROOT_CTL			/* Root Control */
135 #define	PCI_EXP_RTCAP		PCIER_ROOT_CAP			/* Root Capabilities */
136 #define	PCI_EXP_RTSTA		PCIER_ROOT_STA			/* Root Status */
137 #define	PCI_EXP_DEVCAP2		PCIER_DEVICE_CAP2		/* Device Capabilities 2 */
138 #define	PCI_EXP_DEVCTL2		PCIER_DEVICE_CTL2		/* Device Control 2 */
139 #define	PCI_EXP_DEVCTL2_LTR_EN	PCIEM_CTL2_LTR_ENABLE
140 #define	PCI_EXP_DEVCTL2_COMP_TMOUT_DIS	PCIEM_CTL2_COMP_TIMO_DISABLE
141 #define	PCI_EXP_LNKCAP2		PCIER_LINK_CAP2			/* Link Capabilities 2 */
142 #define	PCI_EXP_LNKCTL2		PCIER_LINK_CTL2			/* Link Control 2 */
143 #define	PCI_EXP_LNKSTA2		PCIER_LINK_STA2			/* Link Status 2 */
144 #define	PCI_EXP_FLAGS		PCIER_FLAGS			/* Capabilities register */
145 #define	PCI_EXP_FLAGS_VERS	PCIEM_FLAGS_VERSION		/* Capability version */
146 #define	PCI_EXP_TYPE_ROOT_PORT	PCIEM_TYPE_ROOT_PORT		/* Root Port */
147 #define	PCI_EXP_TYPE_ENDPOINT	PCIEM_TYPE_ENDPOINT		/* Express Endpoint */
148 #define	PCI_EXP_TYPE_LEG_END	PCIEM_TYPE_LEGACY_ENDPOINT	/* Legacy Endpoint */
149 #define	PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT	/* Downstream Port */
150 #define	PCI_EXP_FLAGS_SLOT	PCIEM_FLAGS_SLOT		/* Slot implemented */
151 #define	PCI_EXP_TYPE_RC_EC	PCIEM_TYPE_ROOT_EC		/* Root Complex Event Collector */
152 #define	PCI_EXP_LNKSTA_CLS	PCIEM_LINK_STA_SPEED
153 #define	PCI_EXP_LNKSTA_CLS_8_0GB	0x0003	/* Current Link Speed 8.0GT/s */
154 #define	PCI_EXP_LNKCAP_SLS_2_5GB 0x01	/* Supported Link Speed 2.5GT/s */
155 #define	PCI_EXP_LNKCAP_SLS_5_0GB 0x02	/* Supported Link Speed 5.0GT/s */
156 #define	PCI_EXP_LNKCAP_SLS_8_0GB 0x03	/* Supported Link Speed 8.0GT/s */
157 #define	PCI_EXP_LNKCAP_SLS_16_0GB 0x04	/* Supported Link Speed 16.0GT/s */
158 #define	PCI_EXP_LNKCAP_SLS_32_0GB 0x05	/* Supported Link Speed 32.0GT/s */
159 #define	PCI_EXP_LNKCAP_SLS_64_0GB 0x06	/* Supported Link Speed 64.0GT/s */
160 #define	PCI_EXP_LNKCAP_MLW	0x03f0	/* Maximum Link Width */
161 #define	PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
162 #define	PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
163 #define	PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */
164 #define	PCI_EXP_LNKCAP2_SLS_16_0GB 0x10	/* Supported Link Speed 16.0GT/s */
165 #define	PCI_EXP_LNKCAP2_SLS_32_0GB 0x20	/* Supported Link Speed 32.0GT/s */
166 #define	PCI_EXP_LNKCAP2_SLS_64_0GB 0x40	/* Supported Link Speed 64.0GT/s */
167 #define	PCI_EXP_LNKCTL2_TLS		0x000f
168 #define	PCI_EXP_LNKCTL2_TLS_2_5GT	0x0001	/* Supported Speed 2.5GT/s */
169 #define	PCI_EXP_LNKCTL2_TLS_5_0GT	0x0002	/* Supported Speed 5GT/s */
170 #define	PCI_EXP_LNKCTL2_TLS_8_0GT	0x0003	/* Supported Speed 8GT/s */
171 #define	PCI_EXP_LNKCTL2_TLS_16_0GT	0x0004	/* Supported Speed 16GT/s */
172 #define	PCI_EXP_LNKCTL2_TLS_32_0GT	0x0005	/* Supported Speed 32GT/s */
173 #define	PCI_EXP_LNKCTL2_TLS_64_0GT	0x0006	/* Supported Speed 64GT/s */
174 #define	PCI_EXP_LNKCTL2_ENTER_COMP	0x0010	/* Enter Compliance */
175 #define	PCI_EXP_LNKCTL2_TX_MARGIN	0x0380	/* Transmit Margin */
176 
177 #define	PCI_MSI_ADDRESS_LO	PCIR_MSI_ADDR
178 #define	PCI_MSI_ADDRESS_HI	PCIR_MSI_ADDR_HIGH
179 #define	PCI_MSI_FLAGS		PCIR_MSI_CTRL
180 #define	PCI_MSI_FLAGS_ENABLE	PCIM_MSICTRL_MSI_ENABLE
181 #define	PCI_MSIX_FLAGS		PCIR_MSIX_CTRL
182 #define	PCI_MSIX_FLAGS_ENABLE	PCIM_MSIXCTRL_MSIX_ENABLE
183 
184 #define PCI_EXP_LNKCAP_CLKPM	0x00040000
185 #define PCI_EXP_DEVSTA_TRPND	0x0020
186 
187 #define	IORESOURCE_MEM	(1 << SYS_RES_MEMORY)
188 #define	IORESOURCE_IO	(1 << SYS_RES_IOPORT)
189 #define	IORESOURCE_IRQ	(1 << SYS_RES_IRQ)
190 
191 enum pci_bus_speed {
192 	PCI_SPEED_UNKNOWN = -1,
193 	PCIE_SPEED_2_5GT,
194 	PCIE_SPEED_5_0GT,
195 	PCIE_SPEED_8_0GT,
196 	PCIE_SPEED_16_0GT,
197 	PCIE_SPEED_32_0GT,
198 	PCIE_SPEED_64_0GT,
199 };
200 
201 enum pcie_link_width {
202 	PCIE_LNK_WIDTH_RESRV	= 0x00,
203 	PCIE_LNK_X1		= 0x01,
204 	PCIE_LNK_X2		= 0x02,
205 	PCIE_LNK_X4		= 0x04,
206 	PCIE_LNK_X8		= 0x08,
207 	PCIE_LNK_X12		= 0x0c,
208 	PCIE_LNK_X16		= 0x10,
209 	PCIE_LNK_X32		= 0x20,
210 	PCIE_LNK_WIDTH_UNKNOWN	= 0xff,
211 };
212 
213 #define	PCIE_LINK_STATE_L0S		0x00000001
214 #define	PCIE_LINK_STATE_L1		0x00000002
215 #define	PCIE_LINK_STATE_CLKPM		0x00000004
216 
217 typedef int pci_power_t;
218 
219 #define PCI_D0		PCI_POWERSTATE_D0
220 #define PCI_D1		PCI_POWERSTATE_D1
221 #define PCI_D2		PCI_POWERSTATE_D2
222 #define PCI_D3hot	PCI_POWERSTATE_D3_HOT
223 #define PCI_D3cold	PCI_POWERSTATE_D3_COLD
224 
225 #define PCI_POWER_ERROR	PCI_POWERSTATE_UNKNOWN
226 
227 extern const char *pci_power_names[6];
228 
229 #define	PCI_ERR_UNCOR_STATUS		PCIR_AER_UC_STATUS
230 #define	PCI_ERR_COR_STATUS		PCIR_AER_COR_STATUS
231 #define	PCI_ERR_ROOT_COMMAND		PCIR_AER_ROOTERR_CMD
232 #define	PCI_ERR_ROOT_ERR_SRC		PCIR_AER_COR_SOURCE_ID
233 
234 #define	PCI_EXT_CAP_ID_ERR		PCIZ_AER
235 #define	PCI_EXT_CAP_ID_L1SS		PCIZ_L1PM
236 
237 #define	PCI_L1SS_CTL1			0x8
238 #define	PCI_L1SS_CTL1_L1SS_MASK		0xf
239 
240 #define	PCI_IRQ_INTX			0x01
241 #define	PCI_IRQ_MSI			0x02
242 #define	PCI_IRQ_MSIX			0x04
243 #define	PCI_IRQ_ALL_TYPES		(PCI_IRQ_MSIX|PCI_IRQ_MSI|PCI_IRQ_INTX)
244 
245 #if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION <= 61000)
246 #define	PCI_IRQ_LEGACY			PCI_IRQ_INTX
247 #endif
248 
249 /*
250  * Linux PCI code uses `PCI_SET_ERROR_RESPONSE()` to indicate to the caller of
251  * a `pci_read_*()` function that the read failed. An example of failure is
252  * whether the device was disconnected. It is a bit weird because Linux
253  * `pci_read_*()` can return an error value, as the read value is stored in a
254  * integer passed by pointer.
255  *
256  * We don't set PCI_ERROR_RESPONSE anywhere as of this commit, but the DRM
257  * drivers started to use `PCI_POSSIBLE_ERROR()`.
258  */
259 #define	PCI_ERROR_RESPONSE		(~0ULL)
260 #define	PCI_SET_ERROR_RESPONSE(val)	(*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
261 #define	PCI_POSSIBLE_ERROR(val)		((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
262 
263 struct pci_dev;
264 
/*
 * LinuxKPI PCI driver description.  The leading members mirror Linux
 * `struct pci_driver`; the `bsd*` members carry the newbus glue used to
 * register this driver with FreeBSD's PCI bus (see linux_pci.c).
 */
struct pci_driver {
	struct list_head		node;	/* Linkage (see pci_drivers list). */
	char				*name;
	const struct pci_device_id		*id_table;
	int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
	void (*remove)(struct pci_dev *dev);
	int  (*suspend) (struct pci_dev *dev, pm_message_t state);	/* Device suspended */
	int  (*resume) (struct pci_dev *dev);		/* Device woken up */
	void (*shutdown) (struct pci_dev *dev);		/* Device shutdown */
	driver_t			bsddriver;	/* FreeBSD newbus driver glue. */
	devclass_t			bsdclass;	/* FreeBSD devclass. */
	struct device_driver		driver;
	const struct pci_error_handlers       *err_handler;
	bool				isdrm;		/* Registered via the DRM path. */
	int				bsd_probe_return;	/* NOTE(review): presumably the value the
							 * newbus probe glue returns; set elsewhere. */
	int  (*bsd_iov_init)(device_t dev, uint16_t num_vfs,
	    const nvlist_t *pf_config);			/* SR-IOV PF initialization hook. */
	void  (*bsd_iov_uninit)(device_t dev);		/* SR-IOV PF teardown hook. */
	int  (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum,
	    const nvlist_t *vf_config);			/* SR-IOV per-VF creation hook. */
};
286 
/*
 * Minimal Linux-compatible PCI bus representation.
 */
struct pci_bus {
	struct pci_dev	*self;		/* Bridge device for this bus; NULL on a root bus. */
	/* struct pci_bus	*parent */
	int		domain;		/* PCI domain (segment) number. */
	int		number;		/* Bus number within the domain. */
};
293 
294 extern struct list_head pci_drivers;
295 extern struct list_head pci_devices;
296 extern spinlock_t pci_lock;
297 
298 #define	__devexit_p(x)	x
299 
300 #define	module_pci_driver(_drv)						\
301     module_driver(_drv, linux_pci_register_driver, linux_pci_unregister_driver)
302 
/* MSI message payload; only the data word is tracked here. */
struct msi_msg {
	uint32_t			data;
};

/* Per-device MSI attributes, Linux `struct pci_msi_desc` compatible. */
struct pci_msi_desc {
	struct {
		bool			is_64;	/* 64-bit MSI address capable. */
	} msi_attrib;
};

/* One MSI descriptor: message plus PCI-specific attributes. */
struct msi_desc {
	struct msi_msg			msg;
	struct pci_msi_desc		pci;
};

/* MSI-X table entry/vector pair, as used by pci_enable_msix(). */
struct msix_entry {
	int entry;	/* Index into the MSI-X table. */
	int vector;	/* Assigned interrupt vector. */
};
322 
323 /*
324  * If we find drivers accessing this from multiple KPIs we may have to
325  * refcount objects of this structure.
326  */
struct resource;
/* One mapped/allocated BAR region, tracked on pci_dev::mmio. */
struct pci_mmio_region {
	TAILQ_ENTRY(pci_mmio_region)	next;	/* List linkage. */
	struct resource			*res;	/* Backing FreeBSD resource. */
	int				rid;	/* Resource ID (BAR register). */
	int				type;	/* SYS_RES_MEMORY or SYS_RES_IOPORT. */
};
334 
/*
 * LinuxKPI view of a PCI device.  `dev` embeds the LinuxKPI `struct
 * device`, whose `bsddev` member is the backing newbus device_t used by
 * most wrappers in this file.
 */
struct pci_dev {
	struct device		dev;		/* Embedded LinuxKPI device. */
	struct list_head	links;		/* Linkage (see pci_devices list). */
	struct pci_driver	*pdrv;		/* Bound LinuxKPI driver, if any. */
	struct pci_bus		*bus;		/* Bus this device sits on. */
	struct pci_dev		*root;		/* NOTE(review): presumably cached root
						 * port (cf. pcie_find_root_port()). */
	pci_power_t		current_state;	/* Last set power state. */
	uint16_t		device;		/* PCI device ID. */
	uint16_t		vendor;		/* PCI vendor ID. */
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	unsigned int		irq;		/* Currently active IRQ number. */
	unsigned int		devfn;		/* Encoded slot/function, see PCI_DEVFN(). */
	uint32_t		class;		/* PCI class code. */
	uint8_t			revision;	/* Revision ID. */
	uint8_t			msi_cap;	/* MSI capability offset. */
	uint8_t			msix_cap;	/* MSI-X capability offset. */
	bool			managed;	/* devres "pcim_*()" management. */
	bool			want_iomap_res;
	bool			msi_enabled;	/* MSI currently enabled. */
	bool			msix_enabled;	/* MSI-X currently enabled. */
	phys_addr_t		rom;		/* Expansion ROM physical address. */
	size_t			romlen;		/* Expansion ROM length. */
	struct msi_desc		**msi_desc;	/* Allocated MSI descriptors. */
	char			*path_name;	/* Device path string, see pci_name(). */
	spinlock_t		pcie_cap_lock;	/* Serializes PCIe capability RMW. */

	TAILQ_HEAD(, pci_mmio_region)	mmio;	/* Tracked BAR mappings. */
};
364 
365 int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
366     unsigned int flags);
367 bool pci_device_is_present(struct pci_dev *pdev);
368 
369 int linuxkpi_pcim_enable_device(struct pci_dev *pdev);
370 void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
371 void *linuxkpi_pci_iomap_range(struct pci_dev *, int,
372     unsigned long, unsigned long);
373 void *linuxkpi_pci_iomap(struct pci_dev *, int, unsigned long);
374 void *linuxkpi_pcim_iomap(struct pci_dev *, int, unsigned long);
375 void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
376 int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
377     const char *name);
378 int linuxkpi_pci_request_region(struct pci_dev *, int, const char *);
379 int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
380 int linuxkpi_pcim_request_all_regions(struct pci_dev *, const char *);
381 void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
382 void linuxkpi_pci_release_regions(struct pci_dev *pdev);
383 int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
384     int nreq);
385 
386 /* Internal helper function(s). */
387 struct pci_dev *lkpinew_pci_dev(device_t);
388 void lkpi_pci_devres_release(struct device *, void *);
389 struct pci_dev *lkpi_pci_get_device(uint32_t, uint32_t, struct pci_dev *);
390 struct msi_desc *lkpi_pci_msi_desc_alloc(int);
391 struct device *lkpi_pci_find_irq_dev(unsigned int irq);
392 int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec);
393 
394 #define	pci_err(pdev, fmt, ...)						\
395     dev_err(&(pdev)->dev, fmt, ##__VA_ARGS__)
396 #define	pci_info(pdev, fmt, ...)					\
397     dev_info(&(pdev)->dev, fmt, ##__VA_ARGS__)
398 
/*
 * Return true when the LinuxKPI device is backed by a FreeBSD device
 * whose devclass is "pci".
 */
static inline bool
dev_is_pci(struct device *dev)
{

	return (device_get_devclass(dev->bsddev) == devclass_find("pci"));
}
405 
/*
 * Combine bus number and devfn into the 16-bit Linux-style device ID,
 * see PCI_DEVID().
 */
static inline uint16_t
pci_dev_id(struct pci_dev *pdev)
{
	return (PCI_DEVID(pdev->bus->number, pdev->devfn));
}
411 
412 static inline int
pci_resource_type(struct pci_dev * pdev,int bar)413 pci_resource_type(struct pci_dev *pdev, int bar)
414 {
415 	struct pci_map *pm;
416 
417 	pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
418 	if (!pm)
419 		return (-1);
420 
421 	if (PCI_BAR_IO(pm->pm_value))
422 		return (SYS_RES_IOPORT);
423 	else
424 		return (SYS_RES_MEMORY);
425 }
426 
427 /*
428  * All drivers just seem to want to inspect the type not flags.
429  */
/*
 * Return the IORESOURCE_* flag for a BAR (a single bit derived from the
 * FreeBSD resource type), or 0 when the BAR does not exist.
 */
static inline int
pci_resource_flags(struct pci_dev *pdev, int bar)
{
	int rtype;

	rtype = pci_resource_type(pdev, bar);
	return (rtype < 0 ? 0 : (1 << rtype));
}
440 
441 static inline const char *
pci_name(struct pci_dev * d)442 pci_name(struct pci_dev *d)
443 {
444 	return d->path_name;
445 }
446 
/* Fetch the driver-private pointer stored on the embedded device. */
static inline void *
pci_get_drvdata(struct pci_dev *pdev)
{

	return dev_get_drvdata(&pdev->dev);
}
453 
/* Store a driver-private pointer on the embedded device. */
static inline void
pci_set_drvdata(struct pci_dev *pdev, void *data)
{

	dev_set_drvdata(&pdev->dev, data);
}
460 
461 static inline struct pci_dev *
pci_dev_get(struct pci_dev * pdev)462 pci_dev_get(struct pci_dev *pdev)
463 {
464 
465 	if (pdev != NULL)
466 		get_device(&pdev->dev);
467 	return (pdev);
468 }
469 
470 static __inline void
pci_dev_put(struct pci_dev * pdev)471 pci_dev_put(struct pci_dev *pdev)
472 {
473 
474 	if (pdev != NULL)
475 		put_device(&pdev->dev);
476 }
477 
/*
 * Enable I/O-port and memory decoding for the device.  Always succeeds
 * (Linux callers expect an int return).
 */
static inline int
pci_enable_device(struct pci_dev *pdev)
{

	pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
	pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
	return (0);
}
486 
/*
 * Disable the device.  Note this only turns off bus mastering; decoding
 * enabled by pci_enable_device() is left as-is.
 */
static inline void
pci_disable_device(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
}
493 
/* Enable bus mastering; always returns 0. */
static inline int
pci_set_master(struct pci_dev *pdev)
{

	pci_enable_busmaster(pdev->dev.bsddev);
	return (0);
}
501 
/*
 * Set the device power state (PCI_D0..PCI_D3cold map directly onto
 * FreeBSD PCI_POWERSTATE_* values).  Always returns 0; the native call's
 * result is not propagated.
 */
static inline int
pci_set_power_state(struct pci_dev *pdev, int state)
{

	pci_set_powerstate(pdev->dev.bsddev, state);
	return (0);
}
509 
/* Disable bus mastering; always returns 0. */
static inline int
pci_clear_master(struct pci_dev *pdev)
{

	pci_disable_busmaster(pdev->dev.bsddev);
	return (0);
}
517 
/* A root bus is one without a bridge device (`self`) above it. */
static inline bool
pci_is_root_bus(struct pci_bus *pbus)
{

	return (pbus->self == NULL);
}
524 
/*
 * Return the bridge upstream of `pdev`, lazily creating a pci_dev for it
 * when `bus->self` still points at the device itself.  Returns NULL for
 * devices on the root bus.  On any lookup failure the (self-referencing)
 * `bus->self` is returned unchanged.
 */
static inline struct pci_dev *
pci_upstream_bridge(struct pci_dev *pdev)
{

	if (pci_is_root_bus(pdev->bus))
		return (NULL);

	/*
	 * If we do not have a (proper) "upstream bridge" set, e.g., we point
	 * to ourselves, try to handle this case on the fly like we do
	 * for pcie_find_root_port().
	 */
	if (pdev == pdev->bus->self) {
		device_t bridge;

		/*
		 * In the case of DRM drivers, the passed device is a child of
		 * `vgapci`. We want to start the lookup from `vgapci`, so the
		 * parent of the passed `drmn`.
		 *
		 * We can use the `isdrm` flag to determine this.
		 */
		bridge = pdev->dev.bsddev;
		if (pdev->pdrv != NULL && pdev->pdrv->isdrm)
			bridge = device_get_parent(bridge);
		if (bridge == NULL)
			goto done;

		/* Walk up two newbus levels to reach the candidate bridge. */
		bridge = device_get_parent(bridge);
		if (bridge == NULL)
			goto done;
		bridge = device_get_parent(bridge);
		if (bridge == NULL)
			goto done;
		/* Only accept the candidate if its parent is a PCI bus. */
		if (device_get_devclass(device_get_parent(bridge)) !=
		    devclass_find("pci"))
			goto done;

		/*
		 * "bridge" is a PCI-to-PCI bridge.  Create a Linux pci_dev
		 * for it so it can be returned.
		 */
		pdev->bus->self = lkpinew_pci_dev(bridge);
	}
done:
	return (pdev->bus->self);
}
572 
573 #define	pci_request_region(pdev, bar, res_name)				\
574     linuxkpi_pci_request_region(pdev, bar, res_name)
575 #define	pci_release_region(pdev, bar)					\
576     linuxkpi_pci_release_region(pdev, bar)
577 #define	pci_request_regions(pdev, res_name)				\
578     linuxkpi_pci_request_regions(pdev, res_name)
579 #define	pci_release_regions(pdev)					\
580     linuxkpi_pci_release_regions(pdev)
581 #define	pcim_request_all_regions(pdev, name)				\
582     linuxkpi_pcim_request_all_regions(pdev, name)
583 
/*
 * Release MSI-X resources and mark them disabled on the pci_dev.
 */
static inline void
lkpi_pci_disable_msix(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	/*
	 * The MSIX IRQ numbers associated with this PCI device are no
	 * longer valid and might be re-assigned. Make sure
	 * lkpi_pci_find_irq_dev() does no longer see them by
	 * resetting their references to zero:
	 */
	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
	pdev->msix_enabled = false;
}
600 /* Only for consistency. No conflict on that one. */
601 #define	pci_disable_msix(pdev)		lkpi_pci_disable_msix(pdev)
602 
/*
 * Release MSI resources, restore the device's base IRQ, and mark MSI
 * disabled.  Also used to implement pci_free_irq_vectors().
 */
static inline void
lkpi_pci_disable_msi(struct pci_dev *pdev)
{

	pci_release_msi(pdev->dev.bsddev);

	/* Invalidate the MSI IRQ range and fall back to the base IRQ. */
	pdev->dev.irq_start = 0;
	pdev->dev.irq_end = 0;
	pdev->irq = pdev->dev.irq;
	pdev->msi_enabled = false;
}
614 #define	pci_disable_msi(pdev)		lkpi_pci_disable_msi(pdev)
615 #define	pci_free_irq_vectors(pdev)	lkpi_pci_disable_msi(pdev)
616 
617 unsigned long	pci_resource_start(struct pci_dev *pdev, int bar);
618 unsigned long	pci_resource_len(struct pci_dev *pdev, int bar);
619 
/*
 * Bus address of a BAR.  On FreeBSD this is simply the resource start;
 * no CPU-to-bus translation is applied here.
 */
static inline bus_addr_t
pci_bus_address(struct pci_dev *pdev, int bar)
{

	return (pci_resource_start(pdev, bar));
}
626 
627 #define	PCI_CAP_ID_EXP	PCIY_EXPRESS
628 #define	PCI_CAP_ID_PCIX	PCIY_PCIX
629 #define PCI_CAP_ID_AGP  PCIY_AGP
630 #define PCI_CAP_ID_PM   PCIY_PMG
631 
632 #define PCI_EXP_DEVCTL		PCIER_DEVICE_CTL
633 #define PCI_EXP_DEVCTL_PAYLOAD	PCIEM_CTL_MAX_PAYLOAD
634 #define PCI_EXP_DEVCTL_READRQ	PCIEM_CTL_MAX_READ_REQUEST
635 #define PCI_EXP_LNKCTL		PCIER_LINK_CTL
636 #define PCI_EXP_LNKSTA		PCIER_LINK_STA
637 
/*
 * Return the config-space offset of capability `capid`, or 0 when the
 * device does not advertise it (Linux semantics: 0 means "not found").
 */
static inline int
pci_find_capability(struct pci_dev *pdev, int capid)
{
	int reg;

	if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
		return (0);
	return (reg);
}
647 
/* Offset of the PCI Express capability, or 0 if the device has none. */
static inline int pci_pcie_cap(struct pci_dev *dev)
{
	return pci_find_capability(dev, PCI_CAP_ID_EXP);
}
652 
/*
 * Return the offset of extended capability `capid`, or 0 when the device
 * does not advertise it.
 */
static inline int
pci_find_ext_capability(struct pci_dev *pdev, int capid)
{
	int reg;

	if (pci_find_extcap(pdev->dev.bsddev, capid, &reg))
		return (0);
	return (reg);
}
662 
663 #define	PCIM_PCAP_PME_SHIFT	11
664 static __inline bool
pci_pme_capable(struct pci_dev * pdev,uint32_t flag)665 pci_pme_capable(struct pci_dev *pdev, uint32_t flag)
666 {
667 	struct pci_devinfo *dinfo;
668 	pcicfgregs *cfg;
669 
670 	if (flag > (PCIM_PCAP_D3PME_COLD >> PCIM_PCAP_PME_SHIFT))
671 		return (false);
672 
673 	dinfo = device_get_ivars(pdev->dev.bsddev);
674 	cfg = &dinfo->cfg;
675 
676 	if (cfg->pp.pp_cap == 0)
677 		return (false);
678 
679 	if ((cfg->pp.pp_cap & (1 << (PCIM_PCAP_PME_SHIFT + flag))) != 0)
680 		return (true);
681 
682 	return (false);
683 }
684 
/*
 * Disabling individual ASPM link states is not implemented: returns
 * -EPERM when ASPM is globally disabled (nothing to do is not permitted
 * to be reported as success here), -ENXIO otherwise.
 */
static inline int
pci_disable_link_state(struct pci_dev *pdev, uint32_t flags)
{

	if (!pci_enable_aspm)
		return (-EPERM);

	return (-ENXIO);
}
694 
/*
 * Linux-style config space readers.  The value is stored through `val`
 * and 0 is returned; the underlying native read cannot report failure
 * here, so these always succeed from the caller's point of view.
 */
static inline int
pci_read_config_byte(const struct pci_dev *pdev, int where, u8 *val)
{

	*val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
	return (0);
}

static inline int
pci_read_config_word(const struct pci_dev *pdev, int where, u16 *val)
{

	*val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
	return (0);
}

static inline int
pci_read_config_dword(const struct pci_dev *pdev, int where, u32 *val)
{

	*val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
	return (0);
}
718 
/*
 * Linux-style config space writers.  Always return 0; the native write
 * has no failure path visible here.
 */
static inline int
pci_write_config_byte(const struct pci_dev *pdev, int where, u8 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 1);
	return (0);
}

static inline int
pci_write_config_word(const struct pci_dev *pdev, int where, u16 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 2);
	return (0);
}

static inline int
pci_write_config_dword(const struct pci_dev *pdev, int where, u32 val)
{

	pci_write_config(pdev->dev.bsddev, where, val, 4);
	return (0);
}
742 
743 int	linux_pci_register_driver(struct pci_driver *pdrv);
744 int	linux_pci_register_drm_driver(struct pci_driver *pdrv);
745 void	linux_pci_unregister_driver(struct pci_driver *pdrv);
746 void	linux_pci_unregister_drm_driver(struct pci_driver *pdrv);
747 
748 #define	pci_register_driver(pdrv)					\
749     linux_pci_register_driver(pdrv)
750 #define	pci_unregister_driver(pdrv)					\
751     linux_pci_unregister_driver(pdrv)
752 
753 /*
754  * Enable msix, positive errors indicate actual number of available
755  * vectors.  Negative errors are failures.
756  *
757  * NB: define added to prevent this definition of pci_enable_msix from
758  * clashing with the native FreeBSD version.
759  */
760 #define	pci_enable_msix(...)						\
761     linuxkpi_pci_enable_msix(__VA_ARGS__)
762 
763 #define	pci_enable_msix_range(...)					\
764     linux_pci_enable_msix_range(__VA_ARGS__)
765 
766 static inline int
pci_enable_msix_range(struct pci_dev * dev,struct msix_entry * entries,int minvec,int maxvec)767 pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
768     int minvec, int maxvec)
769 {
770 	int nvec = maxvec;
771 	int rc;
772 
773 	if (maxvec < minvec)
774 		return (-ERANGE);
775 
776 	do {
777 		rc = pci_enable_msix(dev, entries, nvec);
778 		if (rc < 0) {
779 			return (rc);
780 		} else if (rc > 0) {
781 			if (rc < minvec)
782 				return (-ENOSPC);
783 			nvec = rc;
784 		}
785 	} while (rc);
786 	return (nvec);
787 }
788 
789 #define	pci_enable_msi(pdev)						\
790     linux_pci_enable_msi(pdev)
791 
/* Enable exactly one MSI vector. */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{

	return (_lkpi_pci_enable_msi_range(pdev, 1, 1));
}
798 
/*
 * Consider the device gone when a vendor-ID read returns the all-ones
 * invalid value (typical for a surprise-removed or powered-off device).
 */
static inline int
pci_channel_offline(struct pci_dev *pdev)
{

	return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID);
}
805 
/* SR-IOV enable is not supported through this KPI; always fails. */
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
	return -ENODEV;
}

/* Matching no-op for the stubbed pci_enable_sriov() above. */
static inline void pci_disable_sriov(struct pci_dev *dev)
{
}
814 
815 #define	pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size)		\
816     linuxkpi_pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size)
817 #define	pci_iomap(pdev, mmio_bar, mmio_size)				\
818     linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
819 #define	pcim_iomap(pdev, bar, maxlen)					\
820     linuxkpi_pcim_iomap(pdev, bar, maxlen)
821 #define	pci_iounmap(pdev, res)						\
822     linuxkpi_pci_iounmap(pdev, res)
823 
/*
 * Thin wrappers around the native save/restore; the lkpi_ names exist so
 * the #defines below can shadow the identically-named native functions.
 */
static inline void
lkpi_pci_save_state(struct pci_dev *pdev)
{

	pci_save_state(pdev->dev.bsddev);
}

static inline void
lkpi_pci_restore_state(struct pci_dev *pdev)
{

	pci_restore_state(pdev->dev.bsddev);
}
837 
838 #define pci_save_state(dev)	lkpi_pci_save_state(dev)
839 #define pci_restore_state(dev)	lkpi_pci_restore_state(dev)
840 
/*
 * Wake-up enable is not implemented; pretend success so suspend/resume
 * paths in drivers keep working.
 */
static inline int
linuxkpi_pci_enable_wake(struct pci_dev *pdev, pci_power_t state, bool ena)
{
	/*
	 * We do not currently support this in device.h either to
	 * check if the device is allowed to wake up in first place.
	 */
	pr_debug("%s: TODO\n", __func__);
	return (0);
}
851 #define	pci_enable_wake(dev, state, ena)				\
852     linuxkpi_pci_enable_wake(dev, state, ena)
853 
/* Function-level reset is not implemented. */
static inline int
pci_reset_function(struct pci_dev *pdev)
{

	return (-ENOSYS);
}
860 
861 #define DEFINE_PCI_DEVICE_TABLE(_table) \
862 	const struct pci_device_id _table[] __devinitdata
863 
/* XXX This should not be necessary. */
#define	pcix_set_mmrbc(d, v)	0
#define	pcix_get_max_mmrbc(d)	0
#define	pcie_set_readrq(d, v)	pci_set_max_read_req((d)->dev.bsddev, (v))

/* Legacy DMA direction constants; values match enum dma_data_direction. */
#define	PCI_DMA_BIDIRECTIONAL	0
#define	PCI_DMA_TODEVICE	1
#define	PCI_DMA_FROMDEVICE	2
#define	PCI_DMA_NONE		3

/*
 * Deprecated pci_* DMA API, implemented in terms of the dma_* API.
 * A NULL hwdev is forwarded as a NULL struct device pointer.
 */
#define	pci_pool		dma_pool
#define	pci_pool_destroy(...)	dma_pool_destroy(__VA_ARGS__)
#define	pci_pool_alloc(...)	dma_pool_alloc(__VA_ARGS__)
#define	pci_pool_free(...)	dma_pool_free(__VA_ARGS__)
#define	pci_pool_create(_name, _pdev, _size, _align, _alloc)		\
	    dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
#define	pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle)		\
	    dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_size, _vaddr, _dma_handle)
#define	pci_map_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_single(_hwdev, _ptr, _size, _dir)			\
	    dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev),	\
		(_ptr), (_size), (enum dma_data_direction)_dir)
#define	pci_unmap_single(_hwdev, _addr, _size, _dir)			\
	    dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_addr, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_sg(_hwdev, _sg, _nents, _dir)				\
	    dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_sg, _nents, (enum dma_data_direction)_dir)
#define	pci_map_page(_hwdev, _page, _offset, _size, _dir)		\
	    dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
		_offset, _size, (enum dma_data_direction)_dir)
#define	pci_unmap_page(_hwdev, _dma_address, _size, _dir)		\
	    dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev,	\
		_dma_address, _size, (enum dma_data_direction)_dir)
#define	pci_set_dma_mask(_pdev, mask)	dma_set_mask(&(_pdev)->dev, (mask))
#define	pci_dma_mapping_error(_pdev, _dma_addr)				\
	    dma_mapping_error(&(_pdev)->dev, _dma_addr)
#define	pci_set_consistent_dma_mask(_pdev, _mask)			\
	    dma_set_coherent_mask(&(_pdev)->dev, (_mask))
/* Declare DMA unmap bookkeeping fields inside a driver structure. */
#define	DECLARE_PCI_UNMAP_ADDR(x)	DEFINE_DMA_UNMAP_ADDR(x);
#define	DECLARE_PCI_UNMAP_LEN(x)	DEFINE_DMA_UNMAP_LEN(x);
#define	pci_unmap_addr		dma_unmap_addr
#define	pci_unmap_addr_set	dma_unmap_addr_set
#define	pci_unmap_len		dma_unmap_len
#define	pci_unmap_len_set	dma_unmap_len_set
912 
/* Typedefs used by the PCI error-recovery ("AER") callback interface. */
typedef unsigned int __bitwise pci_channel_state_t;
typedef unsigned int __bitwise pci_ers_result_t;

/* State of the link/channel to a device, as reported to error handlers. */
enum pci_channel_state {
	pci_channel_io_normal = 1,
	pci_channel_io_frozen = 2,
	pci_channel_io_perm_failure = 3,
};

/* Driver verdicts returned from pci_error_handlers callbacks. */
enum pci_ers_result {
	PCI_ERS_RESULT_NONE = 1,
	PCI_ERS_RESULT_CAN_RECOVER = 2,
	PCI_ERS_RESULT_NEED_RESET = 3,
	PCI_ERS_RESULT_DISCONNECT = 4,
	PCI_ERS_RESULT_RECOVERED = 5,
};
929 
/* PCI bus error event callbacks */
struct pci_error_handlers {
	/* Called when a bus error is detected on this device's channel. */
	pci_ers_result_t (*error_detected)(struct pci_dev *dev,
	    enum pci_channel_state error);
	/* MMIO has been re-enabled but traffic is not yet flowing. */
	pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
	/* The link has been reset. */
	pci_ers_result_t (*link_reset)(struct pci_dev *dev);
	/* The slot has been reset; driver should re-initialize the device. */
	pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
	/* Recovery finished; normal operation may resume. */
	void (*resume)(struct pci_dev *dev);
};
939 
940 /* FreeBSD does not support SRIOV - yet */
pci_physfn(struct pci_dev * dev)941 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
942 {
943 	return dev;
944 }
945 
/* A device is PCI Express iff it carries a PCIe capability. */
static inline bool
pci_is_pcie(struct pci_dev *dev)
{
	return (pci_pcie_cap(dev) != 0);
}
950 
pcie_flags_reg(struct pci_dev * dev)951 static inline u16 pcie_flags_reg(struct pci_dev *dev)
952 {
953 	int pos;
954 	u16 reg16;
955 
956 	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
957 	if (!pos)
958 		return 0;
959 
960 	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
961 
962 	return reg16;
963 }
964 
pci_pcie_type(struct pci_dev * dev)965 static inline int pci_pcie_type(struct pci_dev *dev)
966 {
967 	return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
968 }
969 
pcie_cap_version(struct pci_dev * dev)970 static inline int pcie_cap_version(struct pci_dev *dev)
971 {
972 	return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
973 }
974 
pcie_cap_has_lnkctl(struct pci_dev * dev)975 static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
976 {
977 	int type = pci_pcie_type(dev);
978 
979 	return pcie_cap_version(dev) > 1 ||
980 	       type == PCI_EXP_TYPE_ROOT_PORT ||
981 	       type == PCI_EXP_TYPE_ENDPOINT ||
982 	       type == PCI_EXP_TYPE_LEG_END;
983 }
984 
/* The Device Control register exists in every PCIe capability version. */
static inline bool
pcie_cap_has_devctl(const struct pci_dev *dev)
{
	return (true);
}
989 
pcie_cap_has_sltctl(struct pci_dev * dev)990 static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
991 {
992 	int type = pci_pcie_type(dev);
993 
994 	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
995 	    (type == PCI_EXP_TYPE_DOWNSTREAM &&
996 	    pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
997 }
998 
pcie_cap_has_rtctl(struct pci_dev * dev)999 static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
1000 {
1001 	int type = pci_pcie_type(dev);
1002 
1003 	return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
1004 	    type == PCI_EXP_TYPE_RC_EC;
1005 }
1006 
/*
 * Whether the PCIe capability register at offset @pos is implemented
 * for this device, based on device/port type and capability version
 * (mirrors Linux's pcie_capability_reg_implemented()).
 */
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	/*
	 * NOTE(review): Linux matches the register offset PCI_EXP_FLAGS
	 * (0x00) here; this matches the field mask PCI_EXP_FLAGS_TYPE
	 * (0xf0) instead — confirm this is intentional.
	 */
	case PCI_EXP_FLAGS_TYPE:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return pcie_cap_has_devctl(dev);
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	/* The "2" register block only exists in v2+ capabilities. */
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}
1041 
/*
 * Read a 32-bit register from the device's PCIe capability.  Returns
 * -EINVAL for a misaligned offset or an unimplemented register; *dst
 * is zeroed in either case.
 */
static inline int
pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
{
	*dst = 0;
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return -EINVAL;

	return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
}
1054 
1055 static inline int
pcie_capability_read_word(struct pci_dev * dev,int pos,u16 * dst)1056 pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
1057 {
1058 	*dst = 0;
1059 	if (pos & 3)
1060 		return -EINVAL;
1061 
1062 	if (!pcie_capability_reg_implemented(dev, pos))
1063 		return -EINVAL;
1064 
1065 	return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
1066 }
1067 
/*
 * Write a 16-bit register in the device's PCIe capability.  Returns
 * -EINVAL for a misaligned (odd) offset; writes to registers not
 * implemented for this device are silently dropped and report success,
 * mirroring Linux behaviour.
 */
static inline int
pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
1079 
/*
 * Read-modify-write helper: clear @clear then set @set in a 16-bit
 * PCIe capability register.  RMW cycles on the Link Control and Root
 * Control registers are serialized via pcie_cap_lock so concurrent
 * updates do not lose bits.  Returns 0 on success or the negative
 * errno from the underlying read/write.
 */
static inline int
pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
    uint16_t clear, uint16_t set)
{
	int error;
	uint16_t v;

	if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
		spin_lock(&dev->pcie_cap_lock);

	error = pcie_capability_read_word(dev, pos, &v);
	if (error == 0) {
		v &= ~clear;
		v |= set;
		error = pcie_capability_write_word(dev, pos, v);
	}

	if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
		spin_unlock(&dev->pcie_cap_lock);

	return (error);
}
1102 
/* Set bits in a 16-bit PCIe capability register (locked RMW). */
static inline int
pcie_capability_set_word(struct pci_dev *dev, int pos, uint16_t val)
{
	return (pcie_capability_clear_and_set_word(dev, pos, 0, val));
}
1108 
/* Clear bits in a 16-bit PCIe capability register (locked RMW). */
static inline int
pcie_capability_clear_word(struct pci_dev *dev, int pos, uint16_t val)
{
	return (pcie_capability_clear_and_set_word(dev, pos, val, 0));
}
1114 
pcie_get_minimum_link(struct pci_dev * dev,enum pci_bus_speed * speed,enum pcie_link_width * width)1115 static inline int pcie_get_minimum_link(struct pci_dev *dev,
1116     enum pci_bus_speed *speed, enum pcie_link_width *width)
1117 {
1118 	*speed = PCI_SPEED_UNKNOWN;
1119 	*width = PCIE_LNK_WIDTH_UNKNOWN;
1120 	return (0);
1121 }
1122 
/* SR-IOV is not supported; there are never any virtual functions. */
static inline int
pci_num_vf(struct pci_dev *dev)
{
	return (0);
}
1128 
/*
 * Highest link speed the device is capable of, decoded from the PCIe
 * Link Capabilities (2) registers.  Returns PCI_SPEED_UNKNOWN on any
 * failure or for excluded VIA/ServerWorks bridges.
 */
static inline enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	struct pci_dev *pbus;
	device_t root;
	uint32_t lnkcap, lnkcap2;
	int error, pos;

	/*
	 * We should always be called on a PCI device.
	 * The only current consumer I could find was amdgpu which either
	 * calls us directly on a pdev(drmn?) or with the result of
	 * pci_upstream_bridge().
	 *
	 * Treat "drmn" as special again as it is not a PCI device.
	 */
	if (dev->pdrv != NULL && dev->pdrv->isdrm) {
		pbus = pci_upstream_bridge(dev);
		if (pbus == NULL)
			return (PCI_SPEED_UNKNOWN);
	} else
		pbus = dev;

	/* "root" may be misleading as it may not be that. */
	root = pbus->dev.bsddev;

	/* NOTE(review): vendor quirk — these bridges report "unknown". */
	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return (PCI_SPEED_UNKNOWN);

	if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
		return (PCI_SPEED_UNKNOWN);

	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	if (lnkcap2) {	/* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
			return (PCIE_SPEED_32_0GT);
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_64_0GB)
			return (PCIE_SPEED_64_0GT);
	} else {	/* pre-r3.0 */
		lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
			return (PCIE_SPEED_2_5GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
			return (PCIE_SPEED_5_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
			return (PCIE_SPEED_8_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
			return (PCIE_SPEED_16_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_32_0GB)
			return (PCIE_SPEED_32_0GT);
		if (lnkcap & PCI_EXP_LNKCAP_SLS_64_0GB)
			return (PCIE_SPEED_64_0GT);
	}
	return (PCI_SPEED_UNKNOWN);
}
1194 
1195 static inline enum pcie_link_width
pcie_get_width_cap(struct pci_dev * dev)1196 pcie_get_width_cap(struct pci_dev *dev)
1197 {
1198 	uint32_t lnkcap;
1199 
1200 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
1201 	if (lnkcap)
1202 		return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
1203 
1204 	return (PCIE_LNK_WIDTH_UNKNOWN);
1205 }
1206 
/* Current maximum payload size, in bytes, of the underlying device. */
static inline int
pcie_get_mps(struct pci_dev *dev)
{
	return (pci_get_max_payload(dev->dev.bsddev));
}
1212 
1213 static inline uint32_t
PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)1214 PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
1215 {
1216 
1217 	switch(spd) {
1218 	case PCIE_SPEED_64_0GT:
1219 		return (64000 * 128 / 130);
1220 	case PCIE_SPEED_32_0GT:
1221 		return (32000 * 128 / 130);
1222 	case PCIE_SPEED_16_0GT:
1223 		return (16000 * 128 / 130);
1224 	case PCIE_SPEED_8_0GT:
1225 		return (8000 * 128 / 130);
1226 	case PCIE_SPEED_5_0GT:
1227 		return (5000 * 8 / 10);
1228 	case PCIE_SPEED_2_5GT:
1229 		return (2500 * 8 / 10);
1230 	default:
1231 		return (0);
1232 	}
1233 }
1234 
1235 static inline uint32_t
pcie_bandwidth_available(struct pci_dev * pdev,struct pci_dev ** limiting,enum pci_bus_speed * speed,enum pcie_link_width * width)1236 pcie_bandwidth_available(struct pci_dev *pdev,
1237     struct pci_dev **limiting,
1238     enum pci_bus_speed *speed,
1239     enum pcie_link_width *width)
1240 {
1241 	enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
1242 	enum pcie_link_width nwidth = pcie_get_width_cap(pdev);
1243 
1244 	if (speed)
1245 		*speed = nspeed;
1246 	if (width)
1247 		*width = nwidth;
1248 
1249 	return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
1250 }
1251 
/* ASPM state is not tracked by the LinuxKPI; report it as disabled. */
static inline bool
pcie_aspm_enabled(struct pci_dev *pdev)
{
	return (false);
}
1257 
/*
 * Find the PCIe root port above @pdev, caching the result in
 * pdev->root.  Returns NULL when no root port can be found.
 */
static inline struct pci_dev *
pcie_find_root_port(struct pci_dev *pdev)
{
	device_t root;

	/* Return the cached result of a previous lookup. */
	if (pdev->root != NULL)
		return (pdev->root);

	root = pci_find_pcie_root_port(pdev->dev.bsddev);
	if (root == NULL)
		return (NULL);

	/* Wrap the native device in a new LinuxKPI pci_dev and cache it. */
	pdev->root = lkpinew_pci_dev(root);
	return (pdev->root);
}
1273 
/* This is needed when people rip out the device "HotPlug". */
/*
 * Linux serializes rescan/remove with a global lock; our rescan path
 * does not require it, so both are no-ops kept for API compatibility.
 */
static inline void
pci_lock_rescan_remove(void)
{
}

static inline void
pci_unlock_rescan_remove(void)
{
}
1284 
/* No-op: device removal is not implemented by the LinuxKPI PCI shim. */
/* Use "inline" for consistency with every other function in this file. */
static inline void
pci_stop_and_remove_bus_device(struct pci_dev *pdev)
{
}
1289 
/*
 * Re-scan the bridge above @pbus for devices and return the number of
 * children found, or 0 on any error.  NOTE(review): Linux returns the
 * number of *newly added* devices; this returns the total child count
 * — confirm callers only test the result for != 0.
 */
static inline int
pci_rescan_bus(struct pci_bus *pbus)
{
	device_t *devlist, parent;
	int devcount, error;

	if (!device_is_attached(pbus->self->dev.bsddev))
		return (0);
	/* pci_rescan_method() will work on the pcib (parent). */
	error = BUS_RESCAN(pbus->self->dev.bsddev);
	if (error != 0)
		return (0);

	/* Count the children now attached under the parent bridge. */
	parent = device_get_parent(pbus->self->dev.bsddev);
	error = device_get_children(parent, &devlist, &devcount);
	if (error != 0)
		return (0);
	if (devcount != 0)
		free(devlist, M_TEMP);

	return (devcount);
}
1312 
1313 /*
1314  * The following functions can be used to attach/detach the LinuxKPI's
 * PCI device runtime. The pci_driver and pci_device_id pointers are
 * allowed to be NULL. All other pointers must be valid.
1317  * The pci_dev structure should be zero-initialized before passed
1318  * to the linux_pci_attach_device function.
1319  */
1320 extern int linux_pci_attach_device(device_t, struct pci_driver *,
1321     const struct pci_device_id *, struct pci_dev *);
1322 extern int linux_pci_detach_device(struct pci_dev *);
1323 
1324 static inline int
pci_dev_present(const struct pci_device_id * cur)1325 pci_dev_present(const struct pci_device_id *cur)
1326 {
1327 	while (cur != NULL && (cur->vendor || cur->device)) {
1328 		if (pci_find_device(cur->vendor, cur->device) != NULL) {
1329 			return (1);
1330 		}
1331 		cur++;
1332 	}
1333 	return (0);
1334 }
1335 
1336 static inline const struct pci_device_id *
pci_match_id(const struct pci_device_id * ids,struct pci_dev * pdev)1337 pci_match_id(const struct pci_device_id *ids, struct pci_dev *pdev)
1338 {
1339 	if (ids == NULL)
1340 		return (NULL);
1341 
1342 	for (;
1343 	     ids->vendor != 0 || ids->subvendor != 0 || ids->class_mask != 0;
1344 	     ids++)
1345 		if ((ids->vendor == PCI_ANY_ID ||
1346 		     ids->vendor == pdev->vendor) &&
1347 		    (ids->device == PCI_ANY_ID ||
1348 		     ids->device == pdev->device) &&
1349 		    (ids->subvendor == PCI_ANY_ID ||
1350 		     ids->subvendor == pdev->subsystem_vendor) &&
1351 		    (ids->subdevice == PCI_ANY_ID ||
1352 		     ids->subdevice == pdev->subsystem_device) &&
1353 		    ((ids->class ^ pdev->class) & ids->class_mask) == 0)
1354 			return (ids);
1355 
1356 	return (NULL);
1357 }
1358 
/* Look a device up by domain, bus and encoded slot/function number. */
struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
    unsigned int bus, unsigned int devfn);
#define	pci_get_domain_bus_and_slot(domain, bus, devfn)	\
	lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)

/* Look a device up on a bus; opt out to keep FreeBSD's native pci_get_slot(). */
struct pci_dev *lkpi_pci_get_slot(struct pci_bus *, unsigned int);
#ifndef	WANT_NATIVE_PCI_GET_SLOT
#define	pci_get_slot(_pbus, _devfn)				\
    lkpi_pci_get_slot(_pbus, _devfn)
#endif
1369 
1370 static inline int
pci_domain_nr(struct pci_bus * pbus)1371 pci_domain_nr(struct pci_bus *pbus)
1372 {
1373 
1374 	return (pbus->domain);
1375 }
1376 
/*
 * Read @len bytes of config space into *val; always reports success.
 * NOTE(review): @devfn is ignored — the access goes to the bridge
 * device (bus->self), not the addressed function.  Confirm callers
 * only use this with bus->self's devfn.
 */
static inline int
pci_bus_read_config(struct pci_bus *bus, unsigned int devfn,
                    int pos, uint32_t *val, int len)
{

	*val = pci_read_config(bus->self->dev.bsddev, pos, len);
	return (0);
}
1385 
1386 static inline int
pci_bus_read_config_word(struct pci_bus * bus,unsigned int devfn,int pos,u16 * val)1387 pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, int pos, u16 *val)
1388 {
1389 	uint32_t tmp;
1390 	int ret;
1391 
1392 	ret = pci_bus_read_config(bus, devfn, pos, &tmp, 2);
1393 	*val = (u16)tmp;
1394 	return (ret);
1395 }
1396 
1397 static inline int
pci_bus_read_config_byte(struct pci_bus * bus,unsigned int devfn,int pos,u8 * val)1398 pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, u8 *val)
1399 {
1400 	uint32_t tmp;
1401 	int ret;
1402 
1403 	ret = pci_bus_read_config(bus, devfn, pos, &tmp, 1);
1404 	*val = (u8)tmp;
1405 	return (ret);
1406 }
1407 
/*
 * Write @size bytes of config space; always reports success.
 * NOTE(review): @devfn is ignored — the access goes to the bridge
 * device (bus->self), matching pci_bus_read_config() above.
 */
static inline int
pci_bus_write_config(struct pci_bus *bus, unsigned int devfn, int pos,
    uint32_t val, int size)
{

	pci_write_config(bus->self->dev.bsddev, pos, val, size);
	return (0);
}
1416 
/* 8-bit convenience wrapper around pci_bus_write_config(). */
static inline int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, int pos,
    uint8_t val)
{
	return (pci_bus_write_config(bus, devfn, pos, val, 1));
}
1423 
/* 16-bit convenience wrapper around pci_bus_write_config(). */
static inline int
pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int pos,
    uint16_t val)
{
	return (pci_bus_write_config(bus, devfn, pos, val, 2));
}
1430 
/* Class-based device iteration helpers, implemented in linux_pci.c. */
struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from);
#define	pci_get_class(class, from)	lkpi_pci_get_class(class, from)
struct pci_dev *lkpi_pci_get_base_class(unsigned int class,
    struct pci_dev *from);
#define	pci_get_base_class(class, from)	lkpi_pci_get_base_class(class, from)

/* -------------------------------------------------------------------------- */

/* Managed ("pcim_") helpers, implemented out of line. */
#define	pcim_enable_device(pdev)					\
    linuxkpi_pcim_enable_device(pdev)
#define	pcim_iomap_table(pdev)						\
    linuxkpi_pcim_iomap_table(pdev)
#define	pcim_iomap_regions(pdev, mask, name)				\
    linuxkpi_pcim_iomap_regions(pdev,  mask, name)
1445 
/*
 * Request every BAR ("region") that is not in @mask, then iomap the
 * BARs that are.  On failure, all regions requested here are released
 * and -EINVAL is returned; 0 on success.
 */
static inline int
pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)
{
	uint32_t requests, req_mask;
	int bar, error;

	/* Request all the BARs ("regions") we do not iomap. */
	req_mask = ((1 << (PCIR_MAX_BAR_0 + 1)) - 1) & ~mask;
	for (bar = requests = 0; requests != req_mask; bar++) {
		if ((req_mask & (1 << bar)) == 0)
			continue;
		error = pci_request_region(pdev, bar, name);
		/* A non-existent BAR (-ENODEV) is not a failure. */
		if (error != 0 && error != -ENODEV)
			goto err;
		requests |= (1 << bar);
	}

	error = pcim_iomap_regions(pdev, mask, name);
	if (error != 0)
		goto err;

	return (0);

err:
	/* Undo every region request made above. */
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((requests & (1 << bar)) != 0)
			pci_release_region(pdev, bar);
	}

	return (-EINVAL);
}
1477 
1478 /*
1479  * We cannot simply re-define pci_get_device() as we would normally do
1480  * and then hide it in linux_pci.c as too many semi-native drivers still
1481  * include linux/pci.h and run into the conflict with native PCI. Linux drivers
1482  * using pci_get_device() need to be changed to call linuxkpi_pci_get_device().
1483  */
1484 static inline struct pci_dev *
linuxkpi_pci_get_device(uint32_t vendor,uint32_t device,struct pci_dev * odev)1485 linuxkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
1486 {
1487 
1488 	return (lkpi_pci_get_device(vendor, device, odev));
1489 }
1490 
/* Iterate over all PCI devices; _pdev is presumably started at NULL — verify at call sites. */
#define	for_each_pci_dev(_pdev)						\
    while ((_pdev = linuxkpi_pci_get_device(PCI_ANY_ID, PCI_ANY_ID, _pdev)) != NULL)
1493 
/* This is a FreeBSD extension so we can use bus_*(). */
static inline void
linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev)
{
	/* Request that BAR mappings be backed by iomap resources. */
	pdev->want_iomap_res = true;
}
1500 
/* Thunderbolt attachment is not modelled; always report false. */
static inline bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{
	return (false);
}
1507 
/* Platform-provided ROM images are not supported; *size is left untouched. */
static inline void *
pci_platform_rom(struct pci_dev *pdev, size_t *size)
{
	return (NULL);
}
1514 
/* No-op on FreeBSD; kept for Linux API compatibility. */
static inline void
pci_ignore_hotplug(struct pci_dev *pdev)
{
}
1519 
1520 static inline const char *
pci_power_name(pci_power_t state)1521 pci_power_name(pci_power_t state)
1522 {
1523 	int pstate = state + 1;
1524 
1525 	if (pstate >= 0 && pstate < nitems(pci_power_names))
1526 		return (pci_power_names[pstate]);
1527 	else
1528 		return (pci_power_names[0]);
1529 }
1530 
/*
 * Current maximum read request size in bytes, decoded from the Device
 * Control register (128 << READRQ field, bits 14:12).  Returns -EINVAL
 * when the register cannot be read.
 */
static inline int
pcie_get_readrq(struct pci_dev *dev)
{
	u16 ctl;

	if (pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl))
		return (-EINVAL);

	return (128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12));
}
1541 
1542 static inline bool
pci_is_enabled(struct pci_dev * pdev)1543 pci_is_enabled(struct pci_dev *pdev)
1544 {
1545 
1546 	return ((pci_read_config(pdev->dev.bsddev, PCIR_COMMAND, 2) &
1547 	    PCIM_CMD_BUSMASTEREN) != 0);
1548 }
1549 
/* Nothing to wait on; pretend all pending transactions have drained. */
static inline int
pci_wait_for_pending_transaction(struct pci_dev *pdev)
{
	return (0);
}
1556 
/* Resource assignment is handled by the host; always succeeds. */
static inline int
pci_assign_resource(struct pci_dev *pdev, int bar)
{
	return (0);
}
1563 
1564 static inline int
pci_irq_vector(struct pci_dev * pdev,unsigned int vector)1565 pci_irq_vector(struct pci_dev *pdev, unsigned int vector)
1566 {
1567 
1568 	if (!pdev->msix_enabled && !pdev->msi_enabled) {
1569 		if (vector != 0)
1570 			return (-EINVAL);
1571 		return (pdev->irq);
1572 	}
1573 
1574 	if (pdev->msix_enabled || pdev->msi_enabled) {
1575 		if ((pdev->dev.irq_start + vector) >= pdev->dev.irq_end)
1576 			return (-EINVAL);
1577 		return (pdev->dev.irq_start + vector);
1578 	}
1579 
1580         return (-ENXIO);
1581 }
1582 
/* Configuring wake-from-D3 is not implemented yet; reports success. */
static inline int
pci_wake_from_d3(struct pci_dev *pdev, bool enable)
{

	pr_debug("%s: TODO\n", __func__);
	return (0);
}
1590 
1591 #endif	/* _LINUXKPI_LINUX_PCI_H_ */
1592