1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 * Copyright (c) 2020-2025 The FreeBSD Foundation
8 *
9 * Portions of this software were developed by Björn Zeeb
10 * under sponsorship from the FreeBSD Foundation.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice unmodified, this list of conditions, and the following
17 * disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 #ifndef _LINUXKPI_LINUX_PCI_H_
34 #define _LINUXKPI_LINUX_PCI_H_
35
36 #define CONFIG_PCI_MSI
37
38 #include <linux/types.h>
39 #include <linux/device/driver.h>
40
41 #include <sys/param.h>
42 #include <sys/bus.h>
43 #include <sys/module.h>
44 #include <sys/nv.h>
45 #include <sys/pciio.h>
46 #include <dev/pci/pcivar.h>
47 #include <dev/pci/pcireg.h>
48 #include <dev/pci/pci_private.h>
49
50 #include <machine/resource.h>
51
52 #include <linux/list.h>
53 #include <linux/dmapool.h>
54 #include <linux/dma-mapping.h>
55 #include <linux/compiler.h>
56 #include <linux/errno.h>
57 #include <asm/atomic.h>
58 #include <asm/memtype.h>
59 #include <linux/device.h>
60 #include <linux/pci_ids.h>
61 #include <linux/pm.h>
62
63 #include <linux/kernel.h> /* pr_debug */
64
65 struct pci_device_id {
66 uint32_t vendor;
67 uint32_t device;
68 uint32_t subvendor;
69 uint32_t subdevice;
70 uint32_t class;
71 uint32_t class_mask;
72 uintptr_t driver_data;
73 };
74
75 #define MODULE_DEVICE_TABLE_BUS_pci(_bus, _table) \
76 MODULE_PNP_INFO("U32:vendor;U32:device;V32:subvendor;V32:subdevice", \
77 _bus, lkpi_ ## _table, _table, nitems(_table) - 1)
78
79 /* Linux has an empty element at the end of the ID table -> nitems() - 1. */
80 #define MODULE_DEVICE_TABLE(_bus, _table) \
81 \
82 static device_method_t _ ## _bus ## _ ## _table ## _methods[] = { \
83 DEVMETHOD_END \
84 }; \
85 \
86 static driver_t _ ## _bus ## _ ## _table ## _driver = { \
87 "lkpi_" #_bus #_table, \
88 _ ## _bus ## _ ## _table ## _methods, \
89 0 \
90 }; \
91 \
92 DRIVER_MODULE(lkpi_ ## _table, _bus, _ ## _bus ## _ ## _table ## _driver,\
93 0, 0); \
94 \
95 MODULE_DEVICE_TABLE_BUS_ ## _bus(_bus, _table)
96
97 #define PCI_ANY_ID -1U
98
99 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
100 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
101 #define PCI_FUNC(devfn) ((devfn) & 0x07)
102 #define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff)
103 #define PCI_DEVID(bus, devfn) ((((uint16_t)(bus)) << 8) | (devfn))
104
105 #define PCI_VDEVICE(_vendor, _device) \
106 .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
107 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
108 #define PCI_DEVICE(_vendor, _device) \
109 .vendor = (_vendor), .device = (_device), \
110 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
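
/*
 * Illustrative sketch only ("mydrv" and the device IDs below are made-up
 * examples): a Linux driver supplies a sentinel-terminated ID table and
 * exports it with MODULE_DEVICE_TABLE(), which is why the PNP info above
 * uses nitems(_table) - 1.
 *
 *	static const struct pci_device_id mydrv_pci_ids[] = {
 *		{ PCI_VDEVICE(INTEL, 0x1234) },
 *		{ PCI_DEVICE(0x8086, 0x5678) },
 *		{ 0, }
 *	};
 *	MODULE_DEVICE_TABLE(pci, mydrv_pci_ids);
 */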
111
112 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
113
114 #define PCI_STD_NUM_BARS 6
115 #define PCI_BASE_ADDRESS_0 PCIR_BARS
116 #define PCI_BASE_ADDRESS_MEM_TYPE_64 PCIM_BAR_MEM_64
117 #define PCI_VENDOR_ID PCIR_VENDOR
118 #define PCI_DEVICE_ID PCIR_DEVICE
119 #define PCI_COMMAND PCIR_COMMAND
120 #define PCI_COMMAND_INTX_DISABLE PCIM_CMD_INTxDIS
121 #define PCI_COMMAND_MEMORY PCIM_CMD_MEMEN
122 #define PCI_PRIMARY_BUS PCIR_PRIBUS_1
123 #define PCI_SECONDARY_BUS PCIR_SECBUS_1
124 #define PCI_SUBORDINATE_BUS PCIR_SUBBUS_1
125 #define PCI_SEC_LATENCY_TIMER PCIR_SECLAT_1
126 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */
127 #define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */
128 #define PCI_EXP_LNKCTL_ASPM_L0S PCIEM_LINK_CTL_ASPMC_L0S
129 #define PCI_EXP_LNKCTL_ASPM_L1 PCIEM_LINK_CTL_ASPMC_L1
130 #define PCI_EXP_LNKCTL_ASPMC PCIEM_LINK_CTL_ASPMC
131 #define PCI_EXP_LNKCTL_CLKREQ_EN PCIEM_LINK_CTL_ECPM /* Enable clock PM */
132 #define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD
133 #define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */
134 #define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */
135 #define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */
136 #define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */
137 #define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */
138 #define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */
139 #define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */
140 #define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */
141 #define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */
142 #define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */
143 #define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */
144 #define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */
145 #define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */
146 #define PCI_EXP_DEVCTL2_LTR_EN PCIEM_CTL2_LTR_ENABLE
147 #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS PCIEM_CTL2_COMP_TIMO_DISABLE
148 #define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */
149 #define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */
150 #define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */
151 #define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */
152 #define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */
153 #define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */
154 #define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */
155 #define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */
156 #define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */
157 #define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */
158 #define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */
159 #define PCI_EXP_LNKSTA_CLS PCIEM_LINK_STA_SPEED
160 #define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 /* Current Link Speed 8.0GT/s */
161 #define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */
162 #define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */
163 #define PCI_EXP_LNKCAP_SLS_8_0GB 0x03 /* Supported Link Speed 8.0GT/s */
164 #define PCI_EXP_LNKCAP_SLS_16_0GB 0x04 /* Supported Link Speed 16.0GT/s */
165 #define PCI_EXP_LNKCAP_SLS_32_0GB 0x05 /* Supported Link Speed 32.0GT/s */
166 #define PCI_EXP_LNKCAP_SLS_64_0GB 0x06 /* Supported Link Speed 64.0GT/s */
167 #define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */
168 #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
169 #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
170 #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
171 #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x10 /* Supported Link Speed 16.0GT/s */
172 #define PCI_EXP_LNKCAP2_SLS_32_0GB 0x20 /* Supported Link Speed 32.0GT/s */
173 #define PCI_EXP_LNKCAP2_SLS_64_0GB 0x40 /* Supported Link Speed 64.0GT/s */
174 #define PCI_EXP_LNKCTL2_TLS 0x000f
175 #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
176 #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
177 #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
178 #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
179 #define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
180 #define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
181 #define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
182 #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
183
184 #define PCI_MSI_ADDRESS_LO PCIR_MSI_ADDR
185 #define PCI_MSI_ADDRESS_HI PCIR_MSI_ADDR_HIGH
186 #define PCI_MSI_FLAGS PCIR_MSI_CTRL
187 #define PCI_MSI_FLAGS_ENABLE PCIM_MSICTRL_MSI_ENABLE
188 #define PCI_MSIX_FLAGS PCIR_MSIX_CTRL
189 #define PCI_MSIX_FLAGS_ENABLE PCIM_MSIXCTRL_MSIX_ENABLE
190
191 #define PCI_EXP_LNKCAP_CLKPM 0x00040000
192 #define PCI_EXP_DEVSTA_TRPND 0x0020
193
194 #define IORESOURCE_MEM (1 << SYS_RES_MEMORY)
195 #define IORESOURCE_IO (1 << SYS_RES_IOPORT)
196 #define IORESOURCE_IRQ (1 << SYS_RES_IRQ)
197
198 enum pci_bus_speed {
199 PCI_SPEED_UNKNOWN = -1,
200 PCIE_SPEED_2_5GT,
201 PCIE_SPEED_5_0GT,
202 PCIE_SPEED_8_0GT,
203 PCIE_SPEED_16_0GT,
204 PCIE_SPEED_32_0GT,
205 PCIE_SPEED_64_0GT,
206 };
207
208 enum pcie_link_width {
209 PCIE_LNK_WIDTH_RESRV = 0x00,
210 PCIE_LNK_X1 = 0x01,
211 PCIE_LNK_X2 = 0x02,
212 PCIE_LNK_X4 = 0x04,
213 PCIE_LNK_X8 = 0x08,
214 PCIE_LNK_X12 = 0x0c,
215 PCIE_LNK_X16 = 0x10,
216 PCIE_LNK_X32 = 0x20,
217 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
218 };
219
220 #define PCIE_LINK_STATE_L0S 0x00000001
221 #define PCIE_LINK_STATE_L1 0x00000002
222 #define PCIE_LINK_STATE_CLKPM 0x00000004
223
224 typedef int pci_power_t;
225
226 #define PCI_D0 PCI_POWERSTATE_D0
227 #define PCI_D1 PCI_POWERSTATE_D1
228 #define PCI_D2 PCI_POWERSTATE_D2
229 #define PCI_D3hot PCI_POWERSTATE_D3_HOT
230 #define PCI_D3cold PCI_POWERSTATE_D3_COLD
231
232 #define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN
233
234 extern const char *pci_power_names[6];
235
236 #define PCI_ERR_UNCOR_STATUS PCIR_AER_UC_STATUS
237 #define PCI_ERR_COR_STATUS PCIR_AER_COR_STATUS
238 #define PCI_ERR_ROOT_COMMAND PCIR_AER_ROOTERR_CMD
239 #define PCI_ERR_ROOT_ERR_SRC PCIR_AER_COR_SOURCE_ID
240
241 #define PCI_EXT_CAP_ID_ERR PCIZ_AER
242 #define PCI_EXT_CAP_ID_L1SS PCIZ_L1PM
243
244 #define PCI_L1SS_CTL1 0x8
245 #define PCI_L1SS_CTL1_L1SS_MASK 0xf
246
247 #define PCI_IRQ_INTX 0x01
248 #define PCI_IRQ_MSI 0x02
249 #define PCI_IRQ_MSIX 0x04
250 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_MSIX|PCI_IRQ_MSI|PCI_IRQ_INTX)
251
252 #if defined(LINUXKPI_VERSION) && (LINUXKPI_VERSION <= 61000)
253 #define PCI_IRQ_LEGACY PCI_IRQ_INTX
254 #endif
255
/*
 * Linux PCI code uses `PCI_SET_ERROR_RESPONSE()` to indicate to the caller of
 * a `pci_read_*()` function that the read failed, for example because the
 * device was disconnected.  This is slightly redundant given that the Linux
 * `pci_read_*()` functions can also return an error value; the read value
 * itself is stored in an integer passed by pointer.
 *
 * We do not set PCI_ERROR_RESPONSE anywhere as of this commit, but the DRM
 * drivers have started to use `PCI_POSSIBLE_ERROR()`.
 */
266 #define PCI_ERROR_RESPONSE (~0ULL)
267 #define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
268 #define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
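
/*
 * Illustrative sketch only (the 0x40 register offset is a made-up example):
 * callers check the value read from config space, rather than the return
 * code, to detect a device that answered with all-ones (e.g., because it
 * was unplugged).
 *
 *	u32 val;
 *
 *	pci_read_config_dword(pdev, 0x40, &val);
 *	if (PCI_POSSIBLE_ERROR(val))
 *		return (-ENODEV);
 */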
269
270 struct pci_dev;
271
272 struct pci_driver {
273 struct list_head node;
274 char *name;
275 const struct pci_device_id *id_table;
276 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
277 void (*remove)(struct pci_dev *dev);
278 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
279 int (*resume) (struct pci_dev *dev); /* Device woken up */
280 void (*shutdown) (struct pci_dev *dev); /* Device shutdown */
281 driver_t bsddriver;
282 devclass_t bsdclass;
283 struct device_driver driver;
284 const struct pci_error_handlers *err_handler;
285 bool isdrm;
286 int bsd_probe_return;
287 int (*bsd_iov_init)(device_t dev, uint16_t num_vfs,
288 const nvlist_t *pf_config);
289 void (*bsd_iov_uninit)(device_t dev);
290 int (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum,
291 const nvlist_t *vf_config);
292 };
293
294 struct pci_bus {
295 struct pci_dev *self;
296 /* struct pci_bus *parent */
297 int domain;
298 int number;
299 };
300
301 extern struct list_head pci_drivers;
302 extern struct list_head pci_devices;
303 extern spinlock_t pci_lock;
304
305 #define __devexit_p(x) x
306
307 #define module_pci_driver(_drv) \
308 module_driver(_drv, linux_pci_register_driver, linux_pci_unregister_driver)
309
310 struct msi_msg {
311 uint32_t data;
312 };
313
314 struct pci_msi_desc {
315 struct {
316 bool is_64;
317 } msi_attrib;
318 };
319
320 struct msi_desc {
321 struct msi_msg msg;
322 struct pci_msi_desc pci;
323 };
324
325 struct msix_entry {
326 int entry;
327 int vector;
328 };
329
330 /*
331 * If we find drivers accessing this from multiple KPIs we may have to
332 * refcount objects of this structure.
333 */
334 struct resource;
335 struct pci_mmio_region {
336 TAILQ_ENTRY(pci_mmio_region) next;
337 struct resource *res;
338 int rid;
339 int type;
340 };
341
342 struct pci_dev {
343 struct device dev;
344 struct list_head links;
345 struct pci_driver *pdrv;
346 struct pci_bus *bus;
347 struct pci_dev *root;
348 pci_power_t current_state;
349 uint16_t device;
350 uint16_t vendor;
351 uint16_t subsystem_vendor;
352 uint16_t subsystem_device;
353 unsigned int irq;
354 unsigned int devfn;
355 uint32_t class;
356 uint8_t revision;
357 uint8_t msi_cap;
358 uint8_t msix_cap;
	bool managed;	/* devres "pcim_*()". */
360 bool want_iomap_res;
361 bool msi_enabled;
362 bool msix_enabled;
363 phys_addr_t rom;
364 size_t romlen;
365 struct msi_desc **msi_desc;
366 char *path_name;
367 spinlock_t pcie_cap_lock;
368
369 TAILQ_HEAD(, pci_mmio_region) mmio;
370 };
371
372 int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
373 unsigned int flags);
374 bool pci_device_is_present(struct pci_dev *pdev);
375
376 int linuxkpi_pcim_enable_device(struct pci_dev *pdev);
377 void __iomem **linuxkpi_pcim_iomap_table(struct pci_dev *pdev);
378 void *linuxkpi_pci_iomap_range(struct pci_dev *, int,
379 unsigned long, unsigned long);
380 void *linuxkpi_pci_iomap(struct pci_dev *, int, unsigned long);
381 void *linuxkpi_pcim_iomap(struct pci_dev *, int, unsigned long);
382 void linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res);
383 int linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask,
384 const char *name);
385 int linuxkpi_pci_request_region(struct pci_dev *, int, const char *);
386 int linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name);
387 int linuxkpi_pcim_request_all_regions(struct pci_dev *, const char *);
388 void linuxkpi_pci_release_region(struct pci_dev *pdev, int bar);
389 void linuxkpi_pci_release_regions(struct pci_dev *pdev);
390 int linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
391 int nreq);
392
393 /* Internal helper function(s). */
394 struct pci_dev *lkpinew_pci_dev(device_t);
395 void lkpi_pci_devres_release(struct device *, void *);
396 struct pci_dev *lkpi_pci_get_device(uint32_t, uint32_t, struct pci_dev *);
397 struct msi_desc *lkpi_pci_msi_desc_alloc(int);
398 struct device *lkpi_pci_find_irq_dev(unsigned int irq);
399 int _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec);
400
401 #define pci_err(pdev, fmt, ...) \
402 dev_err(&(pdev)->dev, fmt, ##__VA_ARGS__)
403 #define pci_info(pdev, fmt, ...) \
404 dev_info(&(pdev)->dev, fmt, ##__VA_ARGS__)
405
406 static inline bool
dev_is_pci(struct device *dev)
408 {
409
410 return (device_get_devclass(dev->bsddev) == devclass_find("pci"));
411 }
412
413 static inline uint16_t
pci_dev_id(struct pci_dev *pdev)
415 {
416 return (PCI_DEVID(pdev->bus->number, pdev->devfn));
417 }
418
419 static inline int
pci_resource_type(struct pci_dev *pdev, int bar)
421 {
422 struct pci_map *pm;
423
424 pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
425 if (!pm)
426 return (-1);
427
428 if (PCI_BAR_IO(pm->pm_value))
429 return (SYS_RES_IOPORT);
430 else
431 return (SYS_RES_MEMORY);
432 }
433
434 /*
 * All drivers just seem to want to inspect the type, not the flags.
436 */
437 static inline int
pci_resource_flags(struct pci_dev *pdev, int bar)
439 {
440 int type;
441
442 type = pci_resource_type(pdev, bar);
443 if (type < 0)
444 return (0);
445 return (1 << type);
446 }
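
/*
 * Illustrative sketch only: the usual pattern in drivers is a plain type
 * test on the returned flags, e.g.
 *
 *	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
 *		addr = pci_resource_start(pdev, bar);
 */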
447
448 static inline const char *
pci_name(struct pci_dev *d)
450 {
451 return d->path_name;
452 }
453
454 static inline void *
pci_get_drvdata(struct pci_dev *pdev)
456 {
457
458 return dev_get_drvdata(&pdev->dev);
459 }
460
461 static inline void
pci_set_drvdata(struct pci_dev *pdev, void *data)
463 {
464
465 dev_set_drvdata(&pdev->dev, data);
466 }
467
468 static inline struct pci_dev *
pci_dev_get(struct pci_dev *pdev)
470 {
471
472 if (pdev != NULL)
473 get_device(&pdev->dev);
474 return (pdev);
475 }
476
477 static __inline void
pci_dev_put(struct pci_dev *pdev)
479 {
480
481 if (pdev != NULL)
482 put_device(&pdev->dev);
483 }
484
485 static inline int
pci_enable_device(struct pci_dev *pdev)
487 {
488
489 pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
490 pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
491 return (0);
492 }
493
494 static inline void
pci_disable_device(struct pci_dev *pdev)
496 {
497
498 pci_disable_busmaster(pdev->dev.bsddev);
499 }
500
501 static inline int
pci_set_master(struct pci_dev *pdev)
503 {
504
505 pci_enable_busmaster(pdev->dev.bsddev);
506 return (0);
507 }
508
509 static inline int
pci_set_power_state(struct pci_dev *pdev, int state)
511 {
512
513 pci_set_powerstate(pdev->dev.bsddev, state);
514 return (0);
515 }
516
517 static inline int
pci_clear_master(struct pci_dev *pdev)
519 {
520
521 pci_disable_busmaster(pdev->dev.bsddev);
522 return (0);
523 }
524
525 static inline bool
pci_is_root_bus(struct pci_bus *pbus)
527 {
528
529 return (pbus->self == NULL);
530 }
531
532 static inline struct pci_dev *
pci_upstream_bridge(struct pci_dev *pdev)
534 {
535
536 if (pci_is_root_bus(pdev->bus))
537 return (NULL);
538
539 /*
540 * If we do not have a (proper) "upstream bridge" set, e.g., we point
541 * to ourselves, try to handle this case on the fly like we do
542 * for pcie_find_root_port().
543 */
544 if (pdev == pdev->bus->self) {
545 device_t bridge;
546
547 /*
548 * In the case of DRM drivers, the passed device is a child of
		 * `vgapci`.  We want to start the lookup from `vgapci`, i.e.,
		 * from the parent of the passed `drmn` device.
551 *
552 * We can use the `isdrm` flag to determine this.
553 */
554 bridge = pdev->dev.bsddev;
555 if (pdev->pdrv != NULL && pdev->pdrv->isdrm)
556 bridge = device_get_parent(bridge);
557 if (bridge == NULL)
558 goto done;
559
560 bridge = device_get_parent(bridge);
561 if (bridge == NULL)
562 goto done;
563 bridge = device_get_parent(bridge);
564 if (bridge == NULL)
565 goto done;
566 if (device_get_devclass(device_get_parent(bridge)) !=
567 devclass_find("pci"))
568 goto done;
569
570 /*
571 * "bridge" is a PCI-to-PCI bridge. Create a Linux pci_dev
572 * for it so it can be returned.
573 */
574 pdev->bus->self = lkpinew_pci_dev(bridge);
575 }
576 done:
577 return (pdev->bus->self);
578 }
579
580 #define pci_request_region(pdev, bar, res_name) \
581 linuxkpi_pci_request_region(pdev, bar, res_name)
582 #define pci_release_region(pdev, bar) \
583 linuxkpi_pci_release_region(pdev, bar)
584 #define pci_request_regions(pdev, res_name) \
585 linuxkpi_pci_request_regions(pdev, res_name)
586 #define pci_release_regions(pdev) \
587 linuxkpi_pci_release_regions(pdev)
588 #define pcim_request_all_regions(pdev, name) \
589 linuxkpi_pcim_request_all_regions(pdev, name)
590
591 static inline void
lkpi_pci_disable_msix(struct pci_dev *pdev)
593 {
594
595 pci_release_msi(pdev->dev.bsddev);
596
	/*
	 * The MSI-X IRQ numbers associated with this PCI device are no
	 * longer valid and might be re-assigned.  Make sure
	 * lkpi_pci_find_irq_dev() no longer sees them by resetting their
	 * references to zero:
	 */
603 pdev->dev.irq_start = 0;
604 pdev->dev.irq_end = 0;
605 pdev->msix_enabled = false;
606 }
607 /* Only for consistency. No conflict on that one. */
608 #define pci_disable_msix(pdev) lkpi_pci_disable_msix(pdev)
609
610 static inline void
lkpi_pci_disable_msi(struct pci_dev *pdev)
612 {
613
614 pci_release_msi(pdev->dev.bsddev);
615
616 pdev->dev.irq_start = 0;
617 pdev->dev.irq_end = 0;
618 pdev->irq = pdev->dev.irq;
619 pdev->msi_enabled = false;
620 }
621 #define pci_disable_msi(pdev) lkpi_pci_disable_msi(pdev)
622 #define pci_free_irq_vectors(pdev) lkpi_pci_disable_msi(pdev)
623
624 unsigned long pci_resource_start(struct pci_dev *pdev, int bar);
625 unsigned long pci_resource_len(struct pci_dev *pdev, int bar);
626
627 static inline bus_addr_t
pci_bus_address(struct pci_dev *pdev, int bar)
629 {
630
631 return (pci_resource_start(pdev, bar));
632 }
633
634 #define PCI_CAP_ID_EXP PCIY_EXPRESS
635 #define PCI_CAP_ID_PCIX PCIY_PCIX
636 #define PCI_CAP_ID_AGP PCIY_AGP
637 #define PCI_CAP_ID_PM PCIY_PMG
638
639 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL
640 #define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
641 #define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST
642 #define PCI_EXP_LNKCTL PCIER_LINK_CTL
643 #define PCI_EXP_LNKSTA PCIER_LINK_STA
644
645 static inline int
pci_find_capability(struct pci_dev *pdev, int capid)
647 {
648 int reg;
649
	if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
651 return (0);
652 return (reg);
653 }
654
static inline int pci_pcie_cap(struct pci_dev *dev)
656 {
657 return pci_find_capability(dev, PCI_CAP_ID_EXP);
658 }
659
660 static inline int
pci_find_ext_capability(struct pci_dev *pdev, int capid)
662 {
663 int reg;
664
	if (pci_find_extcap(pdev->dev.bsddev, capid, &reg))
666 return (0);
667 return (reg);
668 }
669
670 #define PCIM_PCAP_PME_SHIFT 11
671 static __inline bool
pci_pme_capable(struct pci_dev *pdev, uint32_t flag)
673 {
674 struct pci_devinfo *dinfo;
675 pcicfgregs *cfg;
676
677 if (flag > (PCIM_PCAP_D3PME_COLD >> PCIM_PCAP_PME_SHIFT))
678 return (false);
679
680 dinfo = device_get_ivars(pdev->dev.bsddev);
681 cfg = &dinfo->cfg;
682
683 if (cfg->pp.pp_cap == 0)
684 return (false);
685
686 if ((cfg->pp.pp_cap & (1 << (PCIM_PCAP_PME_SHIFT + flag))) != 0)
687 return (true);
688
689 return (false);
690 }
691
692 static inline int
pci_disable_link_state(struct pci_dev *pdev, uint32_t flags)
694 {
695
696 if (!pci_enable_aspm)
697 return (-EPERM);
698
699 return (-ENXIO);
700 }
701
702 static inline int
pci_read_config_byte(const struct pci_dev *pdev, int where, u8 *val)
704 {
705
706 *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
707 return (0);
708 }
709
710 static inline int
pci_read_config_word(const struct pci_dev *pdev, int where, u16 *val)
712 {
713
714 *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
715 return (0);
716 }
717
718 static inline int
pci_read_config_dword(const struct pci_dev *pdev, int where, u32 *val)
720 {
721
722 *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
723 return (0);
724 }
725
726 static inline int
pci_write_config_byte(const struct pci_dev *pdev, int where, u8 val)
728 {
729
730 pci_write_config(pdev->dev.bsddev, where, val, 1);
731 return (0);
732 }
733
734 static inline int
pci_write_config_word(const struct pci_dev *pdev, int where, u16 val)
736 {
737
738 pci_write_config(pdev->dev.bsddev, where, val, 2);
739 return (0);
740 }
741
742 static inline int
pci_write_config_dword(const struct pci_dev *pdev, int where, u32 val)
744 {
745
746 pci_write_config(pdev->dev.bsddev, where, val, 4);
747 return (0);
748 }
749
750 int linux_pci_register_driver(struct pci_driver *pdrv);
751 int linux_pci_register_drm_driver(struct pci_driver *pdrv);
752 void linux_pci_unregister_driver(struct pci_driver *pdrv);
753 void linux_pci_unregister_drm_driver(struct pci_driver *pdrv);
754
755 #define pci_register_driver(pdrv) \
756 linux_pci_register_driver(pdrv)
757 #define pci_unregister_driver(pdrv) \
758 linux_pci_unregister_driver(pdrv)
759
/*
 * Enable MSI-X.  A positive return value indicates that the request could
 * not be satisfied and reports the actual number of available vectors;
 * negative return values are failures.
 *
 * NB: the define was added to prevent this definition of pci_enable_msix
 * from clashing with the native FreeBSD version.
 */
767 #define pci_enable_msix(...) \
768 linuxkpi_pci_enable_msix(__VA_ARGS__)
769
770 #define pci_enable_msix_range(...) \
771 linux_pci_enable_msix_range(__VA_ARGS__)
772
773 static inline int
pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
775 int minvec, int maxvec)
776 {
777 int nvec = maxvec;
778 int rc;
779
780 if (maxvec < minvec)
781 return (-ERANGE);
782
783 do {
784 rc = pci_enable_msix(dev, entries, nvec);
785 if (rc < 0) {
786 return (rc);
787 } else if (rc > 0) {
788 if (rc < minvec)
789 return (-ENOSPC);
790 nvec = rc;
791 }
792 } while (rc);
793 return (nvec);
794 }
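
/*
 * Illustrative sketch only (the vector count and "sc" softc field are
 * assumptions): drivers typically request a range of MSI-X vectors and
 * make do with however many are granted.
 *
 *	struct msix_entry entries[8];
 *	int i, nvec;
 *
 *	for (i = 0; i < 8; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 8);
 *	if (nvec < 0)
 *		return (nvec);
 *	sc->nirqs = nvec;
 */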
795
796 #define pci_enable_msi(pdev) \
797 linux_pci_enable_msi(pdev)
798
799 static inline int
pci_enable_msi(struct pci_dev *pdev)
801 {
802
803 return (_lkpi_pci_enable_msi_range(pdev, 1, 1));
804 }
805
806 static inline int
pci_channel_offline(struct pci_dev *pdev)
808 {
809
810 return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID);
811 }
812
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
814 {
815 return -ENODEV;
816 }
817
static inline void pci_disable_sriov(struct pci_dev *dev)
819 {
820 }
821
822 #define pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size) \
823 linuxkpi_pci_iomap_range(pdev, mmio_bar, mmio_off, mmio_size)
824 #define pci_iomap(pdev, mmio_bar, mmio_size) \
825 linuxkpi_pci_iomap(pdev, mmio_bar, mmio_size)
826 #define pcim_iomap(pdev, bar, maxlen) \
827 linuxkpi_pcim_iomap(pdev, bar, maxlen)
828 #define pci_iounmap(pdev, res) \
829 linuxkpi_pci_iounmap(pdev, res)
830
831 static inline void
lkpi_pci_save_state(struct pci_dev *pdev)
833 {
834
835 pci_save_state(pdev->dev.bsddev);
836 }
837
838 static inline void
lkpi_pci_restore_state(struct pci_dev *pdev)
840 {
841
842 pci_restore_state(pdev->dev.bsddev);
843 }
844
845 #define pci_save_state(dev) lkpi_pci_save_state(dev)
846 #define pci_restore_state(dev) lkpi_pci_restore_state(dev)
847
848 static inline int
linuxkpi_pci_enable_wake(struct pci_dev *pdev, pci_power_t state, bool ena)
850 {
	/*
	 * We do not currently support this; device.h also lacks a way to
	 * check whether the device is allowed to wake up in the first place.
	 */
855 pr_debug("%s: TODO\n", __func__);
856 return (0);
857 }
858 #define pci_enable_wake(dev, state, ena) \
859 linuxkpi_pci_enable_wake(dev, state, ena)
860
861 static inline int
pci_reset_function(struct pci_dev *pdev)
863 {
864
865 return (-ENOSYS);
866 }
867
868 #define DEFINE_PCI_DEVICE_TABLE(_table) \
869 const struct pci_device_id _table[] __devinitdata
870
871 /* XXX This should not be necessary. */
872 #define pcix_set_mmrbc(d, v) 0
873 #define pcix_get_max_mmrbc(d) 0
874 #define pcie_set_readrq(d, v) pci_set_max_read_req((d)->dev.bsddev, (v))
875
876 #define PCI_DMA_BIDIRECTIONAL 0
877 #define PCI_DMA_TODEVICE 1
878 #define PCI_DMA_FROMDEVICE 2
879 #define PCI_DMA_NONE 3
880
881 #define pci_pool dma_pool
882 #define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__)
883 #define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__)
884 #define pci_pool_free(...) dma_pool_free(__VA_ARGS__)
885 #define pci_pool_create(_name, _pdev, _size, _align, _alloc) \
886 dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
887 #define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \
888 dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
889 _size, _vaddr, _dma_handle)
890 #define pci_map_sg(_hwdev, _sg, _nents, _dir) \
891 dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
892 _sg, _nents, (enum dma_data_direction)_dir)
893 #define pci_map_single(_hwdev, _ptr, _size, _dir) \
894 dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
895 (_ptr), (_size), (enum dma_data_direction)_dir)
896 #define pci_unmap_single(_hwdev, _addr, _size, _dir) \
897 dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
898 _addr, _size, (enum dma_data_direction)_dir)
899 #define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \
900 dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
901 _sg, _nents, (enum dma_data_direction)_dir)
902 #define pci_map_page(_hwdev, _page, _offset, _size, _dir) \
903 dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
904 _offset, _size, (enum dma_data_direction)_dir)
905 #define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \
906 dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
907 _dma_address, _size, (enum dma_data_direction)_dir)
908 #define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask))
909 #define pci_dma_mapping_error(_pdev, _dma_addr) \
910 dma_mapping_error(&(_pdev)->dev, _dma_addr)
911 #define pci_set_consistent_dma_mask(_pdev, _mask) \
912 dma_set_coherent_mask(&(_pdev)->dev, (_mask))
913 #define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x);
914 #define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x);
915 #define pci_unmap_addr dma_unmap_addr
916 #define pci_unmap_addr_set dma_unmap_addr_set
917 #define pci_unmap_len dma_unmap_len
918 #define pci_unmap_len_set dma_unmap_len_set
919
920 typedef unsigned int __bitwise pci_channel_state_t;
921 typedef unsigned int __bitwise pci_ers_result_t;
922
923 enum pci_channel_state {
924 pci_channel_io_normal = 1,
925 pci_channel_io_frozen = 2,
926 pci_channel_io_perm_failure = 3,
927 };
928
929 enum pci_ers_result {
930 PCI_ERS_RESULT_NONE = 1,
931 PCI_ERS_RESULT_CAN_RECOVER = 2,
932 PCI_ERS_RESULT_NEED_RESET = 3,
933 PCI_ERS_RESULT_DISCONNECT = 4,
934 PCI_ERS_RESULT_RECOVERED = 5,
935 };
936
937 /* PCI bus error event callbacks */
938 struct pci_error_handlers {
939 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
940 enum pci_channel_state error);
941 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
942 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
943 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
944 void (*resume)(struct pci_dev *dev);
945 };
946
947 /* FreeBSD does not support SRIOV - yet */
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
949 {
950 return dev;
951 }
952
static inline bool pci_is_pcie(struct pci_dev *dev)
954 {
955 return !!pci_pcie_cap(dev);
956 }
957
static inline u16 pcie_flags_reg(struct pci_dev *dev)
959 {
960 int pos;
961 u16 reg16;
962
963 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
964 if (!pos)
965 return 0;
966
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
968
969 return reg16;
970 }
971
static inline int pci_pcie_type(struct pci_dev *dev)
973 {
974 return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
975 }
976
static inline int pcie_cap_version(struct pci_dev *dev)
978 {
979 return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
980 }
981
static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
983 {
984 int type = pci_pcie_type(dev);
985
986 return pcie_cap_version(dev) > 1 ||
987 type == PCI_EXP_TYPE_ROOT_PORT ||
988 type == PCI_EXP_TYPE_ENDPOINT ||
989 type == PCI_EXP_TYPE_LEG_END;
990 }
991
static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
993 {
994 return true;
995 }
996
static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
998 {
999 int type = pci_pcie_type(dev);
1000
1001 return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
1002 (type == PCI_EXP_TYPE_DOWNSTREAM &&
1003 pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
1004 }
1005
static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
1007 {
1008 int type = pci_pcie_type(dev);
1009
1010 return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
1011 type == PCI_EXP_TYPE_RC_EC;
1012 }
1013
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
1015 {
1016 if (!pci_is_pcie(dev))
1017 return false;
1018
1019 switch (pos) {
1020 case PCI_EXP_FLAGS_TYPE:
1021 return true;
1022 case PCI_EXP_DEVCAP:
1023 case PCI_EXP_DEVCTL:
1024 case PCI_EXP_DEVSTA:
1025 return pcie_cap_has_devctl(dev);
1026 case PCI_EXP_LNKCAP:
1027 case PCI_EXP_LNKCTL:
1028 case PCI_EXP_LNKSTA:
1029 return pcie_cap_has_lnkctl(dev);
1030 case PCI_EXP_SLTCAP:
1031 case PCI_EXP_SLTCTL:
1032 case PCI_EXP_SLTSTA:
1033 return pcie_cap_has_sltctl(dev);
1034 case PCI_EXP_RTCTL:
1035 case PCI_EXP_RTCAP:
1036 case PCI_EXP_RTSTA:
1037 return pcie_cap_has_rtctl(dev);
1038 case PCI_EXP_DEVCAP2:
1039 case PCI_EXP_DEVCTL2:
1040 case PCI_EXP_LNKCAP2:
1041 case PCI_EXP_LNKCTL2:
1042 case PCI_EXP_LNKSTA2:
1043 return pcie_cap_version(dev) > 1;
1044 default:
1045 return false;
1046 }
1047 }
1048
1049 static inline int
pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
1051 {
1052 *dst = 0;
1053 if (pos & 3)
1054 return -EINVAL;
1055
1056 if (!pcie_capability_reg_implemented(dev, pos))
1057 return -EINVAL;
1058
1059 return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
1060 }
1061
1062 static inline int
pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
1064 {
1065 *dst = 0;
1066 if (pos & 3)
1067 return -EINVAL;
1068
1069 if (!pcie_capability_reg_implemented(dev, pos))
1070 return -EINVAL;
1071
1072 return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
1073 }
1074
1075 static inline int
pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
1077 {
1078 if (pos & 1)
1079 return -EINVAL;
1080
1081 if (!pcie_capability_reg_implemented(dev, pos))
1082 return 0;
1083
1084 return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
1085 }
1086
1087 static inline int
pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
1089 uint16_t clear, uint16_t set)
1090 {
1091 int error;
1092 uint16_t v;
1093
1094 if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
1095 spin_lock(&dev->pcie_cap_lock);
1096
1097 error = pcie_capability_read_word(dev, pos, &v);
1098 if (error == 0) {
1099 v &= ~clear;
1100 v |= set;
1101 error = pcie_capability_write_word(dev, pos, v);
1102 }
1103
1104 if (pos == PCI_EXP_LNKCTL || pos == PCI_EXP_RTCTL)
1105 spin_unlock(&dev->pcie_cap_lock);
1106
1107 return (error);
1108 }
1109
1110 static inline int
pcie_capability_set_word(struct pci_dev *dev, int pos, uint16_t val)
1112 {
1113 return (pcie_capability_clear_and_set_word(dev, pos, 0, val));
1114 }
1115
1116 static inline int
pcie_capability_clear_word(struct pci_dev *dev, int pos, uint16_t val)
1118 {
1119 return (pcie_capability_clear_and_set_word(dev, pos, val, 0));
1120 }
1121
static inline int pcie_get_minimum_link(struct pci_dev *dev,
1123 enum pci_bus_speed *speed, enum pcie_link_width *width)
1124 {
1125 *speed = PCI_SPEED_UNKNOWN;
1126 *width = PCIE_LNK_WIDTH_UNKNOWN;
1127 return (0);
1128 }
1129
1130 static inline int
pci_num_vf(struct pci_dev *dev)
1132 {
1133 return (0);
1134 }
1135
1136 static inline enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
1138 {
1139 device_t root;
1140 uint32_t lnkcap, lnkcap2;
1141 int error, pos;
1142
1143 root = device_get_parent(dev->dev.bsddev);
1144 if (root == NULL)
1145 return (PCI_SPEED_UNKNOWN);
1146 root = device_get_parent(root);
1147 if (root == NULL)
1148 return (PCI_SPEED_UNKNOWN);
1149 root = device_get_parent(root);
1150 if (root == NULL)
1151 return (PCI_SPEED_UNKNOWN);
1152
1153 if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
1154 pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
1155 return (PCI_SPEED_UNKNOWN);
1156
1157 if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
1158 return (PCI_SPEED_UNKNOWN);
1159
1160 lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);
1161
1162 if (lnkcap2) { /* PCIe r3.0-compliant */
1163 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
1164 return (PCIE_SPEED_2_5GT);
1165 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
1166 return (PCIE_SPEED_5_0GT);
1167 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
1168 return (PCIE_SPEED_8_0GT);
1169 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
1170 return (PCIE_SPEED_16_0GT);
1171 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
1172 return (PCIE_SPEED_32_0GT);
1173 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_64_0GB)
1174 return (PCIE_SPEED_64_0GT);
1175 } else { /* pre-r3.0 */
1176 lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
1177 if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
1178 return (PCIE_SPEED_2_5GT);
1179 if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
1180 return (PCIE_SPEED_5_0GT);
1181 if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
1182 return (PCIE_SPEED_8_0GT);
1183 if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
1184 return (PCIE_SPEED_16_0GT);
1185 if (lnkcap & PCI_EXP_LNKCAP_SLS_32_0GB)
1186 return (PCIE_SPEED_32_0GT);
1187 if (lnkcap & PCI_EXP_LNKCAP_SLS_64_0GB)
1188 return (PCIE_SPEED_64_0GT);
1189 }
1190 return (PCI_SPEED_UNKNOWN);
1191 }
1192
1193 static inline enum pcie_link_width
pcie_get_width_cap(struct pci_dev *dev)
1195 {
1196 uint32_t lnkcap;
1197
1198 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
1199 if (lnkcap)
1200 return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
1201
1202 return (PCIE_LNK_WIDTH_UNKNOWN);
1203 }
1204
1205 static inline int
pcie_get_mps(struct pci_dev *dev)
1207 {
1208 return (pci_get_max_payload(dev->dev.bsddev));
1209 }
1210
1211 static inline uint32_t
PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
1213 {
1214
1215 switch(spd) {
1216 case PCIE_SPEED_64_0GT:
1217 return (64000 * 128 / 130);
1218 case PCIE_SPEED_32_0GT:
1219 return (32000 * 128 / 130);
1220 case PCIE_SPEED_16_0GT:
1221 return (16000 * 128 / 130);
1222 case PCIE_SPEED_8_0GT:
1223 return (8000 * 128 / 130);
1224 case PCIE_SPEED_5_0GT:
1225 return (5000 * 8 / 10);
1226 case PCIE_SPEED_2_5GT:
1227 return (2500 * 8 / 10);
1228 default:
1229 return (0);
1230 }
1231 }
1232
1233 static inline uint32_t
pcie_bandwidth_available(struct pci_dev *pdev,
1235 struct pci_dev **limiting,
1236 enum pci_bus_speed *speed,
1237 enum pcie_link_width *width)
1238 {
1239 enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
1240 enum pcie_link_width nwidth = pcie_get_width_cap(pdev);
1241
1242 if (speed)
1243 *speed = nspeed;
1244 if (width)
1245 *width = nwidth;
1246
1247 return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
1248 }
1249
1250 static inline bool
pcie_aspm_enabled(struct pci_dev *pdev)
1252 {
1253 return (false);
1254 }
1255
1256 static inline struct pci_dev *
pcie_find_root_port(struct pci_dev *pdev)
1258 {
1259 device_t root;
1260
1261 if (pdev->root != NULL)
1262 return (pdev->root);
1263
1264 root = pci_find_pcie_root_port(pdev->dev.bsddev);
1265 if (root == NULL)
1266 return (NULL);
1267
1268 pdev->root = lkpinew_pci_dev(root);
1269 return (pdev->root);
1270 }
1271
1272 /* This is needed when people rip out the device "HotPlug". */
1273 static inline void
pci_lock_rescan_remove(void)
1275 {
1276 }
1277
1278 static inline void
pci_unlock_rescan_remove(void)
1280 {
1281 }
1282
1283 static __inline void
pci_stop_and_remove_bus_device(struct pci_dev *pdev)
1285 {
1286 }
1287
1288 static inline int
pci_rescan_bus(struct pci_bus *pbus)
1290 {
1291 device_t *devlist, parent;
1292 int devcount, error;
1293
1294 if (!device_is_attached(pbus->self->dev.bsddev))
1295 return (0);
1296 /* pci_rescan_method() will work on the pcib (parent). */
1297 error = BUS_RESCAN(pbus->self->dev.bsddev);
1298 if (error != 0)
1299 return (0);
1300
1301 parent = device_get_parent(pbus->self->dev.bsddev);
1302 error = device_get_children(parent, &devlist, &devcount);
1303 if (error != 0)
1304 return (0);
1305 if (devcount != 0)
1306 free(devlist, M_TEMP);
1307
1308 return (devcount);
1309 }
1310
/*
 * The following functions can be used to attach/detach the LinuxKPI's
 * PCI device runtime.  The pci_driver and pci_device_id pointers are
 * allowed to be NULL.  All other pointers must be valid.
 * The pci_dev structure should be zero-initialized before being passed
 * to linux_pci_attach_device().
 */
1318 extern int linux_pci_attach_device(device_t, struct pci_driver *,
1319 const struct pci_device_id *, struct pci_dev *);
1320 extern int linux_pci_detach_device(struct pci_dev *);
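
/*
 * Minimal sketch, not taken from a real driver (the allocation pattern and
 * error handling are assumptions): a native attach routine could hook its
 * device into the LinuxKPI PCI runtime roughly like this.
 *
 *	struct pci_dev *pdev;
 *	int error;
 *
 *	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
 *	error = linux_pci_attach_device(dev, NULL, NULL, pdev);
 *	if (error != 0) {
 *		free(pdev, M_DEVBUF);
 *		return (error);
 *	}
 *	...
 *	linux_pci_detach_device(pdev);
 */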
1321
1322 static inline int
pci_dev_present(const struct pci_device_id *cur)
1324 {
1325 while (cur != NULL && (cur->vendor || cur->device)) {
1326 if (pci_find_device(cur->vendor, cur->device) != NULL) {
1327 return (1);
1328 }
1329 cur++;
1330 }
1331 return (0);
1332 }
1333
1334 static inline const struct pci_device_id *
pci_match_id(const struct pci_device_id *ids, struct pci_dev *pdev)
1336 {
1337 if (ids == NULL)
1338 return (NULL);
1339
1340 for (;
1341 ids->vendor != 0 || ids->subvendor != 0 || ids->class_mask != 0;
1342 ids++)
1343 if ((ids->vendor == PCI_ANY_ID ||
1344 ids->vendor == pdev->vendor) &&
1345 (ids->device == PCI_ANY_ID ||
1346 ids->device == pdev->device) &&
1347 (ids->subvendor == PCI_ANY_ID ||
1348 ids->subvendor == pdev->subsystem_vendor) &&
1349 (ids->subdevice == PCI_ANY_ID ||
1350 ids->subdevice == pdev->subsystem_device) &&
1351 ((ids->class ^ pdev->class) & ids->class_mask) == 0)
1352 return (ids);
1353
1354 return (NULL);
1355 }
1356
1357 struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
1358 unsigned int bus, unsigned int devfn);
1359 #define pci_get_domain_bus_and_slot(domain, bus, devfn) \
1360 lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)
1361
1362 struct pci_dev *lkpi_pci_get_slot(struct pci_bus *, unsigned int);
1363 #ifndef WANT_NATIVE_PCI_GET_SLOT
1364 #define pci_get_slot(_pbus, _devfn) \
1365 lkpi_pci_get_slot(_pbus, _devfn)
1366 #endif
1367
1368 static inline int
pci_domain_nr(struct pci_bus *pbus)
1370 {
1371
1372 return (pbus->domain);
1373 }
1374
1375 static inline int
pci_bus_read_config(struct pci_bus *bus, unsigned int devfn,
1377 int pos, uint32_t *val, int len)
1378 {
1379
1380 *val = pci_read_config(bus->self->dev.bsddev, pos, len);
1381 return (0);
1382 }
1383
1384 static inline int
pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, int pos, u16 *val)
1386 {
1387 uint32_t tmp;
1388 int ret;
1389
1390 ret = pci_bus_read_config(bus, devfn, pos, &tmp, 2);
1391 *val = (u16)tmp;
1392 return (ret);
1393 }
1394
1395 static inline int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, u8 *val)
1397 {
1398 uint32_t tmp;
1399 int ret;
1400
1401 ret = pci_bus_read_config(bus, devfn, pos, &tmp, 1);
1402 *val = (u8)tmp;
1403 return (ret);
1404 }
1405
1406 static inline int
pci_bus_write_config(struct pci_bus *bus, unsigned int devfn, int pos,
1408 uint32_t val, int size)
1409 {
1410
1411 pci_write_config(bus->self->dev.bsddev, pos, val, size);
1412 return (0);
1413 }
1414
1415 static inline int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, int pos,
1417 uint8_t val)
1418 {
1419 return (pci_bus_write_config(bus, devfn, pos, val, 1));
1420 }
1421
1422 static inline int
pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int pos,
1424 uint16_t val)
1425 {
1426 return (pci_bus_write_config(bus, devfn, pos, val, 2));
1427 }
1428
1429 struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from);
1430 #define pci_get_class(class, from) lkpi_pci_get_class(class, from)
1431 struct pci_dev *lkpi_pci_get_base_class(unsigned int class,
1432 struct pci_dev *from);
1433 #define pci_get_base_class(class, from) lkpi_pci_get_base_class(class, from)
1434
1435 /* -------------------------------------------------------------------------- */
1436
1437 #define pcim_enable_device(pdev) \
1438 linuxkpi_pcim_enable_device(pdev)
1439 #define pcim_iomap_table(pdev) \
1440 linuxkpi_pcim_iomap_table(pdev)
1441 #define pcim_iomap_regions(pdev, mask, name) \
1442 linuxkpi_pcim_iomap_regions(pdev, mask, name)
1443
1444 static inline int
pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)
1446 {
1447 uint32_t requests, req_mask;
1448 int bar, error;
1449
1450 /* Request all the BARs ("regions") we do not iomap. */
1451 req_mask = ((1 << (PCIR_MAX_BAR_0 + 1)) - 1) & ~mask;
1452 for (bar = requests = 0; requests != req_mask; bar++) {
1453 if ((req_mask & (1 << bar)) == 0)
1454 continue;
1455 error = pci_request_region(pdev, bar, name);
1456 if (error != 0 && error != -ENODEV)
1457 goto err;
1458 requests |= (1 << bar);
1459 }
1460
1461 error = pcim_iomap_regions(pdev, mask, name);
1462 if (error != 0)
1463 goto err;
1464
1465 return (0);
1466
1467 err:
1468 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
1469 if ((requests & (1 << bar)) != 0)
1470 pci_release_region(pdev, bar);
1471 }
1472
1473 return (-EINVAL);
1474 }
1475
/*
 * We cannot simply re-define pci_get_device() as we normally would and then
 * hide it in linux_pci.c, as too many semi-native drivers still include
 * linux/pci.h and would run into a conflict with native PCI.  Linux drivers
 * using pci_get_device() need to be changed to call linuxkpi_pci_get_device().
 */
1482 static inline struct pci_dev *
linuxkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
1484 {
1485
1486 return (lkpi_pci_get_device(vendor, device, odev));
1487 }
1488
1489 #define for_each_pci_dev(_pdev) \
1490 while ((_pdev = linuxkpi_pci_get_device(PCI_ANY_ID, PCI_ANY_ID, _pdev)) != NULL)
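
/*
 * Illustrative sketch only: iterating over all PCI devices known to the
 * LinuxKPI.  The iterator variable must start out as NULL.
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	for_each_pci_dev(pdev)
 *		pr_debug("%s\n", pci_name(pdev));
 */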
1491
1492 /* This is a FreeBSD extension so we can use bus_*(). */
1493 static inline void
linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev)
1495 {
1496 pdev->want_iomap_res = true;
1497 }
1498
1499 static inline bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
1501 {
1502
1503 return (false);
1504 }
1505
1506 static inline void *
pci_platform_rom(struct pci_dev *pdev, size_t *size)
1508 {
1509
1510 return (NULL);
1511 }
1512
1513 static inline void
pci_ignore_hotplug(struct pci_dev *pdev)
1515 {
1516 }
1517
1518 static inline const char *
pci_power_name(pci_power_t state)
1520 {
1521 int pstate = state + 1;
1522
1523 if (pstate >= 0 && pstate < nitems(pci_power_names))
1524 return (pci_power_names[pstate]);
1525 else
1526 return (pci_power_names[0]);
1527 }
1528
1529 static inline int
pcie_get_readrq(struct pci_dev *dev)
1531 {
1532 u16 ctl;
1533
1534 if (pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl))
1535 return (-EINVAL);
1536
1537 return (128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12));
1538 }
1539
1540 static inline bool
pci_is_enabled(struct pci_dev *pdev)
1542 {
1543
1544 return ((pci_read_config(pdev->dev.bsddev, PCIR_COMMAND, 2) &
1545 PCIM_CMD_BUSMASTEREN) != 0);
1546 }
1547
1548 static inline int
pci_wait_for_pending_transaction(struct pci_dev *pdev)
1550 {
1551
1552 return (0);
1553 }
1554
1555 static inline int
pci_assign_resource(struct pci_dev *pdev, int bar)
1557 {
1558
1559 return (0);
1560 }
1561
1562 static inline int
pci_irq_vector(struct pci_dev *pdev, unsigned int vector)
1564 {
1565
1566 if (!pdev->msix_enabled && !pdev->msi_enabled) {
1567 if (vector != 0)
1568 return (-EINVAL);
1569 return (pdev->irq);
1570 }
1571
1572 if (pdev->msix_enabled || pdev->msi_enabled) {
1573 if ((pdev->dev.irq_start + vector) >= pdev->dev.irq_end)
1574 return (-EINVAL);
1575 return (pdev->dev.irq_start + vector);
1576 }
1577
1578 return (-ENXIO);
1579 }
1580
1581 static inline int
pci_wake_from_d3(struct pci_dev *pdev, bool enable)
1583 {
1584
1585 pr_debug("%s: TODO\n", __func__);
1586 return (0);
1587 }
1588
1589 #endif /* _LINUXKPI_LINUX_PCI_H_ */
1590