/* xref: /linux/include/linux/virtio_pci_modern.h (revision 06d07429858317ded2db7986113a9e0129cd599b) */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_PCI_MODERN_H
#define _LINUX_VIRTIO_PCI_MODERN_H

#include <linux/pci.h>
#include <linux/virtio_pci.h>

8 /**
9  * struct virtio_pci_modern_device - info for modern PCI virtio
10  * @pci_dev:	    Ptr to the PCI device struct
11  * @common:	    Position of the common capability in the PCI config
12  * @device:	    Device-specific data (non-legacy mode)
13  * @notify_base:    Base of vq notifications (non-legacy mode)
14  * @notify_pa:	    Physical base of vq notifications
15  * @isr:	    Where to read and clear interrupt
16  * @notify_len:	    So we can sanity-check accesses
17  * @device_len:	    So we can sanity-check accesses
18  * @notify_map_cap: Capability for when we need to map notifications per-vq
19  * @notify_offset_multiplier: Multiply queue_notify_off by this value
20  *                            (non-legacy mode).
21  * @modern_bars:    Bitmask of BARs
22  * @id:		    Device and vendor id
23  * @device_id_check: Callback defined before vp_modern_probe() to be used to
24  *		    verify the PCI device is a vendor's expected device rather
25  *		    than the standard virtio PCI device
26  *		    Returns the found device id or ERRNO
27  * @dma_mask:	    Optional mask instead of the traditional DMA_BIT_MASK(64),
28  *		    for vendor devices with DMA space address limitations
29  */
30 struct virtio_pci_modern_device {
31 	struct pci_dev *pci_dev;
32 
33 	struct virtio_pci_common_cfg __iomem *common;
34 	void __iomem *device;
35 	void __iomem *notify_base;
36 	resource_size_t notify_pa;
37 	u8 __iomem *isr;
38 
39 	size_t notify_len;
40 	size_t device_len;
41 	size_t common_len;
42 
43 	int notify_map_cap;
44 
45 	u32 notify_offset_multiplier;
46 	int modern_bars;
47 	struct virtio_device_id id;
48 
49 	int (*device_id_check)(struct pci_dev *pdev);
50 	u64 dma_mask;
51 };
52 
/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
vp_ioread8(const u8 __iomem * addr)61 static inline u8 vp_ioread8(const u8 __iomem *addr)
62 {
63 	return ioread8(addr);
64 }
vp_ioread16(const __le16 __iomem * addr)65 static inline u16 vp_ioread16 (const __le16 __iomem *addr)
66 {
67 	return ioread16(addr);
68 }
69 
vp_ioread32(const __le32 __iomem * addr)70 static inline u32 vp_ioread32(const __le32 __iomem *addr)
71 {
72 	return ioread32(addr);
73 }
74 
vp_iowrite8(u8 value,u8 __iomem * addr)75 static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
76 {
77 	iowrite8(value, addr);
78 }
79 
vp_iowrite16(u16 value,__le16 __iomem * addr)80 static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
81 {
82 	iowrite16(value, addr);
83 }
84 
vp_iowrite32(u32 value,__le32 __iomem * addr)85 static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
86 {
87 	iowrite32(value, addr);
88 }
89 
vp_iowrite64_twopart(u64 val,__le32 __iomem * lo,__le32 __iomem * hi)90 static inline void vp_iowrite64_twopart(u64 val,
91 					__le32 __iomem *lo,
92 					__le32 __iomem *hi)
93 {
94 	vp_iowrite32((u32)val, lo);
95 	vp_iowrite32(val >> 32, hi);
96 }
97 
98 u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev);
99 u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev);
100 void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
101 		     u64 features);
102 u32 vp_modern_generation(struct virtio_pci_modern_device *mdev);
103 u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev);
104 void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
105 		   u8 status);
106 u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
107 			   u16 idx, u16 vector);
108 u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
109 		     u16 vector);
110 void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
111 			     u16 index, u64 desc_addr, u64 driver_addr,
112 			     u64 device_addr);
113 void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
114 				u16 idx, bool enable);
115 bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
116 				u16 idx);
117 void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
118 			      u16 idx, u16 size);
119 u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
120 			     u16 idx);
121 u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev);
122 void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
123 				       u16 index, resource_size_t *pa);
124 int vp_modern_probe(struct virtio_pci_modern_device *mdev);
125 void vp_modern_remove(struct virtio_pci_modern_device *mdev);
126 int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
127 void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index);
128 u16 vp_modern_avq_num(struct virtio_pci_modern_device *mdev);
129 u16 vp_modern_avq_index(struct virtio_pci_modern_device *mdev);
#endif /* _LINUX_VIRTIO_PCI_MODERN_H */