xref: /linux/include/linux/vfio_pci_core.h (revision ba23adb6533149df33b9a247f31a87227b3c86d5)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
4  *     Author: Alex Williamson <alex.williamson@redhat.com>
5  *
6  * Derived from original vfio:
7  * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
8  * Author: Tom Lyon, pugs@cisco.com
9  */
10 
11 #include <linux/mutex.h>
12 #include <linux/pci.h>
13 #include <linux/vfio.h>
14 #include <linux/irqbypass.h>
15 #include <linux/rcupdate.h>
16 #include <linux/types.h>
17 #include <linux/uuid.h>
18 #include <linux/notifier.h>
19 
20 #ifndef VFIO_PCI_CORE_H
21 #define VFIO_PCI_CORE_H
22 
/*
 * The vfio-pci device file offset is multiplexed: the bits above
 * VFIO_PCI_OFFSET_SHIFT select a region index, the low bits are the
 * offset within that region.
 */
#define VFIO_PCI_OFFSET_SHIFT   40
/* Parenthesize the argument so expressions like (a | b) bind correctly. */
#define VFIO_PCI_OFFSET_TO_INDEX(off)	((off) >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
27 
28 struct vfio_pci_core_device;
29 struct vfio_pci_region;
30 struct p2pdma_provider;
31 struct dma_buf_attachment;
32 
33 struct vfio_pci_eventfd {
34 	struct eventfd_ctx	*ctx;
35 	struct rcu_head		rcu;
36 };
37 
38 struct vfio_pci_regops {
39 	ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
40 		      size_t count, loff_t *ppos, bool iswrite);
41 	void	(*release)(struct vfio_pci_core_device *vdev,
42 			   struct vfio_pci_region *region);
43 	int	(*mmap)(struct vfio_pci_core_device *vdev,
44 			struct vfio_pci_region *region,
45 			struct vm_area_struct *vma);
46 	int	(*add_capability)(struct vfio_pci_core_device *vdev,
47 				  struct vfio_pci_region *region,
48 				  struct vfio_info_cap *caps);
49 };
50 
51 struct vfio_pci_region {
52 	u32				type;
53 	u32				subtype;
54 	const struct vfio_pci_regops	*ops;
55 	void				*data;
56 	size_t				size;
57 	u32				flags;
58 };
59 
60 struct vfio_pci_device_ops {
61 	int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
62 			       struct p2pdma_provider **provider,
63 			       unsigned int region_index,
64 			       struct phys_vec *phys_vec,
65 			       struct vfio_region_dma_range *dma_ranges,
66 			       size_t nr_ranges);
67 };
68 
69 #if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
70 int vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
71 				struct vfio_region_dma_range *dma_ranges,
72 				size_t nr_ranges, phys_addr_t start,
73 				phys_addr_t len);
74 int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
75 				  struct p2pdma_provider **provider,
76 				  unsigned int region_index,
77 				  struct phys_vec *phys_vec,
78 				  struct vfio_region_dma_range *dma_ranges,
79 				  size_t nr_ranges);
80 #else
81 static inline int
vfio_pci_core_fill_phys_vec(struct phys_vec * phys_vec,struct vfio_region_dma_range * dma_ranges,size_t nr_ranges,phys_addr_t start,phys_addr_t len)82 vfio_pci_core_fill_phys_vec(struct phys_vec *phys_vec,
83 			    struct vfio_region_dma_range *dma_ranges,
84 			    size_t nr_ranges, phys_addr_t start,
85 			    phys_addr_t len)
86 {
87 	return -EINVAL;
88 }
vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device * vdev,struct p2pdma_provider ** provider,unsigned int region_index,struct phys_vec * phys_vec,struct vfio_region_dma_range * dma_ranges,size_t nr_ranges)89 static inline int vfio_pci_core_get_dmabuf_phys(
90 	struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
91 	unsigned int region_index, struct phys_vec *phys_vec,
92 	struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
93 {
94 	return -EOPNOTSUPP;
95 }
96 #endif
97 
98 struct vfio_pci_core_device {
99 	struct vfio_device	vdev;
100 	struct pci_dev		*pdev;
101 	const struct vfio_pci_device_ops *pci_ops;
102 	void __iomem		*barmap[PCI_STD_NUM_BARS];
103 	bool			bar_mmap_supported[PCI_STD_NUM_BARS];
104 	u8			*pci_config_map;
105 	u8			*vconfig;
106 	struct perm_bits	*msi_perm;
107 	spinlock_t		irqlock;
108 	struct mutex		igate;
109 	struct xarray		ctx;
110 	int			irq_type;
111 	int			num_regions;
112 	struct vfio_pci_region	*region;
113 	u8			msi_qmax;
114 	u8			msix_bar;
115 	u16			msix_size;
116 	u32			msix_offset;
117 	u32			rbar[7];
118 	bool			has_dyn_msix:1;
119 	bool			pci_2_3:1;
120 	bool			virq_disabled:1;
121 	bool			reset_works:1;
122 	bool			extended_caps:1;
123 	bool			bardirty:1;
124 	bool			has_vga:1;
125 	bool			needs_reset:1;
126 	bool			nointx:1;
127 	bool			needs_pm_restore:1;
128 	bool			pm_intx_masked:1;
129 	bool			pm_runtime_engaged:1;
130 	struct pci_saved_state	*pci_saved_state;
131 	struct pci_saved_state	*pm_save;
132 	int			ioeventfds_nr;
133 	struct vfio_pci_eventfd __rcu *err_trigger;
134 	struct vfio_pci_eventfd __rcu *req_trigger;
135 	struct eventfd_ctx	*pm_wake_eventfd_ctx;
136 	struct list_head	dummy_resources_list;
137 	struct mutex		ioeventfds_lock;
138 	struct list_head	ioeventfds_list;
139 	struct vfio_pci_vf_token	*vf_token;
140 	struct list_head		sriov_pfs_item;
141 	struct vfio_pci_core_device	*sriov_pf_core_dev;
142 	struct notifier_block	nb;
143 	struct rw_semaphore	memory_lock;
144 	struct list_head	dmabufs;
145 };
146 
/* Access width in bytes for vfio_pci_core_do_io_rw(). */
enum vfio_pci_io_width {
	VFIO_PCI_IO_WIDTH_1 = 1,
	VFIO_PCI_IO_WIDTH_2 = 2,
	VFIO_PCI_IO_WIDTH_4 = 4,
	VFIO_PCI_IO_WIDTH_8 = 8,
};
153 
154 /* Will be exported for vfio pci drivers usage */
155 int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
156 				      unsigned int type, unsigned int subtype,
157 				      const struct vfio_pci_regops *ops,
158 				      size_t size, u32 flags, void *data);
159 void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
160 			      bool is_disable_idle_d3);
161 void vfio_pci_core_close_device(struct vfio_device *core_vdev);
162 int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
163 void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
164 int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
165 void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
166 extern const struct pci_error_handlers vfio_pci_core_err_handlers;
167 int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
168 				  int nr_virtfn);
169 long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
170 		unsigned long arg);
171 int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
172 				void __user *arg, size_t argsz);
173 int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
174 				   struct vfio_region_info *info,
175 				   struct vfio_info_cap *caps);
176 ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
177 		size_t count, loff_t *ppos);
178 ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
179 		size_t count, loff_t *ppos);
180 vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev,
181 				   struct vm_fault *vmf, unsigned long pfn,
182 				   unsigned int order);
183 int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
184 void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
185 int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
186 int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
187 				   const uuid_t *uuid);
188 int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
189 void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
190 void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
191 int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
192 pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
193 						pci_channel_state_t state);
194 ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
195 			       void __iomem *io, char __user *buf,
196 			       loff_t off, size_t count, size_t x_start,
197 			       size_t x_end, bool iswrite,
198 			       enum vfio_pci_io_width max_width);
199 bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
200 bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
201 					 loff_t reg_start, size_t reg_cnt,
202 					 loff_t *buf_offset,
203 					 size_t *intersect_count,
204 					 size_t *register_offset);
205 #define VFIO_IOWRITE_DECLARATION(size) \
206 int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev,	\
207 			bool test_mem, u##size val, void __iomem *io);
208 
209 VFIO_IOWRITE_DECLARATION(8)
210 VFIO_IOWRITE_DECLARATION(16)
211 VFIO_IOWRITE_DECLARATION(32)
212 #ifdef iowrite64
213 VFIO_IOWRITE_DECLARATION(64)
214 #endif
215 
216 #define VFIO_IOREAD_DECLARATION(size) \
217 int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev,	\
218 			bool test_mem, u##size *val, void __iomem *io);
219 
220 VFIO_IOREAD_DECLARATION(8)
221 VFIO_IOREAD_DECLARATION(16)
222 VFIO_IOREAD_DECLARATION(32)
223 #ifdef ioread64
224 VFIO_IOREAD_DECLARATION(64)
225 #endif
226 
is_aligned_for_order(struct vm_area_struct * vma,unsigned long addr,unsigned long pfn,unsigned int order)227 static inline bool is_aligned_for_order(struct vm_area_struct *vma,
228 					unsigned long addr,
229 					unsigned long pfn,
230 					unsigned int order)
231 {
232 	return !(order && (addr < vma->vm_start ||
233 			   addr + (PAGE_SIZE << order) > vma->vm_end ||
234 			   !IS_ALIGNED(pfn, 1 << order)));
235 }
236 
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				 struct phys_vec *phys);
239 
240 #endif /* VFIO_PCI_CORE_H */
241