xref: /linux/tools/testing/selftests/vfio/lib/include/vfio_util.h (revision 6da43bbeb6918164f7287269881a5f861ae09d7e)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
3 #define SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
4 
5 #include <fcntl.h>
6 #include <string.h>
7 
8 #include <uapi/linux/types.h>
9 #include <linux/iommufd.h>
10 #include <linux/list.h>
11 #include <linux/pci_regs.h>
12 #include <linux/vfio.h>
13 
14 #include "../../../kselftest.h"
15 
/*
 * Print an indented, printf-style message to stderr followed by a newline,
 * then terminate the test with KSFT_FAIL.
 */
#define VFIO_LOG_AND_EXIT(...) do {		\
	fprintf(stderr, "  " __VA_ARGS__);	\
	fprintf(stderr, "\n");			\
	exit(KSFT_FAIL);			\
} while (0)
21 
/*
 * Assert that "(_lhs) _op (_rhs)" holds; each operand is evaluated exactly
 * once (captured via typeof before the comparison). On failure, print the
 * failing expression, the observed values (as hex), and the current errno,
 * then emit the optional printf-style message in __VA_ARGS__ and exit the
 * test with KSFT_FAIL via VFIO_LOG_AND_EXIT().
 */
#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do {				\
	typeof(_lhs) __lhs = (_lhs);						\
	typeof(_rhs) __rhs = (_rhs);						\
										\
	if (__lhs _op __rhs)							\
		break;								\
										\
	fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__);	\
	fprintf(stderr, "  Expression: " #_lhs " " #_op " " #_rhs "\n");	\
	fprintf(stderr, "  Observed: %#lx %s %#lx\n",				\
			(u64)__lhs, #_op, (u64)__rhs);				\
	fprintf(stderr, "  [errno: %d - %s]\n", errno, strerror(errno));	\
	VFIO_LOG_AND_EXIT(__VA_ARGS__);						\
} while (0)
36 
/* Convenience wrappers around VFIO_ASSERT_OP() for common comparisons. */
#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__)
#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__)
#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__)
#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__)
#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__)
#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__)
#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, _a, ##__VA_ARGS__)
#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, _a, ##__VA_ARGS__)
47 
/* Unconditionally fail the test at the current file/line with a message. */
#define VFIO_FAIL(_fmt, ...) do {				\
	fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__);	\
	VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__);			\
} while (0)
52 
/* Describes one way of attaching a device to an IOMMU backend. */
struct vfio_iommu_mode {
	/* Human-readable mode name (e.g. for variant selection). */
	const char *name;
	/* Path to the container device node; presumably NULL for iommufd-native
	 * modes — TODO confirm against vfio_pci_device_init(). */
	const char *container_path;
	/* VFIO_*_IOMMU type passed to the container, where applicable. */
	unsigned long iommu_type;
};
58 
/*
 * Generator for VFIO selftests fixture variants that replicate across all
 * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE()
 * (before invoking this macro) which should then use FIXTURE_VARIANT_ADD()
 * to create the variant. Any extra arguments are forwarded to each
 * per-mode invocation.
 */
#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__)
70 
/* A single PCI BAR region and its mapping, if any. */
struct vfio_pci_bar {
	/* Region info reported by VFIO for this BAR. */
	struct vfio_region_info info;
	/* Host virtual address the BAR is mapped at; presumably NULL when
	 * unmapped — TODO confirm against the BAR setup code. */
	void *vaddr;
};
75 
/* An I/O virtual address, i.e. an address in the device's DMA address space. */
typedef u64 iova_t;

/* Sentinel IOVA value; no valid mapping uses this address. */
#define INVALID_IOVA UINT64_MAX
79 
/* A host buffer mapped into the device's IOMMU domain for DMA. */
struct vfio_dma_region {
	/* Linkage on vfio_pci_device::dma_regions. */
	struct list_head link;
	/* Host virtual address of the buffer. */
	void *vaddr;
	/* IOVA the buffer is mapped at. */
	iova_t iova;
	/* Size of the mapping in bytes. */
	u64 size;
};
86 
87 struct vfio_pci_device;
88 
/*
 * Callbacks implemented by a device-specific test driver, used by the
 * selftests to make a device issue DMA and MSIs. Per-device driver state
 * lives in struct vfio_pci_driver.
 */
struct vfio_pci_driver_ops {
	/* Driver name. */
	const char *name;

	/**
	 * @probe() - Check if the driver supports the given device.
	 *
	 * Return: 0 on success, non-0 on failure.
	 */
	int (*probe)(struct vfio_pci_device *device);

	/**
	 * @init() - Initialize the driver for @device.
	 *
	 * Must be called after device->driver.region has been initialized.
	 */
	void (*init)(struct vfio_pci_device *device);

	/**
	 * remove() - Deinitialize the driver for @device.
	 */
	void (*remove)(struct vfio_pci_device *device);

	/**
	 * memcpy_start() - Kick off @count repeated memcpy operations from
	 * [@src, @src + @size) to [@dst, @dst + @size).
	 *
	 * Guarantees:
	 *  - The device will attempt DMA reads on [src, src + size).
	 *  - The device will attempt DMA writes on [dst, dst + size).
	 *  - The device will not generate any interrupts.
	 *
	 * memcpy_start() returns immediately, it does not wait for the
	 * copies to complete.
	 */
	void (*memcpy_start)(struct vfio_pci_device *device,
			     iova_t src, iova_t dst, u64 size, u64 count);

	/**
	 * memcpy_wait() - Wait until the memcpy operations started by
	 * memcpy_start() have finished.
	 *
	 * Guarantees:
	 *  - All in-flight DMAs initiated by memcpy_start() are fully complete
	 *    before memcpy_wait() returns.
	 *
	 * Returns non-0 if the driver detects that an error occurred during the
	 * memcpy, 0 otherwise.
	 */
	int (*memcpy_wait)(struct vfio_pci_device *device);

	/**
	 * send_msi() - Make the device send the MSI device->driver.msi.
	 *
	 * Guarantees:
	 *  - The device will send the MSI once.
	 */
	void (*send_msi)(struct vfio_pci_device *device);
};
147 
/* Per-device state for the test driver bound to a vfio_pci_device. */
struct vfio_pci_driver {
	/* Callback table for this driver; NULL semantics not shown here —
	 * presumably unset until probe succeeds, TODO confirm in driver.c. */
	const struct vfio_pci_driver_ops *ops;
	/* True once ops->init() has run (and before ops->remove()). */
	bool initialized;
	/* True while a memcpy_start() has not yet been memcpy_wait()ed. */
	bool memcpy_in_progress;

	/* Region to be used by the driver (e.g. for in-memory descriptors) */
	struct vfio_dma_region region;

	/* The maximum size that can be passed to memcpy_start(). */
	u64 max_memcpy_size;

	/* The maximum count that can be passed to memcpy_start(). */
	u64 max_memcpy_count;

	/* The MSI vector the device will signal in ops->send_msi(). */
	int msi;
};
165 
/* Handle for a VFIO-managed PCI device under test. */
struct vfio_pci_device {
	/* VFIO device file descriptor. */
	int fd;

	/* IOMMU attachment mode selected at init time. */
	const struct vfio_iommu_mode *iommu_mode;
	/* Legacy VFIO group/container fds; presumably unused in iommufd
	 * modes — TODO confirm against vfio_pci_device_init(). */
	int group_fd;
	int container_fd;

	/* iommufd fd and IOAS id for iommufd-based modes. */
	int iommufd;
	u32 ioas_id;

	struct vfio_device_info info;
	struct vfio_region_info config_space;
	struct vfio_pci_bar bars[PCI_STD_NUM_BARS];

	struct vfio_irq_info msi_info;
	struct vfio_irq_info msix_info;

	/* List of struct vfio_dma_region currently mapped for this device. */
	struct list_head dma_regions;

	/* eventfds for MSI and MSI-x interrupts */
	int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];

	/* Test-driver state (see struct vfio_pci_driver). */
	struct vfio_pci_driver driver;
};
190 
/*
 * Simple bump allocator over the device's valid IOVA ranges.
 * Cursor state: @range_idx selects the current range in @ranges,
 * @range_offset is the next free offset within it.
 */
struct iova_allocator {
	struct iommu_iova_range *ranges;
	u32 nranges;
	u32 range_idx;
	u64 range_offset;
};
197 
198 /*
199  * Return the BDF string of the device that the test should use.
200  *
201  * If a BDF string is provided by the user on the command line (as the last
202  * element of argv[]), then this function will return that and decrement argc
203  * by 1.
204  *
205  * Otherwise this function will attempt to use the environment variable
206  * $VFIO_SELFTESTS_BDF.
207  *
208  * If BDF cannot be determined then the test will exit with KSFT_SKIP.
209  */
210 const char *vfio_selftests_get_bdf(int *argc, char *argv[]);
211 const char *vfio_pci_get_cdev_path(const char *bdf);
212 
213 extern const char *default_iommu_mode;
214 
215 struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode);
216 void vfio_pci_device_cleanup(struct vfio_pci_device *device);
217 void vfio_pci_device_reset(struct vfio_pci_device *device);
218 
219 struct iommu_iova_range *vfio_pci_iova_ranges(struct vfio_pci_device *device,
220 					      u32 *nranges);
221 
222 struct iova_allocator *iova_allocator_init(struct vfio_pci_device *device);
223 void iova_allocator_cleanup(struct iova_allocator *allocator);
224 iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
225 
226 int __vfio_pci_dma_map(struct vfio_pci_device *device,
227 		       struct vfio_dma_region *region);
228 int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
229 			 struct vfio_dma_region *region,
230 			 u64 *unmapped);
231 int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped);
232 
/* Map @region for DMA on @device, asserting that the map succeeds. */
static inline void vfio_pci_dma_map(struct vfio_pci_device *device,
				    struct vfio_dma_region *region)
{
	int ret = __vfio_pci_dma_map(device, region);

	VFIO_ASSERT_EQ(ret, 0);
}
238 
vfio_pci_dma_unmap(struct vfio_pci_device * device,struct vfio_dma_region * region)239 static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device,
240 				      struct vfio_dma_region *region)
241 {
242 	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0);
243 }
244 
vfio_pci_dma_unmap_all(struct vfio_pci_device * device)245 static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device)
246 {
247 	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0);
248 }
249 
250 void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
251 			    size_t config, size_t size, void *data);
252 
/*
 * Read a value of the given C type from the device's config space at
 * _offset and yield it as the expression's value.
 */
#define vfio_pci_config_read(_device, _offset, _type) ({			    \
	_type __data;								    \
	vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data); \
	__data;									    \
})
258 
259 #define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8)
260 #define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16)
261 #define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32)
262 
/*
 * Write _value, as the given C type, to the device's config space at
 * _offset. (Uses sizeof(__data) for consistency with
 * vfio_pci_config_read(); the value is identical to sizeof(_type).)
 */
#define vfio_pci_config_write(_device, _offset, _value, _type) do {		  \
	_type __data = (_value);						  \
	vfio_pci_config_access((_device), true, _offset, sizeof(__data), &__data); \
} while (0)
267 
268 #define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8)
269 #define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16)
270 #define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32)
271 
272 void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index,
273 			 u32 vector, int count);
274 void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index);
275 void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector);
276 
/* Add O_NONBLOCK to @fd's file status flags, asserting both fcntl()s work. */
static inline void fcntl_set_nonblock(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFL, 0);
	VFIO_ASSERT_NE(flags, -1, "F_GETFL failed for fd %d\n", fd);

	VFIO_ASSERT_NE(fcntl(fd, F_SETFL, flags | O_NONBLOCK), -1,
		       "F_SETFL O_NONBLOCK failed for fd %d\n", fd);
}
287 
/* Enable MSI delivery: vfio_pci_irq_enable() on the MSI IRQ index. */
static inline void vfio_pci_msi_enable(struct vfio_pci_device *device,
				       u32 vector, int count)
{
	const u32 index = VFIO_PCI_MSI_IRQ_INDEX;

	vfio_pci_irq_enable(device, index, vector, count);
}
293 
vfio_pci_msi_disable(struct vfio_pci_device * device)294 static inline void vfio_pci_msi_disable(struct vfio_pci_device *device)
295 {
296 	vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX);
297 }
298 
/* Enable MSI-X delivery: vfio_pci_irq_enable() on the MSI-X IRQ index. */
static inline void vfio_pci_msix_enable(struct vfio_pci_device *device,
					u32 vector, int count)
{
	const u32 index = VFIO_PCI_MSIX_IRQ_INDEX;

	vfio_pci_irq_enable(device, index, vector, count);
}
304 
vfio_pci_msix_disable(struct vfio_pci_device * device)305 static inline void vfio_pci_msix_disable(struct vfio_pci_device *device)
306 {
307 	vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
308 }
309 
310 iova_t __to_iova(struct vfio_pci_device *device, void *vaddr);
311 iova_t to_iova(struct vfio_pci_device *device, void *vaddr);
312 
vfio_pci_device_match(struct vfio_pci_device * device,u16 vendor_id,u16 device_id)313 static inline bool vfio_pci_device_match(struct vfio_pci_device *device,
314 					 u16 vendor_id, u16 device_id)
315 {
316 	return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) &&
317 		(device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID));
318 }
319 
320 void vfio_pci_driver_probe(struct vfio_pci_device *device);
321 void vfio_pci_driver_init(struct vfio_pci_device *device);
322 void vfio_pci_driver_remove(struct vfio_pci_device *device);
323 int vfio_pci_driver_memcpy(struct vfio_pci_device *device,
324 			   iova_t src, iova_t dst, u64 size);
325 void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device,
326 				  iova_t src, iova_t dst, u64 size,
327 				  u64 count);
328 int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device);
329 void vfio_pci_driver_send_msi(struct vfio_pci_device *device);
330 
331 #endif /* SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H */
332