/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H
#define SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H

#include <fcntl.h>
#include <string.h>

#include <uapi/linux/types.h>
#include <linux/iommufd.h>
#include <linux/list.h>
#include <linux/pci_regs.h>
#include <linux/vfio.h>

#include "../../../kselftest.h"

#define VFIO_LOG_AND_EXIT(...) do {		\
	fprintf(stderr, "  " __VA_ARGS__);	\
	fprintf(stderr, "\n");			\
	exit(KSFT_FAIL);			\
} while (0)

#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do {				\
	typeof(_lhs) __lhs = (_lhs);						\
	typeof(_rhs) __rhs = (_rhs);						\
										\
	if (__lhs _op __rhs)							\
		break;								\
										\
	fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__);	\
	fprintf(stderr, "  Expression: " #_lhs " " #_op " " #_rhs "\n");	\
	fprintf(stderr, "  Observed: %#lx %s %#lx\n",				\
			(u64)__lhs, #_op, (u64)__rhs);				\
	fprintf(stderr, "  [errno: %d - %s]\n", errno, strerror(errno));	\
	VFIO_LOG_AND_EXIT(__VA_ARGS__);						\
} while (0)

#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__)
#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__)
#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__)
#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__)
#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__)
#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__)
#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, _a, ##__VA_ARGS__)
#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, _a, ##__VA_ARGS__)
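
/*
 * Example use of the assertion helpers (a sketch; the fd and message are
 * hypothetical). On failure the macro prints the stringified expression,
 * both observed values, errno, and the optional message, then exits the
 * test with KSFT_FAIL:
 *
 *	int fd = open("/dev/vfio/vfio", O_RDWR);
 *
 *	VFIO_ASSERT_GE(fd, 0, "failed to open /dev/vfio/vfio");
 */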

#define VFIO_FAIL(_fmt, ...) do {				\
	fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__);	\
	VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__);			\
} while (0)

#define ioctl_assert(_fd, _op, _arg) do {						       \
	void *__arg = (_arg);								       \
	int __ret = ioctl((_fd), (_op), (__arg));					       \
	VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
} while (0)
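
/*
 * Example: ioctl_assert() stringifies its arguments into the failure
 * message, so a failed ioctl reports which fd, request, and argument were
 * involved (a sketch; "device" is a hypothetical local):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *	ioctl_assert(device->fd, VFIO_DEVICE_GET_INFO, &info);
 */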

#define dev_info(_dev, _fmt, ...) printf("%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)
#define dev_err(_dev, _fmt, ...) fprintf(stderr, "%s: " _fmt, (_dev)->bdf, ##__VA_ARGS__)

struct iommu_mode {
	const char *name;
	const char *container_path;
	unsigned long iommu_type;
};

/*
 * Generator for VFIO selftests fixture variants that replicate across all
 * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE()
 * which should then use FIXTURE_VARIANT_ADD() to create the variant.
 */
#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__)
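
/*
 * Example (a sketch; the fixture name "vfio_test" and the iommu_mode variant
 * field are placeholders for whatever the test actually declares): a test
 * that wants one kselftest harness variant per IOMMU mode defines
 * FIXTURE_VARIANT_ADD_IOMMU_MODE() in terms of FIXTURE_VARIANT_ADD() and
 * then invokes the generator:
 *
 *	FIXTURE_VARIANT(vfio_test) {
 *		const char *iommu_mode;
 *	};
 *
 *	#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)	\
 *	FIXTURE_VARIANT_ADD(vfio_test, _iommu_mode) {		\
 *		.iommu_mode = #_iommu_mode,			\
 *	}
 *
 *	FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
 */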

struct vfio_pci_bar {
	struct vfio_region_info info;
	void *vaddr;
};

typedef u64 iova_t;

#define INVALID_IOVA UINT64_MAX

struct dma_region {
	struct list_head link;
	void *vaddr;
	iova_t iova;
	u64 size;
};

struct vfio_pci_device;

struct vfio_pci_driver_ops {
	const char *name;

	/**
	 * @probe() - Check if the driver supports the given device.
	 *
	 * Return: 0 on success, non-0 on failure.
	 */
	int (*probe)(struct vfio_pci_device *device);

	/**
	 * @init() - Initialize the driver for @device.
	 *
	 * Must be called after device->driver.region has been initialized.
	 */
	void (*init)(struct vfio_pci_device *device);

	/**
	 * @remove() - Deinitialize the driver for @device.
	 */
	void (*remove)(struct vfio_pci_device *device);

	/**
	 * @memcpy_start() - Kick off @count repeated memcpy operations from
	 * [@src, @src + @size) to [@dst, @dst + @size).
	 *
	 * Guarantees:
	 *  - The device will attempt DMA reads on [src, src + size).
	 *  - The device will attempt DMA writes on [dst, dst + size).
	 *  - The device will not generate any interrupts.
	 *
	 * memcpy_start() returns immediately; it does not wait for the
	 * copies to complete.
	 */
	void (*memcpy_start)(struct vfio_pci_device *device,
			     iova_t src, iova_t dst, u64 size, u64 count);

	/**
	 * @memcpy_wait() - Wait until the memcpy operations started by
	 * memcpy_start() have finished.
	 *
	 * Guarantees:
	 *  - All in-flight DMAs initiated by memcpy_start() are fully complete
	 *    before memcpy_wait() returns.
	 *
	 * Returns non-0 if the driver detects that an error occurred during the
	 * memcpy, 0 otherwise.
	 */
	int (*memcpy_wait)(struct vfio_pci_device *device);

	/**
	 * @send_msi() - Make the device send the MSI device->driver.msi.
	 *
	 * Guarantees:
	 *  - The device will send the MSI once.
	 */
	void (*send_msi)(struct vfio_pci_device *device);
};

struct vfio_pci_driver {
	const struct vfio_pci_driver_ops *ops;
	bool initialized;
	bool memcpy_in_progress;

	/* Region to be used by the driver (e.g. for in-memory descriptors) */
	struct dma_region region;

	/* The maximum size that can be passed to memcpy_start(). */
	u64 max_memcpy_size;

	/* The maximum count that can be passed to memcpy_start(). */
	u64 max_memcpy_count;

	/* The MSI vector the device will signal in ops->send_msi(). */
	int msi;
};

struct iommu {
	const struct iommu_mode *mode;
	int container_fd;
	int iommufd;
	u32 ioas_id;
	struct list_head dma_regions;
};

struct vfio_pci_device {
	const char *bdf;
	int fd;
	int group_fd;

	struct iommu *iommu;

	struct vfio_device_info info;
	struct vfio_region_info config_space;
	struct vfio_pci_bar bars[PCI_STD_NUM_BARS];

	struct vfio_irq_info msi_info;
	struct vfio_irq_info msix_info;

	/* eventfds for MSI and MSI-X interrupts */
	int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];

	struct vfio_pci_driver driver;
};

struct iova_allocator {
	struct iommu_iova_range *ranges;
	u32 nranges;
	u32 range_idx;
	u64 range_offset;
};

/*
 * Return the BDF string of the device that the test should use.
 *
 * If a BDF string is provided by the user on the command line (as the last
 * element of argv[]), then this function will return that and decrement argc
 * by 1.
 *
 * Otherwise this function will attempt to use the environment variable
 * $VFIO_SELFTESTS_BDF.
 *
 * If a BDF cannot be determined then the test will exit with KSFT_SKIP.
 */
const char *vfio_selftests_get_bdf(int *argc, char *argv[]);
char **vfio_selftests_get_bdfs(int *argc, char *argv[], int *nr_bdfs);
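
/*
 * Example command-line handling (a sketch; run_test() is a hypothetical
 * test entry point and error handling is elided):
 *
 *	int main(int argc, char *argv[])
 *	{
 *		const char *bdf = vfio_selftests_get_bdf(&argc, argv);
 *
 *		run_test(bdf);
 *		return 0;
 *	}
 */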

const char *vfio_pci_get_cdev_path(const char *bdf);

extern const char *default_iommu_mode;

struct iommu *iommu_init(const char *iommu_mode);
void iommu_cleanup(struct iommu *iommu);

int __iommu_map(struct iommu *iommu, struct dma_region *region);

static inline void iommu_map(struct iommu *iommu, struct dma_region *region)
{
	VFIO_ASSERT_EQ(__iommu_map(iommu, region), 0);
}

int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped);

static inline void iommu_unmap(struct iommu *iommu, struct dma_region *region)
{
	VFIO_ASSERT_EQ(__iommu_unmap(iommu, region, NULL), 0);
}

int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped);

static inline void iommu_unmap_all(struct iommu *iommu)
{
	VFIO_ASSERT_EQ(__iommu_unmap_all(iommu, NULL), 0);
}
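
/*
 * Example DMA mapping flow (a sketch; "allocator" is a hypothetical local
 * obtained from iova_allocator_init(), the buffer size and mmap() flags are
 * illustrative, and <sys/mman.h> is assumed to be included by the caller):
 *
 *	const size_t size = 0x200000;
 *	struct dma_region region = {
 *		.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			      MAP_SHARED | MAP_ANONYMOUS, -1, 0),
 *		.iova = iova_allocator_alloc(allocator, size),
 *		.size = size,
 *	};
 *
 *	iommu_map(iommu, &region);
 *	...
 *	iommu_unmap(iommu, &region);
 */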

iova_t __iommu_hva2iova(struct iommu *iommu, void *vaddr);
iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr);

struct iommu_iova_range *iommu_iova_ranges(struct iommu *iommu, u32 *nranges);

struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu);
void vfio_pci_device_cleanup(struct vfio_pci_device *device);

void vfio_pci_device_reset(struct vfio_pci_device *device);

struct iova_allocator *iova_allocator_init(struct iommu *iommu);
void iova_allocator_cleanup(struct iova_allocator *allocator);
iova_t iova_allocator_alloc(struct iova_allocator *allocator, size_t size);
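
/*
 * Example setup/teardown ordering (a sketch; the exact teardown order is an
 * assumption, chosen so the device detaches before its IOMMU goes away):
 *
 *	struct iommu *iommu = iommu_init(default_iommu_mode);
 *	struct vfio_pci_device *device = vfio_pci_device_init(bdf, iommu);
 *	struct iova_allocator *allocator = iova_allocator_init(iommu);
 *
 *	... test body ...
 *
 *	iova_allocator_cleanup(allocator);
 *	vfio_pci_device_cleanup(device);
 *	iommu_cleanup(iommu);
 */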

void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
			    size_t config, size_t size, void *data);

#define vfio_pci_config_read(_device, _offset, _type) ({			    \
	_type __data;								    \
	vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data); \
	__data;									    \
})

#define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8)
#define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16)
#define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32)

#define vfio_pci_config_write(_device, _offset, _value, _type) do {		  \
	_type __data = (_value);						  \
	vfio_pci_config_access((_device), true, _offset, sizeof(_type), &__data); \
} while (0)

#define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8)
#define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16)
#define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32)
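
/*
 * Example config space access (a sketch; PCI_VENDOR_ID, PCI_COMMAND and
 * PCI_COMMAND_MASTER come from <linux/pci_regs.h>, included above):
 *
 *	u16 vendor = vfio_pci_config_readw(device, PCI_VENDOR_ID);
 *	u16 cmd = vfio_pci_config_readw(device, PCI_COMMAND);
 *
 *	vfio_pci_config_writew(device, PCI_COMMAND, cmd | PCI_COMMAND_MASTER);
 */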

void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index,
			 u32 vector, int count);
void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index);
void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector);

static inline void fcntl_set_nonblock(int fd)
{
	int r;

	r = fcntl(fd, F_GETFL, 0);
	VFIO_ASSERT_NE(r, -1, "F_GETFL failed for fd %d\n", fd);

	r = fcntl(fd, F_SETFL, r | O_NONBLOCK);
	VFIO_ASSERT_NE(r, -1, "F_SETFL O_NONBLOCK failed for fd %d\n", fd);
}

static inline void vfio_pci_msi_enable(struct vfio_pci_device *device,
				       u32 vector, int count)
{
	vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, vector, count);
}

static inline void vfio_pci_msi_disable(struct vfio_pci_device *device)
{
	vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX);
}

static inline void vfio_pci_msix_enable(struct vfio_pci_device *device,
					u32 vector, int count)
{
	vfio_pci_irq_enable(device, VFIO_PCI_MSIX_IRQ_INDEX, vector, count);
}

static inline void vfio_pci_msix_disable(struct vfio_pci_device *device)
{
	vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
}
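
/*
 * Example MSI flow (a sketch; that vfio_pci_irq_enable() populates
 * device->msi_eventfds[] is an assumption about the library implementation,
 * so treat the read() below as illustrative only):
 *
 *	u64 nr_irqs;
 *
 *	vfio_pci_msi_enable(device, 0, 1);
 *	vfio_pci_irq_trigger(device, VFIO_PCI_MSI_IRQ_INDEX, 0);
 *
 *	VFIO_ASSERT_EQ(read(device->msi_eventfds[0], &nr_irqs, sizeof(nr_irqs)),
 *		       (ssize_t)sizeof(nr_irqs));
 *
 *	vfio_pci_msi_disable(device);
 */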

static inline iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
{
	return __iommu_hva2iova(device->iommu, vaddr);
}

static inline iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
{
	return iommu_hva2iova(device->iommu, vaddr);
}

static inline bool vfio_pci_device_match(struct vfio_pci_device *device,
					 u16 vendor_id, u16 device_id)
{
	return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) &&
		(device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID));
}

void vfio_pci_driver_probe(struct vfio_pci_device *device);
void vfio_pci_driver_init(struct vfio_pci_device *device);
void vfio_pci_driver_remove(struct vfio_pci_device *device);
int vfio_pci_driver_memcpy(struct vfio_pci_device *device,
			   iova_t src, iova_t dst, u64 size);
void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device,
				  iova_t src, iova_t dst, u64 size,
				  u64 count);
int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device);
void vfio_pci_driver_send_msi(struct vfio_pci_device *device);
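
/*
 * Example DMA memcpy flow through the driver wrappers (a sketch; src_iova,
 * dst_iova and size are hypothetical values from previously mapped
 * dma_regions, and device->driver.region is assumed to already be
 * initialized per the @init() documentation above):
 *
 *	vfio_pci_driver_probe(device);
 *	vfio_pci_driver_init(device);
 *
 *	vfio_pci_driver_memcpy_start(device, src_iova, dst_iova, size, 1);
 *	VFIO_ASSERT_EQ(vfio_pci_driver_memcpy_wait(device), 0);
 *
 *	vfio_pci_driver_remove(device);
 */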

#endif /* SELFTESTS_VFIO_LIB_INCLUDE_LIBVFIO_H */