1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
3 #define SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H
4
5 #include <fcntl.h>
6 #include <string.h>
7 #include <linux/vfio.h>
8 #include <linux/list.h>
9 #include <linux/pci_regs.h>
10
11 #include "../../../kselftest.h"
12
/*
 * Print a printf-style message to stderr and terminate the test process
 * with KSFT_FAIL. Never returns.
 */
#define VFIO_LOG_AND_EXIT(...) do {		\
	fprintf(stderr, " " __VA_ARGS__);	\
	fprintf(stderr, "\n");			\
	exit(KSFT_FAIL);			\
} while (0)
18
/*
 * Assert that (_lhs _op _rhs) holds. On failure, print the stringified
 * expression, both observed values, the current errno, and an optional
 * printf-style message, then exit with KSFT_FAIL.
 *
 * _lhs and _rhs are each evaluated exactly once (cached in locals), so
 * arguments with side effects are safe.
 */
#define VFIO_ASSERT_OP(_lhs, _rhs, _op, ...) do {				\
	typeof(_lhs) __lhs = (_lhs);						\
	typeof(_rhs) __rhs = (_rhs);						\
										\
	if (__lhs _op __rhs)							\
		break;								\
										\
	fprintf(stderr, "%s:%u: Assertion Failure\n\n", __FILE__, __LINE__);	\
	fprintf(stderr, " Expression: " #_lhs " " #_op " " #_rhs "\n");		\
	fprintf(stderr, " Observed: %#lx %s %#lx\n",				\
		(u64)__lhs, #_op, (u64)__rhs);					\
	fprintf(stderr, " [errno: %d - %s]\n", errno, strerror(errno));		\
	VFIO_LOG_AND_EXIT(__VA_ARGS__);						\
} while (0)
33
/*
 * Comparison assertions built on VFIO_ASSERT_OP; all accept an optional
 * trailing printf-style message that is printed on failure.
 *
 * Note: _a is parenthesized in the NULL/TRUE/FALSE variants so that
 * expressions containing low-precedence operators (e.g. a ?: b) expand
 * safely — keeps all the single-argument variants consistent.
 */
#define VFIO_ASSERT_EQ(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, ==, ##__VA_ARGS__)
#define VFIO_ASSERT_NE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, !=, ##__VA_ARGS__)
#define VFIO_ASSERT_LT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <, ##__VA_ARGS__)
#define VFIO_ASSERT_LE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, <=, ##__VA_ARGS__)
#define VFIO_ASSERT_GT(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >, ##__VA_ARGS__)
#define VFIO_ASSERT_GE(_a, _b, ...) VFIO_ASSERT_OP(_a, _b, >=, ##__VA_ARGS__)
#define VFIO_ASSERT_TRUE(_a, ...) VFIO_ASSERT_NE(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_FALSE(_a, ...) VFIO_ASSERT_EQ(false, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_NULL(_a, ...) VFIO_ASSERT_EQ(NULL, (_a), ##__VA_ARGS__)
#define VFIO_ASSERT_NOT_NULL(_a, ...) VFIO_ASSERT_NE(NULL, (_a), ##__VA_ARGS__)
44
/* Report an unconditional failure at the current file/line, then exit. */
#define VFIO_FAIL(_fmt, ...) do {					\
	fprintf(stderr, "%s:%u: FAIL\n\n", __FILE__, __LINE__);		\
	VFIO_LOG_AND_EXIT(_fmt, ##__VA_ARGS__);				\
} while (0)
49
/* One way of attaching the test device to an IOMMU backend. */
struct vfio_iommu_mode {
	const char *name;		/* Human-readable mode name. */
	const char *container_path;	/* Container node to open; presumably NULL for iommufd modes — confirm in vfio_util.c. */
	unsigned long iommu_type;	/* IOMMU type for this mode; presumably passed to VFIO_SET_IOMMU — confirm. */
};
55
/*
 * Generator for VFIO selftests fixture variants that replicate across all
 * possible IOMMU modes. Tests must define FIXTURE_VARIANT_ADD_IOMMU_MODE()
 * which should then use FIXTURE_VARIANT_ADD() to create the variant.
 *
 * Any extra arguments are forwarded to each FIXTURE_VARIANT_ADD_IOMMU_MODE()
 * invocation. The mode names here must match struct vfio_iommu_mode entries
 * defined elsewhere in the library.
 */
#define FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(...) \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(vfio_type1v2_iommu, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd_compat_type1v2, ##__VA_ARGS__); \
FIXTURE_VARIANT_ADD_IOMMU_MODE(iommufd, ##__VA_ARGS__)
67
/* A single PCI BAR of the device under test. */
struct vfio_pci_bar {
	struct vfio_region_info info;	/* VFIO region info for this BAR. */
	void *vaddr;			/* mmap()ed address; presumably NULL when the BAR is not mappable — confirm. */
};
72
/* An IO virtual address, i.e. an address as seen by the device through the IOMMU. */
typedef u64 iova_t;

/* Sentinel value for an invalid/unmapped IOVA. */
#define INVALID_IOVA UINT64_MAX
76
/* A single host-VA to IOVA DMA mapping. */
struct vfio_dma_region {
	struct list_head link;	/* Entry in vfio_pci_device.dma_regions. */
	void *vaddr;		/* Host virtual address of the region. */
	iova_t iova;		/* IO virtual address the device uses. */
	u64 size;		/* Size of the mapping in bytes. */
};
83
84 struct vfio_pci_device;
85
/*
 * Operations implemented by a device-specific driver, used by tests to
 * make the device under test issue DMA and interrupts.
 */
struct vfio_pci_driver_ops {
	const char *name;	/* Driver name, e.g. for logging/selection. */

	/**
	 * @probe() - Check if the driver supports the given device.
	 *
	 * Return: 0 on success, non-0 on failure.
	 */
	int (*probe)(struct vfio_pci_device *device);

	/**
	 * @init() - Initialize the driver for @device.
	 *
	 * Must be called after device->driver.region has been initialized.
	 */
	void (*init)(struct vfio_pci_device *device);

	/**
	 * remove() - Deinitialize the driver for @device.
	 */
	void (*remove)(struct vfio_pci_device *device);

	/**
	 * memcpy_start() - Kick off @count repeated memcpy operations from
	 * [@src, @src + @size) to [@dst, @dst + @size).
	 *
	 * Guarantees:
	 *  - The device will attempt DMA reads on [src, src + size).
	 *  - The device will attempt DMA writes on [dst, dst + size).
	 *  - The device will not generate any interrupts.
	 *
	 * memcpy_start() returns immediately, it does not wait for the
	 * copies to complete.
	 */
	void (*memcpy_start)(struct vfio_pci_device *device,
			     iova_t src, iova_t dst, u64 size, u64 count);

	/**
	 * memcpy_wait() - Wait until the memcpy operations started by
	 * memcpy_start() have finished.
	 *
	 * Guarantees:
	 *  - All in-flight DMAs initiated by memcpy_start() are fully complete
	 *    before memcpy_wait() returns.
	 *
	 * Returns non-0 if the driver detects that an error occurred during the
	 * memcpy, 0 otherwise.
	 */
	int (*memcpy_wait)(struct vfio_pci_device *device);

	/**
	 * send_msi() - Make the device send the MSI device->driver.msi.
	 *
	 * Guarantees:
	 *  - The device will send the MSI once.
	 */
	void (*send_msi)(struct vfio_pci_device *device);
};
144
/* Per-device state for the bound vfio_pci_driver_ops implementation. */
struct vfio_pci_driver {
	const struct vfio_pci_driver_ops *ops;	/* Bound driver ops; presumably NULL before probe — confirm. */
	bool initialized;			/* Set once ops->init() has run — TODO confirm in vfio_util.c. */
	bool memcpy_in_progress;		/* memcpy_start() issued, memcpy_wait() pending — TODO confirm. */

	/* Region to be used by the driver (e.g. for in-memory descriptors) */
	struct vfio_dma_region region;

	/* The maximum size that can be passed to memcpy_start(). */
	u64 max_memcpy_size;

	/* The maximum count that can be passed to memcpy_start(). */
	u64 max_memcpy_count;

	/* The MSI vector the device will signal in ops->send_msi(). */
	int msi;
};
162
/*
 * State for a single VFIO-managed PCI device under test, covering both
 * the legacy group/container flow and the iommufd flow.
 */
struct vfio_pci_device {
	int fd;		/* VFIO device fd. */

	const struct vfio_iommu_mode *iommu_mode;	/* IOMMU mode in use. */

	/* Legacy group/container fds; presumably unused in iommufd modes — confirm. */
	int group_fd;
	int container_fd;

	/* iommufd fd and IOAS id; presumably unused in container modes — confirm. */
	int iommufd;
	u32 ioas_id;

	struct vfio_device_info info;		/* Cached VFIO device info. */
	struct vfio_region_info config_space;	/* PCI config space region info. */
	struct vfio_pci_bar bars[PCI_STD_NUM_BARS];	/* Standard BARs 0-5. */

	struct vfio_irq_info msi_info;		/* MSI capability info. */
	struct vfio_irq_info msix_info;		/* MSI-x capability info. */

	/* All DMA regions currently mapped for this device. */
	struct list_head dma_regions;

	/* eventfds for MSI and MSI-x interrupts */
	int msi_eventfds[PCI_MSIX_FLAGS_QSIZE + 1];

	struct vfio_pci_driver driver;		/* Device-specific driver state. */
};
187
/*
 * Return the BDF string of the device that the test should use.
 *
 * If a BDF string is provided by the user on the command line (as the last
 * element of argv[]), then this function will return that and decrement argc
 * by 1.
 *
 * Otherwise this function will attempt to use the environment variable
 * $VFIO_SELFTESTS_BDF.
 *
 * If BDF cannot be determined then the test will exit with KSFT_SKIP.
 */
const char *vfio_selftests_get_bdf(int *argc, char *argv[]);

/* Return the VFIO character-device path for @bdf (iommufd cdev flow). */
const char *vfio_pci_get_cdev_path(const char *bdf);

/* Name of the IOMMU mode used when the caller passes iommu_mode == NULL. */
extern const char *default_iommu_mode;

/* Open and initialize @bdf under @iommu_mode (NULL selects default_iommu_mode — TODO confirm). */
struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode);
/* Tear down and free everything set up by vfio_pci_device_init(). */
void vfio_pci_device_cleanup(struct vfio_pci_device *device);
/* Reset the device; presumably via the VFIO device reset ioctl — confirm. */
void vfio_pci_device_reset(struct vfio_pci_device *device);
208
/*
 * Raw DMA map/unmap primitives: return 0 on success, non-0 on failure
 * (see the asserting wrappers below). The unmap variants optionally
 * report the number of bytes unmapped through @unmapped.
 */
int __vfio_pci_dma_map(struct vfio_pci_device *device,
		       struct vfio_dma_region *region);
int __vfio_pci_dma_unmap(struct vfio_pci_device *device,
			 struct vfio_dma_region *region,
			 u64 *unmapped);
int __vfio_pci_dma_unmap_all(struct vfio_pci_device *device, u64 *unmapped);
215
/* Map @region for DMA; asserts (exits the test) on failure. */
static inline void vfio_pci_dma_map(struct vfio_pci_device *device,
				    struct vfio_dma_region *region)
{
	VFIO_ASSERT_EQ(__vfio_pci_dma_map(device, region), 0);
}
221
/* Unmap @region; asserts (exits the test) on failure. */
static inline void vfio_pci_dma_unmap(struct vfio_pci_device *device,
				      struct vfio_dma_region *region)
{
	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap(device, region, NULL), 0);
}
227
/* Unmap every DMA region on @device; asserts (exits the test) on failure. */
static inline void vfio_pci_dma_unmap_all(struct vfio_pci_device *device)
{
	VFIO_ASSERT_EQ(__vfio_pci_dma_unmap_all(device, NULL), 0);
}
232
/*
 * Read (@write == false) or write (@write == true) @size bytes at offset
 * @config in the device's PCI config space, through @data.
 */
void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
			    size_t config, size_t size, void *data);

/* Typed config-space read at @_offset; evaluates to the value read. */
#define vfio_pci_config_read(_device, _offset, _type) ({				\
	_type __data;									\
	vfio_pci_config_access((_device), false, _offset, sizeof(__data), &__data);	\
	__data;										\
})

/* Fixed-width config-space read helpers. */
#define vfio_pci_config_readb(_d, _o) vfio_pci_config_read(_d, _o, u8)
#define vfio_pci_config_readw(_d, _o) vfio_pci_config_read(_d, _o, u16)
#define vfio_pci_config_readl(_d, _o) vfio_pci_config_read(_d, _o, u32)
245
/* Typed config-space write of @_value (as @_type) at @_offset. */
#define vfio_pci_config_write(_device, _offset, _value, _type) do {			\
	_type __data = (_value);							\
	vfio_pci_config_access((_device), true, _offset, sizeof(_type), &__data);	\
} while (0)

/* Fixed-width config-space write helpers. */
#define vfio_pci_config_writeb(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u8)
#define vfio_pci_config_writew(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u16)
#define vfio_pci_config_writel(_d, _o, _v) vfio_pci_config_write(_d, _o, _v, u32)
254
/*
 * Enable/disable/trigger device interrupts for IRQ @index (e.g.
 * VFIO_PCI_MSI_IRQ_INDEX). @vector/@count select the vector range.
 */
void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index,
			 u32 vector, int count);
void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index);
void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector);
259
/* Add O_NONBLOCK to @fd's file status flags; asserts on fcntl() failure. */
static inline void fcntl_set_nonblock(int fd)
{
	int r;

	/* Fetch current flags so we only add O_NONBLOCK, not clobber others. */
	r = fcntl(fd, F_GETFL, 0);
	VFIO_ASSERT_NE(r, -1, "F_GETFL failed for fd %d\n", fd);

	r = fcntl(fd, F_SETFL, r | O_NONBLOCK);
	VFIO_ASSERT_NE(r, -1, "F_SETFL O_NONBLOCK failed for fd %d\n", fd);
}
270
/* Enable @count MSI vectors starting at @vector. */
static inline void vfio_pci_msi_enable(struct vfio_pci_device *device,
				       u32 vector, int count)
{
	vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, vector, count);
}
276
/* Disable all MSI interrupts on @device. */
static inline void vfio_pci_msi_disable(struct vfio_pci_device *device)
{
	vfio_pci_irq_disable(device, VFIO_PCI_MSI_IRQ_INDEX);
}
281
/* Enable @count MSI-x vectors starting at @vector. */
static inline void vfio_pci_msix_enable(struct vfio_pci_device *device,
					u32 vector, int count)
{
	vfio_pci_irq_enable(device, VFIO_PCI_MSIX_IRQ_INDEX, vector, count);
}
287
/* Disable all MSI-x interrupts on @device. */
static inline void vfio_pci_msix_disable(struct vfio_pci_device *device)
{
	vfio_pci_irq_disable(device, VFIO_PCI_MSIX_IRQ_INDEX);
}
292
/*
 * Translate a host virtual address to its IOVA. __to_iova() presumably
 * returns INVALID_IOVA when @vaddr is not mapped while to_iova() asserts
 * success — confirm against vfio_util.c.
 */
iova_t __to_iova(struct vfio_pci_device *device, void *vaddr);
iova_t to_iova(struct vfio_pci_device *device, void *vaddr);
295
vfio_pci_device_match(struct vfio_pci_device * device,u16 vendor_id,u16 device_id)296 static inline bool vfio_pci_device_match(struct vfio_pci_device *device,
297 u16 vendor_id, u16 device_id)
298 {
299 return (vendor_id == vfio_pci_config_readw(device, PCI_VENDOR_ID)) &&
300 (device_id == vfio_pci_config_readw(device, PCI_DEVICE_ID));
301 }
302
/*
 * Front-ends for the bound vfio_pci_driver_ops (see that struct for the
 * guarantees each operation provides). vfio_pci_driver_memcpy() is
 * presumably a synchronous start+wait convenience — confirm in the
 * implementation; it and memcpy_wait() return 0 on success, non-0 on
 * detected error.
 */
void vfio_pci_driver_probe(struct vfio_pci_device *device);
void vfio_pci_driver_init(struct vfio_pci_device *device);
void vfio_pci_driver_remove(struct vfio_pci_device *device);
int vfio_pci_driver_memcpy(struct vfio_pci_device *device,
			   iova_t src, iova_t dst, u64 size);
void vfio_pci_driver_memcpy_start(struct vfio_pci_device *device,
				  iova_t src, iova_t dst, u64 size,
				  u64 count);
int vfio_pci_driver_memcpy_wait(struct vfio_pci_device *device);
void vfio_pci_driver_send_msi(struct vfio_pci_device *device);

#endif /* SELFTESTS_VFIO_LIB_INCLUDE_VFIO_UTIL_H */
315