// SPDX-License-Identifier: GPL-2.0-only
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/vfio.h>

#include "../../../kselftest.h"
#include <vfio_util.h>

#define PCI_SYSFS_PATH	"/sys/bus/pci/devices"

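/*
 * Issue an ioctl() and assert that it succeeds (returns 0), printing the
 * stringified fd, request, and argument on failure.
 */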
#define ioctl_assert(_fd, _op, _arg) do {						       \
	void *__arg = (_arg);								       \
	int __ret = ioctl((_fd), (_op), (__arg));					       \
	VFIO_ASSERT_EQ(__ret, 0, "ioctl(%s, %s, %s) returned %d\n", #_fd, #_op, #_arg, __ret); \
} while (0)

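/*
 * Translate a host virtual address into an IOVA by walking the device's list
 * of mapped DMA regions. Returns INVALID_IOVA if the address is not covered
 * by any region.
 */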
iova_t __to_iova(struct vfio_pci_device *device, void *vaddr)
{
	struct vfio_dma_region *region;

	list_for_each_entry(region, &device->dma_regions, link) {
		if (vaddr < region->vaddr)
			continue;

		if (vaddr >= region->vaddr + region->size)
			continue;

		return region->iova + (vaddr - region->vaddr);
	}

	return INVALID_IOVA;
}

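/* Like __to_iova(), but assert that the address is mapped into the device. */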
iova_t to_iova(struct vfio_pci_device *device, void *vaddr)
{
	iova_t iova;

	iova = __to_iova(device, vaddr);
	VFIO_ASSERT_NE(iova, INVALID_IOVA, "%p is not mapped into device.\n", vaddr);

	return iova;
}

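/*
 * Configure @count interrupts starting at @vector for IRQ @index via
 * VFIO_DEVICE_SET_IRQS. When @count is non-zero, @fds supplies one eventfd
 * per vector (DATA_EVENTFD); a @count of 0 disables the index (DATA_NONE).
 */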
static void vfio_pci_irq_set(struct vfio_pci_device *device,
			     u32 index, u32 vector, u32 count, int *fds)
{
	u8 buf[sizeof(struct vfio_irq_set) + sizeof(int) * count] = {};
	struct vfio_irq_set *irq = (void *)&buf;
	int *irq_fds = (void *)&irq->data;

	irq->argsz = sizeof(buf);
	irq->flags = VFIO_IRQ_SET_ACTION_TRIGGER;
	irq->index = index;
	irq->start = vector;
	irq->count = count;

	if (count) {
		irq->flags |= VFIO_IRQ_SET_DATA_EVENTFD;
		memcpy(irq_fds, fds, sizeof(int) * count);
	} else {
		irq->flags |= VFIO_IRQ_SET_DATA_NONE;
	}

	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, irq);
}

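/*
 * Ask the kernel to trigger the interrupt for @vector of IRQ @index directly
 * (ACTION_TRIGGER with DATA_NONE), without going through an eventfd.
 */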
void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector)
{
	struct vfio_irq_set irq = {
		.argsz = sizeof(irq),
		.flags = VFIO_IRQ_SET_ACTION_TRIGGER | VFIO_IRQ_SET_DATA_NONE,
		.index = index,
		.start = vector,
		.count = 1,
	};

	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, &irq);
}

static void check_supported_irq_index(u32 index)
{
	/* The VFIO selftests only support MSI and MSI-X for now. */
	VFIO_ASSERT_TRUE(index == VFIO_PCI_MSI_IRQ_INDEX ||
			 index == VFIO_PCI_MSIX_IRQ_INDEX,
			 "Unsupported IRQ index: %u\n", index);
}

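/*
 * Enable @count MSI/MSI-X vectors starting at @vector: allocate one eventfd
 * per vector and wire them up with VFIO_DEVICE_SET_IRQS.
 */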
void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index, u32 vector,
			 int count)
{
	int i;

	check_supported_irq_index(index);

	for (i = vector; i < vector + count; i++) {
		VFIO_ASSERT_LT(device->msi_eventfds[i], 0);
		device->msi_eventfds[i] = eventfd(0, 0);
		VFIO_ASSERT_GE(device->msi_eventfds[i], 0);
	}

	vfio_pci_irq_set(device, index, vector, count, device->msi_eventfds + vector);
}

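/*
 * Disable all interrupts for IRQ @index and close any eventfds that were
 * allocated by vfio_pci_irq_enable().
 */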
void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index)
{
	int i;

	check_supported_irq_index(index);

	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
		if (device->msi_eventfds[i] < 0)
			continue;

		VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
		device->msi_eventfds[i] = -1;
	}

	vfio_pci_irq_set(device, index, 0, 0, NULL);
}

static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,
			     struct vfio_irq_info *irq_info)
{
	irq_info->argsz = sizeof(*irq_info);
	irq_info->index = index;

	ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
}

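/*
 * Map @region into the device's IOMMU domain (read/write) via
 * VFIO_IOMMU_MAP_DMA and track it on the device's DMA region list so that
 * to_iova() can translate addresses within it.
 */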
void vfio_pci_dma_map(struct vfio_pci_device *device,
		      struct vfio_dma_region *region)
{
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (u64)region->vaddr,
		.iova = region->iova,
		.size = region->size,
	};

	ioctl_assert(device->container_fd, VFIO_IOMMU_MAP_DMA, &map);

	list_add(&region->link, &device->dma_regions);
}

void vfio_pci_dma_unmap(struct vfio_pci_device *device,
			struct vfio_dma_region *region)
{
	struct vfio_iommu_type1_dma_unmap unmap = {
		.argsz = sizeof(unmap),
		.iova = region->iova,
		.size = region->size,
	};

	ioctl_assert(device->container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);

	list_del(&region->link);
}

static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
				struct vfio_region_info *info)
{
	memset(info, 0, sizeof(*info));

	info->argsz = sizeof(*info);
	info->index = index;

	ioctl_assert(device->fd, VFIO_DEVICE_GET_REGION_INFO, info);
}

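/*
 * mmap() BAR @index into the test process, using the protection flags the
 * region advertises. The BAR must support mmap and must not already be
 * mapped.
 */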
static void vfio_pci_bar_map(struct vfio_pci_device *device, int index)
{
	struct vfio_pci_bar *bar = &device->bars[index];
	int prot = 0;

	VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
	VFIO_ASSERT_NULL(bar->vaddr);
	VFIO_ASSERT_TRUE(bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP);

	if (bar->info.flags & VFIO_REGION_INFO_FLAG_READ)
		prot |= PROT_READ;
	if (bar->info.flags & VFIO_REGION_INFO_FLAG_WRITE)
		prot |= PROT_WRITE;

	bar->vaddr = mmap(NULL, bar->info.size, prot, MAP_FILE | MAP_SHARED,
			  device->fd, bar->info.offset);
	VFIO_ASSERT_NE(bar->vaddr, MAP_FAILED);
}

static void vfio_pci_bar_unmap(struct vfio_pci_device *device, int index)
{
	struct vfio_pci_bar *bar = &device->bars[index];

	VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
	VFIO_ASSERT_NOT_NULL(bar->vaddr);

	VFIO_ASSERT_EQ(munmap(bar->vaddr, bar->info.size), 0);
	bar->vaddr = NULL;
}

static void vfio_pci_bar_unmap_all(struct vfio_pci_device *device)
{
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (device->bars[i].vaddr)
			vfio_pci_bar_unmap(device, i);
	}
}

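/*
 * Read from or write to the device's PCI config space at offset @config
 * using pread()/pwrite() on the VFIO config space region.
 */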
void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
			    size_t config, size_t size, void *data)
{
	struct vfio_region_info *config_space = &device->config_space;
	int ret;

	if (write)
		ret = pwrite(device->fd, data, size, config_space->offset + config);
	else
		ret = pread(device->fd, data, size, config_space->offset + config);

	VFIO_ASSERT_EQ(ret, size, "Failed to %s PCI config space: 0x%lx\n",
		       write ? "write to" : "read from", config);
}

void vfio_pci_device_reset(struct vfio_pci_device *device)
{
	ioctl_assert(device->fd, VFIO_DEVICE_RESET, NULL);
}

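/*
 * Resolve the IOMMU group number for a device by reading the iommu_group
 * symlink under /sys/bus/pci/devices/<bdf>/ and parsing its basename.
 */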
static unsigned int vfio_pci_get_group_from_dev(const char *bdf)
{
	char dev_iommu_group_path[PATH_MAX] = {0};
	char sysfs_path[PATH_MAX] = {0};
	unsigned int group;
	int ret;

	snprintf(sysfs_path, PATH_MAX, "%s/%s/iommu_group", PCI_SYSFS_PATH, bdf);

	ret = readlink(sysfs_path, dev_iommu_group_path, sizeof(dev_iommu_group_path));
	VFIO_ASSERT_NE(ret, -1, "Failed to get the IOMMU group for device: %s\n", bdf);

	ret = sscanf(basename(dev_iommu_group_path), "%u", &group);
	VFIO_ASSERT_EQ(ret, 1, "Failed to get the IOMMU group for device: %s\n", bdf);

	return group;
}

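/*
 * Open the VFIO container for the selected IOMMU mode and verify that the
 * kernel speaks the expected VFIO API version.
 */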
static void vfio_pci_container_setup(struct vfio_pci_device *device)
{
	const char *path = device->iommu_mode->container_path;
	int version;

	device->container_fd = open(path, O_RDWR);
	VFIO_ASSERT_GE(device->container_fd, 0, "open(%s) failed\n", path);

	version = ioctl(device->container_fd, VFIO_GET_API_VERSION);
	VFIO_ASSERT_EQ(version, VFIO_API_VERSION);
}

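/*
 * Open the device's VFIO group (/dev/vfio/<group>), check that the group is
 * viable, and attach it to the container.
 */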
static void vfio_pci_group_setup(struct vfio_pci_device *device, const char *bdf)
{
	struct vfio_group_status group_status = {
		.argsz = sizeof(group_status),
	};
	char group_path[32];
	int group;

	group = vfio_pci_get_group_from_dev(bdf);
	snprintf(group_path, sizeof(group_path), "/dev/vfio/%d", group);

	device->group_fd = open(group_path, O_RDWR);
	VFIO_ASSERT_GE(device->group_fd, 0, "open(%s) failed\n", group_path);

	ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status);
	VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE);

	ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->container_fd);
}

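/*
 * Initialize the DMA region list and select the IOMMU backend on the
 * container, after confirming the kernel supports the requested type.
 */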
static void vfio_pci_iommu_setup(struct vfio_pci_device *device)
{
	unsigned long iommu_type = device->iommu_mode->iommu_type;
	int ret;

	INIT_LIST_HEAD(&device->dma_regions);

	ret = ioctl(device->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
	VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);

	ioctl_assert(device->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);
}

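/*
 * Obtain the device fd from the group, cache device/region/IRQ info, mmap()
 * every BAR that supports it, and mark all MSI eventfd slots as unused.
 */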
static void vfio_pci_device_setup(struct vfio_pci_device *device, const char *bdf)
{
	int i;

	device->fd = ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf);
	VFIO_ASSERT_GE(device->fd, 0);

	device->info.argsz = sizeof(device->info);
	ioctl_assert(device->fd, VFIO_DEVICE_GET_INFO, &device->info);

	vfio_pci_region_get(device, VFIO_PCI_CONFIG_REGION_INDEX, &device->config_space);

	/* Sanity check VFIO does not advertise mmap for config space */
	VFIO_ASSERT_TRUE(!(device->config_space.flags & VFIO_REGION_INFO_FLAG_MMAP),
			 "PCI config space should not support mmap()\n");

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct vfio_pci_bar *bar = device->bars + i;

		vfio_pci_region_get(device, i, &bar->info);
		if (bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP)
			vfio_pci_bar_map(device, i);
	}

	vfio_pci_irq_get(device, VFIO_PCI_MSI_IRQ_INDEX, &device->msi_info);
	vfio_pci_irq_get(device, VFIO_PCI_MSIX_IRQ_INDEX, &device->msix_info);

	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++)
		device->msi_eventfds[i] = -1;
}

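/*
 * Find the VFIO character device node (/dev/vfio/devices/vfioN) backing the
 * given BDF by scanning the device's vfio-dev/ sysfs directory. The returned
 * string is heap-allocated and owned by the caller.
 */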
const char *vfio_pci_get_cdev_path(const char *bdf)
{
	char dir_path[PATH_MAX];
	struct dirent *entry;
	char *cdev_path;
	DIR *dir;

	cdev_path = calloc(PATH_MAX, 1);
	VFIO_ASSERT_NOT_NULL(cdev_path);

	snprintf(dir_path, sizeof(dir_path), "/sys/bus/pci/devices/%s/vfio-dev/", bdf);

	dir = opendir(dir_path);
	VFIO_ASSERT_NOT_NULL(dir, "Failed to open directory %s\n", dir_path);

	while ((entry = readdir(dir)) != NULL) {
		/* Find the file that starts with "vfio" */
		if (strncmp("vfio", entry->d_name, 4))
			continue;

		snprintf(cdev_path, PATH_MAX, "/dev/vfio/devices/%s", entry->d_name);
		break;
	}

	VFIO_ASSERT_NE(cdev_path[0], 0, "Failed to find vfio cdev file.\n");
	VFIO_ASSERT_EQ(closedir(dir), 0);

	return cdev_path;
}

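/* IOMMU modes the selftests can use; only the legacy type1 container so far. */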
static const struct vfio_iommu_mode iommu_modes[] = {
	{
		.name = "vfio_type1_iommu",
		.container_path = "/dev/vfio/vfio",
		.iommu_type = VFIO_TYPE1_IOMMU,
	},
};

const char *default_iommu_mode = "vfio_type1_iommu";

static const struct vfio_iommu_mode *lookup_iommu_mode(const char *iommu_mode)
{
	int i;

	if (!iommu_mode)
		iommu_mode = default_iommu_mode;

	for (i = 0; i < ARRAY_SIZE(iommu_modes); i++) {
		if (strcmp(iommu_mode, iommu_modes[i].name))
			continue;

		return &iommu_modes[i];
	}

	VFIO_FAIL("Unrecognized IOMMU mode: %s\n", iommu_mode);
}

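/*
 * Allocate and fully initialize a vfio_pci_device for the device at @bdf:
 * open the container, group, and device, set up the IOMMU, and probe for a
 * driver. A NULL @iommu_mode selects default_iommu_mode.
 *
 * Illustrative usage from a test (sketch only; actual tests may differ):
 *
 *	struct vfio_pci_device *device;
 *
 *	device = vfio_pci_device_init(bdf, NULL);
 *	vfio_pci_irq_enable(device, VFIO_PCI_MSI_IRQ_INDEX, 0, 1);
 *	...
 *	vfio_pci_device_cleanup(device);
 */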
struct vfio_pci_device *vfio_pci_device_init(const char *bdf, const char *iommu_mode)
{
	struct vfio_pci_device *device;

	device = calloc(1, sizeof(*device));
	VFIO_ASSERT_NOT_NULL(device);

	device->iommu_mode = lookup_iommu_mode(iommu_mode);

	vfio_pci_container_setup(device);
	vfio_pci_group_setup(device, bdf);
	vfio_pci_iommu_setup(device);
	vfio_pci_device_setup(device, bdf);

	vfio_pci_driver_probe(device);

	return device;
}

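/*
 * Tear down everything vfio_pci_device_init() set up: remove the driver if
 * one was probed, unmap BARs, close the device, eventfd, group, and container
 * fds, and free the device structure.
 */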
void vfio_pci_device_cleanup(struct vfio_pci_device *device)
{
	int i;

	if (device->driver.initialized)
		vfio_pci_driver_remove(device);

	vfio_pci_bar_unmap_all(device);

	VFIO_ASSERT_EQ(close(device->fd), 0);

	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
		if (device->msi_eventfds[i] < 0)
			continue;

		VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
	}

	VFIO_ASSERT_EQ(close(device->group_fd), 0);
	VFIO_ASSERT_EQ(close(device->container_fd), 0);

	free(device);
}

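/*
 * Return true if @str is a full PCI address of the form
 * segment:bus:device.function (e.g. 0000:00:1f.6), with nothing trailing.
 */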
static bool is_bdf(const char *str)
{
	unsigned int s, b, d, f;
	int length, count;

	count = sscanf(str, "%4x:%2x:%2x.%2x%n", &s, &b, &d, &f, &length);
	return count == 4 && length == strlen(str);
}

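/*
 * Determine which device the test should use: take a trailing BDF argument
 * from argv if present (and pop it off *argc), otherwise fall back to the
 * VFIO_SELFTESTS_BDF environment variable. Skip the test if neither is set.
 */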
const char *vfio_selftests_get_bdf(int *argc, char *argv[])
{
	char *bdf;

	if (*argc > 1 && is_bdf(argv[*argc - 1]))
		return argv[--(*argc)];

	bdf = getenv("VFIO_SELFTESTS_BDF");
	if (bdf) {
		VFIO_ASSERT_TRUE(is_bdf(bdf), "Invalid BDF: %s\n", bdf);
		return bdf;
	}

	fprintf(stderr, "Unable to determine which device to use, skipping test.\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "To pass the device address via environment variable:\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "    export VFIO_SELFTESTS_BDF=segment:bus:device.function\n");
	fprintf(stderr, "    %s [options]\n", argv[0]);
	fprintf(stderr, "\n");
	fprintf(stderr, "To pass the device address via argv:\n");
	fprintf(stderr, "\n");
	fprintf(stderr, "    %s [options] segment:bus:device.function\n", argv[0]);
	fprintf(stderr, "\n");
	exit(KSFT_SKIP);
}