xref: /linux/tools/testing/selftests/vfio/lib/vfio_pci_device.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <dirent.h>
3 #include <fcntl.h>
4 #include <libgen.h>
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <unistd.h>
9 
10 #include <sys/eventfd.h>
11 #include <sys/ioctl.h>
12 #include <sys/mman.h>
13 
14 #include <linux/align.h>
15 #include <linux/iommufd.h>
16 #include <linux/kernel.h>
17 #include <linux/limits.h>
18 #include <linux/log2.h>
19 #include <linux/mman.h>
20 #include <linux/overflow.h>
21 #include <linux/sizes.h>
22 #include <linux/types.h>
23 #include <linux/vfio.h>
24 
25 #include "kselftest.h"
26 #include <libvfio.h>
27 
28 #define PCI_SYSFS_PATH	"/sys/bus/pci/devices"
29 
30 static void vfio_pci_irq_set(struct vfio_pci_device *device,
31 			     u32 index, u32 vector, u32 count, int *fds)
32 {
33 	u8 buf[sizeof(struct vfio_irq_set) + sizeof(int) * count];
34 	struct vfio_irq_set *irq = (void *)&buf;
35 	int *irq_fds = (void *)&irq->data;
36 
37 	memset(buf, 0, sizeof(buf));
38 
39 	irq->argsz = sizeof(buf);
40 	irq->flags = VFIO_IRQ_SET_ACTION_TRIGGER;
41 	irq->index = index;
42 	irq->start = vector;
43 	irq->count = count;
44 
45 	if (count) {
46 		irq->flags |= VFIO_IRQ_SET_DATA_EVENTFD;
47 		memcpy(irq_fds, fds, sizeof(int) * count);
48 	} else {
49 		irq->flags |= VFIO_IRQ_SET_DATA_NONE;
50 	}
51 
52 	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, irq);
53 }
54 
55 void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector)
56 {
57 	struct vfio_irq_set irq = {
58 		.argsz = sizeof(irq),
59 		.flags = VFIO_IRQ_SET_ACTION_TRIGGER | VFIO_IRQ_SET_DATA_NONE,
60 		.index = index,
61 		.start = vector,
62 		.count = 1,
63 	};
64 
65 	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, &irq);
66 }
67 
68 static void check_supported_irq_index(u32 index)
69 {
70 	/* VFIO selftests only supports MSI and MSI-x for now. */
71 	VFIO_ASSERT_TRUE(index == VFIO_PCI_MSI_IRQ_INDEX ||
72 			 index == VFIO_PCI_MSIX_IRQ_INDEX,
73 			 "Unsupported IRQ index: %u\n", index);
74 }
75 
76 void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index, u32 vector,
77 			 int count)
78 {
79 	int i;
80 
81 	check_supported_irq_index(index);
82 
83 	for (i = vector; i < vector + count; i++) {
84 		VFIO_ASSERT_LT(device->msi_eventfds[i], 0);
85 		device->msi_eventfds[i] = eventfd(0, 0);
86 		VFIO_ASSERT_GE(device->msi_eventfds[i], 0);
87 	}
88 
89 	vfio_pci_irq_set(device, index, vector, count, device->msi_eventfds + vector);
90 }
91 
92 void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index)
93 {
94 	int i;
95 
96 	check_supported_irq_index(index);
97 
98 	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
99 		if (device->msi_eventfds[i] < 0)
100 			continue;
101 
102 		VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
103 		device->msi_eventfds[i] = -1;
104 	}
105 
106 	vfio_pci_irq_set(device, index, 0, 0, NULL);
107 }
108 
109 static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,
110 			     struct vfio_irq_info *irq_info)
111 {
112 	irq_info->argsz = sizeof(*irq_info);
113 	irq_info->index = index;
114 
115 	ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
116 }
117 
118 static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
119 				struct vfio_region_info *info)
120 {
121 	memset(info, 0, sizeof(*info));
122 
123 	info->argsz = sizeof(*info);
124 	info->index = index;
125 
126 	ioctl_assert(device->fd, VFIO_DEVICE_GET_REGION_INFO, info);
127 }
128 
/*
 * mmap() BAR @index into the process and record the mapping in
 * device->bars[index].vaddr. The region must advertise MMAP support
 * and have a power-of-two size.
 */
static void vfio_pci_bar_map(struct vfio_pci_device *device, int index)
{
	struct vfio_pci_bar *bar = &device->bars[index];
	size_t align, size;
	int prot = 0;
	void *vaddr;

	VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
	VFIO_ASSERT_NULL(bar->vaddr);
	VFIO_ASSERT_TRUE(bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP);
	VFIO_ASSERT_TRUE(is_power_of_2(bar->info.size));

	/* Mirror the region's access flags in the mapping protection. */
	if (bar->info.flags & VFIO_REGION_INFO_FLAG_READ)
		prot |= PROT_READ;
	if (bar->info.flags & VFIO_REGION_INFO_FLAG_WRITE)
		prot |= PROT_WRITE;

	size = bar->info.size;

	/*
	 * Align BAR mmaps to improve page fault granularity during potential
	 * subsequent IOMMU mapping of these BAR vaddr. 1G for x86 is the
	 * largest hugepage size across any architecture, so no benefit from
	 * larger alignment. BARs smaller than 1G will be aligned by their
	 * power-of-two size, guaranteeing sufficient alignment for smaller
	 * hugepages, if present.
	 */
	align = min_t(size_t, size, SZ_1G);

	/*
	 * Reserve an aligned address range first, then map the BAR over it
	 * with MAP_FIXED so the final mapping inherits that alignment.
	 */
	vaddr = mmap_reserve(size, align, 0);
	bar->vaddr = mmap(vaddr, size, prot, MAP_SHARED | MAP_FIXED,
			  device->fd, bar->info.offset);
	VFIO_ASSERT_NE(bar->vaddr, MAP_FAILED);

	/* Best-effort hugepage hint; madvise() failure is deliberately ignored. */
	madvise(bar->vaddr, size, MADV_HUGEPAGE);
}
165 
166 static void vfio_pci_bar_unmap(struct vfio_pci_device *device, int index)
167 {
168 	struct vfio_pci_bar *bar = &device->bars[index];
169 
170 	VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
171 	VFIO_ASSERT_NOT_NULL(bar->vaddr);
172 
173 	VFIO_ASSERT_EQ(munmap(bar->vaddr, bar->info.size), 0);
174 	bar->vaddr = NULL;
175 }
176 
177 static void vfio_pci_bar_unmap_all(struct vfio_pci_device *device)
178 {
179 	int i;
180 
181 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
182 		if (device->bars[i].vaddr)
183 			vfio_pci_bar_unmap(device, i);
184 	}
185 }
186 
187 void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
188 			    size_t config, size_t size, void *data)
189 {
190 	struct vfio_region_info *config_space = &device->config_space;
191 	int ret;
192 
193 	if (write)
194 		ret = pwrite(device->fd, data, size, config_space->offset + config);
195 	else
196 		ret = pread(device->fd, data, size, config_space->offset + config);
197 
198 	VFIO_ASSERT_EQ(ret, size, "Failed to %s PCI config space: 0x%lx\n",
199 		       write ? "write to" : "read from", config);
200 }
201 
/* Reset the device via the VFIO_DEVICE_RESET ioctl; asserts success. */
void vfio_pci_device_reset(struct vfio_pci_device *device)
{
	ioctl_assert(device->fd, VFIO_DEVICE_RESET, NULL);
}
206 
207 static unsigned int vfio_pci_get_group_from_dev(const char *bdf)
208 {
209 	char dev_iommu_group_path[PATH_MAX] = {0};
210 	char sysfs_path[PATH_MAX] = {0};
211 	unsigned int group;
212 	int ret;
213 
214 	snprintf(sysfs_path, PATH_MAX, "%s/%s/iommu_group", PCI_SYSFS_PATH, bdf);
215 
216 	ret = readlink(sysfs_path, dev_iommu_group_path, sizeof(dev_iommu_group_path));
217 	VFIO_ASSERT_NE(ret, -1, "Failed to get the IOMMU group for device: %s\n", bdf);
218 
219 	ret = sscanf(basename(dev_iommu_group_path), "%u", &group);
220 	VFIO_ASSERT_EQ(ret, 1, "Failed to get the IOMMU group for device: %s\n", bdf);
221 
222 	return group;
223 }
224 
225 static void vfio_pci_group_setup(struct vfio_pci_device *device, const char *bdf)
226 {
227 	struct vfio_group_status group_status = {
228 		.argsz = sizeof(group_status),
229 	};
230 	char group_path[32];
231 	int group;
232 
233 	group = vfio_pci_get_group_from_dev(bdf);
234 	snprintf(group_path, sizeof(group_path), "/dev/vfio/%d", group);
235 
236 	device->group_fd = open(group_path, O_RDWR);
237 	VFIO_ASSERT_GE(device->group_fd, 0, "open(%s) failed\n", group_path);
238 
239 	ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status);
240 	VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE);
241 
242 	ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->iommu->container_fd);
243 }
244 
/*
 * Container-based (legacy group) setup path: attach the device's group to
 * the shared container, set the IOMMU type, and obtain the device fd.
 */
static void vfio_pci_container_setup(struct vfio_pci_device *device, const char *bdf)
{
	struct iommu *iommu = device->iommu;
	unsigned long iommu_type = iommu->mode->iommu_type;
	int ret;

	vfio_pci_group_setup(device, bdf);

	/* The container must support the requested IOMMU type. */
	ret = ioctl(iommu->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
	VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);

	/*
	 * Allow multiple threads to race to set the IOMMU type on the
	 * container. The first will succeed and the rest should fail
	 * because the IOMMU type is already set.
	 */
	(void)ioctl(iommu->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);

	device->fd = ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf);
	VFIO_ASSERT_GE(device->fd, 0);
}
266 
/*
 * Post-open initialization: query device/region/IRQ info, mmap all
 * mappable BARs, and mark every MSI eventfd slot as unallocated.
 * Requires device->fd to be valid.
 */
static void vfio_pci_device_setup(struct vfio_pci_device *device)
{
	int i;

	device->info.argsz = sizeof(device->info);
	ioctl_assert(device->fd, VFIO_DEVICE_GET_INFO, &device->info);

	vfio_pci_region_get(device, VFIO_PCI_CONFIG_REGION_INDEX, &device->config_space);

	/* Sanity check VFIO does not advertise mmap for config space */
	VFIO_ASSERT_TRUE(!(device->config_space.flags & VFIO_REGION_INFO_FLAG_MMAP),
			 "PCI config space should not support mmap()\n");

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct vfio_pci_bar *bar = device->bars + i;

		vfio_pci_region_get(device, i, &bar->info);
		if (bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP)
			vfio_pci_bar_map(device, i);
	}

	vfio_pci_irq_get(device, VFIO_PCI_MSI_IRQ_INDEX, &device->msi_info);
	vfio_pci_irq_get(device, VFIO_PCI_MSIX_IRQ_INDEX, &device->msix_info);

	/* -1 marks a vector as having no eventfd allocated yet. */
	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++)
		device->msi_eventfds[i] = -1;
}
294 
/*
 * Return the /dev/vfio/devices/vfioN cdev path for PCI device @bdf.
 * The returned string is heap-allocated; the caller owns and frees it.
 */
const char *vfio_pci_get_cdev_path(const char *bdf)
{
	char dir_path[PATH_MAX];
	struct dirent *entry;
	char *cdev_path;
	DIR *dir;

	cdev_path = calloc(PATH_MAX, 1);
	VFIO_ASSERT_NOT_NULL(cdev_path);

	snprintf(dir_path, sizeof(dir_path), "/sys/bus/pci/devices/%s/vfio-dev/", bdf);

	dir = opendir(dir_path);
	VFIO_ASSERT_NOT_NULL(dir, "Failed to open directory %s\n", dir_path);

	/* Find the entry whose name starts with "vfio". */
	for (entry = readdir(dir); entry; entry = readdir(dir)) {
		if (!strncmp("vfio", entry->d_name, 4)) {
			snprintf(cdev_path, PATH_MAX, "/dev/vfio/devices/%s",
				 entry->d_name);
			break;
		}
	}

	VFIO_ASSERT_NE(cdev_path[0], 0, "Failed to find vfio cdev file.\n");
	VFIO_ASSERT_EQ(closedir(dir), 0);

	return cdev_path;
}
324 
325 static void vfio_device_bind_iommufd(int device_fd, int iommufd)
326 {
327 	struct vfio_device_bind_iommufd args = {
328 		.argsz = sizeof(args),
329 		.iommufd = iommufd,
330 	};
331 
332 	ioctl_assert(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &args);
333 }
334 
335 static void vfio_device_attach_iommufd_pt(int device_fd, u32 pt_id)
336 {
337 	struct vfio_device_attach_iommufd_pt args = {
338 		.argsz = sizeof(args),
339 		.pt_id = pt_id,
340 	};
341 
342 	ioctl_assert(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &args);
343 }
344 
345 static void vfio_pci_iommufd_setup(struct vfio_pci_device *device, const char *bdf)
346 {
347 	const char *cdev_path = vfio_pci_get_cdev_path(bdf);
348 
349 	device->fd = open(cdev_path, O_RDWR);
350 	VFIO_ASSERT_GE(device->fd, 0);
351 	free((void *)cdev_path);
352 
353 	vfio_device_bind_iommufd(device->fd, device->iommu->iommufd);
354 	vfio_device_attach_iommufd_pt(device->fd, device->iommu->ioas_id);
355 }
356 
357 struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu)
358 {
359 	struct vfio_pci_device *device;
360 
361 	device = calloc(1, sizeof(*device));
362 	VFIO_ASSERT_NOT_NULL(device);
363 
364 	VFIO_ASSERT_NOT_NULL(iommu);
365 	device->iommu = iommu;
366 	device->bdf = bdf;
367 
368 	if (iommu->mode->container_path)
369 		vfio_pci_container_setup(device, bdf);
370 	else
371 		vfio_pci_iommufd_setup(device, bdf);
372 
373 	vfio_pci_device_setup(device);
374 	vfio_pci_driver_probe(device);
375 
376 	return device;
377 }
378 
379 void vfio_pci_device_cleanup(struct vfio_pci_device *device)
380 {
381 	int i;
382 
383 	if (device->driver.initialized)
384 		vfio_pci_driver_remove(device);
385 
386 	vfio_pci_bar_unmap_all(device);
387 
388 	VFIO_ASSERT_EQ(close(device->fd), 0);
389 
390 	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
391 		if (device->msi_eventfds[i] < 0)
392 			continue;
393 
394 		VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
395 	}
396 
397 	if (device->group_fd)
398 		VFIO_ASSERT_EQ(close(device->group_fd), 0);
399 
400 	free(device);
401 }
402