1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <dirent.h>
3 #include <fcntl.h>
4 #include <libgen.h>
5 #include <stdint.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <unistd.h>
9
10 #include <sys/eventfd.h>
11 #include <sys/ioctl.h>
12 #include <sys/mman.h>
13
14 #include <uapi/linux/types.h>
15 #include <linux/iommufd.h>
16 #include <linux/limits.h>
17 #include <linux/mman.h>
18 #include <linux/overflow.h>
19 #include <linux/types.h>
20 #include <linux/vfio.h>
21
22 #include "kselftest.h"
23 #include <libvfio.h>
24
25 #define PCI_SYSFS_PATH "/sys/bus/pci/devices"
26
/*
 * Issue VFIO_DEVICE_SET_IRQS for @count vectors starting at @vector on IRQ
 * index @index.  With a non-zero @count the eventfds in @fds are wired up
 * (DATA_EVENTFD); with count == 0 the index is disabled (DATA_NONE).
 */
static void vfio_pci_irq_set(struct vfio_pci_device *device,
			     u32 index, u32 vector, u32 count, int *fds)
{
	/* struct vfio_irq_set ends in a flexible array; size it for @count fds. */
	u8 buf[sizeof(struct vfio_irq_set) + sizeof(int) * count];
	struct vfio_irq_set *irq = (struct vfio_irq_set *)buf;

	memset(buf, 0, sizeof(buf));

	irq->argsz = sizeof(buf);
	irq->flags = VFIO_IRQ_SET_ACTION_TRIGGER;
	irq->index = index;
	irq->start = vector;
	irq->count = count;

	if (!count) {
		irq->flags |= VFIO_IRQ_SET_DATA_NONE;
	} else {
		irq->flags |= VFIO_IRQ_SET_DATA_EVENTFD;
		memcpy(irq->data, fds, sizeof(int) * count);
	}

	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, irq);
}
49
/* Software-trigger a single interrupt vector on the given IRQ index. */
void vfio_pci_irq_trigger(struct vfio_pci_device *device, u32 index, u32 vector)
{
	struct vfio_irq_set irq = { 0 };

	irq.argsz = sizeof(irq);
	irq.flags = VFIO_IRQ_SET_ACTION_TRIGGER | VFIO_IRQ_SET_DATA_NONE;
	irq.index = index;
	irq.start = vector;
	irq.count = 1;

	ioctl_assert(device->fd, VFIO_DEVICE_SET_IRQS, &irq);
}
62
/* Abort unless @index is one of the IRQ indices the selftests implement. */
static void check_supported_irq_index(u32 index)
{
	bool supported = index == VFIO_PCI_MSI_IRQ_INDEX ||
			 index == VFIO_PCI_MSIX_IRQ_INDEX;

	/* VFIO selftests only supports MSI and MSI-x for now. */
	VFIO_ASSERT_TRUE(supported, "Unsupported IRQ index: %u\n", index);
}
70
/*
 * Enable @count interrupt vectors starting at @vector on IRQ index @index,
 * allocating one eventfd per vector.  Each targeted slot in
 * device->msi_eventfds must currently be unused (-1).
 */
void vfio_pci_irq_enable(struct vfio_pci_device *device, u32 index, u32 vector,
			 int count)
{
	int v;

	check_supported_irq_index(index);

	for (v = vector; v < vector + count; v++) {
		int fd;

		VFIO_ASSERT_LT(device->msi_eventfds[v], 0);
		fd = eventfd(0, 0);
		VFIO_ASSERT_GE(fd, 0);
		device->msi_eventfds[v] = fd;
	}

	vfio_pci_irq_set(device, index, vector, count,
			 &device->msi_eventfds[vector]);
}
86
/*
 * Disable all interrupts on IRQ index @index and close every eventfd that
 * was allocated by vfio_pci_irq_enable(), resetting the slots to -1.
 */
void vfio_pci_irq_disable(struct vfio_pci_device *device, u32 index)
{
	size_t i;

	check_supported_irq_index(index);

	for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
		int fd = device->msi_eventfds[i];

		if (fd < 0)
			continue;

		VFIO_ASSERT_EQ(close(fd), 0);
		device->msi_eventfds[i] = -1;
	}

	vfio_pci_irq_set(device, index, 0, 0, NULL);
}
103
/*
 * Query VFIO for the IRQ capabilities at @index, filling *irq_info.
 *
 * Zero the struct first so no stale caller bytes beyond argsz/index reach
 * the ioctl — consistent with vfio_pci_region_get().
 */
static void vfio_pci_irq_get(struct vfio_pci_device *device, u32 index,
			     struct vfio_irq_info *irq_info)
{
	memset(irq_info, 0, sizeof(*irq_info));

	irq_info->argsz = sizeof(*irq_info);
	irq_info->index = index;

	ioctl_assert(device->fd, VFIO_DEVICE_GET_IRQ_INFO, irq_info);
}
112
vfio_pci_region_get(struct vfio_pci_device * device,int index,struct vfio_region_info * info)113 static void vfio_pci_region_get(struct vfio_pci_device *device, int index,
114 struct vfio_region_info *info)
115 {
116 memset(info, 0, sizeof(*info));
117
118 info->argsz = sizeof(*info);
119 info->index = index;
120
121 ioctl_assert(device->fd, VFIO_DEVICE_GET_REGION_INFO, info);
122 }
123
vfio_pci_bar_map(struct vfio_pci_device * device,int index)124 static void vfio_pci_bar_map(struct vfio_pci_device *device, int index)
125 {
126 struct vfio_pci_bar *bar = &device->bars[index];
127 int prot = 0;
128
129 VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
130 VFIO_ASSERT_NULL(bar->vaddr);
131 VFIO_ASSERT_TRUE(bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP);
132
133 if (bar->info.flags & VFIO_REGION_INFO_FLAG_READ)
134 prot |= PROT_READ;
135 if (bar->info.flags & VFIO_REGION_INFO_FLAG_WRITE)
136 prot |= PROT_WRITE;
137
138 bar->vaddr = mmap(NULL, bar->info.size, prot, MAP_FILE | MAP_SHARED,
139 device->fd, bar->info.offset);
140 VFIO_ASSERT_NE(bar->vaddr, MAP_FAILED);
141 }
142
vfio_pci_bar_unmap(struct vfio_pci_device * device,int index)143 static void vfio_pci_bar_unmap(struct vfio_pci_device *device, int index)
144 {
145 struct vfio_pci_bar *bar = &device->bars[index];
146
147 VFIO_ASSERT_LT(index, PCI_STD_NUM_BARS);
148 VFIO_ASSERT_NOT_NULL(bar->vaddr);
149
150 VFIO_ASSERT_EQ(munmap(bar->vaddr, bar->info.size), 0);
151 bar->vaddr = NULL;
152 }
153
vfio_pci_bar_unmap_all(struct vfio_pci_device * device)154 static void vfio_pci_bar_unmap_all(struct vfio_pci_device *device)
155 {
156 int i;
157
158 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
159 if (device->bars[i].vaddr)
160 vfio_pci_bar_unmap(device, i);
161 }
162 }
163
/*
 * Read from or write to PCI config space at offset @config for @size bytes.
 * Config space is accessed through pread()/pwrite() on the device fd at the
 * config region's file offset (VFIO does not allow mmap of config space).
 */
void vfio_pci_config_access(struct vfio_pci_device *device, bool write,
			    size_t config, size_t size, void *data)
{
	struct vfio_region_info *config_space = &device->config_space;
	ssize_t ret;	/* pread/pwrite return ssize_t; don't truncate to int */

	if (write)
		ret = pwrite(device->fd, data, size, config_space->offset + config);
	else
		ret = pread(device->fd, data, size, config_space->offset + config);

	VFIO_ASSERT_EQ(ret, (ssize_t)size, "Failed to %s PCI config space: 0x%lx\n",
		       write ? "write to" : "read from", config);
}
178
/* Reset the device through the VFIO device-level reset ioctl. */
void vfio_pci_device_reset(struct vfio_pci_device *device)
{
	ioctl_assert(device->fd, VFIO_DEVICE_RESET, NULL);
}
183
vfio_pci_get_group_from_dev(const char * bdf)184 static unsigned int vfio_pci_get_group_from_dev(const char *bdf)
185 {
186 char dev_iommu_group_path[PATH_MAX] = {0};
187 char sysfs_path[PATH_MAX] = {0};
188 unsigned int group;
189 int ret;
190
191 snprintf(sysfs_path, PATH_MAX, "%s/%s/iommu_group", PCI_SYSFS_PATH, bdf);
192
193 ret = readlink(sysfs_path, dev_iommu_group_path, sizeof(dev_iommu_group_path));
194 VFIO_ASSERT_NE(ret, -1, "Failed to get the IOMMU group for device: %s\n", bdf);
195
196 ret = sscanf(basename(dev_iommu_group_path), "%u", &group);
197 VFIO_ASSERT_EQ(ret, 1, "Failed to get the IOMMU group for device: %s\n", bdf);
198
199 return group;
200 }
201
vfio_pci_group_setup(struct vfio_pci_device * device,const char * bdf)202 static void vfio_pci_group_setup(struct vfio_pci_device *device, const char *bdf)
203 {
204 struct vfio_group_status group_status = {
205 .argsz = sizeof(group_status),
206 };
207 char group_path[32];
208 int group;
209
210 group = vfio_pci_get_group_from_dev(bdf);
211 snprintf(group_path, sizeof(group_path), "/dev/vfio/%d", group);
212
213 device->group_fd = open(group_path, O_RDWR);
214 VFIO_ASSERT_GE(device->group_fd, 0, "open(%s) failed\n", group_path);
215
216 ioctl_assert(device->group_fd, VFIO_GROUP_GET_STATUS, &group_status);
217 VFIO_ASSERT_TRUE(group_status.flags & VFIO_GROUP_FLAGS_VIABLE);
218
219 ioctl_assert(device->group_fd, VFIO_GROUP_SET_CONTAINER, &device->iommu->container_fd);
220 }
221
/*
 * Legacy group/container setup path: join the device's IOMMU group to the
 * shared container, set the container's IOMMU type, then obtain a device fd
 * from the group.  Order matters: the group must be attached to the
 * container before VFIO_SET_IOMMU, and the IOMMU must be set before
 * VFIO_GROUP_GET_DEVICE_FD can succeed.
 */
static void vfio_pci_container_setup(struct vfio_pci_device *device, const char *bdf)
{
	struct iommu *iommu = device->iommu;
	unsigned long iommu_type = iommu->mode->iommu_type;
	int ret;

	vfio_pci_group_setup(device, bdf);

	ret = ioctl(iommu->container_fd, VFIO_CHECK_EXTENSION, iommu_type);
	VFIO_ASSERT_GT(ret, 0, "VFIO IOMMU type %lu not supported\n", iommu_type);

	/*
	 * Allow multiple threads to race to set the IOMMU type on the
	 * container. The first will succeed and the rest should fail
	 * because the IOMMU type is already set.
	 */
	(void)ioctl(iommu->container_fd, VFIO_SET_IOMMU, (void *)iommu_type);

	device->fd = ioctl(device->group_fd, VFIO_GROUP_GET_DEVICE_FD, bdf);
	VFIO_ASSERT_GE(device->fd, 0);
}
243
vfio_pci_device_setup(struct vfio_pci_device * device)244 static void vfio_pci_device_setup(struct vfio_pci_device *device)
245 {
246 int i;
247
248 device->info.argsz = sizeof(device->info);
249 ioctl_assert(device->fd, VFIO_DEVICE_GET_INFO, &device->info);
250
251 vfio_pci_region_get(device, VFIO_PCI_CONFIG_REGION_INDEX, &device->config_space);
252
253 /* Sanity check VFIO does not advertise mmap for config space */
254 VFIO_ASSERT_TRUE(!(device->config_space.flags & VFIO_REGION_INFO_FLAG_MMAP),
255 "PCI config space should not support mmap()\n");
256
257 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
258 struct vfio_pci_bar *bar = device->bars + i;
259
260 vfio_pci_region_get(device, i, &bar->info);
261 if (bar->info.flags & VFIO_REGION_INFO_FLAG_MMAP)
262 vfio_pci_bar_map(device, i);
263 }
264
265 vfio_pci_irq_get(device, VFIO_PCI_MSI_IRQ_INDEX, &device->msi_info);
266 vfio_pci_irq_get(device, VFIO_PCI_MSIX_IRQ_INDEX, &device->msix_info);
267
268 for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++)
269 device->msi_eventfds[i] = -1;
270 }
271
/*
 * Return the /dev/vfio/devices/vfioN character-device path for @bdf by
 * scanning the device's sysfs vfio-dev/ directory for the "vfio*" entry.
 * The returned string is heap-allocated; the caller must free() it.
 */
const char *vfio_pci_get_cdev_path(const char *bdf)
{
	char dir_path[PATH_MAX];
	struct dirent *entry;
	char *cdev_path;
	DIR *dir;

	cdev_path = calloc(PATH_MAX, 1);
	VFIO_ASSERT_NOT_NULL(cdev_path);

	snprintf(dir_path, sizeof(dir_path), "/sys/bus/pci/devices/%s/vfio-dev/", bdf);

	dir = opendir(dir_path);
	VFIO_ASSERT_NOT_NULL(dir, "Failed to open directory %s\n", dir_path);

	/* The directory is expected to hold a single "vfioN" entry. */
	for (entry = readdir(dir); entry; entry = readdir(dir)) {
		if (!strncmp(entry->d_name, "vfio", 4)) {
			snprintf(cdev_path, PATH_MAX, "/dev/vfio/devices/%s",
				 entry->d_name);
			break;
		}
	}

	VFIO_ASSERT_NE(cdev_path[0], 0, "Failed to find vfio cdev file.\n");
	VFIO_ASSERT_EQ(closedir(dir), 0);

	return cdev_path;
}
301
vfio_device_bind_iommufd(int device_fd,int iommufd)302 static void vfio_device_bind_iommufd(int device_fd, int iommufd)
303 {
304 struct vfio_device_bind_iommufd args = {
305 .argsz = sizeof(args),
306 .iommufd = iommufd,
307 };
308
309 ioctl_assert(device_fd, VFIO_DEVICE_BIND_IOMMUFD, &args);
310 }
311
/* Attach a bound VFIO cdev device to the page table (IOAS/hwpt) @pt_id. */
static void vfio_device_attach_iommufd_pt(int device_fd, u32 pt_id)
{
	struct vfio_device_attach_iommufd_pt attach = { 0 };

	attach.argsz = sizeof(attach);
	attach.pt_id = pt_id;

	ioctl_assert(device_fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach);
}
321
vfio_pci_iommufd_setup(struct vfio_pci_device * device,const char * bdf)322 static void vfio_pci_iommufd_setup(struct vfio_pci_device *device, const char *bdf)
323 {
324 const char *cdev_path = vfio_pci_get_cdev_path(bdf);
325
326 device->fd = open(cdev_path, O_RDWR);
327 VFIO_ASSERT_GE(device->fd, 0);
328 free((void *)cdev_path);
329
330 vfio_device_bind_iommufd(device->fd, device->iommu->iommufd);
331 vfio_device_attach_iommufd_pt(device->fd, device->iommu->ioas_id);
332 }
333
vfio_pci_device_init(const char * bdf,struct iommu * iommu)334 struct vfio_pci_device *vfio_pci_device_init(const char *bdf, struct iommu *iommu)
335 {
336 struct vfio_pci_device *device;
337
338 device = calloc(1, sizeof(*device));
339 VFIO_ASSERT_NOT_NULL(device);
340
341 VFIO_ASSERT_NOT_NULL(iommu);
342 device->iommu = iommu;
343 device->bdf = bdf;
344
345 if (iommu->mode->container_path)
346 vfio_pci_container_setup(device, bdf);
347 else
348 vfio_pci_iommufd_setup(device, bdf);
349
350 vfio_pci_device_setup(device);
351 vfio_pci_driver_probe(device);
352
353 return device;
354 }
355
vfio_pci_device_cleanup(struct vfio_pci_device * device)356 void vfio_pci_device_cleanup(struct vfio_pci_device *device)
357 {
358 int i;
359
360 if (device->driver.initialized)
361 vfio_pci_driver_remove(device);
362
363 vfio_pci_bar_unmap_all(device);
364
365 VFIO_ASSERT_EQ(close(device->fd), 0);
366
367 for (i = 0; i < ARRAY_SIZE(device->msi_eventfds); i++) {
368 if (device->msi_eventfds[i] < 0)
369 continue;
370
371 VFIO_ASSERT_EQ(close(device->msi_eventfds[i]), 0);
372 }
373
374 if (device->group_fd)
375 VFIO_ASSERT_EQ(close(device->group_fd), 0);
376
377 free(device);
378 }
379