xref: /linux/tools/testing/selftests/vfio/vfio_pci_driver_test.c (revision 6da43bbeb6918164f7287269881a5f861ae09d7e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <sys/ioctl.h>
3 #include <sys/mman.h>
4 
5 #include <linux/sizes.h>
6 #include <linux/vfio.h>
7 
8 #include <vfio_util.h>
9 
10 #include "../kselftest_harness.h"
11 
/* PCI BDF of the device under test; set from the command line in main(). */
static const char *device_bdf;

/*
 * Assert that no MSI is pending on @_eventfd.
 *
 * The eventfd must already be in non-blocking mode (fcntl_set_nonblock())
 * so that the read fails with EAGAIN instead of blocking when no interrupt
 * has been delivered.
 */
#define ASSERT_NO_MSI(_eventfd) do {			\
	u64 __value;					\
							\
	ASSERT_EQ(-1, read(_eventfd, &__value, 8));	\
	ASSERT_EQ(EAGAIN, errno);			\
} while (0)
20 
/*
 * Back @region with @size bytes of anonymous shared memory, assign it a
 * fresh IOVA from @iova_allocator, and DMA-map it into @device.
 */
static void region_setup(struct vfio_pci_device *device,
			 struct iova_allocator *iova_allocator,
			 struct vfio_dma_region *region, u64 size)
{
	void *mem;

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	VFIO_ASSERT_NE(mem, MAP_FAILED);

	region->vaddr = mem;
	region->size = size;
	region->iova = iova_allocator_alloc(iova_allocator, size);

	vfio_pci_dma_map(device, region);
}
38 
/*
 * Undo region_setup(): remove @region's DMA mapping from @device and
 * release the backing anonymous memory.
 */
static void region_teardown(struct vfio_pci_device *device,
			    struct vfio_dma_region *region)
{
	vfio_pci_dma_unmap(device, region);
	VFIO_ASSERT_EQ(munmap(region->vaddr, region->size), 0);
}
45 
FIXTURE(vfio_pci_driver_test) {
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
	/* 1GiB DMA-mapped region holding the src and dst memcpy buffers. */
	struct vfio_dma_region memcpy_region;
	void *vaddr;
	/* eventfd backing the driver's MSI vector (device->msi_eventfds[]). */
	int msi_fd;

	/* Per-memcpy transfer size in bytes; see FIXTURE_SETUP. */
	u64 size;
	/* src/dst are adjacent, non-overlapping halves of memcpy_region. */
	void *src;
	void *dst;
	iova_t src_iova;
	iova_t dst_iova;
	/* An IOVA with no mapping behind it, for error-path tests. */
	iova_t unmapped_iova;
};
60 
/* Each test runs once per IOMMU mode (see FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES). */
FIXTURE_VARIANT(vfio_pci_driver_test) {
	const char *iommu_mode;
};
64 
/*
 * Register a test variant named after @_iommu_mode whose iommu_mode string
 * is the stringized macro argument.
 */
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)		\
FIXTURE_VARIANT_ADD(vfio_pci_driver_test, _iommu_mode) {	\
	.iommu_mode = #_iommu_mode,				\
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();
71 
FIXTURE_SETUP(vfio_pci_driver_test)
{
	struct vfio_pci_driver *driver;

	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
	self->iova_allocator = iova_allocator_init(self->device);

	driver = &self->device->driver;

	/* DMA-mapped scratch: 1GiB for memcpy buffers, 2MiB for the driver itself. */
	region_setup(self->device, self->iova_allocator, &self->memcpy_region, SZ_1G);
	region_setup(self->device, self->iova_allocator, &driver->region, SZ_2M);

	/* Any IOVA that doesn't overlap memcpy_region and driver->region. */
	self->unmapped_iova = iova_allocator_alloc(self->iova_allocator, SZ_1G);

	/* Driver init must follow region_setup() of driver->region above. */
	vfio_pci_driver_init(self->device);
	self->msi_fd = self->device->msi_eventfds[driver->msi];

	/*
	 * Use the maximum size supported by the device for memcpy operations,
	 * slimmed down to fit into the memcpy region (divided by 2 so src and
	 * dst regions do not overlap).
	 */
	self->size = self->device->driver.max_memcpy_size;
	self->size = min(self->size, self->memcpy_region.size / 2);

	/* src and dst are adjacent, non-overlapping halves of memcpy_region. */
	self->src = self->memcpy_region.vaddr;
	self->dst = self->src + self->size;

	self->src_iova = to_iova(self->device, self->src);
	self->dst_iova = to_iova(self->device, self->dst);
}
104 
FIXTURE_TEARDOWN(vfio_pci_driver_test)
{
	struct vfio_pci_driver *driver = &self->device->driver;

	/* Remove the driver before tearing down the regions it uses. */
	vfio_pci_driver_remove(self->device);

	region_teardown(self->device, &self->memcpy_region);
	region_teardown(self->device, &driver->region);

	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
}
117 
TEST_F(vfio_pci_driver_test,init_remove)118 TEST_F(vfio_pci_driver_test, init_remove)
119 {
120 	int i;
121 
122 	for (i = 0; i < 10; i++) {
123 		vfio_pci_driver_remove(self->device);
124 		vfio_pci_driver_init(self->device);
125 	}
126 }
127 
TEST_F(vfio_pci_driver_test,memcpy_success)128 TEST_F(vfio_pci_driver_test, memcpy_success)
129 {
130 	fcntl_set_nonblock(self->msi_fd);
131 
132 	memset(self->src, 'x', self->size);
133 	memset(self->dst, 'y', self->size);
134 
135 	ASSERT_EQ(0, vfio_pci_driver_memcpy(self->device,
136 					    self->src_iova,
137 					    self->dst_iova,
138 					    self->size));
139 
140 	ASSERT_EQ(0, memcmp(self->src, self->dst, self->size));
141 	ASSERT_NO_MSI(self->msi_fd);
142 }
143 
/* A DMA read from an unmapped IOVA must not deliver an MSI. */
TEST_F(vfio_pci_driver_test, memcpy_from_unmapped_iova)
{
	fcntl_set_nonblock(self->msi_fd);

	/*
	 * Ignore the return value since not all devices will detect and report
	 * accesses to unmapped IOVAs as errors.
	 */
	vfio_pci_driver_memcpy(self->device, self->unmapped_iova,
			       self->dst_iova, self->size);

	ASSERT_NO_MSI(self->msi_fd);
}
157 
/* A DMA write to an unmapped IOVA must not deliver an MSI. */
TEST_F(vfio_pci_driver_test, memcpy_to_unmapped_iova)
{
	fcntl_set_nonblock(self->msi_fd);

	/*
	 * Ignore the return value since not all devices will detect and report
	 * accesses to unmapped IOVAs as errors.
	 */
	vfio_pci_driver_memcpy(self->device, self->src_iova,
			       self->unmapped_iova, self->size);

	ASSERT_NO_MSI(self->msi_fd);
}
171 
TEST_F(vfio_pci_driver_test,send_msi)172 TEST_F(vfio_pci_driver_test, send_msi)
173 {
174 	u64 value;
175 
176 	vfio_pci_driver_send_msi(self->device);
177 	ASSERT_EQ(8, read(self->msi_fd, &value, 8));
178 	ASSERT_EQ(1, value);
179 }
180 
/*
 * Interleave successful memcpys, memcpys from an unmapped IOVA, and MSI
 * sends to check the driver keeps working across mixed operations.
 */
TEST_F(vfio_pci_driver_test, mix_and_match)
{
	u64 value;
	int i;

	for (i = 0; i < 10; i++) {
		memset(self->src, 'x', self->size);
		memset(self->dst, 'y', self->size);

		ASSERT_EQ(0, vfio_pci_driver_memcpy(self->device,
						    self->src_iova,
						    self->dst_iova,
						    self->size));

		ASSERT_EQ(0, memcmp(self->src, self->dst, self->size));

		/*
		 * Return value deliberately ignored: not all devices detect
		 * and report accesses to unmapped IOVAs as errors.
		 */
		vfio_pci_driver_memcpy(self->device,
				       self->unmapped_iova,
				       self->dst_iova,
				       self->size);

		vfio_pci_driver_send_msi(self->device);
		ASSERT_EQ(8, read(self->msi_fd, &value, 8));
		ASSERT_EQ(1, value);
	}
}
207 
208 TEST_F_TIMEOUT(vfio_pci_driver_test, memcpy_storm, 60)
209 {
210 	struct vfio_pci_driver *driver = &self->device->driver;
211 	u64 total_size;
212 	u64 count;
213 
214 	fcntl_set_nonblock(self->msi_fd);
215 
216 	/*
217 	 * Perform up to 250GiB worth of DMA reads and writes across several
218 	 * memcpy operations. Some devices can support even more but the test
219 	 * will take too long.
220 	 */
221 	total_size = 250UL * SZ_1G;
222 	count = min(total_size / self->size, driver->max_memcpy_count);
223 
224 	printf("Kicking off %lu memcpys of size 0x%lx\n", count, self->size);
225 	vfio_pci_driver_memcpy_start(self->device,
226 				     self->src_iova,
227 				     self->dst_iova,
228 				     self->size, count);
229 
230 	ASSERT_EQ(0, vfio_pci_driver_memcpy_wait(self->device));
231 	ASSERT_NO_MSI(self->msi_fd);
232 }
233 
main(int argc,char * argv[])234 int main(int argc, char *argv[])
235 {
236 	struct vfio_pci_device *device;
237 
238 	device_bdf = vfio_selftests_get_bdf(&argc, argv);
239 
240 	device = vfio_pci_device_init(device_bdf, default_iommu_mode);
241 	if (!device->driver.ops) {
242 		fprintf(stderr, "No driver found for device %s\n", device_bdf);
243 		return KSFT_SKIP;
244 	}
245 	vfio_pci_device_cleanup(device);
246 
247 	return test_harness_run(argc, argv);
248 }
249