// SPDX-License-Identifier: GPL-2.0-only
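/*
 * VFIO selftest exercising DMA map/unmap through VFIO and IOMMUFD,
 * including HugeTLB-backed mappings and behavior at the limits of the
 * IOVA address space.
 */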
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <libvfio.h>

#include "kselftest_harness.h"

static const char *device_bdf;

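/*
 * One value per level of the IOMMU page-table walk for a single IOVA.
 * A value of 0 means the level was absent from the debugfs output, e.g.
 * a huge mapping terminates before reaching the PTE level.
 */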
struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};

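/*
 * Parse the next whitespace- or '|'-delimited token from *line as a hex
 * value, advancing *line past it. *value is left untouched if no token
 * remains or the token is not a hex number.
 */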
static void parse_next_value(char **line, u64 *value)
{
	char *token;

	token = strtok_r(*line, " \t|\n", line);
	if (!token)
		return;

	/* Caller verifies `value`. No need to check return value. */
	sscanf(token, "0x%lx", value);
}

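/*
 * Look up the page-table entries for @iova in the Intel IOMMU debugfs
 * dump for device @bdf. The parser expects each line to carry the IOVA
 * page frame number followed by the PGD/P4D/PUD/PMD/PTE values, in hex,
 * separated by whitespace or '|'. Returns 0 and fills @mapping on
 * success, -ENOENT if no line matches @iova.
 */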
static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}

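/*
 * Vendor dispatch for page-table inspection. Only the Intel IOMMU
 * debugfs interface is supported; on other IOMMUs return -EOPNOTSUPP so
 * callers can skip the page-table checks.
 */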
static int iommu_mapping_get(const char *bdf, u64 iova,
			     struct iommu_mapping *mapping)
{
	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
		return intel_iommu_mapping_get(bdf, iova, mapping);

	return -EOPNOTSUPP;
}

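/*
 * Per-test state: the IOMMU container, the VFIO device under test, and
 * an IOVA allocator for picking unused IOVAs.
 */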
FIXTURE(vfio_dma_mapping_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
};

FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;
	int mmap_flags;
};

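/*
 * FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES() expands the macro below once per
 * supported IOMMU mode, producing a variant for every combination of
 * IOMMU mode and backing-memory type.
 */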
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) {	       \
	.iommu_mode = #_iommu_mode,					       \
	.size = (_size),						       \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags),	       \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	self->iova_allocator = iova_allocator_init(self->iommu);
}

FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
}

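/*
 * Map an anonymous (optionally HugeTLB-backed) buffer into the IOMMU,
 * check the page-table leaf level against the expected mapping size when
 * the debugfs interface is available, then unmap and verify the mapping
 * is gone.
 */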
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	u64 unmapped;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if ((flags & MAP_HUGETLB) && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	region.iova = iova_allocator_alloc(self->iova_allocator, size);
	region.size = size;

	iommu_map(self->iommu, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * IOMMUFD compatibility mode does not support huge mappings when
	 * using VFIO_TYPE1_IOMMU, so expect 4KiB mappings there.
	 */
	if (!strcmp(variant->iommu_mode, "iommufd_compat_type1"))
		mapping_size = SZ_4K;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

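	/*
	 * The leaf level of the walk encodes the mapping size: a 4KiB
	 * mapping terminates in a PTE, a 2MiB mapping in a PMD, and a
	 * 1GiB mapping in a PUD, with the lower levels left empty.
	 */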
	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	rc = __iommu_unmap(self->iommu, &region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region.size);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_EQ(munmap(region.vaddr, size), 0);
}

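/*
 * Fixture for exercising DMA mapping at the very top of the IOVA address
 * space: exact-range unmap, unmap-all, and overflow handling.
 */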
FIXTURE(vfio_dma_map_limit_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct dma_region region;
	size_t mmap_size;
};

FIXTURE_VARIANT(vfio_dma_map_limit_test) {
	const char *iommu_mode;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			       \
FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		       \
	.iommu_mode = #_iommu_mode,					       \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_map_limit_test)
{
	struct dma_region *region = &self->region;
	struct iommu_iova_range *ranges;
	u64 region_size = getpagesize();
	iova_t last_iova;
	u32 nranges;

	/*
	 * Over-allocate the mmap to twice the region size so the overflow
	 * test has enough backing vaddr beyond the mapped page.
	 */
	self->mmap_size = 2 * region_size;

	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	ASSERT_NE(region->vaddr, MAP_FAILED);

	ranges = iommu_iova_ranges(self->iommu, &nranges);
	VFIO_ASSERT_NOT_NULL(ranges);
	last_iova = ranges[nranges - 1].last;
	free(ranges);

	/* The page-aligned start of the last page of usable IOVA space */
	region->iova = last_iova & ~(region_size - 1);
	region->size = region_size;
}

FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}

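/* Map the last page of the IOVA space, then unmap it by exact range. */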
TEST_F(vfio_dma_map_limit_test, unmap_range)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap(self->iommu, region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

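/*
 * Unmap-all must also remove a mapping at the top of the IOVA space; the
 * reported unmapped byte count should equal the one mapped region.
 */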
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap_all(self->iommu, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

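/*
 * A two-page region whose IOVA starts on the last page of the address
 * space wraps past the end, so both map and unmap must fail with
 * -EOVERFLOW.
 */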
TEST_F(vfio_dma_map_limit_test, overflow)
{
	struct dma_region *region = &self->region;
	int rc;

	region->iova = ~(iova_t)0 & ~(region->size - 1);
	region->size = self->mmap_size;

	rc = __iommu_map(self->iommu, region);
	ASSERT_EQ(rc, -EOVERFLOW);

	rc = __iommu_unmap(self->iommu, region, NULL);
	ASSERT_EQ(rc, -EOVERFLOW);
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}