// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <uapi/linux/types.h>
#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <vfio_util.h>

#include "../kselftest_harness.h"

static const char *device_bdf;

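/*
 * One IOVA's page-table walk as reported by the IOMMU debugfs. Levels
 * absent from the walk (e.g. when a huge page terminates it early) are
 * left as zero.
 */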
struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};

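/*
 * Consume the next whitespace- or '|'-delimited token from *line and
 * parse it as a hex value. *value is left untouched if no token remains.
 */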
static void parse_next_value(char **line, u64 *value)
{
	char *token;

	token = strtok_r(*line, " \t|\n", line);
	if (!token)
		return;

	/* The caller validates `value`; sscanf()'s return value can be ignored. */
	sscanf(token, "0x%lx", value);
}

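/*
 * Look up @iova in the Intel IOMMU debugfs dump of @bdf's page tables.
 * Returns 0 and fills in @mapping if the IOVA is mapped, -ENOENT otherwise.
 */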
static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}

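/*
 * Dispatch to the vendor-specific debugfs parser. Returns -EOPNOTSUPP on
 * IOMMUs without one so callers can skip the page-table checks.
 */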
static int iommu_mapping_get(const char *bdf, u64 iova,
			     struct iommu_mapping *mapping)
{
	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
		return intel_iommu_mapping_get(bdf, iova, mapping);

	return -EOPNOTSUPP;
}

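/* Per-test state: the device under test and an allocator for picking IOVAs. */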
FIXTURE(vfio_dma_mapping_test) {
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
};

FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;
	int mmap_flags;
};

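/*
 * Generate one variant per IOMMU mode for a given backing memory type.
 * A size of 0 means one system page.
 */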
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) {	       \
	.iommu_mode = #_iommu_mode,					       \
	.size = (_size),						       \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags),	       \
}

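/* Instantiate anonymous, 2M HugeTLB, and 1G HugeTLB variants for each IOMMU mode. */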
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
	self->iova_allocator = iova_allocator_init(self->device);
}

FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
}

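/*
 * Map a buffer for DMA, verify the IOMMU installed a mapping of the
 * expected size, then unmap it and verify the mapping is gone.
 */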
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct vfio_dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	u64 unmapped;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	region.iova = iova_allocator_alloc(self->iova_allocator, size);
	region.size = size;

	vfio_pci_dma_map(self->device, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * IOMMUFD compatibility-mode does not support huge mappings when
	 * using VFIO_TYPE1_IOMMU.
	 */
	if (!strcmp(variant->iommu_mode, "iommufd_compat_type1"))
		mapping_size = SZ_4K;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

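	/* Verify the mapping was installed at the expected page-table level. */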
	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region.size);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_TRUE(!munmap(region.vaddr, size));
}

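/* Tests for DMA mappings at the upper limit of the IOVA space. */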
FIXTURE(vfio_dma_map_limit_test) {
	struct vfio_pci_device *device;
	struct vfio_dma_region region;
	size_t mmap_size;
};

FIXTURE_VARIANT(vfio_dma_map_limit_test) {
	const char *iommu_mode;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			       \
FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		       \
	.iommu_mode = #_iommu_mode,					       \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

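/*
 * Place a one-page region on the last page of the device's IOVA space;
 * the tests below probe unmap behavior and overflow at that boundary.
 */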
FIXTURE_SETUP(vfio_dma_map_limit_test)
{
	struct vfio_dma_region *region = &self->region;
	struct iommu_iova_range *ranges;
	u64 region_size = getpagesize();
	iova_t last_iova;
	u32 nranges;

	/*
	 * Over-allocate the mmap() to double the region size so the overflow
	 * tests have enough backing vaddr.
	 */
	self->mmap_size = 2 * region_size;

	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	ASSERT_NE(region->vaddr, MAP_FAILED);

	ranges = vfio_pci_iova_ranges(self->device, &nranges);
	VFIO_ASSERT_NOT_NULL(ranges);
	last_iova = ranges[nranges - 1].last;
	free(ranges);

	/* The start of the page containing the last usable IOVA. */
	region->iova = last_iova & ~(region_size - 1);
	region->size = region_size;
}

FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
	vfio_pci_device_cleanup(self->device);
	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}

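/* Mapping and unmapping the last page by exact range must succeed. */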
TEST_F(vfio_dma_map_limit_test, unmap_range)
{
	struct vfio_dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	vfio_pci_dma_map(self->device, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __vfio_pci_dma_unmap(self->device, region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

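/* Unmap-all must account for a mapping that ends at the top of the IOVA space. */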
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
	struct vfio_dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	vfio_pci_dma_map(self->device, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __vfio_pci_dma_unmap_all(self->device, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

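/*
 * iova + size wraps past the top of the address space; both map and
 * unmap must reject the region with -EOVERFLOW.
 */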
TEST_F(vfio_dma_map_limit_test, overflow)
{
	struct vfio_dma_region *region = &self->region;
	int rc;

	region->iova = ~(iova_t)0 & ~(region->size - 1);
	region->size = self->mmap_size;

	rc = __vfio_pci_dma_map(self->device, region);
	ASSERT_EQ(rc, -EOVERFLOW);

	rc = __vfio_pci_dma_unmap(self->device, region, NULL);
	ASSERT_EQ(rc, -EOVERFLOW);
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}