xref: /linux/tools/testing/selftests/vfio/vfio_dma_mapping_test.c (revision 09b1704f5b02c18dd02b21343530463fcfc92c54)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <stdio.h>
3 #include <sys/mman.h>
4 #include <unistd.h>
5 
6 #include <linux/limits.h>
7 #include <linux/mman.h>
8 #include <linux/sizes.h>
9 #include <linux/vfio.h>
10 
11 #include <vfio_util.h>
12 
13 #include "../kselftest_harness.h"
14 
/* Target device BDF ("segment:bus:device.function"), parsed from argv in main(). */
static const char *device_bdf;
16 
/*
 * One row of the Intel IOMMU debugfs page-table dump: the physical address
 * recorded at each paging-structure level for a single IOVA. A level that is
 * not populated (e.g. pte for a 2M huge mapping) is left as 0.
 */
struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};
24 
25 static void parse_next_value(char **line, u64 *value)
26 {
27 	char *token;
28 
29 	token = strtok_r(*line, " \t|\n", line);
30 	if (!token)
31 		return;
32 
33 	/* Caller verifies `value`. No need to check return value. */
34 	sscanf(token, "0x%lx", value);
35 }
36 
/*
 * Look up the IOMMU page-table entries for @iova by scanning the Intel IOMMU
 * debugfs dump for device @bdf.
 *
 * Returns 0 and fills in @mapping when a line for @iova is found, -ENOENT
 * otherwise. Asserts (test failure) if the debugfs file cannot be opened.
 */
static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	/* Sentinel; only overwritten when a line's first column parses as hex. */
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		/* First column is the IOVA's page frame number, in hex. */
		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}
84 
85 static int iommu_mapping_get(const char *bdf, u64 iova,
86 			     struct iommu_mapping *mapping)
87 {
88 	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
89 		return intel_iommu_mapping_get(bdf, iova, mapping);
90 
91 	return -EOPNOTSUPP;
92 }
93 
/* Per-test state: the VFIO device under test, re-initialized for each test. */
FIXTURE(vfio_dma_mapping_test) {
	struct vfio_pci_device *device;
};

/*
 * Per-variant parameters: the IOMMU backend to bind ("vfio_type1",
 * "iommufd", ...), the mmap()/DMA size (0 means one base page), and extra
 * mmap() flags (e.g. MAP_HUGETLB).
 */
FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;
	int mmap_flags;
};

/*
 * Expanded once per IOMMU mode by FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES below,
 * generating variants named <iommu_mode>_<name>. All variants use anonymous
 * private mappings.
 */
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) {	       \
	.iommu_mode = #_iommu_mode,					       \
	.size = (_size),						       \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags),	       \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
116 
/* Bind the target device to VFIO using the variant's IOMMU mode. */
FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
}
121 
/* Release the device; runs even when the test body fails an assertion. */
FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	vfio_pci_device_cleanup(self->device);
}
126 
/*
 * Map an anonymous buffer for DMA at IOVA == HVA, verify (where debugfs
 * allows) that the IOMMU installed the mapping at the expected page-table
 * level for the variant's page size, then unmap and verify the translation
 * is fully gone.
 */
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	/* Variant size of 0 means "one base page". */
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct vfio_dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	u64 unmapped;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	/* Identity-map: use the virtual address itself as the IOVA. */
	region.iova = (u64)region.vaddr;
	region.size = size;

	vfio_pci_dma_map(self->device, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	/* No debugfs dump for this IOMMU vendor: skip page-table inspection. */
	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * IOMMUFD compatibility-mode does not support huge mappings when
	 * using VFIO_TYPE1_IOMMU.
	 */
	if (!strcmp(variant->iommu_mode, "iommufd_compat_type1"))
		mapping_size = SZ_4K;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

	/*
	 * The leaf entry must sit at the page-table level matching the
	 * mapping size; deeper levels must be empty for huge mappings.
	 */
	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	/* The whole region must be unmapped and its translation torn down. */
	rc = __vfio_pci_dma_unmap(self->device, &region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region.size);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_TRUE(!munmap(region.vaddr, size));
}
199 
/*
 * Fixture for tests that exercise the very top of the IOVA address space:
 * the device, a pre-built DMA region, and the backing mmap() size (which is
 * larger than the region so overflow tests have valid vaddrs to hand in).
 */
FIXTURE(vfio_dma_map_limit_test) {
	struct vfio_pci_device *device;
	struct vfio_dma_region region;
	size_t mmap_size;
};

/* Only the IOMMU backend varies for these tests. */
FIXTURE_VARIANT(vfio_dma_map_limit_test) {
	const char *iommu_mode;
};

/* Expanded once per IOMMU mode by FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES. */
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode)			       \
FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) {		       \
	.iommu_mode = #_iommu_mode,					       \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE
218 
/*
 * Bind the device and prepare a one-page region whose IOVA is the last
 * page-aligned address in the IOVA space.
 */
FIXTURE_SETUP(vfio_dma_map_limit_test)
{
	struct vfio_dma_region *region = &self->region;
	u64 region_size = getpagesize();

	/*
	 * Over-allocate mmap by double the size to provide enough backing vaddr
	 * for overflow tests
	 */
	self->mmap_size = 2 * region_size;

	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	ASSERT_NE(region->vaddr, MAP_FAILED);

	/*
	 * The last page of the IOVA space: the region starts one page below
	 * the top, so iova + size lands exactly on the address-space limit.
	 */
	region->iova = ~(iova_t)0 & ~(region_size - 1);
	region->size = region_size;
}
239 
/* Release the device and the backing anonymous mapping. */
FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
	vfio_pci_device_cleanup(self->device);
	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}
245 
246 TEST_F(vfio_dma_map_limit_test, unmap_range)
247 {
248 	struct vfio_dma_region *region = &self->region;
249 	u64 unmapped;
250 	int rc;
251 
252 	vfio_pci_dma_map(self->device, region);
253 	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));
254 
255 	rc = __vfio_pci_dma_unmap(self->device, region, &unmapped);
256 	ASSERT_EQ(rc, 0);
257 	ASSERT_EQ(unmapped, region->size);
258 }
259 
/*
 * Map the last page of the IOVA space, then remove it via unmap-all and
 * check the reported total matches the single mapped region.
 */
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
	struct vfio_dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	vfio_pci_dma_map(self->device, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __vfio_pci_dma_unmap_all(self->device, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}
273 
/*
 * Grow the region so iova + size wraps past the top of the address space;
 * both map and unmap must reject the range with -EOVERFLOW.
 */
TEST_F(vfio_dma_map_limit_test, overflow)
{
	struct vfio_dma_region *region = &self->region;
	int rc;

	/* Two pages starting at the last page: overflows the IOVA space. */
	region->size = self->mmap_size;

	rc = __vfio_pci_dma_map(self->device, region);
	ASSERT_EQ(rc, -EOVERFLOW);

	rc = __vfio_pci_dma_unmap(self->device, region, NULL);
	ASSERT_EQ(rc, -EOVERFLOW);
}
287 
/* Extract the device BDF from argv, then hand the rest to the harness. */
int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}
293