// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#include <uapi/linux/types.h>
#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <libvfio.h>

#include "kselftest_harness.h"

static const char *device_bdf;

struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};

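/*
 * Consume the next whitespace/'|'-delimited token from *line, advancing *line
 * past it, and store the token's hex value in *value if one is present.
 */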
static void parse_next_value(char **line, u64 *value)
{
	char *token;

	token = strtok_r(*line, " \t|\n", line);
	if (!token)
		return;

	/* Caller verifies `value`. No need to check return value. */
	sscanf(token, "0x%lx", value);
}

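/*
 * Look up the Intel IOMMU page-table entries covering @iova by scanning the
 * debugfs domain_translation_struct dump for @bdf. Returns 0 and fills in
 * @mapping on success, or -ENOENT if the IOVA is not found.
 */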
static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}

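/*
 * Dispatch to the vendor-specific debugfs parser. Only the Intel IOMMU is
 * handled here; other IOMMUs report -EOPNOTSUPP so callers can skip the
 * page-table checks.
 */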
static int iommu_mapping_get(const char *bdf, u64 iova,
			     struct iommu_mapping *mapping)
{
	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
		return intel_iommu_mapping_get(bdf, iova, mapping);

	return -EOPNOTSUPP;
}

FIXTURE(vfio_dma_mapping_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
};

FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;
	int mmap_flags;
};

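/*
 * Instantiate one variant per IOMMU mode for each anonymous memory backing:
 * regular pages, 2MB HugeTLB, and 1GB HugeTLB.
 */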
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) { \
	.iommu_mode = #_iommu_mode, \
	.size = (_size), \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags), \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	self->iova_allocator = iova_allocator_init(self->iommu);
}

FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
}

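/*
 * Map an anonymous buffer through the IOMMU, verify the populated page-table
 * level matches the expected mapping size (where the Intel debugfs interface
 * is available), then unmap it and confirm the translation is gone.
 */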
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	u64 unmapped;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	region.iova = iova_allocator_alloc(self->iova_allocator, size);
	region.size = size;

	iommu_map(self->iommu, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * IOMMUFD compatibility-mode does not support huge mappings when
	 * using VFIO_TYPE1_IOMMU.
	 */
	if (!strcmp(variant->iommu_mode, "iommufd_compat_type1"))
		mapping_size = SZ_4K;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	rc = __iommu_unmap(self->iommu, &region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region.size);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_TRUE(!munmap(region.vaddr, size));
}

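/*
 * Exercise DMA mapping at the very top of the usable IOVA space, including a
 * mapping whose IOVA range would wrap past the end of the address space.
 */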
FIXTURE(vfio_dma_map_limit_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct dma_region region;
	size_t mmap_size;
};

FIXTURE_VARIANT(vfio_dma_map_limit_test) {
	const char *iommu_mode;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) { \
	.iommu_mode = #_iommu_mode, \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_map_limit_test)
{
	struct dma_region *region = &self->region;
	struct iommu_iova_range *ranges;
	u64 region_size = getpagesize();
	iova_t last_iova;
	u32 nranges;

	/*
	 * Over-allocate the mmap by double the region size so there is enough
	 * backing vaddr for the overflow tests.
	 */
	self->mmap_size = 2 * region_size;

	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	ASSERT_NE(region->vaddr, MAP_FAILED);

	ranges = iommu_iova_ranges(self->iommu, &nranges);
	VFIO_ASSERT_NOT_NULL(ranges);
	last_iova = ranges[nranges - 1].last;
	free(ranges);

	/* Align down to the start of the last page of usable IOVA space. */
	region->iova = last_iova & ~(region_size - 1);
	region->size = region_size;
}

FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}

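/* Map the last page of IOVA space, then unmap exactly that range. */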
TEST_F(vfio_dma_map_limit_test, unmap_range)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap(self->iommu, region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

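/* Map the last page of IOVA space, then unmap everything in one call. */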
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap_all(self->iommu, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

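/*
 * Attempt to map a region that ends past the top of the IOVA address space;
 * both the map and the matching unmap are expected to fail with -EOVERFLOW.
 */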
TEST_F(vfio_dma_map_limit_test, overflow)
{
	struct dma_region *region = &self->region;
	int rc;

	region->iova = ~(iova_t)0 & ~(region->size - 1);
	region->size = self->mmap_size;

	rc = __iommu_map(self->iommu, region);
	ASSERT_EQ(rc, -EOVERFLOW);

	rc = __iommu_unmap(self->iommu, region, NULL);
	ASSERT_EQ(rc, -EOVERFLOW);
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}