// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/iommufd.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <libvfio.h>

#include "kselftest_harness.h"

/* BDF ("bus:device.function") of the device under test, parsed in main(). */
static const char *device_bdf;

/*
 * One entry of the Intel IOMMU debugfs page-table dump: the value recorded
 * at each paging-structure level for a single IOVA. A level that is not
 * populated in the dump (e.g. the PTE for a 2M or 1G mapping) is left 0 by
 * intel_iommu_mapping_get().
 */
struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};

/*
 * Parse the next " \t|\n"-delimited token from *line as a "0x..." hex value
 * into *value, advancing *line past the token. If no token remains, *value
 * is left untouched.
 */
static void parse_next_value(char **line, u64 *value)
{
	char *token;

	token = strtok_r(*line, " \t|\n", line);
	if (!token)
		return;

	/* Caller verifies `value`. No need to check return value. */
	sscanf(token, "0x%lx", value);
}

/*
 * Search the Intel IOMMU debugfs translation dump for device @bdf for the
 * line describing @iova (matched by page-frame number) and fill @mapping
 * with the page-table values found at each level.
 *
 * Returns 0 on success or -ENOENT if @iova has no entry in the dump.
 * Asserts (test failure) if the debugfs file cannot be opened at all.
 */
static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		/* First column of each line is the IOVA page-frame number. */
		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}

/*
 * Vendor dispatch for reading a mapping back out of IOMMU debugfs.
 * Returns -EOPNOTSUPP when no supported vendor interface is present, so
 * callers can skip page-table verification rather than fail.
 */
static int iommu_mapping_get(const char *bdf, u64 iova,
			     struct iommu_mapping *mapping)
{
	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
		return intel_iommu_mapping_get(bdf, iova, mapping);

	return -EOPNOTSUPP;
}

FIXTURE(vfio_dma_mapping_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
};

FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;	/* mmap size; 0 means one page (see dma_map_unmap) */
	int mmap_flags;
};

/*
 * Instantiate one variant per IOMMU mode for each backing-memory flavor
 * below (FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES expands this macro once per
 * supported mode).
 */
#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) { \
	.iommu_mode = #_iommu_mode, \
	.size = (_size), \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags), \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	self->iova_allocator = iova_allocator_init(self->iommu);
}

FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	/* Tear down in reverse order of setup. */
	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
}

/*
 * Map an anonymous (optionally HugeTLB-backed) buffer into the IOMMU,
 * verify the mapping via debugfs where the platform supports it, then
 * unmap and verify the translation is gone.
 */
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	u64 unmapped;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	region.iova = iova_allocator_alloc(self->iova_allocator, size);
	region.size = size;

	iommu_map(self->iommu, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * NOTE(review): page-table verification is skipped entirely in
	 * VFIO_TYPE1_IOMMU mode — presumably the level layout assumed below
	 * does not hold there; confirm against the debugfs format.
	 */
	if (self->iommu->mode->iommu_type == VFIO_TYPE1_IOMMU)
		goto unmap;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

	/*
	 * The deepest populated level must match the mapping size: a 4K
	 * mapping terminates in a PTE, 2M in a PMD, 1G in a PUD.
	 */
	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	rc = __iommu_unmap(self->iommu, &region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region.size);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	/* After unmap, neither the device nor debugfs should translate it. */
	ASSERT_NE(0, __to_iova(self->device, region.vaddr, NULL));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_TRUE(!munmap(region.vaddr, size));
}

FIXTURE(vfio_dma_map_limit_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct dma_region region;
	size_t mmap_size;	/* 2x region size; see FIXTURE_SETUP */
};

FIXTURE_VARIANT(vfio_dma_map_limit_test) {
	const char *iommu_mode;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
FIXTURE_VARIANT_ADD(vfio_dma_map_limit_test, _iommu_mode) { \
	.iommu_mode = #_iommu_mode, \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

/*
 * Set up a one-page DMA region positioned on the last usable page of the
 * IOMMU's IOVA space, to exercise mapping at the upper address limit.
 */
FIXTURE_SETUP(vfio_dma_map_limit_test)
{
	struct dma_region *region = &self->region;
	struct iommu_iova_range *ranges;
	u64 region_size = getpagesize();
	iova_t last_iova;
	u32 nranges;

	/*
	 * Over-allocate mmap by double the size to provide enough backing vaddr
	 * for overflow tests
	 */
	self->mmap_size = 2 * region_size;

	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	region->vaddr = mmap(NULL, self->mmap_size, PROT_READ | PROT_WRITE,
			     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	ASSERT_NE(region->vaddr, MAP_FAILED);

	/* The highest usable iova is the end of the last reserved range. */
	ranges = iommu_iova_ranges(self->iommu, &nranges);
	VFIO_ASSERT_NOT_NULL(ranges);
	last_iova = ranges[nranges - 1].last;
	free(ranges);

	/* One page prior to the last iova */
	region->iova = last_iova & ~(region_size - 1);
	region->size = region_size;
}

FIXTURE_TEARDOWN(vfio_dma_map_limit_test)
{
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
	ASSERT_EQ(munmap(self->region.vaddr, self->mmap_size), 0);
}

/* Map the last page of IOVA space, then unmap it by exact range. */
TEST_F(vfio_dma_map_limit_test, unmap_range)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap(self->iommu, region, &unmapped);
	ASSERT_EQ(rc, 0);
	ASSERT_EQ(unmapped, region->size);
}

/* Map the last page of IOVA space, then unmap via "unmap everything". */
TEST_F(vfio_dma_map_limit_test, unmap_all)
{
	struct dma_region *region = &self->region;
	u64 unmapped;
	int rc;

	iommu_map(self->iommu, region);
	ASSERT_EQ(region->iova, to_iova(self->device, region->vaddr));

	rc = __iommu_unmap_all(self->iommu, &unmapped);
	ASSERT_EQ(rc, 0);
	/* Only one mapping exists, so "all" must equal the region size. */
	ASSERT_EQ(unmapped, region->size);
}

/*
 * Request a mapping whose iova + size wraps past the top of the address
 * space; both map and unmap must reject it with -EOVERFLOW.
 */
TEST_F(vfio_dma_map_limit_test, overflow)
{
	struct dma_region *region = &self->region;
	int rc;

	/* Last page of the address space, then double the size to overflow. */
	region->iova = ~(iova_t)0 & ~(region->size - 1);
	region->size = self->mmap_size;

	rc = __iommu_map(self->iommu, region);
	ASSERT_EQ(rc, -EOVERFLOW);

	rc = __iommu_unmap(self->iommu, region, NULL);
	ASSERT_EQ(rc, -EOVERFLOW);
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}