// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <vfio_util.h>

#include "../kselftest_harness.h"

static const char *device_bdf;

struct iommu_mapping {
	u64 pgd;
	u64 p4d;
	u64 pud;
	u64 pmd;
	u64 pte;
};

static void parse_next_value(char **line, u64 *value)
{
	char *token;

	token = strtok_r(*line, " \t|\n", line);
	if (!token)
		return;

	/* Caller verifies `value`. No need to check return value. */
	sscanf(token, "0x%lx", value);
}

static int intel_iommu_mapping_get(const char *bdf, u64 iova,
				   struct iommu_mapping *mapping)
{
	char iommu_mapping_path[PATH_MAX], line[PATH_MAX];
	u64 line_iova = -1;
	int ret = -ENOENT;
	FILE *file;
	char *rest;

	snprintf(iommu_mapping_path, sizeof(iommu_mapping_path),
		 "/sys/kernel/debug/iommu/intel/%s/domain_translation_struct",
		 bdf);

	printf("Searching for IOVA 0x%lx in %s\n", iova, iommu_mapping_path);

	file = fopen(iommu_mapping_path, "r");
	VFIO_ASSERT_NOT_NULL(file, "fopen(%s) failed", iommu_mapping_path);

	while (fgets(line, sizeof(line), file)) {
		rest = line;

		parse_next_value(&rest, &line_iova);
		if (line_iova != (iova / getpagesize()))
			continue;

		/*
		 * Ensure each struct field is initialized in case of empty
		 * page table values.
		 */
		memset(mapping, 0, sizeof(*mapping));
		parse_next_value(&rest, &mapping->pgd);
		parse_next_value(&rest, &mapping->p4d);
		parse_next_value(&rest, &mapping->pud);
		parse_next_value(&rest, &mapping->pmd);
		parse_next_value(&rest, &mapping->pte);

		ret = 0;
		break;
	}

	fclose(file);

	if (ret)
		printf("IOVA not found\n");

	return ret;
}

static int iommu_mapping_get(const char *bdf, u64 iova,
			     struct iommu_mapping *mapping)
{
	if (!access("/sys/kernel/debug/iommu/intel", F_OK))
		return intel_iommu_mapping_get(bdf, iova, mapping);

	return -EOPNOTSUPP;
}

FIXTURE(vfio_dma_mapping_test) {
	struct vfio_pci_device *device;
};

FIXTURE_VARIANT(vfio_dma_mapping_test) {
	const char *iommu_mode;
	u64 size;
	int mmap_flags;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode, _name, _size, _mmap_flags) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_test, _iommu_mode ## _ ## _name) { \
	.iommu_mode = #_iommu_mode, \
	.size = (_size), \
	.mmap_flags = MAP_ANONYMOUS | MAP_PRIVATE | (_mmap_flags), \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous, 0, 0);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_2mb, SZ_2M, MAP_HUGETLB | MAP_HUGE_2MB);
FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(anonymous_hugetlb_1gb, SZ_1G, MAP_HUGETLB | MAP_HUGE_1GB);

FIXTURE_SETUP(vfio_dma_mapping_test)
{
	self->device = vfio_pci_device_init(device_bdf, variant->iommu_mode);
}

FIXTURE_TEARDOWN(vfio_dma_mapping_test)
{
	vfio_pci_device_cleanup(self->device);
}

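/*
 * Map an anonymous (optionally HugeTLB-backed) buffer for DMA, check the
 * HVA->IOVA translation and, where the Intel IOMMU debugfs interface is
 * available, the page-table level backing the mapping, then unmap and
 * verify the mapping is gone. The HugeTLB variants are skipped if mmap()
 * fails, e.g. when no huge pages have been reserved (typically via
 * /proc/sys/vm/nr_hugepages or the hugepages= kernel parameter).
 */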
TEST_F(vfio_dma_mapping_test, dma_map_unmap)
{
	const u64 size = variant->size ?: getpagesize();
	const int flags = variant->mmap_flags;
	struct vfio_dma_region region;
	struct iommu_mapping mapping;
	u64 mapping_size = size;
	int rc;

	region.vaddr = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, -1, 0);

	/* Skip the test if there aren't enough HugeTLB pages available. */
	if (flags & MAP_HUGETLB && region.vaddr == MAP_FAILED)
		SKIP(return, "mmap() failed: %s (%d)\n", strerror(errno), errno);
	else
		ASSERT_NE(region.vaddr, MAP_FAILED);

	region.iova = (u64)region.vaddr;
	region.size = size;

	vfio_pci_dma_map(self->device, &region);
	printf("Mapped HVA %p (size 0x%lx) at IOVA 0x%lx\n", region.vaddr, size, region.iova);

	ASSERT_EQ(region.iova, to_iova(self->device, region.vaddr));

	rc = iommu_mapping_get(device_bdf, region.iova, &mapping);
	if (rc == -EOPNOTSUPP)
		goto unmap;

	/*
	 * IOMMUFD compatibility-mode does not support huge mappings when
	 * using VFIO_TYPE1_IOMMU.
	 */
	if (!strcmp(variant->iommu_mode, "iommufd_compat_type1"))
		mapping_size = SZ_4K;

	ASSERT_EQ(0, rc);
	printf("Found IOMMU mappings for IOVA 0x%lx:\n", region.iova);
	printf("PGD: 0x%016lx\n", mapping.pgd);
	printf("P4D: 0x%016lx\n", mapping.p4d);
	printf("PUD: 0x%016lx\n", mapping.pud);
	printf("PMD: 0x%016lx\n", mapping.pmd);
	printf("PTE: 0x%016lx\n", mapping.pte);

	switch (mapping_size) {
	case SZ_4K:
		ASSERT_NE(0, mapping.pte);
		break;
	case SZ_2M:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_NE(0, mapping.pmd);
		break;
	case SZ_1G:
		ASSERT_EQ(0, mapping.pte);
		ASSERT_EQ(0, mapping.pmd);
		ASSERT_NE(0, mapping.pud);
		break;
	default:
		VFIO_FAIL("Unrecognized size: 0x%lx\n", mapping_size);
	}

unmap:
	vfio_pci_dma_unmap(self->device, &region);
	printf("Unmapped IOVA 0x%lx\n", region.iova);
	ASSERT_EQ(INVALID_IOVA, __to_iova(self->device, region.vaddr));
	ASSERT_NE(0, iommu_mapping_get(device_bdf, region.iova, &mapping));

	ASSERT_TRUE(!munmap(region.vaddr, size));
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}