// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO selftest: exercise IOMMU mapping of MMIO (PCI BAR mmap) regions
 * across all supported IOMMU modes. Legacy type1 IOMMUs are expected to
 * accept such mappings; other modes are expected to reject them.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#include <uapi/linux/types.h>
#include <linux/pci_regs.h>
#include <linux/sizes.h>
#include <linux/vfio.h>

#include <libvfio.h>

#include "../kselftest_harness.h"

/* PCI BDF of the device under test, taken from the command line in main(). */
static const char *device_bdf;

/*
 * Return the largest mmap()ed BAR of @device whose VFIO region supports
 * both READ and WRITE, or NULL if the device has no such BAR.
 */
static struct vfio_pci_bar *largest_mapped_bar(struct vfio_pci_device *device)
{
	u32 flags = VFIO_REGION_INFO_FLAG_READ | VFIO_REGION_INFO_FLAG_WRITE;
	struct vfio_pci_bar *largest = NULL;
	u64 bar_size = 0;

	for (int i = 0; i < PCI_STD_NUM_BARS; i++) {
		struct vfio_pci_bar *bar = &device->bars[i];

		/* Skip BARs that libvfio did not (or could not) mmap(). */
		if (!bar->vaddr)
			continue;

		/*
		 * iommu_map() maps with READ|WRITE, so require the same
		 * abilities for the underlying VFIO region.
		 */
		if ((bar->info.flags & flags) != flags)
			continue;

		if (bar->info.size > bar_size) {
			bar_size = bar->info.size;
			largest = bar;
		}
	}

	return largest;
}

/* Per-test state: IOMMU handle, device, IOVA allocator, and the target BAR. */
FIXTURE(vfio_dma_mapping_mmio_test) {
	struct iommu *iommu;
	struct vfio_pci_device *device;
	struct iova_allocator *iova_allocator;
	struct vfio_pci_bar *bar;
};

/* Each variant runs the tests under a different IOMMU backend/mode. */
FIXTURE_VARIANT(vfio_dma_mapping_mmio_test) {
	const char *iommu_mode;
};

#define FIXTURE_VARIANT_ADD_IOMMU_MODE(_iommu_mode) \
FIXTURE_VARIANT_ADD(vfio_dma_mapping_mmio_test, _iommu_mode) { \
	.iommu_mode = #_iommu_mode, \
}

FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES();

#undef FIXTURE_VARIANT_ADD_IOMMU_MODE

FIXTURE_SETUP(vfio_dma_mapping_mmio_test)
{
	self->iommu = iommu_init(variant->iommu_mode);
	self->device = vfio_pci_device_init(device_bdf, self->iommu);
	self->iova_allocator = iova_allocator_init(self->iommu);
	self->bar = largest_mapped_bar(self->device);

	/* MMIO mapping tests are meaningless without a usable BAR. */
	if (!self->bar)
		SKIP(return, "No mappable BAR found on device %s", device_bdf);
}

/* Tear down in reverse order of setup. */
FIXTURE_TEARDOWN(vfio_dma_mapping_mmio_test)
{
	iova_allocator_cleanup(self->iova_allocator);
	vfio_pci_device_cleanup(self->device);
	iommu_cleanup(self->iommu);
}

/*
 * Map and unmap [vaddr, vaddr + size) in the IOMMU. Type1 IOMMU modes are
 * expected to succeed; all other modes are expected to fail both calls.
 */
static void do_mmio_map_test(struct iommu *iommu,
			     struct iova_allocator *iova_allocator,
			     void *vaddr, size_t size)
{
	struct dma_region region = {
		.vaddr = vaddr,
		.size = size,
		.iova = iova_allocator_alloc(iova_allocator, size),
	};

	/*
	 * NOTE: Check for iommufd compat success once it lands. Native iommufd
	 * will never support this.
	 */
	if (!strcmp(iommu->mode->name, MODE_VFIO_TYPE1V2_IOMMU) ||
	    !strcmp(iommu->mode->name, MODE_VFIO_TYPE1_IOMMU)) {
		iommu_map(iommu, &region);
		iommu_unmap(iommu, &region);
	} else {
		VFIO_ASSERT_NE(__iommu_map(iommu, &region), 0);
		VFIO_ASSERT_NE(__iommu_unmap(iommu, &region, NULL), 0);
	}
}

/* Map the entire BAR mmap into the IOMMU. */
TEST_F(vfio_dma_mapping_mmio_test, map_full_bar)
{
	do_mmio_map_test(self->iommu, self->iova_allocator,
			 self->bar->vaddr, self->bar->info.size);
}

/* Map only the first page of the BAR mmap. */
TEST_F(vfio_dma_mapping_mmio_test, map_partial_bar)
{
	/* Need at least two pages for the mapping to be truly partial. */
	if (self->bar->info.size < 2 * getpagesize())
		SKIP(return, "BAR too small (size=0x%llx)", self->bar->info.size);

	do_mmio_map_test(self->iommu, self->iova_allocator,
			 self->bar->vaddr, getpagesize());
}

/* Test IOMMU mapping of BAR mmap with intentionally poor vaddr alignment. */
TEST_F(vfio_dma_mapping_mmio_test, map_bar_misaligned)
{
	/* Limit size to bound test time for large BARs */
	size_t size = min_t(size_t, self->bar->info.size, SZ_1G);
	void *vaddr;

	/*
	 * Reserve a region offset by one page from 1GiB alignment, then
	 * remap the BAR on top of it with MAP_FIXED.
	 */
	vaddr = mmap_reserve(size, SZ_1G, getpagesize());
	vaddr = mmap(vaddr, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
		     self->device->fd, self->bar->info.offset);
	VFIO_ASSERT_NE(vaddr, MAP_FAILED);

	do_mmio_map_test(self->iommu, self->iova_allocator, vaddr, size);

	VFIO_ASSERT_EQ(munmap(vaddr, size), 0);
}

int main(int argc, char *argv[])
{
	device_bdf = vfio_selftests_get_bdf(&argc, argv);
	return test_harness_run(argc, argv);
}