// SPDX-License-Identifier: GPL-2.0-only
#include <dirent.h>
#include <fcntl.h>
#include <libgen.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <uapi/linux/types.h>
#include <linux/limits.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/iommufd.h>

#include "../../../kselftest.h"
#include <libvfio.h>

const char *default_iommu_mode = "iommufd";

/* Reminder: Keep in sync with FIXTURE_VARIANT_ADD_ALL_IOMMU_MODES(). */
static const struct iommu_mode iommu_modes[] = {
	{
		.name = "vfio_type1_iommu",
		.container_path = "/dev/vfio/vfio",
		.iommu_type = VFIO_TYPE1_IOMMU,
	},
	{
		.name = "vfio_type1v2_iommu",
		.container_path = "/dev/vfio/vfio",
		.iommu_type = VFIO_TYPE1v2_IOMMU,
	},
	{
		.name = "iommufd_compat_type1",
		.container_path = "/dev/iommu",
		.iommu_type = VFIO_TYPE1_IOMMU,
	},
	{
		.name = "iommufd_compat_type1v2",
		.container_path = "/dev/iommu",
		.iommu_type = VFIO_TYPE1v2_IOMMU,
	},
	{
		.name = "iommufd",
	},
};

static const struct iommu_mode *lookup_iommu_mode(const char *iommu_mode)
{
	int i;

	if (!iommu_mode)
		iommu_mode = default_iommu_mode;

	for (i = 0; i < ARRAY_SIZE(iommu_modes); i++) {
		if (strcmp(iommu_mode, iommu_modes[i].name))
			continue;

		return &iommu_modes[i];
	}

	VFIO_FAIL("Unrecognized IOMMU mode: %s\n", iommu_mode);
}

iova_t __iommu_hva2iova(struct iommu *iommu, void *vaddr)
{
	struct dma_region *region;

	list_for_each_entry(region, &iommu->dma_regions, link) {
		if (vaddr < region->vaddr)
			continue;

		if (vaddr >= region->vaddr + region->size)
			continue;

		return region->iova + (vaddr - region->vaddr);
	}

	return INVALID_IOVA;
}

iova_t iommu_hva2iova(struct iommu *iommu, void *vaddr)
{
	iova_t iova;

	iova = __iommu_hva2iova(iommu, vaddr);
	VFIO_ASSERT_NE(iova, INVALID_IOVA, "%p is not mapped into IOMMU\n", vaddr);

	return iova;
}

static int vfio_iommu_map(struct iommu *iommu, struct dma_region *region)
{
	struct vfio_iommu_type1_dma_map args = {
		.argsz = sizeof(args),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (u64)region->vaddr,
		.iova = region->iova,
		.size = region->size,
	};

	if (ioctl(iommu->container_fd, VFIO_IOMMU_MAP_DMA, &args))
		return -errno;

	return 0;
}

static int iommufd_map(struct iommu *iommu, struct dma_region *region)
{
	struct iommu_ioas_map args = {
		.size = sizeof(args),
		.flags = IOMMU_IOAS_MAP_READABLE |
			 IOMMU_IOAS_MAP_WRITEABLE |
			 IOMMU_IOAS_MAP_FIXED_IOVA,
		.user_va = (u64)region->vaddr,
		.iova = region->iova,
		.length = region->size,
		.ioas_id = iommu->ioas_id,
	};

	if (ioctl(iommu->iommufd, IOMMU_IOAS_MAP, &args))
		return -errno;

	return 0;
}

int __iommu_map(struct iommu *iommu, struct dma_region *region)
{
	int ret;

	if (iommu->iommufd)
		ret = iommufd_map(iommu, region);
	else
		ret = vfio_iommu_map(iommu, region);

	if (ret)
		return ret;

	list_add(&region->link, &iommu->dma_regions);

	return 0;
}
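
/*
 * Example (illustrative sketch only, not called from this file): map an
 * anonymous buffer at a caller-chosen IOVA, then translate an HVA back to
 * that IOVA. The buf/size/iova values below are hypothetical.
 *
 *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *	struct dma_region region = {
 *		.vaddr = buf,
 *		.iova = iova,
 *		.size = size,
 *	};
 *
 *	VFIO_ASSERT_EQ(__iommu_map(iommu, &region), 0);
 *	VFIO_ASSERT_EQ(iommu_hva2iova(iommu, buf), iova);
 */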

static int __vfio_iommu_unmap(int fd, u64 iova, u64 size, u32 flags, u64 *unmapped)
{
	struct vfio_iommu_type1_dma_unmap args = {
		.argsz = sizeof(args),
		.iova = iova,
		.size = size,
		.flags = flags,
	};

	if (ioctl(fd, VFIO_IOMMU_UNMAP_DMA, &args))
		return -errno;

	if (unmapped)
		*unmapped = args.size;

	return 0;
}

static int vfio_iommu_unmap(struct iommu *iommu, struct dma_region *region,
			    u64 *unmapped)
{
	return __vfio_iommu_unmap(iommu->container_fd, region->iova,
				  region->size, 0, unmapped);
}

static int __iommufd_unmap(int fd, u64 iova, u64 length, u32 ioas_id, u64 *unmapped)
{
	struct iommu_ioas_unmap args = {
		.size = sizeof(args),
		.iova = iova,
		.length = length,
		.ioas_id = ioas_id,
	};

	if (ioctl(fd, IOMMU_IOAS_UNMAP, &args))
		return -errno;

	if (unmapped)
		*unmapped = args.length;

	return 0;
}

static int iommufd_unmap(struct iommu *iommu, struct dma_region *region,
			 u64 *unmapped)
{
	return __iommufd_unmap(iommu->iommufd, region->iova, region->size,
			       iommu->ioas_id, unmapped);
}

int __iommu_unmap(struct iommu *iommu, struct dma_region *region, u64 *unmapped)
{
	int ret;

	if (iommu->iommufd)
		ret = iommufd_unmap(iommu, region, unmapped);
	else
		ret = vfio_iommu_unmap(iommu, region, unmapped);

	if (ret)
		return ret;

	list_del_init(&region->link);

	return 0;
}

int __iommu_unmap_all(struct iommu *iommu, u64 *unmapped)
{
	int ret;
	struct dma_region *curr, *next;

	if (iommu->iommufd)
		ret = __iommufd_unmap(iommu->iommufd, 0, UINT64_MAX,
				      iommu->ioas_id, unmapped);
	else
		ret = __vfio_iommu_unmap(iommu->container_fd, 0, 0,
					 VFIO_DMA_UNMAP_FLAG_ALL, unmapped);

	if (ret)
		return ret;

	list_for_each_entry_safe(curr, next, &iommu->dma_regions, link)
		list_del_init(&curr->link);

	return 0;
}

static struct vfio_info_cap_header *next_cap_hdr(void *buf, u32 bufsz,
						 u32 *cap_offset)
{
	struct vfio_info_cap_header *hdr;

	if (!*cap_offset)
		return NULL;

	VFIO_ASSERT_LT(*cap_offset, bufsz);
	VFIO_ASSERT_GE(bufsz - *cap_offset, sizeof(*hdr));

	hdr = (struct vfio_info_cap_header *)((u8 *)buf + *cap_offset);
	*cap_offset = hdr->next;

	return hdr;
}

static struct vfio_info_cap_header *vfio_iommu_info_cap_hdr(struct vfio_iommu_type1_info *info,
							    u16 cap_id)
{
	struct vfio_info_cap_header *hdr;
	u32 cap_offset = info->cap_offset;
	u32 max_depth;
	u32 depth = 0;

	if (!(info->flags & VFIO_IOMMU_INFO_CAPS))
		return NULL;

	if (cap_offset)
		VFIO_ASSERT_GE(cap_offset, sizeof(*info));

	max_depth = (info->argsz - sizeof(*info)) / sizeof(*hdr);

	while ((hdr = next_cap_hdr(info, info->argsz, &cap_offset))) {
		depth++;
		VFIO_ASSERT_LE(depth, max_depth, "Capability chain contains a cycle\n");

		if (hdr->id == cap_id)
			return hdr;
	}

	return NULL;
}
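
/*
 * VFIO_IOMMU_GET_INFO is used with a two-call pattern below: the first call,
 * made with a minimal struct, lets the kernel report the argsz required for
 * the full payload (including any capability chain); the buffer is then grown
 * to that size and the ioctl repeated to fetch everything.
 */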

/* Return buffer including capability chain, if present. Free with free() */
static struct vfio_iommu_type1_info *vfio_iommu_get_info(int container_fd)
{
	struct vfio_iommu_type1_info *info;

	info = malloc(sizeof(*info));
	VFIO_ASSERT_NOT_NULL(info);

	*info = (struct vfio_iommu_type1_info) {
		.argsz = sizeof(*info),
	};

	ioctl_assert(container_fd, VFIO_IOMMU_GET_INFO, info);
	VFIO_ASSERT_GE(info->argsz, sizeof(*info));

	info = realloc(info, info->argsz);
	VFIO_ASSERT_NOT_NULL(info);

	ioctl_assert(container_fd, VFIO_IOMMU_GET_INFO, info);
	VFIO_ASSERT_GE(info->argsz, sizeof(*info));

	return info;
}

/*
 * Return iova ranges for the device's container. Normalize vfio_iommu_type1 to
 * report iommufd's iommu_iova_range. Free with free().
 */
static struct iommu_iova_range *vfio_iommu_iova_ranges(struct iommu *iommu,
						       u32 *nranges)
{
	struct vfio_iommu_type1_info_cap_iova_range *cap_range;
	struct vfio_iommu_type1_info *info;
	struct vfio_info_cap_header *hdr;
	struct iommu_iova_range *ranges = NULL;

	info = vfio_iommu_get_info(iommu->container_fd);
	hdr = vfio_iommu_info_cap_hdr(info, VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE);
	VFIO_ASSERT_NOT_NULL(hdr);

	cap_range = container_of(hdr, struct vfio_iommu_type1_info_cap_iova_range, header);
	VFIO_ASSERT_GT(cap_range->nr_iovas, 0);

	ranges = calloc(cap_range->nr_iovas, sizeof(*ranges));
	VFIO_ASSERT_NOT_NULL(ranges);

	for (u32 i = 0; i < cap_range->nr_iovas; i++) {
		ranges[i] = (struct iommu_iova_range){
			.start = cap_range->iova_ranges[i].start,
			.last = cap_range->iova_ranges[i].end,
		};
	}

	*nranges = cap_range->nr_iovas;

	free(info);
	return ranges;
}

/* Return iova ranges of the device's IOAS. Free with free() */
static struct iommu_iova_range *iommufd_iova_ranges(struct iommu *iommu,
						    u32 *nranges)
{
	struct iommu_iova_range *ranges;
	int ret;

	struct iommu_ioas_iova_ranges query = {
		.size = sizeof(query),
		.ioas_id = iommu->ioas_id,
	};

	ret = ioctl(iommu->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
	VFIO_ASSERT_EQ(ret, -1);
	VFIO_ASSERT_EQ(errno, EMSGSIZE);
	VFIO_ASSERT_GT(query.num_iovas, 0);

	ranges = calloc(query.num_iovas, sizeof(*ranges));
	VFIO_ASSERT_NOT_NULL(ranges);

	query.allowed_iovas = (uintptr_t)ranges;

	ioctl_assert(iommu->iommufd, IOMMU_IOAS_IOVA_RANGES, &query);
	*nranges = query.num_iovas;

	return ranges;
}

static int iova_range_comp(const void *a, const void *b)
{
	const struct iommu_iova_range *ra = a, *rb = b;

	if (ra->start < rb->start)
		return -1;

	if (ra->start > rb->start)
		return 1;

	return 0;
}
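
/*
 * Illustrative caller sketch (hypothetical, not part of this file): fetch the
 * sorted IOVA ranges and hand the lowest usable window to the test.
 *
 *	u32 nranges;
 *	struct iommu_iova_range *ranges = iommu_iova_ranges(iommu, &nranges);
 *
 *	use_iova_window(ranges[0].start, ranges[0].last);
 *	free(ranges);
 *
 * use_iova_window() is a made-up consumer; the entries are sorted by start
 * and are non-overlapping, as asserted below.
 */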

/* Return sorted IOVA ranges of the device. Free with free(). */
struct iommu_iova_range *iommu_iova_ranges(struct iommu *iommu, u32 *nranges)
{
	struct iommu_iova_range *ranges;

	if (iommu->iommufd)
		ranges = iommufd_iova_ranges(iommu, nranges);
	else
		ranges = vfio_iommu_iova_ranges(iommu, nranges);

	if (!ranges)
		return NULL;

	VFIO_ASSERT_GT(*nranges, 0);

	/* Sort and check that ranges are sane and non-overlapping */
	qsort(ranges, *nranges, sizeof(*ranges), iova_range_comp);
	VFIO_ASSERT_LT(ranges[0].start, ranges[0].last);

	for (u32 i = 1; i < *nranges; i++) {
		VFIO_ASSERT_LT(ranges[i].start, ranges[i].last);
		VFIO_ASSERT_LT(ranges[i - 1].last, ranges[i].start);
	}

	return ranges;
}

static u32 iommufd_ioas_alloc(int iommufd)
{
	struct iommu_ioas_alloc args = {
		.size = sizeof(args),
	};

	ioctl_assert(iommufd, IOMMU_IOAS_ALLOC, &args);
	return args.out_ioas_id;
}

struct iommu *iommu_init(const char *iommu_mode)
{
	const char *container_path;
	struct iommu *iommu;
	int version;

	iommu = calloc(1, sizeof(*iommu));
	VFIO_ASSERT_NOT_NULL(iommu);

	INIT_LIST_HEAD(&iommu->dma_regions);

	iommu->mode = lookup_iommu_mode(iommu_mode);

	container_path = iommu->mode->container_path;
	if (container_path) {
		iommu->container_fd = open(container_path, O_RDWR);
		VFIO_ASSERT_GE(iommu->container_fd, 0, "open(%s) failed\n", container_path);

		version = ioctl(iommu->container_fd, VFIO_GET_API_VERSION);
		VFIO_ASSERT_EQ(version, VFIO_API_VERSION, "Unsupported version: %d\n", version);
	} else {
		/*
		 * Require iommu->iommufd to be >0 so that a simple non-0 check can be
		 * used to check if iommufd is enabled. In practice open() will never
		 * return 0 unless stdin is closed.
		 */
		iommu->iommufd = open("/dev/iommu", O_RDWR);
		VFIO_ASSERT_GT(iommu->iommufd, 0);

		iommu->ioas_id = iommufd_ioas_alloc(iommu->iommufd);
	}

	return iommu;
}

void iommu_cleanup(struct iommu *iommu)
{
	if (iommu->iommufd)
		VFIO_ASSERT_EQ(close(iommu->iommufd), 0);
	else
		VFIO_ASSERT_EQ(close(iommu->container_fd), 0);

	free(iommu);
}
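
/*
 * Overall lifecycle sketch (illustrative; a test would normally go through
 * the libvfio fixtures rather than doing this by hand):
 *
 *	struct iommu *iommu = iommu_init("iommufd");
 *
 *	... attach a device, __iommu_map()/__iommu_unmap() regions ...
 *
 *	iommu_cleanup(iommu);
 */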