// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fault-inject.h>
#include <linux/platform_device.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static atomic_t mock_dev_num;

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
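
/*
 * Illustrative sketch only (not used by the selftest): how a mock "PTE" is
 * packed into an xarray value. The shifted pfn lives in the bits covered by
 * MOCK_PFN_MASK and the MOCK_PFN_* flags sit in the bits above it, mirroring
 * the arithmetic in mock_domain_map_pages() and mock_domain_iova_to_phys()
 * below.
 */
static inline unsigned long __maybe_unused
mock_pte_encode(phys_addr_t paddr, unsigned long flags)
{
	/* e.g. paddr = 3 * MOCK_IO_PAGE_SIZE, flags = MOCK_PFN_START_IOVA */
	return (paddr / MOCK_IO_PAGE_SIZE) | flags;
}

static inline phys_addr_t __maybe_unused mock_pte_paddr(unsigned long pte)
{
	/* Strip the metadata bits and undo the shift */
	return (pte & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}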

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	unsigned long flags;
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}
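
/*
 * Illustrative userspace-side sketch (not part of this file): reading the
 * mock hw_info through IOMMU_GET_HW_INFO and checking the selftest register
 * value reported by mock_domain_hw_info() above. Assumes the uAPI layout
 * from uapi/linux/iommufd.h; error handling is elided.
 */
#if 0
static int check_mock_hw_info(int iommufd, __u32 idev_id)
{
	struct iommu_test_hw_info info = {};
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.data_len = sizeof(info),
		.data_uptr = (__u64)(uintptr_t)&info,
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd))
		return -1;
	if (cmd.out_data_type != IOMMU_HW_INFO_TYPE_SELFTEST)
		return -1;
	return info.test_reg == IOMMU_HW_INFO_SELFTEST_REGVAL ? 0 : -1;
}
#endif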

static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}

static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long end = iova + size;
	void *ent;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, head, pgsize);
		iova = head + pgsize;
	} while (iova < end);

	return 0;
}

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};

static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
		mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static struct iommu_domain *
__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
			   const struct iommu_hwpt_selftest *user_cfg)
{
	struct mock_iommu_domain_nested *mock_nested;
	int i;

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->parent = mock_parent;
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg->iotlb;
	return &mock_nested->domain;
}
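
/*
 * Illustrative userspace-side sketch (not part of this file): allocating a
 * nested hwpt on top of a parent paging hwpt, which reaches
 * __mock_domain_alloc_nested() above through mock_domain_alloc_user() below.
 * Assumes the IOMMU_HWPT_ALLOC layout from uapi/linux/iommufd.h; error
 * handling is elided.
 */
#if 0
static int alloc_nested_hwpt(int iommufd, __u32 idev_id, __u32 parent_hwpt_id,
			     __u32 *out_hwpt_id)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = idev_id,
		.pt_id = parent_hwpt_id,
		.data_type = IOMMU_HWPT_DATA_SELFTEST,
		.data_len = sizeof(data),
		.data_uptr = (__u64)(uintptr_t)&data,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		return -1;
	*out_hwpt_id = cmd.out_hwpt_id;
	return 0;
}
#endif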

static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
		       struct iommu_domain *parent,
		       const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain *mock_parent;
	struct iommu_hwpt_selftest user_cfg;
	int rc;

	/* must be mock_domain */
	if (!parent) {
		struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
		bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
		bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
		struct iommu_domain *domain;

		if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT |
			       IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
			return ERR_PTR(-EOPNOTSUPP);
		if (user_data || (has_dirty_flag && no_dirty_ops))
			return ERR_PTR(-EOPNOTSUPP);
		domain = mock_domain_alloc_paging(dev);
		if (!domain)
			return ERR_PTR(-ENOMEM);
		if (has_dirty_flag)
			container_of(domain, struct mock_iommu_domain, domain)
				->domain.dirty_ops = &dirty_ops;
		return domain;
	}

	/* must be mock_domain_nested */
	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = container_of(parent, struct mock_iommu_domain, domain);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	return __mock_domain_alloc_nested(mock_parent, &user_cfg);
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			if (pgsize != MOCK_IO_PAGE_SIZE)
				flags |= MOCK_PFN_HUGE_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every starting
			 * IOVA should have been an iova passed to map, and
			 * the first IOVA must be present and have been the
			 * first IOVA passed to map_pages.
			 */
			if (first) {
				WARN_ON(ent && !(xa_to_value(ent) &
						 MOCK_PFN_START_IOVA));
				first = false;
			}
			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				WARN_ON(ent && !(xa_to_value(ent) &
						 MOCK_PFN_LAST_IOVA));

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING:
		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
	default:
		break;
	}

	return false;
}

static struct iommu_device mock_iommu_device = {
};

static struct iommu_device *mock_probe_device(struct device *dev)
{
	if (dev->bus != &iommufd_mock_bus_type.bus)
		return ERR_PTR(-ENODEV);
	return &mock_iommu_device;
}

static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_user = mock_domain_alloc_user,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static void mock_domain_free_nested(struct iommu_domain *domain)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);

	kfree(mock_nested);
}

static int
mock_domain_cache_invalidate_user(struct iommu_domain *domain,
				  struct iommu_user_data_array *array)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);
	struct iommu_hwpt_invalidate_selftest inv;
	u32 processed = 0;
	int i = 0, j;
	int rc = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) {
		rc = -EINVAL;
		goto out;
	}

	for ( ; i < array->entry_num; i++) {
		rc = iommu_copy_struct_from_user_array(&inv, array,
						       IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
						       i, iotlb_id);
		if (rc)
			break;

		if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			rc = -EOPNOTSUPP;
			break;
		}

		if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) {
			rc = -EINVAL;
			break;
		}

		if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) {
			/* Invalidate all mock iotlb entries and ignore iotlb_id */
			for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++)
				mock_nested->iotlb[j] = 0;
		} else {
			mock_nested->iotlb[inv.iotlb_id] = 0;
		}

		processed++;
	}

out:
	array->entry_num = processed;
	return rc;
}

static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
	.cache_invalidate_user = mock_domain_cache_invalidate_user,
};
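
/*
 * Illustrative userspace-side sketch (not part of this file): queueing two
 * invalidation entries that mock_domain_cache_invalidate_user() above will
 * consume, one targeted and one using IOMMU_TEST_INVALIDATE_FLAG_ALL.
 * Assumes the IOMMU_HWPT_INVALIDATE layout from uapi/linux/iommufd.h; error
 * handling is elided.
 */
#if 0
static int invalidate_mock_iotlb(int iommufd, __u32 nested_hwpt_id)
{
	struct iommu_hwpt_invalidate_selftest reqs[2] = {
		{ .iotlb_id = 2 },				/* one entry */
		{ .flags = IOMMU_TEST_INVALIDATE_FLAG_ALL },	/* everything */
	};
	struct iommu_hwpt_invalidate cmd = {
		.size = sizeof(cmd),
		.hwpt_id = nested_hwpt_id,
		.data_type = IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
		.data_uptr = (__u64)(uintptr_t)reqs,
		.entry_len = sizeof(reqs[0]),
		.entry_num = 2,
	};

	if (ioctl(iommufd, IOMMU_HWPT_INVALIDATE, &cmd))
		return -1;
	/* On success the kernel writes back how many entries it processed */
	return cmd.entry_num == 2 ? 0 : -1;
}
#endif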

static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
{
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	return container_of(obj, struct iommufd_hw_pagetable, obj);
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
	    hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
	    hwpt->domain->ops != &domain_nested_ops) {
		iommufd_put_object(ucmd->ictx, &hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock_nested = container_of(hwpt->domain,
				    struct mock_iommu_domain_nested, domain);
	return hwpt;
}

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	atomic_dec(&mock_dev_num);
	kfree(mdev);
}

static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc;

	if (dev_flags &
	    ~(MOCK_FLAGS_DEVICE_NO_DIRTY | MOCK_FLAGS_DEVICE_HUGE_IOVA))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
			  atomic_inc_return(&mock_dev_num));
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create an hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}
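
/*
 * Illustrative userspace-side sketch (not part of this file): exercising
 * iommufd_test_mock_domain() above. cmd.id is the IOAS (or hwpt) to attach
 * to; the kernel returns the selftest object, hwpt, and idev IDs. Assumes
 * the iommu_test_cmd layout and IOMMU_TEST_CMD number from iommufd_test.h;
 * error handling is elided.
 */
#if 0
static int create_mock_domain(int iommufd, __u32 ioas_id, __u32 *out_stdev_id,
			      __u32 *out_hwpt_id, __u32 *out_idev_id)
{
	struct iommu_test_cmd cmd = {
		.size = sizeof(cmd),
		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
		.id = ioas_id,
	};

	if (ioctl(iommufd, IOMMU_TEST_CMD, &cmd))
		return -1;
	*out_stdev_id = cmd.mock_domain.out_stdev_id;
	*out_hwpt_id = cmd.mock_domain.out_hwpt_id;
	*out_idev_id = cmd.mock_domain.out_idev_id;
	return 0;
}
#endif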

/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = container_of(dev_obj, struct selftest_obj, obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(ucmd->ictx, dev_obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}

static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd,
				       u32 mockpt_id, unsigned int iotlb_id,
				       u32 iotlb)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommufd_hw_pagetable *hwpt;
	int rc = 0;

	hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX ||
	    mock_nested->iotlb[iotlb_id] != iotlb)
		rc = -EINVAL;
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
{
	struct selftest_access *staccess;
	int rc;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	rc = iommufd_access_replace(staccess->access, ioas_id);
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}
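
/*
 * Illustrative helper (not used by the selftest): the number of host pages
 * spanned by an arbitrarily aligned [iova, iova + length) range, matching
 * the computation in iommufd_test_access_pages() below. For example,
 * iova = 0x1234 and length = 0x2000 with 4K pages spans 0x1000..0x4000,
 * i.e. 3 pages.
 */
static inline size_t __maybe_unused mock_span_npages(unsigned long iova,
						     size_t length)
{
	return (ALIGN(iova + length, PAGE_SIZE) -
		ALIGN_DOWN(iova, PAGE_SIZE)) / PAGE_SIZE;
}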

static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16 * 1024 * 1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return
	 * until the range is unpinned. This simple implementation puts a
	 * global lock around the pin, which may not suit drivers that want
	 * this to be a performance path. Drivers that get this wrong will
	 * trigger WARN_ON races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16 * 1024 * 1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}
static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);

static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long bitmap_size, i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	max = length / page_size;
	bitmap_size = max / BITS_PER_BYTE;

	tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr, bitmap_size)) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(ucmd->ictx, &hwpt->obj);
	return rc;
}

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_MD_CHECK_IOTLB:
		return iommufd_test_md_check_iotlb(ucmd, cmd->id,
						   cmd->check_iotlb.id,
						   cmd->check_iotlb.iotlb);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id,
			cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}

int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu_device,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;
	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu_device);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

void iommufd_test_exit(void)
{
	iommu_device_sysfs_remove(&mock_iommu_device);
	iommu_device_unregister_bus(&mock_iommu_device,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}