// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"

static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static DEFINE_IDA(mock_dev_ida);

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
	MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. The xarray also requires the high bit to be
	 * zero, so we store the pfns shifted. The upper bits are used for
	 * metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
	MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};

static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain);

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth-area
 * index and offset value. This has a much smaller randomization space and
 * syzkaller can hit it.
 */
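/*
 * Illustrative example of that encoding (not part of the uAPI contract): the
 * user-supplied u64 IOVA is reinterpreted as two __u32 fields in the order of
 * struct syz_layout below. Passing { .nth_area = 1, .offset = 0x100 } selects
 * the second mapped area in the io_pagetable and resolves to
 * iopt_area_iova(area) + 0x100; if the IOAS has fewer areas, 0 is returned.
 */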
static unsigned long __iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						  u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}

static unsigned long iommufd_test_syz_conv_iova(struct iommufd_access *access,
						u64 *iova)
{
	unsigned long ret;

	mutex_lock(&access->ioas_lock);
	if (!access->ioas) {
		mutex_unlock(&access->ioas_lock);
		return 0;
	}
	ret = __iommufd_test_syz_conv_iova(&access->ioas->iopt, iova);
	mutex_unlock(&access->ioas_lock);
	return ret;
}

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = __iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(ucmd->ictx, &ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct mock_iommu_domain, domain);
}

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_viommu *mock_viommu;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

static inline struct mock_iommu_domain_nested *
to_mock_nested(struct iommu_domain *domain)
{
	return container_of(domain, struct mock_iommu_domain_nested, domain);
}

struct mock_viommu {
	struct iommufd_viommu core;
	struct mock_iommu_domain *s2_parent;
};

static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
{
	return container_of(viommu, struct mock_viommu, core);
}

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	struct mock_viommu *viommu;
	struct rw_semaphore viommu_rwsem;
	unsigned long flags;
	unsigned long vdev_id;
	int id;
	u32 cache[MOCK_DEV_CACHE_NUM];
	atomic_t pasid_1024_fake_error;
	unsigned int iopf_refcount;
	struct iommu_domain *domain;
};

static inline struct mock_dev *to_mock_dev(struct device *dev)
{
	return container_of(dev, struct mock_dev, dev);
}

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
{
	return container_of(obj, struct selftest_obj, obj);
}

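/*
 * Attach for the mock driver is mostly bookkeeping: reject dirty-tracking
 * domains on devices created with MOCK_FLAGS_DEVICE_NO_DIRTY, record which
 * vIOMMU and virtual device ID a nested domain binds the device to, and move
 * IOPF enablement from the previously attached domain to the new one.
 */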
static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = to_mock_dev(dev);
	struct mock_viommu *new_viommu = NULL;
	unsigned long vdev_id = 0;
	int rc;

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	iommu_group_mutex_assert(dev);
	if (domain->type == IOMMU_DOMAIN_NESTED) {
		new_viommu = to_mock_nested(domain)->mock_viommu;
		if (new_viommu) {
			rc = iommufd_viommu_get_vdev_id(&new_viommu->core, dev,
							&vdev_id);
			if (rc)
				return rc;
		}
	}
	if (new_viommu != mdev->viommu) {
		down_write(&mdev->viommu_rwsem);
		mdev->viommu = new_viommu;
		mdev->vdev_id = vdev_id;
		up_write(&mdev->viommu_rwsem);
	}

	rc = mock_dev_enable_iopf(dev, domain);
	if (rc)
		return rc;

	mock_dev_disable_iopf(dev, mdev->domain);
	mdev->domain = domain;

	return 0;
}

static int mock_domain_set_dev_pasid_nop(struct iommu_domain *domain,
					 struct device *dev, ioasid_t pasid,
					 struct iommu_domain *old)
{
	struct mock_dev *mdev = to_mock_dev(dev);
	int rc;

	/*
	 * On the first attach with pasid 1024, set
	 * mdev->pasid_1024_fake_error so that the second call of this op can
	 * fake an error to validate the error path of the core. This is
	 * helpful to test the case in which the iommu core needs to roll
	 * back to the old domain due to a driver failure, e.g. replace.
	 * Be careful about the third call of this op: it will succeed, since
	 * mdev->pasid_1024_fake_error is cleared by the second call.
	 */
	if (pasid == 1024) {
		if (domain->type == IOMMU_DOMAIN_BLOCKED) {
			atomic_set(&mdev->pasid_1024_fake_error, 0);
		} else if (atomic_read(&mdev->pasid_1024_fake_error)) {
			/*
			 * Clear the flag, and fake an error to fail the
			 * replacement.
			 */
			atomic_set(&mdev->pasid_1024_fake_error, 0);
			return -ENOMEM;
		} else {
			/* Set the flag to fake an error in the next call */
			atomic_set(&mdev->pasid_1024_fake_error, 1);
		}
	}

	rc = mock_dev_enable_iopf(dev, domain);
	if (rc)
		return rc;

	mock_dev_disable_iopf(dev, old);

	return 0;
}

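/*
 * Sketch of the resulting selftest sequence for pasid 1024: the first
 * non-blocking attach arms the flag, the second call fakes -ENOMEM (so the
 * core must roll back to the old domain), and a third call succeeds again
 * because the flag was cleared. Attaching a BLOCKED domain clears the flag
 * at any point.
 */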

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
	.set_dev_pasid = mock_domain_set_dev_pasid_nop
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}

static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
				      unsigned long iova, size_t page_size,
				      unsigned long flags)
{
	unsigned long cur, end = iova + page_size - 1;
	bool dirty = false;
	void *ent, *old;

	for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
			continue;

		dirty = true;
		/* Clear dirty */
		if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
			unsigned long val;

			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
		}
	}

	return dirty;
}

static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	unsigned long end = iova + size;
	void *ent;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	do {
		unsigned long pgsize = MOCK_IO_PAGE_SIZE;
		unsigned long head;

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent) {
			iova += pgsize;
			continue;
		}

		if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
			pgsize = MOCK_HUGE_PAGE_SIZE;
		head = iova & ~(pgsize - 1);

		/* Clear dirty */
		if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};

static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct iommu_hwpt_selftest user_cfg;
	int rc, i;

	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST)
		return ERR_PTR(-EOPNOTSUPP);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg.iotlb;
	return mock_nested;
}

static struct iommu_domain *
mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
			 u32 flags, const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain_nested *mock_nested;
	struct mock_iommu_domain *mock_parent;

	if (flags & ~IOMMU_HWPT_ALLOC_PASID)
		return ERR_PTR(-EOPNOTSUPP);
	if (!parent || parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = to_mock_domain(parent);
	if (!mock_parent)
		return ERR_PTR(-EINVAL);

	mock_nested = __mock_domain_alloc_nested(user_data);
	if (IS_ERR(mock_nested))
		return ERR_CAST(mock_nested);
	mock_nested->parent = mock_parent;
	return &mock_nested->domain;
}

static struct iommu_domain *
442 mock_domain_alloc_paging_flags(struct device *dev, u32 flags, 443 const struct iommu_user_data *user_data) 444 { 445 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; 446 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | 447 IOMMU_HWPT_ALLOC_NEST_PARENT | 448 IOMMU_HWPT_ALLOC_PASID; 449 struct mock_dev *mdev = to_mock_dev(dev); 450 bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY; 451 struct mock_iommu_domain *mock; 452 453 if (user_data) 454 return ERR_PTR(-EOPNOTSUPP); 455 if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops)) 456 return ERR_PTR(-EOPNOTSUPP); 457 458 mock = kzalloc(sizeof(*mock), GFP_KERNEL); 459 if (!mock) 460 return ERR_PTR(-ENOMEM); 461 mock->domain.geometry.aperture_start = MOCK_APERTURE_START; 462 mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST; 463 mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE; 464 if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA) 465 mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE; 466 mock->domain.ops = mock_ops.default_domain_ops; 467 mock->domain.type = IOMMU_DOMAIN_UNMANAGED; 468 xa_init(&mock->pfns); 469 470 if (has_dirty_flag) 471 mock->domain.dirty_ops = &dirty_ops; 472 return &mock->domain; 473 } 474 475 static void mock_domain_free(struct iommu_domain *domain) 476 { 477 struct mock_iommu_domain *mock = to_mock_domain(domain); 478 479 WARN_ON(!xa_empty(&mock->pfns)); 480 kfree(mock); 481 } 482 483 static int mock_domain_map_pages(struct iommu_domain *domain, 484 unsigned long iova, phys_addr_t paddr, 485 size_t pgsize, size_t pgcount, int prot, 486 gfp_t gfp, size_t *mapped) 487 { 488 struct mock_iommu_domain *mock = to_mock_domain(domain); 489 unsigned long flags = MOCK_PFN_START_IOVA; 490 unsigned long start_iova = iova; 491 492 /* 493 * xarray does not reliably work with fault injection because it does a 494 * retry allocation, so put our own failure point. 
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			if (pgsize != MOCK_IO_PAGE_SIZE)
				flags |= MOCK_PFN_HUGE_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
			flags = 0;
		}
	}
	return 0;
}

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock = to_mock_domain(domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps it performed. So every
			 * starting/ending IOVA should have been an iova
			 * passed to map.
			 *
			 * This simple logic doesn't work when the HUGE_PAGE is
			 * turned on since the core code will automatically
			 * switch between the two page sizes creating a break
			 * in the unmap calls. The break can land in the
			 * middle of a contiguous IOVA range.
564 */ 565 if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) { 566 if (first) { 567 WARN_ON(ent && !(xa_to_value(ent) & 568 MOCK_PFN_START_IOVA)); 569 first = false; 570 } 571 if (pgcount == 1 && 572 cur + MOCK_IO_PAGE_SIZE == pgsize) 573 WARN_ON(ent && !(xa_to_value(ent) & 574 MOCK_PFN_LAST_IOVA)); 575 } 576 577 iova += MOCK_IO_PAGE_SIZE; 578 ret += MOCK_IO_PAGE_SIZE; 579 } 580 } 581 return ret; 582 } 583 584 static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain, 585 dma_addr_t iova) 586 { 587 struct mock_iommu_domain *mock = to_mock_domain(domain); 588 void *ent; 589 590 WARN_ON(iova % MOCK_IO_PAGE_SIZE); 591 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); 592 WARN_ON(!ent); 593 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE; 594 } 595 596 static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) 597 { 598 struct mock_dev *mdev = to_mock_dev(dev); 599 600 switch (cap) { 601 case IOMMU_CAP_CACHE_COHERENCY: 602 return true; 603 case IOMMU_CAP_DIRTY_TRACKING: 604 return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY); 605 default: 606 break; 607 } 608 609 return false; 610 } 611 612 static struct iopf_queue *mock_iommu_iopf_queue; 613 614 static struct mock_iommu_device { 615 struct iommu_device iommu_dev; 616 struct completion complete; 617 refcount_t users; 618 } mock_iommu; 619 620 static struct iommu_device *mock_probe_device(struct device *dev) 621 { 622 if (dev->bus != &iommufd_mock_bus_type.bus) 623 return ERR_PTR(-ENODEV); 624 return &mock_iommu.iommu_dev; 625 } 626 627 static void mock_domain_page_response(struct device *dev, struct iopf_fault *evt, 628 struct iommu_page_response *msg) 629 { 630 } 631 632 static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain) 633 { 634 struct mock_dev *mdev = to_mock_dev(dev); 635 int ret; 636 637 if (!domain || !domain->iopf_handler) 638 return 0; 639 640 if (!mock_iommu_iopf_queue) 641 return -ENODEV; 642 643 if (mdev->iopf_refcount) { 644 mdev->iopf_refcount++; 645 return 0; 646 } 647 648 ret = iopf_queue_add_device(mock_iommu_iopf_queue, dev); 649 if (ret) 650 return ret; 651 652 mdev->iopf_refcount = 1; 653 654 return 0; 655 } 656 657 static void mock_dev_disable_iopf(struct device *dev, struct iommu_domain *domain) 658 { 659 struct mock_dev *mdev = to_mock_dev(dev); 660 661 if (!domain || !domain->iopf_handler) 662 return; 663 664 if (--mdev->iopf_refcount) 665 return; 666 667 iopf_queue_remove_device(mock_iommu_iopf_queue, dev); 668 } 669 670 static void mock_viommu_destroy(struct iommufd_viommu *viommu) 671 { 672 struct mock_iommu_device *mock_iommu = container_of( 673 viommu->iommu_dev, struct mock_iommu_device, iommu_dev); 674 675 if (refcount_dec_and_test(&mock_iommu->users)) 676 complete(&mock_iommu->complete); 677 678 /* iommufd core frees mock_viommu and viommu */ 679 } 680 681 static struct iommu_domain * 682 mock_viommu_alloc_domain_nested(struct iommufd_viommu *viommu, u32 flags, 683 const struct iommu_user_data *user_data) 684 { 685 struct mock_viommu *mock_viommu = to_mock_viommu(viommu); 686 struct mock_iommu_domain_nested *mock_nested; 687 688 if (flags & ~IOMMU_HWPT_ALLOC_PASID) 689 return ERR_PTR(-EOPNOTSUPP); 690 691 mock_nested = __mock_domain_alloc_nested(user_data); 692 if (IS_ERR(mock_nested)) 693 return ERR_CAST(mock_nested); 694 mock_nested->mock_viommu = mock_viommu; 695 mock_nested->parent = mock_viommu->s2_parent; 696 return &mock_nested->domain; 697 } 698 699 static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu, 700 struct 
iommu_user_data_array *array) 701 { 702 struct iommu_viommu_invalidate_selftest *cmds; 703 struct iommu_viommu_invalidate_selftest *cur; 704 struct iommu_viommu_invalidate_selftest *end; 705 int rc; 706 707 /* A zero-length array is allowed to validate the array type */ 708 if (array->entry_num == 0 && 709 array->type == IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST) { 710 array->entry_num = 0; 711 return 0; 712 } 713 714 cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); 715 if (!cmds) 716 return -ENOMEM; 717 cur = cmds; 718 end = cmds + array->entry_num; 719 720 static_assert(sizeof(*cmds) == 3 * sizeof(u32)); 721 rc = iommu_copy_struct_from_full_user_array( 722 cmds, sizeof(*cmds), array, 723 IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST); 724 if (rc) 725 goto out; 726 727 while (cur != end) { 728 struct mock_dev *mdev; 729 struct device *dev; 730 int i; 731 732 if (cur->flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) { 733 rc = -EOPNOTSUPP; 734 goto out; 735 } 736 737 if (cur->cache_id > MOCK_DEV_CACHE_ID_MAX) { 738 rc = -EINVAL; 739 goto out; 740 } 741 742 xa_lock(&viommu->vdevs); 743 dev = iommufd_viommu_find_dev(viommu, 744 (unsigned long)cur->vdev_id); 745 if (!dev) { 746 xa_unlock(&viommu->vdevs); 747 rc = -EINVAL; 748 goto out; 749 } 750 mdev = container_of(dev, struct mock_dev, dev); 751 752 if (cur->flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) { 753 /* Invalidate all cache entries and ignore cache_id */ 754 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++) 755 mdev->cache[i] = 0; 756 } else { 757 mdev->cache[cur->cache_id] = 0; 758 } 759 xa_unlock(&viommu->vdevs); 760 761 cur++; 762 } 763 out: 764 array->entry_num = cur - cmds; 765 kfree(cmds); 766 return rc; 767 } 768 769 static struct iommufd_viommu_ops mock_viommu_ops = { 770 .destroy = mock_viommu_destroy, 771 .alloc_domain_nested = mock_viommu_alloc_domain_nested, 772 .cache_invalidate = mock_viommu_cache_invalidate, 773 }; 774 775 static struct iommufd_viommu *mock_viommu_alloc(struct device *dev, 776 struct iommu_domain *domain, 777 struct iommufd_ctx *ictx, 778 unsigned int viommu_type) 779 { 780 struct mock_iommu_device *mock_iommu = 781 iommu_get_iommu_dev(dev, struct mock_iommu_device, iommu_dev); 782 struct mock_viommu *mock_viommu; 783 784 if (viommu_type != IOMMU_VIOMMU_TYPE_SELFTEST) 785 return ERR_PTR(-EOPNOTSUPP); 786 787 mock_viommu = iommufd_viommu_alloc(ictx, struct mock_viommu, core, 788 &mock_viommu_ops); 789 if (IS_ERR(mock_viommu)) 790 return ERR_CAST(mock_viommu); 791 792 refcount_inc(&mock_iommu->users); 793 return &mock_viommu->core; 794 } 795 796 static const struct iommu_ops mock_ops = { 797 /* 798 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type() 799 * because it is zero. 
800 */ 801 .default_domain = &mock_blocking_domain, 802 .blocked_domain = &mock_blocking_domain, 803 .owner = THIS_MODULE, 804 .hw_info = mock_domain_hw_info, 805 .domain_alloc_paging_flags = mock_domain_alloc_paging_flags, 806 .domain_alloc_nested = mock_domain_alloc_nested, 807 .capable = mock_domain_capable, 808 .device_group = generic_device_group, 809 .probe_device = mock_probe_device, 810 .page_response = mock_domain_page_response, 811 .user_pasid_table = true, 812 .viommu_alloc = mock_viommu_alloc, 813 .default_domain_ops = 814 &(struct iommu_domain_ops){ 815 .free = mock_domain_free, 816 .attach_dev = mock_domain_nop_attach, 817 .map_pages = mock_domain_map_pages, 818 .unmap_pages = mock_domain_unmap_pages, 819 .iova_to_phys = mock_domain_iova_to_phys, 820 .set_dev_pasid = mock_domain_set_dev_pasid_nop, 821 }, 822 }; 823 824 static void mock_domain_free_nested(struct iommu_domain *domain) 825 { 826 kfree(to_mock_nested(domain)); 827 } 828 829 static int 830 mock_domain_cache_invalidate_user(struct iommu_domain *domain, 831 struct iommu_user_data_array *array) 832 { 833 struct mock_iommu_domain_nested *mock_nested = to_mock_nested(domain); 834 struct iommu_hwpt_invalidate_selftest inv; 835 u32 processed = 0; 836 int i = 0, j; 837 int rc = 0; 838 839 if (array->type != IOMMU_HWPT_INVALIDATE_DATA_SELFTEST) { 840 rc = -EINVAL; 841 goto out; 842 } 843 844 for ( ; i < array->entry_num; i++) { 845 rc = iommu_copy_struct_from_user_array(&inv, array, 846 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST, 847 i, iotlb_id); 848 if (rc) 849 break; 850 851 if (inv.flags & ~IOMMU_TEST_INVALIDATE_FLAG_ALL) { 852 rc = -EOPNOTSUPP; 853 break; 854 } 855 856 if (inv.iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX) { 857 rc = -EINVAL; 858 break; 859 } 860 861 if (inv.flags & IOMMU_TEST_INVALIDATE_FLAG_ALL) { 862 /* Invalidate all mock iotlb entries and ignore iotlb_id */ 863 for (j = 0; j < MOCK_NESTED_DOMAIN_IOTLB_NUM; j++) 864 mock_nested->iotlb[j] = 0; 865 } else { 866 mock_nested->iotlb[inv.iotlb_id] = 0; 867 } 868 869 processed++; 870 } 871 872 out: 873 array->entry_num = processed; 874 return rc; 875 } 876 877 static struct iommu_domain_ops domain_nested_ops = { 878 .free = mock_domain_free_nested, 879 .attach_dev = mock_domain_nop_attach, 880 .cache_invalidate_user = mock_domain_cache_invalidate_user, 881 .set_dev_pasid = mock_domain_set_dev_pasid_nop, 882 }; 883 884 static inline struct iommufd_hw_pagetable * 885 __get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type) 886 { 887 struct iommufd_object *obj; 888 889 obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type); 890 if (IS_ERR(obj)) 891 return ERR_CAST(obj); 892 return container_of(obj, struct iommufd_hw_pagetable, obj); 893 } 894 895 static inline struct iommufd_hw_pagetable * 896 get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, 897 struct mock_iommu_domain **mock) 898 { 899 struct iommufd_hw_pagetable *hwpt; 900 901 hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING); 902 if (IS_ERR(hwpt)) 903 return hwpt; 904 if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED || 905 hwpt->domain->ops != mock_ops.default_domain_ops) { 906 iommufd_put_object(ucmd->ictx, &hwpt->obj); 907 return ERR_PTR(-EINVAL); 908 } 909 *mock = to_mock_domain(hwpt->domain); 910 return hwpt; 911 } 912 913 static inline struct iommufd_hw_pagetable * 914 get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id, 915 struct mock_iommu_domain_nested **mock_nested) 916 { 917 struct iommufd_hw_pagetable *hwpt; 918 919 hwpt = 
__get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED); 920 if (IS_ERR(hwpt)) 921 return hwpt; 922 if (hwpt->domain->type != IOMMU_DOMAIN_NESTED || 923 hwpt->domain->ops != &domain_nested_ops) { 924 iommufd_put_object(ucmd->ictx, &hwpt->obj); 925 return ERR_PTR(-EINVAL); 926 } 927 *mock_nested = to_mock_nested(hwpt->domain); 928 return hwpt; 929 } 930 931 static void mock_dev_release(struct device *dev) 932 { 933 struct mock_dev *mdev = to_mock_dev(dev); 934 935 ida_free(&mock_dev_ida, mdev->id); 936 kfree(mdev); 937 } 938 939 static struct mock_dev *mock_dev_create(unsigned long dev_flags) 940 { 941 struct property_entry prop[] = { 942 PROPERTY_ENTRY_U32("pasid-num-bits", 0), 943 {}, 944 }; 945 const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY | 946 MOCK_FLAGS_DEVICE_HUGE_IOVA | 947 MOCK_FLAGS_DEVICE_PASID; 948 struct mock_dev *mdev; 949 int rc, i; 950 951 if (dev_flags & ~valid_flags) 952 return ERR_PTR(-EINVAL); 953 954 mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); 955 if (!mdev) 956 return ERR_PTR(-ENOMEM); 957 958 init_rwsem(&mdev->viommu_rwsem); 959 device_initialize(&mdev->dev); 960 mdev->flags = dev_flags; 961 mdev->dev.release = mock_dev_release; 962 mdev->dev.bus = &iommufd_mock_bus_type.bus; 963 for (i = 0; i < MOCK_DEV_CACHE_NUM; i++) 964 mdev->cache[i] = IOMMU_TEST_DEV_CACHE_DEFAULT; 965 966 rc = ida_alloc(&mock_dev_ida, GFP_KERNEL); 967 if (rc < 0) 968 goto err_put; 969 mdev->id = rc; 970 971 rc = dev_set_name(&mdev->dev, "iommufd_mock%u", mdev->id); 972 if (rc) 973 goto err_put; 974 975 if (dev_flags & MOCK_FLAGS_DEVICE_PASID) 976 prop[0] = PROPERTY_ENTRY_U32("pasid-num-bits", MOCK_PASID_WIDTH); 977 978 rc = device_create_managed_software_node(&mdev->dev, prop, NULL); 979 if (rc) { 980 dev_err(&mdev->dev, "add pasid-num-bits property failed, rc: %d", rc); 981 goto err_put; 982 } 983 984 rc = device_add(&mdev->dev); 985 if (rc) 986 goto err_put; 987 return mdev; 988 989 err_put: 990 put_device(&mdev->dev); 991 return ERR_PTR(rc); 992 } 993 994 static void mock_dev_destroy(struct mock_dev *mdev) 995 { 996 device_unregister(&mdev->dev); 997 } 998 999 bool iommufd_selftest_is_mock_dev(struct device *dev) 1000 { 1001 return dev->release == mock_dev_release; 1002 } 1003 1004 /* Create an hw_pagetable with the mock domain so we can test the domain ops */ 1005 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, 1006 struct iommu_test_cmd *cmd) 1007 { 1008 struct iommufd_device *idev; 1009 struct selftest_obj *sobj; 1010 u32 pt_id = cmd->id; 1011 u32 dev_flags = 0; 1012 u32 idev_id; 1013 int rc; 1014 1015 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST); 1016 if (IS_ERR(sobj)) 1017 return PTR_ERR(sobj); 1018 1019 sobj->idev.ictx = ucmd->ictx; 1020 sobj->type = TYPE_IDEV; 1021 1022 if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS) 1023 dev_flags = cmd->mock_domain_flags.dev_flags; 1024 1025 sobj->idev.mock_dev = mock_dev_create(dev_flags); 1026 if (IS_ERR(sobj->idev.mock_dev)) { 1027 rc = PTR_ERR(sobj->idev.mock_dev); 1028 goto out_sobj; 1029 } 1030 1031 idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev, 1032 &idev_id); 1033 if (IS_ERR(idev)) { 1034 rc = PTR_ERR(idev); 1035 goto out_mdev; 1036 } 1037 sobj->idev.idev = idev; 1038 1039 rc = iommufd_device_attach(idev, IOMMU_NO_PASID, &pt_id); 1040 if (rc) 1041 goto out_unbind; 1042 1043 /* Userspace must destroy the device_id to destroy the object */ 1044 cmd->mock_domain.out_hwpt_id = pt_id; 1045 cmd->mock_domain.out_stdev_id = sobj->obj.id; 1046 cmd->mock_domain.out_idev_id = idev_id; 
1047 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1048 if (rc) 1049 goto out_detach; 1050 iommufd_object_finalize(ucmd->ictx, &sobj->obj); 1051 return 0; 1052 1053 out_detach: 1054 iommufd_device_detach(idev, IOMMU_NO_PASID); 1055 out_unbind: 1056 iommufd_device_unbind(idev); 1057 out_mdev: 1058 mock_dev_destroy(sobj->idev.mock_dev); 1059 out_sobj: 1060 iommufd_object_abort(ucmd->ictx, &sobj->obj); 1061 return rc; 1062 } 1063 1064 static struct selftest_obj * 1065 iommufd_test_get_selftest_obj(struct iommufd_ctx *ictx, u32 id) 1066 { 1067 struct iommufd_object *dev_obj; 1068 struct selftest_obj *sobj; 1069 1070 /* 1071 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure 1072 * it doesn't race with detach, which is not allowed. 1073 */ 1074 dev_obj = iommufd_get_object(ictx, id, IOMMUFD_OBJ_SELFTEST); 1075 if (IS_ERR(dev_obj)) 1076 return ERR_CAST(dev_obj); 1077 1078 sobj = to_selftest_obj(dev_obj); 1079 if (sobj->type != TYPE_IDEV) { 1080 iommufd_put_object(ictx, dev_obj); 1081 return ERR_PTR(-EINVAL); 1082 } 1083 return sobj; 1084 } 1085 1086 /* Replace the mock domain with a manually allocated hw_pagetable */ 1087 static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd, 1088 unsigned int device_id, u32 pt_id, 1089 struct iommu_test_cmd *cmd) 1090 { 1091 struct selftest_obj *sobj; 1092 int rc; 1093 1094 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, device_id); 1095 if (IS_ERR(sobj)) 1096 return PTR_ERR(sobj); 1097 1098 rc = iommufd_device_replace(sobj->idev.idev, IOMMU_NO_PASID, &pt_id); 1099 if (rc) 1100 goto out_sobj; 1101 1102 cmd->mock_domain_replace.pt_id = pt_id; 1103 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1104 1105 out_sobj: 1106 iommufd_put_object(ucmd->ictx, &sobj->obj); 1107 return rc; 1108 } 1109 1110 /* Add an additional reserved IOVA to the IOAS */ 1111 static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd, 1112 unsigned int mockpt_id, 1113 unsigned long start, size_t length) 1114 { 1115 struct iommufd_ioas *ioas; 1116 int rc; 1117 1118 ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id); 1119 if (IS_ERR(ioas)) 1120 return PTR_ERR(ioas); 1121 down_write(&ioas->iopt.iova_rwsem); 1122 rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL); 1123 up_write(&ioas->iopt.iova_rwsem); 1124 iommufd_put_object(ucmd->ictx, &ioas->obj); 1125 return rc; 1126 } 1127 1128 /* Check that every pfn under each iova matches the pfn under a user VA */ 1129 static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd, 1130 unsigned int mockpt_id, unsigned long iova, 1131 size_t length, void __user *uptr) 1132 { 1133 struct iommufd_hw_pagetable *hwpt; 1134 struct mock_iommu_domain *mock; 1135 uintptr_t end; 1136 int rc; 1137 1138 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE || 1139 (uintptr_t)uptr % MOCK_IO_PAGE_SIZE || 1140 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) 1141 return -EINVAL; 1142 1143 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); 1144 if (IS_ERR(hwpt)) 1145 return PTR_ERR(hwpt); 1146 1147 for (; length; length -= MOCK_IO_PAGE_SIZE) { 1148 struct page *pages[1]; 1149 unsigned long pfn; 1150 long npages; 1151 void *ent; 1152 1153 npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0, 1154 pages); 1155 if (npages < 0) { 1156 rc = npages; 1157 goto out_put; 1158 } 1159 if (WARN_ON(npages != 1)) { 1160 rc = -EFAULT; 1161 goto out_put; 1162 } 1163 pfn = page_to_pfn(pages[0]); 1164 put_page(pages[0]); 1165 1166 ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE); 1167 if (!ent || 1168 
(xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE != 1169 pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) { 1170 rc = -EINVAL; 1171 goto out_put; 1172 } 1173 iova += MOCK_IO_PAGE_SIZE; 1174 uptr += MOCK_IO_PAGE_SIZE; 1175 } 1176 rc = 0; 1177 1178 out_put: 1179 iommufd_put_object(ucmd->ictx, &hwpt->obj); 1180 return rc; 1181 } 1182 1183 /* Check that the page ref count matches, to look for missing pin/unpins */ 1184 static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd, 1185 void __user *uptr, size_t length, 1186 unsigned int refs) 1187 { 1188 uintptr_t end; 1189 1190 if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE || 1191 check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) 1192 return -EINVAL; 1193 1194 for (; length; length -= PAGE_SIZE) { 1195 struct page *pages[1]; 1196 long npages; 1197 1198 npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages); 1199 if (npages < 0) 1200 return npages; 1201 if (WARN_ON(npages != 1)) 1202 return -EFAULT; 1203 if (!PageCompound(pages[0])) { 1204 unsigned int count; 1205 1206 count = page_ref_count(pages[0]); 1207 if (count / GUP_PIN_COUNTING_BIAS != refs) { 1208 put_page(pages[0]); 1209 return -EIO; 1210 } 1211 } 1212 put_page(pages[0]); 1213 uptr += PAGE_SIZE; 1214 } 1215 return 0; 1216 } 1217 1218 static int iommufd_test_md_check_iotlb(struct iommufd_ucmd *ucmd, 1219 u32 mockpt_id, unsigned int iotlb_id, 1220 u32 iotlb) 1221 { 1222 struct mock_iommu_domain_nested *mock_nested; 1223 struct iommufd_hw_pagetable *hwpt; 1224 int rc = 0; 1225 1226 hwpt = get_md_pagetable_nested(ucmd, mockpt_id, &mock_nested); 1227 if (IS_ERR(hwpt)) 1228 return PTR_ERR(hwpt); 1229 1230 mock_nested = to_mock_nested(hwpt->domain); 1231 1232 if (iotlb_id > MOCK_NESTED_DOMAIN_IOTLB_ID_MAX || 1233 mock_nested->iotlb[iotlb_id] != iotlb) 1234 rc = -EINVAL; 1235 iommufd_put_object(ucmd->ictx, &hwpt->obj); 1236 return rc; 1237 } 1238 1239 static int iommufd_test_dev_check_cache(struct iommufd_ucmd *ucmd, u32 idev_id, 1240 unsigned int cache_id, u32 cache) 1241 { 1242 struct iommufd_device *idev; 1243 struct mock_dev *mdev; 1244 int rc = 0; 1245 1246 idev = iommufd_get_device(ucmd, idev_id); 1247 if (IS_ERR(idev)) 1248 return PTR_ERR(idev); 1249 mdev = container_of(idev->dev, struct mock_dev, dev); 1250 1251 if (cache_id > MOCK_DEV_CACHE_ID_MAX || mdev->cache[cache_id] != cache) 1252 rc = -EINVAL; 1253 iommufd_put_object(ucmd->ictx, &idev->obj); 1254 return rc; 1255 } 1256 1257 struct selftest_access { 1258 struct iommufd_access *access; 1259 struct file *file; 1260 struct mutex lock; 1261 struct list_head items; 1262 unsigned int next_id; 1263 bool destroying; 1264 }; 1265 1266 struct selftest_access_item { 1267 struct list_head items_elm; 1268 unsigned long iova; 1269 size_t length; 1270 unsigned int id; 1271 }; 1272 1273 static const struct file_operations iommfd_test_staccess_fops; 1274 1275 static struct selftest_access *iommufd_access_get(int fd) 1276 { 1277 struct file *file; 1278 1279 file = fget(fd); 1280 if (!file) 1281 return ERR_PTR(-EBADFD); 1282 1283 if (file->f_op != &iommfd_test_staccess_fops) { 1284 fput(file); 1285 return ERR_PTR(-EBADFD); 1286 } 1287 return file->private_data; 1288 } 1289 1290 static void iommufd_test_access_unmap(void *data, unsigned long iova, 1291 unsigned long length) 1292 { 1293 unsigned long iova_last = iova + length - 1; 1294 struct selftest_access *staccess = data; 1295 struct selftest_access_item *item; 1296 struct selftest_access_item *tmp; 1297 1298 mutex_lock(&staccess->lock); 1299 
list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) { 1300 if (iova > item->iova + item->length - 1 || 1301 iova_last < item->iova) 1302 continue; 1303 list_del(&item->items_elm); 1304 iommufd_access_unpin_pages(staccess->access, item->iova, 1305 item->length); 1306 kfree(item); 1307 } 1308 mutex_unlock(&staccess->lock); 1309 } 1310 1311 static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd, 1312 unsigned int access_id, 1313 unsigned int item_id) 1314 { 1315 struct selftest_access_item *item; 1316 struct selftest_access *staccess; 1317 1318 staccess = iommufd_access_get(access_id); 1319 if (IS_ERR(staccess)) 1320 return PTR_ERR(staccess); 1321 1322 mutex_lock(&staccess->lock); 1323 list_for_each_entry(item, &staccess->items, items_elm) { 1324 if (item->id == item_id) { 1325 list_del(&item->items_elm); 1326 iommufd_access_unpin_pages(staccess->access, item->iova, 1327 item->length); 1328 mutex_unlock(&staccess->lock); 1329 kfree(item); 1330 fput(staccess->file); 1331 return 0; 1332 } 1333 } 1334 mutex_unlock(&staccess->lock); 1335 fput(staccess->file); 1336 return -ENOENT; 1337 } 1338 1339 static int iommufd_test_staccess_release(struct inode *inode, 1340 struct file *filep) 1341 { 1342 struct selftest_access *staccess = filep->private_data; 1343 1344 if (staccess->access) { 1345 iommufd_test_access_unmap(staccess, 0, ULONG_MAX); 1346 iommufd_access_destroy(staccess->access); 1347 } 1348 mutex_destroy(&staccess->lock); 1349 kfree(staccess); 1350 return 0; 1351 } 1352 1353 static const struct iommufd_access_ops selftest_access_ops_pin = { 1354 .needs_pin_pages = 1, 1355 .unmap = iommufd_test_access_unmap, 1356 }; 1357 1358 static const struct iommufd_access_ops selftest_access_ops = { 1359 .unmap = iommufd_test_access_unmap, 1360 }; 1361 1362 static const struct file_operations iommfd_test_staccess_fops = { 1363 .release = iommufd_test_staccess_release, 1364 }; 1365 1366 static struct selftest_access *iommufd_test_alloc_access(void) 1367 { 1368 struct selftest_access *staccess; 1369 struct file *filep; 1370 1371 staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT); 1372 if (!staccess) 1373 return ERR_PTR(-ENOMEM); 1374 INIT_LIST_HEAD(&staccess->items); 1375 mutex_init(&staccess->lock); 1376 1377 filep = anon_inode_getfile("[iommufd_test_staccess]", 1378 &iommfd_test_staccess_fops, staccess, 1379 O_RDWR); 1380 if (IS_ERR(filep)) { 1381 kfree(staccess); 1382 return ERR_CAST(filep); 1383 } 1384 staccess->file = filep; 1385 return staccess; 1386 } 1387 1388 static int iommufd_test_create_access(struct iommufd_ucmd *ucmd, 1389 unsigned int ioas_id, unsigned int flags) 1390 { 1391 struct iommu_test_cmd *cmd = ucmd->cmd; 1392 struct selftest_access *staccess; 1393 struct iommufd_access *access; 1394 u32 id; 1395 int fdno; 1396 int rc; 1397 1398 if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) 1399 return -EOPNOTSUPP; 1400 1401 staccess = iommufd_test_alloc_access(); 1402 if (IS_ERR(staccess)) 1403 return PTR_ERR(staccess); 1404 1405 fdno = get_unused_fd_flags(O_CLOEXEC); 1406 if (fdno < 0) { 1407 rc = -ENOMEM; 1408 goto out_free_staccess; 1409 } 1410 1411 access = iommufd_access_create( 1412 ucmd->ictx, 1413 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ? 
1414 &selftest_access_ops_pin : 1415 &selftest_access_ops, 1416 staccess, &id); 1417 if (IS_ERR(access)) { 1418 rc = PTR_ERR(access); 1419 goto out_put_fdno; 1420 } 1421 rc = iommufd_access_attach(access, ioas_id); 1422 if (rc) 1423 goto out_destroy; 1424 cmd->create_access.out_access_fd = fdno; 1425 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1426 if (rc) 1427 goto out_destroy; 1428 1429 staccess->access = access; 1430 fd_install(fdno, staccess->file); 1431 return 0; 1432 1433 out_destroy: 1434 iommufd_access_destroy(access); 1435 out_put_fdno: 1436 put_unused_fd(fdno); 1437 out_free_staccess: 1438 fput(staccess->file); 1439 return rc; 1440 } 1441 1442 static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd, 1443 unsigned int access_id, 1444 unsigned int ioas_id) 1445 { 1446 struct selftest_access *staccess; 1447 int rc; 1448 1449 staccess = iommufd_access_get(access_id); 1450 if (IS_ERR(staccess)) 1451 return PTR_ERR(staccess); 1452 1453 rc = iommufd_access_replace(staccess->access, ioas_id); 1454 fput(staccess->file); 1455 return rc; 1456 } 1457 1458 /* Check that the pages in a page array match the pages in the user VA */ 1459 static int iommufd_test_check_pages(void __user *uptr, struct page **pages, 1460 size_t npages) 1461 { 1462 for (; npages; npages--) { 1463 struct page *tmp_pages[1]; 1464 long rc; 1465 1466 rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages); 1467 if (rc < 0) 1468 return rc; 1469 if (WARN_ON(rc != 1)) 1470 return -EFAULT; 1471 put_page(tmp_pages[0]); 1472 if (tmp_pages[0] != *pages) 1473 return -EBADE; 1474 pages++; 1475 uptr += PAGE_SIZE; 1476 } 1477 return 0; 1478 } 1479 1480 static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd, 1481 unsigned int access_id, unsigned long iova, 1482 size_t length, void __user *uptr, 1483 u32 flags) 1484 { 1485 struct iommu_test_cmd *cmd = ucmd->cmd; 1486 struct selftest_access_item *item; 1487 struct selftest_access *staccess; 1488 struct page **pages; 1489 size_t npages; 1490 int rc; 1491 1492 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */ 1493 if (length > 16*1024*1024) 1494 return -ENOMEM; 1495 1496 if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ)) 1497 return -EOPNOTSUPP; 1498 1499 staccess = iommufd_access_get(access_id); 1500 if (IS_ERR(staccess)) 1501 return PTR_ERR(staccess); 1502 1503 if (staccess->access->ops != &selftest_access_ops_pin) { 1504 rc = -EOPNOTSUPP; 1505 goto out_put; 1506 } 1507 1508 if (flags & MOCK_FLAGS_ACCESS_SYZ) 1509 iova = iommufd_test_syz_conv_iova(staccess->access, 1510 &cmd->access_pages.iova); 1511 1512 npages = (ALIGN(iova + length, PAGE_SIZE) - 1513 ALIGN_DOWN(iova, PAGE_SIZE)) / 1514 PAGE_SIZE; 1515 pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT); 1516 if (!pages) { 1517 rc = -ENOMEM; 1518 goto out_put; 1519 } 1520 1521 /* 1522 * Drivers will need to think very carefully about this locking. The 1523 * core code can do multiple unmaps instantaneously after 1524 * iommufd_access_pin_pages() and *all* the unmaps must not return until 1525 * the range is unpinned. This simple implementation puts a global lock 1526 * around the pin, which may not suit drivers that want this to be a 1527 * performance path. drivers that get this wrong will trigger WARN_ON 1528 * races and cause EDEADLOCK failures to userspace. 
1529 */ 1530 mutex_lock(&staccess->lock); 1531 rc = iommufd_access_pin_pages(staccess->access, iova, length, pages, 1532 flags & MOCK_FLAGS_ACCESS_WRITE); 1533 if (rc) 1534 goto out_unlock; 1535 1536 /* For syzkaller allow uptr to be NULL to skip this check */ 1537 if (uptr) { 1538 rc = iommufd_test_check_pages( 1539 uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages, 1540 npages); 1541 if (rc) 1542 goto out_unaccess; 1543 } 1544 1545 item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT); 1546 if (!item) { 1547 rc = -ENOMEM; 1548 goto out_unaccess; 1549 } 1550 1551 item->iova = iova; 1552 item->length = length; 1553 item->id = staccess->next_id++; 1554 list_add_tail(&item->items_elm, &staccess->items); 1555 1556 cmd->access_pages.out_access_pages_id = item->id; 1557 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1558 if (rc) 1559 goto out_free_item; 1560 goto out_unlock; 1561 1562 out_free_item: 1563 list_del(&item->items_elm); 1564 kfree(item); 1565 out_unaccess: 1566 iommufd_access_unpin_pages(staccess->access, iova, length); 1567 out_unlock: 1568 mutex_unlock(&staccess->lock); 1569 kvfree(pages); 1570 out_put: 1571 fput(staccess->file); 1572 return rc; 1573 } 1574 1575 static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd, 1576 unsigned int access_id, unsigned long iova, 1577 size_t length, void __user *ubuf, 1578 unsigned int flags) 1579 { 1580 struct iommu_test_cmd *cmd = ucmd->cmd; 1581 struct selftest_access *staccess; 1582 void *tmp; 1583 int rc; 1584 1585 /* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */ 1586 if (length > 16*1024*1024) 1587 return -ENOMEM; 1588 1589 if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH | 1590 MOCK_FLAGS_ACCESS_SYZ)) 1591 return -EOPNOTSUPP; 1592 1593 staccess = iommufd_access_get(access_id); 1594 if (IS_ERR(staccess)) 1595 return PTR_ERR(staccess); 1596 1597 tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT); 1598 if (!tmp) { 1599 rc = -ENOMEM; 1600 goto out_put; 1601 } 1602 1603 if (flags & MOCK_ACCESS_RW_WRITE) { 1604 if (copy_from_user(tmp, ubuf, length)) { 1605 rc = -EFAULT; 1606 goto out_free; 1607 } 1608 } 1609 1610 if (flags & MOCK_FLAGS_ACCESS_SYZ) 1611 iova = iommufd_test_syz_conv_iova(staccess->access, 1612 &cmd->access_rw.iova); 1613 1614 rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags); 1615 if (rc) 1616 goto out_free; 1617 if (!(flags & MOCK_ACCESS_RW_WRITE)) { 1618 if (copy_to_user(ubuf, tmp, length)) { 1619 rc = -EFAULT; 1620 goto out_free; 1621 } 1622 } 1623 1624 out_free: 1625 kvfree(tmp); 1626 out_put: 1627 fput(staccess->file); 1628 return rc; 1629 } 1630 static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE); 1631 static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH == 1632 __IOMMUFD_ACCESS_RW_SLOW_PATH); 1633 1634 static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id, 1635 unsigned long iova, size_t length, 1636 unsigned long page_size, void __user *uptr, 1637 u32 flags) 1638 { 1639 unsigned long i, max; 1640 struct iommu_test_cmd *cmd = ucmd->cmd; 1641 struct iommufd_hw_pagetable *hwpt; 1642 struct mock_iommu_domain *mock; 1643 int rc, count = 0; 1644 void *tmp; 1645 1646 if (!page_size || !length || iova % page_size || length % page_size || 1647 !uptr) 1648 return -EINVAL; 1649 1650 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); 1651 if (IS_ERR(hwpt)) 1652 return PTR_ERR(hwpt); 1653 1654 if (!(mock->flags & MOCK_DIRTY_TRACK)) { 1655 rc = -EINVAL; 1656 goto out_put; 1657 } 1658 1659 max = length / page_size; 1660 tmp = 
kvzalloc(DIV_ROUND_UP(max, BITS_PER_LONG) * sizeof(unsigned long), 1661 GFP_KERNEL_ACCOUNT); 1662 if (!tmp) { 1663 rc = -ENOMEM; 1664 goto out_put; 1665 } 1666 1667 if (copy_from_user(tmp, uptr,DIV_ROUND_UP(max, BITS_PER_BYTE))) { 1668 rc = -EFAULT; 1669 goto out_free; 1670 } 1671 1672 for (i = 0; i < max; i++) { 1673 unsigned long cur = iova + i * page_size; 1674 void *ent, *old; 1675 1676 if (!test_bit(i, (unsigned long *)tmp)) 1677 continue; 1678 1679 ent = xa_load(&mock->pfns, cur / page_size); 1680 if (ent) { 1681 unsigned long val; 1682 1683 val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA; 1684 old = xa_store(&mock->pfns, cur / page_size, 1685 xa_mk_value(val), GFP_KERNEL); 1686 WARN_ON_ONCE(ent != old); 1687 count++; 1688 } 1689 } 1690 1691 cmd->dirty.out_nr_dirty = count; 1692 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1693 out_free: 1694 kvfree(tmp); 1695 out_put: 1696 iommufd_put_object(ucmd->ictx, &hwpt->obj); 1697 return rc; 1698 } 1699 1700 static int iommufd_test_trigger_iopf(struct iommufd_ucmd *ucmd, 1701 struct iommu_test_cmd *cmd) 1702 { 1703 struct iopf_fault event = { }; 1704 struct iommufd_device *idev; 1705 1706 idev = iommufd_get_device(ucmd, cmd->trigger_iopf.dev_id); 1707 if (IS_ERR(idev)) 1708 return PTR_ERR(idev); 1709 1710 event.fault.prm.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; 1711 if (cmd->trigger_iopf.pasid != IOMMU_NO_PASID) 1712 event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; 1713 event.fault.type = IOMMU_FAULT_PAGE_REQ; 1714 event.fault.prm.addr = cmd->trigger_iopf.addr; 1715 event.fault.prm.pasid = cmd->trigger_iopf.pasid; 1716 event.fault.prm.grpid = cmd->trigger_iopf.grpid; 1717 event.fault.prm.perm = cmd->trigger_iopf.perm; 1718 1719 iommu_report_device_fault(idev->dev, &event); 1720 iommufd_put_object(ucmd->ictx, &idev->obj); 1721 1722 return 0; 1723 } 1724 1725 static int iommufd_test_trigger_vevent(struct iommufd_ucmd *ucmd, 1726 struct iommu_test_cmd *cmd) 1727 { 1728 struct iommu_viommu_event_selftest test = {}; 1729 struct iommufd_device *idev; 1730 struct mock_dev *mdev; 1731 int rc = -ENOENT; 1732 1733 idev = iommufd_get_device(ucmd, cmd->trigger_vevent.dev_id); 1734 if (IS_ERR(idev)) 1735 return PTR_ERR(idev); 1736 mdev = to_mock_dev(idev->dev); 1737 1738 down_read(&mdev->viommu_rwsem); 1739 if (!mdev->viommu || !mdev->vdev_id) 1740 goto out_unlock; 1741 1742 test.virt_id = mdev->vdev_id; 1743 rc = iommufd_viommu_report_event(&mdev->viommu->core, 1744 IOMMU_VEVENTQ_TYPE_SELFTEST, &test, 1745 sizeof(test)); 1746 out_unlock: 1747 up_read(&mdev->viommu_rwsem); 1748 iommufd_put_object(ucmd->ictx, &idev->obj); 1749 1750 return rc; 1751 } 1752 1753 static inline struct iommufd_hw_pagetable * 1754 iommufd_get_hwpt(struct iommufd_ucmd *ucmd, u32 id) 1755 { 1756 struct iommufd_object *pt_obj; 1757 1758 pt_obj = iommufd_get_object(ucmd->ictx, id, IOMMUFD_OBJ_ANY); 1759 if (IS_ERR(pt_obj)) 1760 return ERR_CAST(pt_obj); 1761 1762 if (pt_obj->type != IOMMUFD_OBJ_HWPT_NESTED && 1763 pt_obj->type != IOMMUFD_OBJ_HWPT_PAGING) { 1764 iommufd_put_object(ucmd->ictx, pt_obj); 1765 return ERR_PTR(-EINVAL); 1766 } 1767 1768 return container_of(pt_obj, struct iommufd_hw_pagetable, obj); 1769 } 1770 1771 static int iommufd_test_pasid_check_hwpt(struct iommufd_ucmd *ucmd, 1772 struct iommu_test_cmd *cmd) 1773 { 1774 u32 hwpt_id = cmd->pasid_check.hwpt_id; 1775 struct iommu_domain *attached_domain; 1776 struct iommu_attach_handle *handle; 1777 struct iommufd_hw_pagetable *hwpt; 1778 struct selftest_obj *sobj; 1779 struct mock_dev *mdev; 1780 int rc = 
0; 1781 1782 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id); 1783 if (IS_ERR(sobj)) 1784 return PTR_ERR(sobj); 1785 1786 mdev = sobj->idev.mock_dev; 1787 1788 handle = iommu_attach_handle_get(mdev->dev.iommu_group, 1789 cmd->pasid_check.pasid, 0); 1790 if (IS_ERR(handle)) 1791 attached_domain = NULL; 1792 else 1793 attached_domain = handle->domain; 1794 1795 /* hwpt_id == 0 means to check if pasid is detached */ 1796 if (!hwpt_id) { 1797 if (attached_domain) 1798 rc = -EINVAL; 1799 goto out_sobj; 1800 } 1801 1802 hwpt = iommufd_get_hwpt(ucmd, hwpt_id); 1803 if (IS_ERR(hwpt)) { 1804 rc = PTR_ERR(hwpt); 1805 goto out_sobj; 1806 } 1807 1808 if (attached_domain != hwpt->domain) 1809 rc = -EINVAL; 1810 1811 iommufd_put_object(ucmd->ictx, &hwpt->obj); 1812 out_sobj: 1813 iommufd_put_object(ucmd->ictx, &sobj->obj); 1814 return rc; 1815 } 1816 1817 static int iommufd_test_pasid_attach(struct iommufd_ucmd *ucmd, 1818 struct iommu_test_cmd *cmd) 1819 { 1820 struct selftest_obj *sobj; 1821 int rc; 1822 1823 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id); 1824 if (IS_ERR(sobj)) 1825 return PTR_ERR(sobj); 1826 1827 rc = iommufd_device_attach(sobj->idev.idev, cmd->pasid_attach.pasid, 1828 &cmd->pasid_attach.pt_id); 1829 if (rc) 1830 goto out_sobj; 1831 1832 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1833 if (rc) 1834 iommufd_device_detach(sobj->idev.idev, 1835 cmd->pasid_attach.pasid); 1836 1837 out_sobj: 1838 iommufd_put_object(ucmd->ictx, &sobj->obj); 1839 return rc; 1840 } 1841 1842 static int iommufd_test_pasid_replace(struct iommufd_ucmd *ucmd, 1843 struct iommu_test_cmd *cmd) 1844 { 1845 struct selftest_obj *sobj; 1846 int rc; 1847 1848 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id); 1849 if (IS_ERR(sobj)) 1850 return PTR_ERR(sobj); 1851 1852 rc = iommufd_device_replace(sobj->idev.idev, cmd->pasid_attach.pasid, 1853 &cmd->pasid_attach.pt_id); 1854 if (rc) 1855 goto out_sobj; 1856 1857 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 1858 1859 out_sobj: 1860 iommufd_put_object(ucmd->ictx, &sobj->obj); 1861 return rc; 1862 } 1863 1864 static int iommufd_test_pasid_detach(struct iommufd_ucmd *ucmd, 1865 struct iommu_test_cmd *cmd) 1866 { 1867 struct selftest_obj *sobj; 1868 1869 sobj = iommufd_test_get_selftest_obj(ucmd->ictx, cmd->id); 1870 if (IS_ERR(sobj)) 1871 return PTR_ERR(sobj); 1872 1873 iommufd_device_detach(sobj->idev.idev, cmd->pasid_detach.pasid); 1874 iommufd_put_object(ucmd->ictx, &sobj->obj); 1875 return 0; 1876 } 1877 1878 void iommufd_selftest_destroy(struct iommufd_object *obj) 1879 { 1880 struct selftest_obj *sobj = to_selftest_obj(obj); 1881 1882 switch (sobj->type) { 1883 case TYPE_IDEV: 1884 iommufd_device_detach(sobj->idev.idev, IOMMU_NO_PASID); 1885 iommufd_device_unbind(sobj->idev.idev); 1886 mock_dev_destroy(sobj->idev.mock_dev); 1887 break; 1888 } 1889 } 1890 1891 int iommufd_test(struct iommufd_ucmd *ucmd) 1892 { 1893 struct iommu_test_cmd *cmd = ucmd->cmd; 1894 1895 switch (cmd->op) { 1896 case IOMMU_TEST_OP_ADD_RESERVED: 1897 return iommufd_test_add_reserved(ucmd, cmd->id, 1898 cmd->add_reserved.start, 1899 cmd->add_reserved.length); 1900 case IOMMU_TEST_OP_MOCK_DOMAIN: 1901 case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS: 1902 return iommufd_test_mock_domain(ucmd, cmd); 1903 case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE: 1904 return iommufd_test_mock_domain_replace( 1905 ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd); 1906 case IOMMU_TEST_OP_MD_CHECK_MAP: 1907 return iommufd_test_md_check_pa( 1908 ucmd, cmd->id, cmd->check_map.iova, 1909 
cmd->check_map.length, 1910 u64_to_user_ptr(cmd->check_map.uptr)); 1911 case IOMMU_TEST_OP_MD_CHECK_REFS: 1912 return iommufd_test_md_check_refs( 1913 ucmd, u64_to_user_ptr(cmd->check_refs.uptr), 1914 cmd->check_refs.length, cmd->check_refs.refs); 1915 case IOMMU_TEST_OP_MD_CHECK_IOTLB: 1916 return iommufd_test_md_check_iotlb(ucmd, cmd->id, 1917 cmd->check_iotlb.id, 1918 cmd->check_iotlb.iotlb); 1919 case IOMMU_TEST_OP_DEV_CHECK_CACHE: 1920 return iommufd_test_dev_check_cache(ucmd, cmd->id, 1921 cmd->check_dev_cache.id, 1922 cmd->check_dev_cache.cache); 1923 case IOMMU_TEST_OP_CREATE_ACCESS: 1924 return iommufd_test_create_access(ucmd, cmd->id, 1925 cmd->create_access.flags); 1926 case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS: 1927 return iommufd_test_access_replace_ioas( 1928 ucmd, cmd->id, cmd->access_replace_ioas.ioas_id); 1929 case IOMMU_TEST_OP_ACCESS_PAGES: 1930 return iommufd_test_access_pages( 1931 ucmd, cmd->id, cmd->access_pages.iova, 1932 cmd->access_pages.length, 1933 u64_to_user_ptr(cmd->access_pages.uptr), 1934 cmd->access_pages.flags); 1935 case IOMMU_TEST_OP_ACCESS_RW: 1936 return iommufd_test_access_rw( 1937 ucmd, cmd->id, cmd->access_rw.iova, 1938 cmd->access_rw.length, 1939 u64_to_user_ptr(cmd->access_rw.uptr), 1940 cmd->access_rw.flags); 1941 case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES: 1942 return iommufd_test_access_item_destroy( 1943 ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id); 1944 case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT: 1945 /* Protect _batch_init(), can not be less than elmsz */ 1946 if (cmd->memory_limit.limit < 1947 sizeof(unsigned long) + sizeof(u32)) 1948 return -EINVAL; 1949 iommufd_test_memory_limit = cmd->memory_limit.limit; 1950 return 0; 1951 case IOMMU_TEST_OP_DIRTY: 1952 return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova, 1953 cmd->dirty.length, 1954 cmd->dirty.page_size, 1955 u64_to_user_ptr(cmd->dirty.uptr), 1956 cmd->dirty.flags); 1957 case IOMMU_TEST_OP_TRIGGER_IOPF: 1958 return iommufd_test_trigger_iopf(ucmd, cmd); 1959 case IOMMU_TEST_OP_TRIGGER_VEVENT: 1960 return iommufd_test_trigger_vevent(ucmd, cmd); 1961 case IOMMU_TEST_OP_PASID_ATTACH: 1962 return iommufd_test_pasid_attach(ucmd, cmd); 1963 case IOMMU_TEST_OP_PASID_REPLACE: 1964 return iommufd_test_pasid_replace(ucmd, cmd); 1965 case IOMMU_TEST_OP_PASID_DETACH: 1966 return iommufd_test_pasid_detach(ucmd, cmd); 1967 case IOMMU_TEST_OP_PASID_CHECK_HWPT: 1968 return iommufd_test_pasid_check_hwpt(ucmd, cmd); 1969 default: 1970 return -EOPNOTSUPP; 1971 } 1972 } 1973 1974 bool iommufd_should_fail(void) 1975 { 1976 return should_fail(&fail_iommufd, 1); 1977 } 1978 1979 int __init iommufd_test_init(void) 1980 { 1981 struct platform_device_info pdevinfo = { 1982 .name = "iommufd_selftest_iommu", 1983 }; 1984 int rc; 1985 1986 dbgfs_root = 1987 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd); 1988 1989 selftest_iommu_dev = platform_device_register_full(&pdevinfo); 1990 if (IS_ERR(selftest_iommu_dev)) { 1991 rc = PTR_ERR(selftest_iommu_dev); 1992 goto err_dbgfs; 1993 } 1994 1995 rc = bus_register(&iommufd_mock_bus_type.bus); 1996 if (rc) 1997 goto err_platform; 1998 1999 rc = iommu_device_sysfs_add(&mock_iommu.iommu_dev, 2000 &selftest_iommu_dev->dev, NULL, "%s", 2001 dev_name(&selftest_iommu_dev->dev)); 2002 if (rc) 2003 goto err_bus; 2004 2005 rc = iommu_device_register_bus(&mock_iommu.iommu_dev, &mock_ops, 2006 &iommufd_mock_bus_type.bus, 2007 &iommufd_mock_bus_type.nb); 2008 if (rc) 2009 goto err_sysfs; 2010 2011 refcount_set(&mock_iommu.users, 1); 2012 
	init_completion(&mock_iommu.complete);

	mock_iommu_iopf_queue = iopf_queue_alloc("mock-iopfq");
	mock_iommu.iommu_dev.max_pasids = (1 << MOCK_PASID_WIDTH);

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

static void iommufd_test_wait_for_users(void)
{
	if (refcount_dec_and_test(&mock_iommu.users))
		return;
	/*
	 * Time out waiting for the iommu device user count to become 0.
	 *
	 * Note that this is just an example: since the selftest is built into
	 * the iommufd module, the iommu device is only unplugged when the
	 * module is unloaded, and the module cannot be unloaded while any
	 * iommufd FDs are open. So this WARN_ON is not expected to trigger.
	 */
	WARN_ON(!wait_for_completion_timeout(&mock_iommu.complete,
					     msecs_to_jiffies(10000)));
}

void iommufd_test_exit(void)
{
	if (mock_iommu_iopf_queue) {
		iopf_queue_free(mock_iommu_iopf_queue);
		mock_iommu_iopf_queue = NULL;
	}

	iommufd_test_wait_for_users();
	iommu_device_sysfs_remove(&mock_iommu.iommu_dev);
	iommu_device_unregister_bus(&mock_iommu.iommu_dev,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}
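/*
 * Note on teardown ordering (sketch): iommufd_test_exit() frees the IOPF
 * queue first, then waits for remaining mock_iommu users (allocated vIOMMUs)
 * to drop their references before removing the sysfs entry, unregistering the
 * bus-bound iommu device, the mock bus, the platform device, and the debugfs
 * fault attribute, roughly the reverse of iommufd_test_init().
 */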