// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#if IS_ENABLED(CONFIG_KVM)
#include <linux/kvm_host.h>
#endif
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pseudo_fs.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/pm_runtime.h>
#include <linux/interval_tree.h>
#include <linux/iova_bitmap.h>
#include <linux/iommufd.h>
#include "vfio.h"

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

#define VFIO_MAGIC 0x5646494f /* "VFIO" */

static struct vfio {
	struct class			*device_class;
	struct ida			device_ida;
	struct vfsmount			*vfs_mount;
	int				fs_count;
} vfio;

#ifdef CONFIG_VFIO_NOIOMMU
bool vfio_noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   vfio_noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif

static DEFINE_XARRAY(vfio_device_set_xa);

int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *new_dev_set;
	struct vfio_device_set *dev_set;

	if (WARN_ON(!set_id))
		return -EINVAL;

	/*
	 * Atomically acquire a singleton object in the xarray for this set_id
	 */
	xa_lock(&vfio_device_set_xa);
	dev_set = xa_load(&vfio_device_set_xa, idx);
	if (dev_set)
		goto found_get_ref;
	xa_unlock(&vfio_device_set_xa);

	new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
	if (!new_dev_set)
		return -ENOMEM;
	mutex_init(&new_dev_set->lock);
	INIT_LIST_HEAD(&new_dev_set->device_list);
	new_dev_set->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
			       GFP_KERNEL);
	if (!dev_set) {
		dev_set = new_dev_set;
		goto found_get_ref;
	}

	kfree(new_dev_set);
	if (xa_is_err(dev_set)) {
		xa_unlock(&vfio_device_set_xa);
		return xa_err(dev_set);
	}

found_get_ref:
	dev_set->device_count++;
	xa_unlock(&vfio_device_set_xa);
	mutex_lock(&dev_set->lock);
	device->dev_set = dev_set;
	list_add_tail(&device->dev_set_list, &dev_set->device_list);
	mutex_unlock(&dev_set->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(vfio_assign_device_set);

static void vfio_release_device_set(struct vfio_device *device)
{
	struct vfio_device_set *dev_set = device->dev_set;

	if (!dev_set)
		return;

	mutex_lock(&dev_set->lock);
	list_del(&device->dev_set_list);
	mutex_unlock(&dev_set->lock);

	xa_lock(&vfio_device_set_xa);
	if (!--dev_set->device_count) {
		__xa_erase(&vfio_device_set_xa,
			   (unsigned long)dev_set->set_id);
		mutex_destroy(&dev_set->lock);
		kfree(dev_set);
	}
	xa_unlock(&vfio_device_set_xa);
}

unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set)
{
	struct vfio_device *cur;
	unsigned int open_count = 0;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		open_count += cur->open_count;
	return open_count;
}
EXPORT_SYMBOL_GPL(vfio_device_set_open_count);

struct vfio_device *
vfio_find_device_in_devset(struct vfio_device_set *dev_set,
			   struct device *dev)
{
	struct vfio_device *cur;

	lockdep_assert_held(&dev_set->lock);

	list_for_each_entry(cur, &dev_set->device_list, dev_set_list)
		if (cur->dev == dev)
			return cur;
	return NULL;
}
EXPORT_SYMBOL_GPL(vfio_find_device_in_devset);

/*
 * Device objects - create, release, get, put, search
 */
/* Device reference always implies a group reference */
void vfio_device_put_registration(struct vfio_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->comp);
}

bool vfio_device_try_get_registration(struct vfio_device *device)
{
	return refcount_inc_not_zero(&device->refcount);
}

/*
 * VFIO driver API
 */
/* Release helper called by vfio_put_device() */
static void vfio_device_release(struct device *dev)
{
	struct vfio_device *device =
			container_of(dev, struct vfio_device, device);

	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);

	if (device->ops->release)
		device->ops->release(device);

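	/*
	 * Pairs with vfio_fs_inode_new(): drop the backing anon inode and
	 * unpin the vfio pseudo filesystem.
	 */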
	iput(device->inode);
	simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
	kvfree(device);
}

static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops);

/*
 * Allocate and initialize vfio_device so it can be registered to vfio
 * core.
 *
 * Drivers should use the wrapper vfio_alloc_device() for allocation.
 * @size is the size of the structure to be allocated, including any
 * private data used by the driver.
 *
 * Driver may provide an @init callback to cover device private data.
 *
 * Use vfio_put_device() to release the structure after success return.
 */
struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev,
				       const struct vfio_device_ops *ops)
{
	struct vfio_device *device;
	int ret;

	if (WARN_ON(size < sizeof(struct vfio_device)))
		return ERR_PTR(-EINVAL);

	device = kvzalloc(size, GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	ret = vfio_init_device(device, dev, ops);
	if (ret)
		goto out_free;
	return device;

out_free:
	kvfree(device);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(_vfio_alloc_device);

static int vfio_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, VFIO_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type vfio_fs_type = {
	.name = "vfio",
	.owner = THIS_MODULE,
	.init_fs_context = vfio_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *vfio_fs_inode_new(void)
{
	struct inode *inode;
	int ret;

	ret = simple_pin_fs(&vfio_fs_type, &vfio.vfs_mount, &vfio.fs_count);
	if (ret)
		return ERR_PTR(ret);

	inode = alloc_anon_inode(vfio.vfs_mount->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);

	return inode;
}

/*
 * Initialize a vfio_device so it can be registered to vfio core.
 */
static int vfio_init_device(struct vfio_device *device, struct device *dev,
			    const struct vfio_device_ops *ops)
{
	int ret;

	ret = ida_alloc_max(&vfio.device_ida, MINORMASK, GFP_KERNEL);
	if (ret < 0) {
		dev_dbg(dev, "Error to alloc index\n");
		return ret;
	}

	device->index = ret;
	init_completion(&device->comp);
	device->dev = dev;
	device->ops = ops;
	device->inode = vfio_fs_inode_new();
	if (IS_ERR(device->inode)) {
		ret = PTR_ERR(device->inode);
		goto out_inode;
	}

	if (ops->init) {
		ret = ops->init(device);
		if (ret)
			goto out_uninit;
	}

	device_initialize(&device->device);
	device->device.release = vfio_device_release;
	device->device.class = vfio.device_class;
	device->device.parent = device->dev;
	return 0;

out_uninit:
	iput(device->inode);
	simple_release_fs(&vfio.vfs_mount, &vfio.fs_count);
out_inode:
	vfio_release_device_set(device);
	ida_free(&vfio.device_ida, device->index);
	return ret;
}

static int __vfio_register_dev(struct vfio_device *device,
			       enum vfio_group_type type)
{
	int ret;

	if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) &&
		    (!device->ops->bind_iommufd ||
		     !device->ops->unbind_iommufd ||
		     !device->ops->attach_ioas ||
		     !device->ops->detach_ioas)))
		return -EINVAL;

	/*
	 * If the driver doesn't specify a set then the device is added to a
	 * singleton set just for itself.
	 */
	if (!device->dev_set)
		vfio_assign_device_set(device, device);

	ret = dev_set_name(&device->device, "vfio%d", device->index);
	if (ret)
		return ret;

	ret = vfio_device_set_group(device, type);
	if (ret)
		return ret;

	/*
	 * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
	 * restore cache coherency. It has to be checked here because it is only
	 * valid for cases where we are using iommu groups.
	 */
	if (type == VFIO_IOMMU && !vfio_device_is_noiommu(device) &&
	    !device_iommu_capable(device->dev, IOMMU_CAP_CACHE_COHERENCY)) {
		ret = -EINVAL;
		goto err_out;
	}

	ret = vfio_device_add(device);
	if (ret)
		goto err_out;

	/* Refcounting can't start until the driver calls register */
	refcount_set(&device->refcount, 1);

	vfio_device_group_register(device);
	vfio_device_debugfs_init(device);

	return 0;
err_out:
	vfio_device_remove_group(device);
	return ret;
}

int vfio_register_group_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);

/*
 * Register a virtual device without IOMMU backing.  The user of this
 * device must not be able to directly trigger unmediated DMA.
 */
int vfio_register_emulated_iommu_dev(struct vfio_device *device)
{
	return __vfio_register_dev(device, VFIO_EMULATED_IOMMU);
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void vfio_unregister_group_dev(struct vfio_device *device)
{
	unsigned int i = 0;
	bool interrupted = false;
	long rc;

	/*
	 * Prevent new device opened by userspace via the
	 * VFIO_GROUP_GET_DEVICE_FD in the group path.
	 */
	vfio_device_group_unregister(device);

	/*
	 * Balances vfio_device_add() in register path, also prevents
	 * new device opened by userspace in the cdev path.
	 */
	vfio_device_del(device);

	vfio_device_put_registration(device);
	rc = try_wait_for_completion(&device->comp);
	while (rc <= 0) {
		if (device->ops->request)
			device->ops->request(device, i++);

		if (interrupted) {
			rc = wait_for_completion_timeout(&device->comp,
							 HZ * 10);
		} else {
			rc = wait_for_completion_interruptible_timeout(
				&device->comp, HZ * 10);
			if (rc < 0) {
				interrupted = true;
				dev_warn(device->dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	}

	vfio_device_debugfs_exit(device);
	/* Balances vfio_device_set_group in register path */
	vfio_device_remove_group(device);
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);

#if IS_ENABLED(CONFIG_KVM)
void vfio_device_get_kvm_safe(struct vfio_device *device, struct kvm *kvm)
{
	void (*pfn)(struct kvm *kvm);
	bool (*fn)(struct kvm *kvm);
	bool ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!kvm)
		return;

	pfn = symbol_get(kvm_put_kvm);
	if (WARN_ON(!pfn))
		return;

	fn = symbol_get(kvm_get_kvm_safe);
	if (WARN_ON(!fn)) {
		symbol_put(kvm_put_kvm);
		return;
	}

	ret = fn(kvm);
	symbol_put(kvm_get_kvm_safe);
	if (!ret) {
		symbol_put(kvm_put_kvm);
		return;
	}

	device->put_kvm = pfn;
	device->kvm = kvm;
}

void vfio_device_put_kvm(struct vfio_device *device)
{
	lockdep_assert_held(&device->dev_set->lock);

	if (!device->kvm)
		return;

	if (WARN_ON(!device->put_kvm))
		goto clear;

	device->put_kvm(device->kvm);
	device->put_kvm = NULL;
	symbol_put(kvm_put_kvm);

clear:
	device->kvm = NULL;
}
#endif

/* true if the vfio_device has open_device() called but not close_device() */
static bool vfio_assert_device_open(struct vfio_device *device)
{
	return !WARN_ON_ONCE(!READ_ONCE(device->open_count));
}

struct vfio_device_file *
vfio_allocate_device_file(struct vfio_device *device)
{
	struct vfio_device_file *df;

	df = kzalloc(sizeof(*df), GFP_KERNEL_ACCOUNT);
	if (!df)
		return ERR_PTR(-ENOMEM);

	df->device = device;
	spin_lock_init(&df->kvm_ref_lock);

	return df;
}

static int vfio_df_device_first_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;
	int ret;

	lockdep_assert_held(&device->dev_set->lock);

	if (!try_module_get(device->dev->driver->owner))
		return -ENODEV;

	if (iommufd)
		ret = vfio_df_iommufd_bind(df);
	else
		ret = vfio_device_group_use_iommu(device);
	if (ret)
		goto err_module_put;

	if (device->ops->open_device) {
		ret = device->ops->open_device(device);
		if (ret)
			goto err_unuse_iommu;
	}
	return 0;

err_unuse_iommu:
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
err_module_put:
	module_put(device->dev->driver->owner);
	return ret;
}

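/*
 * Paired with vfio_df_device_first_open(): give the driver its
 * close_device() callback, then drop the iommufd binding or the group
 * IOMMU usage and release the driver module reference.
 */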
static void vfio_df_device_last_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	struct iommufd_ctx *iommufd = df->iommufd;

	lockdep_assert_held(&device->dev_set->lock);

	if (device->ops->close_device)
		device->ops->close_device(device);
	if (iommufd)
		vfio_df_iommufd_unbind(df);
	else
		vfio_device_group_unuse_iommu(device);
	module_put(device->dev->driver->owner);
}

int vfio_df_open(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;
	int ret = 0;

	lockdep_assert_held(&device->dev_set->lock);

	/*
	 * Only the group path allows the device to be opened multiple
	 * times.  The device cdev path doesn't have a secure way for it.
	 */
	if (device->open_count != 0 && !df->group)
		return -EINVAL;

	device->open_count++;
	if (device->open_count == 1) {
		ret = vfio_df_device_first_open(df);
		if (ret)
			device->open_count--;
	}

	return ret;
}

void vfio_df_close(struct vfio_device_file *df)
{
	struct vfio_device *device = df->device;

	lockdep_assert_held(&device->dev_set->lock);

	if (!vfio_assert_device_open(device))
		return;
	if (device->open_count == 1)
		vfio_df_device_last_close(df);
	device->open_count--;
}

/*
 * Wrapper around pm_runtime_resume_and_get().
 * Return error code on failure or 0 on success.
 */
static inline int vfio_device_pm_runtime_get(struct vfio_device *device)
{
	struct device *dev = device->dev;

	if (dev->driver && dev->driver->pm) {
		int ret;

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			dev_info_ratelimited(dev,
				"vfio: runtime resume failed %d\n", ret);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Wrapper around pm_runtime_put().
 */
static inline void vfio_device_pm_runtime_put(struct vfio_device *device)
{
	struct device *dev = device->dev;

	if (dev->driver && dev->driver->pm)
		pm_runtime_put(dev);
}

/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	if (df->group)
		vfio_df_group_close(df);
	else
		vfio_df_unbind_iommufd(df);

	vfio_device_put_registration(device);

	kfree(df);

	return 0;
}

/*
 * vfio_mig_get_next_state - Compute the next step in the FSM
 * @cur_fsm - The current state the device is in
 * @new_fsm - The target state to reach
 * @next_fsm - Pointer to the next step to get to new_fsm
 *
 * Return 0 upon success, otherwise -errno
 * Upon success the next step in the state progression between cur_fsm and
 * new_fsm will be set in next_fsm.
 *
 * This breaks down requests for combination transitions into smaller steps and
 * returns the next step to get to new_fsm. The function may need to be called
 * multiple times before reaching new_fsm.
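 *
 * For example (illustrative, not part of the original comment): with P2P
 * supported, stepping from RUNNING towards STOP_COPY returns RUNNING_P2P,
 * then STOP, then STOP_COPY across successive calls, and the driver
 * performs each intermediate arc in turn.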
 *
 */
int vfio_mig_get_next_state(struct vfio_device *device,
			    enum vfio_device_mig_state cur_fsm,
			    enum vfio_device_mig_state new_fsm,
			    enum vfio_device_mig_state *next_fsm)
{
	enum { VFIO_DEVICE_NUM_STATES = VFIO_DEVICE_STATE_PRE_COPY_P2P + 1 };
	/*
	 * The coding in this table requires the driver to implement the
	 * following FSM arcs:
	 *         RESUMING -> STOP
	 *         STOP -> RESUMING
	 *         STOP -> STOP_COPY
	 *         STOP_COPY -> STOP
	 *
	 * If P2P is supported then the driver must also implement these FSM
	 * arcs:
	 *         RUNNING -> RUNNING_P2P
	 *         RUNNING_P2P -> RUNNING
	 *         RUNNING_P2P -> STOP
	 *         STOP -> RUNNING_P2P
	 *
	 * If precopy is supported then the driver must support these additional
	 * FSM arcs:
	 *         RUNNING -> PRE_COPY
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> STOP_COPY
	 * However, if precopy and P2P are supported together then the driver
	 * must support these additional arcs beyond the P2P arcs above:
	 *         PRE_COPY -> RUNNING
	 *         PRE_COPY -> PRE_COPY_P2P
	 *         PRE_COPY_P2P -> PRE_COPY
	 *         PRE_COPY_P2P -> RUNNING_P2P
	 *         PRE_COPY_P2P -> STOP_COPY
	 *         RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> PRE_COPY_P2P
	 *
	 * Without P2P and precopy the driver must implement:
	 *         RUNNING -> STOP
	 *         STOP -> RUNNING
	 *
	 * The coding will step through multiple states for some combination
	 * transitions; if all optional features are supported, this means the
	 * following ones:
	 *         PRE_COPY -> PRE_COPY_P2P -> STOP_COPY
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP
	 *         PRE_COPY -> RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> RUNNING
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP
	 *         PRE_COPY_P2P -> RUNNING_P2P -> STOP -> RESUMING
	 *         RESUMING -> STOP -> RUNNING_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING
	 *         RESUMING -> STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RESUMING -> STOP -> STOP_COPY
	 *         RUNNING -> RUNNING_P2P -> PRE_COPY_P2P
	 *         RUNNING -> RUNNING_P2P -> STOP
	 *         RUNNING -> RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING -> RUNNING_P2P -> STOP -> STOP_COPY
	 *         RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         RUNNING_P2P -> STOP -> RESUMING
	 *         RUNNING_P2P -> STOP -> STOP_COPY
	 *         STOP -> RUNNING_P2P -> PRE_COPY_P2P
	 *         STOP -> RUNNING_P2P -> RUNNING
	 *         STOP -> RUNNING_P2P -> RUNNING -> PRE_COPY
	 *         STOP_COPY -> STOP -> RESUMING
	 *         STOP_COPY -> STOP -> RUNNING_P2P
	 *         STOP_COPY -> STOP -> RUNNING_P2P -> RUNNING
	 *
	 * The following transitions are blocked:
	 *         STOP_COPY -> PRE_COPY
	 *         STOP_COPY -> PRE_COPY_P2P
	 */
	static const u8 vfio_from_fsm_table[VFIO_DEVICE_NUM_STATES][VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_PRE_COPY,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_STOP_COPY] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP_COPY,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RESUMING] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_RESUMING,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_RUNNING_P2P] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_RUNNING,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_PRE_COPY_P2P,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_STOP,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_RUNNING_P2P,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
		[VFIO_DEVICE_STATE_ERROR] = {
			[VFIO_DEVICE_STATE_STOP] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RESUMING] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_RUNNING_P2P] = VFIO_DEVICE_STATE_ERROR,
			[VFIO_DEVICE_STATE_ERROR] = VFIO_DEVICE_STATE_ERROR,
		},
	};

	static const unsigned int state_flags_table[VFIO_DEVICE_NUM_STATES] = {
		[VFIO_DEVICE_STATE_STOP] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_PRE_COPY_P2P] = VFIO_MIGRATION_STOP_COPY |
						   VFIO_MIGRATION_P2P |
						   VFIO_MIGRATION_PRE_COPY,
		[VFIO_DEVICE_STATE_STOP_COPY] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RESUMING] = VFIO_MIGRATION_STOP_COPY,
		[VFIO_DEVICE_STATE_RUNNING_P2P] =
			VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P,
		[VFIO_DEVICE_STATE_ERROR] = ~0U,
	};

	if (WARN_ON(cur_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
		    (state_flags_table[cur_fsm] & device->migration_flags) !=
			state_flags_table[cur_fsm]))
		return -EINVAL;

	if (new_fsm >= ARRAY_SIZE(vfio_from_fsm_table) ||
	    (state_flags_table[new_fsm] & device->migration_flags) !=
			state_flags_table[new_fsm])
		return -EINVAL;

	/*
	 * Arcs touching optional and unsupported states are skipped over. The
	 * driver will instead see an arc from the original state to the next
	 * logical state, as per the above comment.
	 */
	*next_fsm = vfio_from_fsm_table[cur_fsm][new_fsm];
	while ((state_flags_table[*next_fsm] & device->migration_flags) !=
			state_flags_table[*next_fsm])
		*next_fsm = vfio_from_fsm_table[*next_fsm][new_fsm];

	return (*next_fsm != VFIO_DEVICE_STATE_ERROR) ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(vfio_mig_get_next_state);

/*
 * Convert the driver's struct file into a FD number and return it to userspace
 */
static int vfio_ioct_mig_return_fd(struct file *filp, void __user *arg,
				   struct vfio_device_feature_mig_state *mig)
{
	int ret;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_fput;
	}

	mig->data_fd = fd;
	if (copy_to_user(arg, mig, sizeof(*mig))) {
		ret = -EFAULT;
		goto out_put_unused;
	}
	fd_install(fd, filp);
	return 0;

out_put_unused:
	put_unused_fd(fd);
out_fput:
	fput(filp);
	return ret;
}

static int
vfio_ioctl_device_feature_mig_device_state(struct vfio_device *device,
					   u32 flags, void __user *arg,
					   size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_mig_state, data_fd);
	struct vfio_device_feature_mig_state mig;
	struct file *filp = NULL;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET |
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;

	if (copy_from_user(&mig, arg, minsz))
		return -EFAULT;

	if (flags & VFIO_DEVICE_FEATURE_GET) {
		enum vfio_device_mig_state curr_state;

		ret = device->mig_ops->migration_get_state(device,
							   &curr_state);
		if (ret)
			return ret;
		mig.device_state = curr_state;
		goto out_copy;
	}

	/* Handle the VFIO_DEVICE_FEATURE_SET */
	filp = device->mig_ops->migration_set_state(device, mig.device_state);
	if (IS_ERR(filp) || !filp)
		goto out_copy;

	return vfio_ioct_mig_return_fd(filp, arg, &mig);
out_copy:
	mig.data_fd = -1;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	return 0;
}

static int
vfio_ioctl_device_feature_migration_data_size(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{
	struct vfio_device_feature_mig_data_size data_size = {};
	unsigned long stop_copy_length;
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(data_size));
	if (ret != 1)
		return ret;

	ret = device->mig_ops->migration_get_data_size(device, &stop_copy_length);
	if (ret)
		return ret;

	data_size.stop_copy_length = stop_copy_length;
	if (copy_to_user(arg, &data_size, sizeof(data_size)))
		return -EFAULT;

	return 0;
}

static int vfio_ioctl_device_feature_migration(struct vfio_device *device,
					       u32 flags, void __user *arg,
					       size_t argsz)
{
	struct vfio_device_feature_migration mig = {
		.flags = device->migration_flags,
	};
	int ret;

	if (!device->mig_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_GET,
				 sizeof(mig));
	if (ret != 1)
		return ret;
	if (copy_to_user(arg, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
			      u32 req_nodes)
{
	struct interval_tree_node *prev, *curr, *comb_start, *comb_end;
	unsigned long min_gap, curr_gap;

	/* Special shortcut when a single range is required */
	if (req_nodes == 1) {
		unsigned long last;

		comb_start = interval_tree_iter_first(root, 0, ULONG_MAX);

		/* Empty list */
		if (WARN_ON_ONCE(!comb_start))
			return;

		curr = comb_start;
		while (curr) {
			last = curr->last;
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
			if (prev != comb_start)
				interval_tree_remove(prev, root);
		}
		comb_start->last = last;
		return;
	}

	/* Combine ranges which have the smallest gap */
	while (cur_nodes > req_nodes) {
		prev = NULL;
		min_gap = ULONG_MAX;
		curr = interval_tree_iter_first(root, 0, ULONG_MAX);
		while (curr) {
			if (prev) {
				curr_gap = curr->start - prev->last;
				if (curr_gap < min_gap) {
					min_gap = curr_gap;
					comb_start = prev;
					comb_end = curr;
				}
			}
			prev = curr;
			curr = interval_tree_iter_next(curr, 0, ULONG_MAX);
		}

		/* Empty list or no nodes to combine */
		if (WARN_ON_ONCE(min_gap == ULONG_MAX))
			break;

		comb_start->last = comb_end->last;
		interval_tree_remove(comb_end, root);
		cur_nodes--;
	}
}
EXPORT_SYMBOL_GPL(vfio_combine_iova_ranges);

/* Ranges should fit into a single kernel page */
#define LOG_MAX_RANGES \
	(PAGE_SIZE / sizeof(struct vfio_device_feature_dma_logging_range))

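/*
 * Start DMA dirty tracking: copy in the user-supplied ranges, reject
 * unaligned or overlapping entries, and hand the resulting interval
 * tree to the driver's log_start op.
 */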
static int
vfio_ioctl_device_feature_logging_start(struct vfio_device *device,
					u32 flags, void __user *arg,
					size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_control,
			    ranges);
	struct vfio_device_feature_dma_logging_range __user *ranges;
	struct vfio_device_feature_dma_logging_control control;
	struct vfio_device_feature_dma_logging_range range;
	struct rb_root_cached root = RB_ROOT_CACHED;
	struct interval_tree_node *nodes;
	u64 iova_end;
	u32 nnodes;
	int i, ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET,
				 sizeof(control));
	if (ret != 1)
		return ret;

	if (copy_from_user(&control, arg, minsz))
		return -EFAULT;

	nnodes = control.num_ranges;
	if (!nnodes)
		return -EINVAL;

	if (nnodes > LOG_MAX_RANGES)
		return -E2BIG;

	ranges = u64_to_user_ptr(control.ranges);
	nodes = kmalloc_array(nnodes, sizeof(struct interval_tree_node),
			      GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nnodes; i++) {
		if (copy_from_user(&range, &ranges[i], sizeof(range))) {
			ret = -EFAULT;
			goto end;
		}
		if (!IS_ALIGNED(range.iova, control.page_size) ||
		    !IS_ALIGNED(range.length, control.page_size)) {
			ret = -EINVAL;
			goto end;
		}

		if (check_add_overflow(range.iova, range.length, &iova_end) ||
		    iova_end > ULONG_MAX) {
			ret = -EOVERFLOW;
			goto end;
		}

		nodes[i].start = range.iova;
		nodes[i].last = range.iova + range.length - 1;
		if (interval_tree_iter_first(&root, nodes[i].start,
					     nodes[i].last)) {
			/* Range overlapping */
			ret = -EINVAL;
			goto end;
		}
		interval_tree_insert(nodes + i, &root);
	}

	ret = device->log_ops->log_start(device, &root, nnodes,
					 &control.page_size);
	if (ret)
		goto end;

	if (copy_to_user(arg, &control, sizeof(control))) {
		ret = -EFAULT;
		device->log_ops->log_stop(device);
	}

end:
	kfree(nodes);
	return ret;
}

static int
vfio_ioctl_device_feature_logging_stop(struct vfio_device *device,
				       u32 flags, void __user *arg,
				       size_t argsz)
{
	int ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_SET, 0);
	if (ret != 1)
		return ret;

	return device->log_ops->log_stop(device);
}

static int vfio_device_log_read_and_clear(struct iova_bitmap *iter,
					  unsigned long iova, size_t length,
					  void *opaque)
{
	struct vfio_device *device = opaque;

	return device->log_ops->log_read_and_clear(device, iova, length, iter);
}

static int
vfio_ioctl_device_feature_logging_report(struct vfio_device *device,
					 u32 flags, void __user *arg,
					 size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_dma_logging_report,
			    bitmap);
	struct vfio_device_feature_dma_logging_report report;
	struct iova_bitmap *iter;
	u64 iova_end;
	int ret;

	if (!device->log_ops)
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz,
				 VFIO_DEVICE_FEATURE_GET,
				 sizeof(report));
	if (ret != 1)
		return ret;

	if (copy_from_user(&report, arg, minsz))
		return -EFAULT;

	if (report.page_size < SZ_4K || !is_power_of_2(report.page_size))
		return -EINVAL;

	if (check_add_overflow(report.iova, report.length, &iova_end) ||
	    iova_end > ULONG_MAX)
		return -EOVERFLOW;

	iter = iova_bitmap_alloc(report.iova, report.length,
				 report.page_size,
				 u64_to_user_ptr(report.bitmap));
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = iova_bitmap_for_each(iter, device,
				   vfio_device_log_read_and_clear);

	iova_bitmap_free(iter);
	return ret;
}

static int vfio_ioctl_device_feature(struct vfio_device *device,
				     struct vfio_device_feature __user *arg)
{
	size_t minsz = offsetofend(struct vfio_device_feature, flags);
	struct vfio_device_feature feature;

	if (copy_from_user(&feature, arg, minsz))
		return -EFAULT;

	if (feature.argsz < minsz)
		return -EINVAL;

	/* Check unknown flags */
	if (feature.flags &
	    ~(VFIO_DEVICE_FEATURE_MASK | VFIO_DEVICE_FEATURE_SET |
	      VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_PROBE))
		return -EINVAL;

	/* GET & SET are mutually exclusive except with PROBE */
	if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_SET) &&
	    (feature.flags & VFIO_DEVICE_FEATURE_GET))
		return -EINVAL;

	switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_MIGRATION:
		return vfio_ioctl_device_feature_migration(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE:
		return vfio_ioctl_device_feature_mig_device_state(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_START:
		return vfio_ioctl_device_feature_logging_start(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP:
		return vfio_ioctl_device_feature_logging_stop(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT:
		return vfio_ioctl_device_feature_logging_report(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	case VFIO_DEVICE_FEATURE_MIG_DATA_SIZE:
		return vfio_ioctl_device_feature_migration_data_size(
			device, feature.flags, arg->data,
			feature.argsz - minsz);
	default:
		if (unlikely(!device->ops->device_feature))
			return -EINVAL;
		return device->ops->device_feature(device, feature.flags,
						   arg->data,
						   feature.argsz - minsz);
	}
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;
	void __user *uptr = (void __user *)arg;
	int ret;

	if (cmd == VFIO_DEVICE_BIND_IOMMUFD)
		return vfio_df_ioctl_bind_iommufd(df, uptr);

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	ret = vfio_device_pm_runtime_get(device);
	if (ret)
		return ret;

	/* cdev only ioctls */
	if (IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV) && !df->group) {
		switch (cmd) {
		case VFIO_DEVICE_ATTACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_attach_pt(df, uptr);
			goto out;

		case VFIO_DEVICE_DETACH_IOMMUFD_PT:
			ret = vfio_df_ioctl_detach_pt(df, uptr);
			goto out;
		}
	}

	switch (cmd) {
	case VFIO_DEVICE_FEATURE:
		ret = vfio_ioctl_device_feature(device, uptr);
		break;

	default:
		if (unlikely(!device->ops->ioctl))
			ret = -EINVAL;
		else
			ret = device->ops->ioctl(device, cmd, arg);
		break;
	}
out:
	vfio_device_pm_runtime_put(device);
	return ret;
}

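/*
 * The read/write/mmap fops below only forward to the driver once
 * df->access_granted has been observed, i.e. after a successful
 * vfio_df_open().
 */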
static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	/* Paired with smp_store_release() following vfio_df_open() */
	if (!smp_load_acquire(&df->access_granted))
		return -EINVAL;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device, vma);
}

#ifdef CONFIG_PROC_FS
static void vfio_device_show_fdinfo(struct seq_file *m, struct file *filep)
{
	char *path;
	struct vfio_device_file *df = filep->private_data;
	struct vfio_device *device = df->device;

	path = kobject_get_path(&device->dev->kobj, GFP_KERNEL);
	if (!path)
		return;

	seq_printf(m, "vfio-device-syspath: /sys%s\n", path);
	kfree(path);
}
#endif

const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_device_fops_cdev_open,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.mmap		= vfio_device_fops_mmap,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= vfio_device_show_fdinfo,
#endif
};

static struct vfio_device *vfio_device_from_file(struct file *file)
{
	struct vfio_device_file *df = file->private_data;

	if (file->f_op != &vfio_device_fops)
		return NULL;
	return df->device;
}

/**
 * vfio_file_is_valid - True if the file is valid vfio file
 * @file: VFIO group file or VFIO device file
 */
bool vfio_file_is_valid(struct file *file)
{
	return vfio_group_from_file(file) ||
	       vfio_device_from_file(file);
}
EXPORT_SYMBOL_GPL(vfio_file_is_valid);

/**
 * vfio_file_enforced_coherent - True if the DMA associated with the VFIO file
 *        is always CPU cache coherent
 * @file: VFIO group file or VFIO device file
 *
 * Enforced coherency means that the IOMMU ignores things like the PCIe no-snoop
 * bit in DMA transactions. A return of false indicates that the user has
 * rights to access additional instructions such as wbinvd on x86.
 */
bool vfio_file_enforced_coherent(struct file *file)
{
	struct vfio_device *device;
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		return vfio_group_enforced_coherent(group);

	device = vfio_device_from_file(file);
	if (device)
		return device_iommu_capable(device->dev,
					    IOMMU_CAP_ENFORCE_CACHE_COHERENCY);

	return true;
}
EXPORT_SYMBOL_GPL(vfio_file_enforced_coherent);

static void vfio_device_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_device_file *df = file->private_data;

	/*
	 * The kvm is first recorded in the vfio_device_file, and will
	 * be propagated to vfio_device::kvm when the file is bound to
	 * iommufd successfully in the vfio device cdev path.
	 */
	spin_lock(&df->kvm_ref_lock);
	df->kvm = kvm;
	spin_unlock(&df->kvm_ref_lock);
}

/**
 * vfio_file_set_kvm - Link a kvm with VFIO drivers
 * @file: VFIO group file or VFIO device file
 * @kvm: KVM to link
 *
 * When a VFIO device is first opened the KVM will be available in
 * device->kvm if one was associated with the file.
 */
void vfio_file_set_kvm(struct file *file, struct kvm *kvm)
{
	struct vfio_group *group;

	group = vfio_group_from_file(file);
	if (group)
		vfio_group_set_kvm(group, kvm);

	if (vfio_device_from_file(file))
		vfio_device_file_set_kvm(file, kvm);
}
EXPORT_SYMBOL_GPL(vfio_file_set_kvm);

/*
 * Sub-module support
 */
/*
 * Helper for managing a buffer of info chain capabilities, allocate or
 * reallocate a buffer with additional @size, filling in @id and @version
 * of the capability.  A pointer to the new capability is returned.
 *
 * NB. The chain is based at the head of the buffer, so new entries are
 * added to the tail, vfio_info_cap_shift() should be called to fixup the
 * next offsets prior to copying to the user buffer.
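 *
 * Illustrative use (an assumption, not part of the original comment), as
 * seen from a driver building an info ioctl reply:
 *
 *	header = vfio_info_cap_add(&caps, sizeof(my_cap), MY_CAP_ID, 1);
 *	...
 *	vfio_info_cap_shift(&caps, sizeof(info));
 *	copy_to_user(arg + sizeof(info), caps.buf, caps.size);
 *	kfree(caps.buf);
 *
 * where MY_CAP_ID and my_cap stand in for driver-defined values.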
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	/* Ensure that the next capability struct will be aligned */
	size = ALIGN(size, sizeof(u64));

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->buf = NULL;
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
		; /* nothing */

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;
	void *buf = (void *)caps->buf;

	/* Capability structs should start with proper alignment */
	WARN_ON(!IS_ALIGNED(offset, sizeof(u64)));

	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL(vfio_info_cap_shift);

int vfio_info_add_capability(struct vfio_info_cap *caps,
			     struct vfio_info_cap_header *cap, size_t size)
{
	struct vfio_info_cap_header *header;

	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
	if (IS_ERR(header))
		return PTR_ERR(header);

	memcpy(header + 1, cap + 1, size - sizeof(*header));

	return 0;
}
EXPORT_SYMBOL(vfio_info_add_capability);

int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
				       int max_irq_type, size_t *data_size)
{
	unsigned long minsz;
	size_t size;

	minsz = offsetofend(struct vfio_irq_set, count);

	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
	    (hdr->count >= (U32_MAX - hdr->start)) ||
	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
			    VFIO_IRQ_SET_ACTION_TYPE_MASK)))
		return -EINVAL;

	if (data_size)
		*data_size = 0;

	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
		return -EINVAL;

	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
	case VFIO_IRQ_SET_DATA_NONE:
		size = 0;
		break;
	case VFIO_IRQ_SET_DATA_BOOL:
		size = sizeof(uint8_t);
		break;
	case VFIO_IRQ_SET_DATA_EVENTFD:
		size = sizeof(int32_t);
		break;
	default:
		return -EINVAL;
	}

	if (size) {
		if (hdr->argsz - minsz < hdr->count * size)
			return -EINVAL;

		if (!data_size)
			return -EINVAL;

		*data_size = hdr->count * size;
	}

	return 0;
}
EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);

/*
 * Pin contiguous user pages and return their associated host pages for local
 * domain only.
 * @device [in]  : device
 * @iova [in]    : starting IOVA of user pages to be pinned.
 * @npage [in]   : count of pages to be pinned.  This count should not
 *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 * @prot [in]    : protection flags
 * @pages[out]   : array of host pages
 * Return error or number of pages pinned.
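 *
 * Illustrative example (not part of the original comment): an
 * emulated-IOMMU driver pinning a single page for DMA might do:
 *
 *	struct page *page;
 *
 *	ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
 *	if (ret == 1)
 *		access the data at page_to_phys(page) + (iova % PAGE_SIZE);
 *	vfio_unpin_pages(vdev, iova, 1);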
 *
 * A driver may only call this function if the vfio_device was created
 * by vfio_register_emulated_iommu_dev() due to vfio_device_container_pin_pages().
 */
int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
		   int npage, int prot, struct page **pages)
{
	/* group->container cannot change while a vfio device is open */
	if (!pages || !npage || WARN_ON(!vfio_assert_device_open(device)))
		return -EINVAL;
	if (!device->ops->dma_unmap)
		return -EINVAL;
	if (vfio_device_has_container(device))
		return vfio_device_container_pin_pages(device, iova,
						       npage, prot, pages);
	if (device->iommufd_access) {
		int ret;

		if (iova > ULONG_MAX)
			return -EINVAL;
		/*
		 * VFIO ignores the sub page offset, npages is from the start of
		 * a PAGE_SIZE chunk of IOVA. The caller is expected to recover
		 * the sub page offset by doing:
		 *     pages[0] + (iova % PAGE_SIZE)
		 */
		ret = iommufd_access_pin_pages(
			device->iommufd_access, ALIGN_DOWN(iova, PAGE_SIZE),
			npage * PAGE_SIZE, pages,
			(prot & IOMMU_WRITE) ? IOMMUFD_ACCESS_RW_WRITE : 0);
		if (ret)
			return ret;
		return npage;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_pin_pages);

/*
 * Unpin contiguous host pages for local domain only.
 * @device [in]  : device
 * @iova [in]    : starting address of user pages to be unpinned.
 * @npage [in]   : count of pages to be unpinned.  This count should not
 *                 be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
 */
void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage)
{
	if (WARN_ON(!vfio_assert_device_open(device)))
		return;
	if (WARN_ON(!device->ops->dma_unmap))
		return;

	if (vfio_device_has_container(device)) {
		vfio_device_container_unpin_pages(device, iova, npage);
		return;
	}
	if (device->iommufd_access) {
		if (WARN_ON(iova > ULONG_MAX))
			return;
		iommufd_access_unpin_pages(device->iommufd_access,
					   ALIGN_DOWN(iova, PAGE_SIZE),
					   npage * PAGE_SIZE);
		return;
	}
}
EXPORT_SYMBOL(vfio_unpin_pages);

/*
 * This interface allows the CPUs to perform some sort of virtual DMA on
 * behalf of the device.
 *
 * CPUs read/write from/into a range of IOVAs pointing to user space memory
 * into/from a kernel buffer.
 *
 * As the read/write of user space memory is conducted via the CPUs and is
 * not a real device DMA, it is not necessary to pin the user space memory.
 *
 * @device [in]		: VFIO device
 * @iova [in]		: base IOVA of a user space buffer
 * @data [in]		: pointer to kernel buffer
 * @len [in]		: kernel buffer length
 * @write		: indicate read or write
 * Return error code on failure or 0 on success.
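 *
 * Illustrative example (not part of the original comment): reading a
 * 4-byte descriptor the guest placed at @iova:
 *
 *	u32 desc;
 *
 *	ret = vfio_dma_rw(vdev, iova, &desc, sizeof(desc), false);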
 */
int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data,
		size_t len, bool write)
{
	if (!data || len <= 0 || !vfio_assert_device_open(device))
		return -EINVAL;

	if (vfio_device_has_container(device))
		return vfio_device_container_dma_rw(device, iova,
						    data, len, write);

	if (device->iommufd_access) {
		unsigned int flags = 0;

		if (iova > ULONG_MAX)
			return -EINVAL;

		/* VFIO historically tries to auto-detect a kthread */
		if (!current->mm)
			flags |= IOMMUFD_ACCESS_RW_KTHREAD;
		if (write)
			flags |= IOMMUFD_ACCESS_RW_WRITE;
		return iommufd_access_rw(device->iommufd_access, iova, data,
					 len, flags);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(vfio_dma_rw);

/*
 * Module/class support
 */
static int __init vfio_init(void)
{
	int ret;

	ida_init(&vfio.device_ida);

	ret = vfio_group_init();
	if (ret)
		return ret;

	ret = vfio_virqfd_init();
	if (ret)
		goto err_virqfd;

	/* /sys/class/vfio-dev/vfioX */
	vfio.device_class = class_create("vfio-dev");
	if (IS_ERR(vfio.device_class)) {
		ret = PTR_ERR(vfio.device_class);
		goto err_dev_class;
	}

	ret = vfio_cdev_init(vfio.device_class);
	if (ret)
		goto err_alloc_dev_chrdev;

	vfio_debugfs_create_root();
	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
	return 0;

err_alloc_dev_chrdev:
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
err_dev_class:
	vfio_virqfd_exit();
err_virqfd:
	vfio_group_cleanup();
	return ret;
}

static void __exit vfio_cleanup(void)
{
	vfio_debugfs_remove_root();
	ida_destroy(&vfio.device_ida);
	vfio_cdev_cleanup();
	class_destroy(vfio.device_class);
	vfio.device_class = NULL;
	vfio_virqfd_exit();
	vfio_group_cleanup();
	xa_destroy(&vfio_device_set_xa);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_IMPORT_NS("IOMMUFD");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");