// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");
static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK;

#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
module_param(fork_from_owner_default, bool, 0444);
MODULE_PARM_DESC(fork_from_owner_default,
	"Set task mode as the default (default: Y)");
#endif

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}
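
/*
 * Illustrative only (not part of the original file): a legacy cross-endian
 * guest is configured from userspace, before the backend is started, with
 * something like the following (assuming a vhost fd and virtqueue 0):
 *
 *	struct vhost_vring_state s = {
 *		.index = 0,
 *		.num = VHOST_VRING_BIG_ENDIAN,
 *	};
 *	ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &s);
 *
 * vhost_set_vring_endian() above rejects this with -EBUSY once a backend has
 * been attached (vq->private_data is set).
 */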

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll_queue(poll);

	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;
	poll->vq = vq;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
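
/*
 * Illustrative only (not part of the original file): the expected lifecycle of
 * a vhost_poll as used by a backend (the callback and file names below are
 * made up for the example):
 *
 *	vhost_poll_init(&vq->poll, handle_kick_fn, EPOLLIN, dev, vq);
 *	...
 *	r = vhost_poll_start(&vq->poll, kick_file);	// start watching the eventfd
 *	...
 *	vhost_poll_stop(&vq->poll);			// stop watching
 *	vhost_dev_flush(dev);				// then flush queued work
 *
 * Only after the stop + flush pair may the file reference be dropped, per the
 * comments around vhost_poll_start() and vhost_poll_stop().
 */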

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

static void vhost_worker_queue(struct vhost_worker *worker,
			       struct vhost_work *work)
{
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &worker->work_list);
		worker->ops->wakeup(worker);
	}
}

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
	struct vhost_worker *worker;
	bool queued = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker) {
		queued = true;
		vhost_worker_queue(worker, work);
	}
	rcu_read_unlock();

	return queued;
}
EXPORT_SYMBOL_GPL(vhost_vq_work_queue);

/**
 * __vhost_worker_flush - flush a worker
 * @worker: worker to flush
 *
 * The worker's mutex must be held.
 */
static void __vhost_worker_flush(struct vhost_worker *worker)
{
	struct vhost_flush_struct flush;

	if (!worker->attachment_cnt || worker->killed)
		return;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);

	vhost_worker_queue(worker, &flush.work);
	/*
	 * Drop mutex in case our worker is killed and it needs to take the
	 * mutex to force cleanup.
	 */
	mutex_unlock(&worker->mutex);
	wait_for_completion(&flush.wait_event);
	mutex_lock(&worker->mutex);
}

static void vhost_worker_flush(struct vhost_worker *worker)
{
	mutex_lock(&worker->mutex);
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);
}

void vhost_dev_flush(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_flush(worker);
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
	struct vhost_worker *worker;
	bool has_work = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker && !llist_empty(&worker->work_list))
		has_work = true;
	rcu_read_unlock();

	return has_work;
}
EXPORT_SYMBOL_GPL(vhost_vq_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
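
/*
 * Illustrative only (not part of the original file): a minimal sketch of how a
 * backend defers work to a vq's worker and later waits for it. The handler
 * name is made up; real backends embed struct vhost_work in their own state:
 *
 *	static void my_backend_work(struct vhost_work *work)
 *	{
 *		// runs in the vhost worker (vhost_task or kthread)
 *	}
 *
 *	vhost_work_init(&work, my_backend_work);
 *	if (!vhost_vq_work_queue(vq, &work))
 *		;	// no worker attached, work was not queued
 *	...
 *	vhost_dev_flush(dev);	// wait for all queued work to finish
 */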

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
	call_ctx->ctx = NULL;
	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->next_avail_head = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	virtio_features_zero(vq->acked_features_array);
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->log_ctx = NULL;
	vhost_disable_cross_endian(vq);
	vhost_reset_is_le(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	rcu_assign_pointer(vq->worker, NULL);
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}

static int vhost_run_work_kthread_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct vhost_dev *dev = worker->dev;
	struct llist_node *node;

	kthread_use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		node = llist_del_all(&worker->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}
	kthread_unuse_mm(dev->mm);

	return 0;
}

static bool vhost_run_work_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	node = llist_del_all(&worker->work_list);
	if (node) {
		__set_current_state(TASK_RUNNING);

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}

	return !!node;
}

static void vhost_worker_killed(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_dev *dev = worker->dev;
	struct vhost_virtqueue *vq;
	int i, attach_cnt = 0;

	mutex_lock(&worker->mutex);
	worker->killed = true;

	for (i = 0; i < dev->nvqs; i++) {
		vq = dev->vqs[i];

		mutex_lock(&vq->mutex);
		if (worker ==
		    rcu_dereference_check(vq->worker,
					  lockdep_is_held(&vq->mutex))) {
			rcu_assign_pointer(vq->worker, NULL);
			attach_cnt++;
		}
		mutex_unlock(&vq->mutex);
	}

	worker->attachment_cnt -= attach_cnt;
	if (attach_cnt)
		synchronize_rcu();
	/*
	 * Finish vhost_worker_flush calls and any other works that snuck in
	 * before the synchronize_rcu.
	 */
	vhost_run_work_list(worker);
	mutex_unlock(&worker->mutex);
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
	kfree(vq->nheads);
	vq->nheads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
					   GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->avail, ring, num), event);
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->used, ring, num), event);
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	dev->fork_owner = fork_from_owner_default;
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);
	xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev, vq);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
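
/*
 * Illustrative only (not part of the original file): a backend typically sets
 * up its virtqueues and then hands them to the core roughly like this (the
 * handler, limits and counts are made-up example values):
 *
 *	vqs[0] = &priv->vq;
 *	priv->vq.handle_kick = my_handle_kick;
 *	vhost_dev_init(&priv->dev, vqs, 1, UIO_MAXIOV,
 *		       MY_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
 *
 * The weight/byte_weight pair is what vhost_exceeds_weight() above uses to
 * bound how long a single kick handler may run before requeueing itself.
 */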

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
{
	struct vhost_attach_cgroups_struct attach;
	int saved_cnt;

	attach.owner = current;

	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_worker_queue(worker, &attach.work);

	mutex_lock(&worker->mutex);

	/*
	 * Bypass attachment_cnt check in __vhost_worker_flush:
	 * Temporarily change it to INT_MAX to bypass the check
	 */
	saved_cnt = worker->attachment_cnt;
	worker->attachment_cnt = INT_MAX;
	__vhost_worker_flush(worker);
	worker->attachment_cnt = saved_cnt;

	mutex_unlock(&worker->mutex);

	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

static void vhost_attach_mm(struct vhost_dev *dev)
{
	/* No owner, become one */
	if (dev->use_worker) {
		dev->mm = get_task_mm(current);
	} else {
		/* vDPA device does not use worker thread, so there's
		 * no need to hold the address space for mm. This helps
		 * to avoid deadlock in the case of mmap() which may
		 * hold the refcnt of the file and depends on release
		 * method to remove vma.
		 */
		dev->mm = current->mm;
		mmgrab(dev->mm);
	}
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
	if (!dev->mm)
		return;

	if (dev->use_worker)
		mmput(dev->mm);
	else
		mmdrop(dev->mm);

	dev->mm = NULL;
}

static void vhost_worker_destroy(struct vhost_dev *dev,
				 struct vhost_worker *worker)
{
	if (!worker)
		return;

	WARN_ON(!llist_empty(&worker->work_list));
	xa_erase(&dev->worker_xa, worker->id);
	worker->ops->stop(worker);
	kfree(worker);
}

static void vhost_workers_free(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	if (!dev->use_worker)
		return;

	for (i = 0; i < dev->nvqs; i++)
		rcu_assign_pointer(dev->vqs[i]->worker, NULL);
	/*
	 * Free the default worker we created and cleanup workers userspace
	 * created but couldn't clean up (it forgot or crashed).
	 */
	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_destroy(dev, worker);
	xa_destroy(&dev->worker_xa);
}

static void vhost_task_wakeup(struct vhost_worker *worker)
{
	return vhost_task_wake(worker->vtsk);
}

static void vhost_kthread_wakeup(struct vhost_worker *worker)
{
	wake_up_process(worker->kthread_task);
}

static void vhost_task_do_stop(struct vhost_worker *worker)
{
	return vhost_task_stop(worker->vtsk);
}

static void vhost_kthread_do_stop(struct vhost_worker *worker)
{
	kthread_stop(worker->kthread_task);
}

static int vhost_task_worker_create(struct vhost_worker *worker,
				    struct vhost_dev *dev, const char *name)
{
	struct vhost_task *vtsk;
	u32 id;
	int ret;

	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
				 worker, name);
	if (IS_ERR(vtsk))
		return PTR_ERR(vtsk);

	worker->vtsk = vtsk;
	vhost_task_start(vtsk);
	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0) {
		vhost_task_do_stop(worker);
		return ret;
	}
	worker->id = id;
	return 0;
}

static int vhost_kthread_worker_create(struct vhost_worker *worker,
				       struct vhost_dev *dev, const char *name)
{
	struct task_struct *task;
	u32 id;
	int ret;

	task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	worker->kthread_task = task;
	wake_up_process(task);
	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0)
		goto stop_worker;

	ret = vhost_attach_task_to_cgroups(worker);
	if (ret)
		goto stop_worker;

	worker->id = id;
	return 0;

stop_worker:
	vhost_kthread_do_stop(worker);
	return ret;
}

static const struct vhost_worker_ops kthread_ops = {
	.create = vhost_kthread_worker_create,
	.stop = vhost_kthread_do_stop,
	.wakeup = vhost_kthread_wakeup,
};

static const struct vhost_worker_ops vhost_task_ops = {
	.create = vhost_task_worker_create,
	.stop = vhost_task_do_stop,
	.wakeup = vhost_task_wakeup,
};

static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	char name[TASK_COMM_LEN];
	int ret;
	const struct vhost_worker_ops *ops = dev->fork_owner ? &vhost_task_ops :
							       &kthread_ops;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
	if (!worker)
		return NULL;

	worker->dev = dev;
	worker->ops = ops;
	snprintf(name, sizeof(name), "vhost-%d", current->pid);

	mutex_init(&worker->mutex);
	init_llist_head(&worker->work_list);
	worker->kcov_handle = kcov_common_handle();
	ret = ops->create(worker, dev, name);
	if (ret < 0)
		goto free_worker;

	return worker;

free_worker:
	kfree(worker);
	return NULL;
}

/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				     struct vhost_worker *worker)
{
	struct vhost_worker *old_worker;

	mutex_lock(&worker->mutex);
	if (worker->killed) {
		mutex_unlock(&worker->mutex);
		return;
	}

	mutex_lock(&vq->mutex);

	old_worker = rcu_dereference_check(vq->worker,
					   lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->worker, worker);
	worker->attachment_cnt++;

	if (!old_worker) {
		mutex_unlock(&vq->mutex);
		mutex_unlock(&worker->mutex);
		return;
	}
	mutex_unlock(&vq->mutex);
	mutex_unlock(&worker->mutex);

	/*
	 * Take the worker mutex to make sure we see the work queued from
	 * device wide flushes, which don't use RCU for execution.
	 */
	mutex_lock(&old_worker->mutex);
	if (old_worker->killed) {
		mutex_unlock(&old_worker->mutex);
		return;
	}

	/*
	 * We don't want to call synchronize_rcu for every vq during setup
	 * because it will slow down VM startup. If we haven't done
	 * VHOST_SET_VRING_KICK and not done the driver specific
	 * SET_ENDPOINT/RUNNING then we can skip the sync since there will
	 * not be any works queued for scsi and net.
	 */
	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq) && !vq->kick) {
		mutex_unlock(&vq->mutex);

		old_worker->attachment_cnt--;
		mutex_unlock(&old_worker->mutex);
		/*
		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
		 * Warn if it adds support for multiple workers but forgets to
		 * handle the early queueing case.
		 */
		WARN_ON(!old_worker->attachment_cnt &&
			!llist_empty(&old_worker->work_list));
		return;
	}
	mutex_unlock(&vq->mutex);

	/* Make sure new vq queue/flush/poll calls see the new worker */
	synchronize_rcu();
	/* Make sure whatever was queued gets run */
	__vhost_worker_flush(old_worker);
	old_worker->attachment_cnt--;
	mutex_unlock(&old_worker->mutex);
}

/* Caller must have device mutex */
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				  struct vhost_vring_worker *info)
{
	unsigned long index = info->worker_id;
	struct vhost_dev *dev = vq->dev;
	struct vhost_worker *worker;

	if (!dev->use_worker)
		return -EINVAL;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	__vhost_vq_attach_worker(vq, worker);
	return 0;
}

/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
			    struct vhost_worker_state *info)
{
	struct vhost_worker *worker;

	worker = vhost_worker_create(dev);
	if (!worker)
		return -ENOMEM;

	info->worker_id = worker->id;
	return 0;
}

/* Caller must have device mutex */
static int vhost_free_worker(struct vhost_dev *dev,
			     struct vhost_worker_state *info)
{
	unsigned long index = info->worker_id;
	struct vhost_worker *worker;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	mutex_lock(&worker->mutex);
	if (worker->attachment_cnt || worker->killed) {
		mutex_unlock(&worker->mutex);
		return -EBUSY;
	}
	/*
	 * A flush might have raced and snuck in before attachment_cnt was set
	 * to zero. Make sure flushes are flushed from the queue before
	 * freeing.
	 */
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);

	vhost_worker_destroy(dev, worker);
	return 0;
}

static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
				  struct vhost_virtqueue **vq, u32 *id)
{
	u32 __user *idxp = argp;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;

	if (idx >= dev->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, dev->nvqs);

	*vq = dev->vqs[idx];
	*id = idx;
	return 0;
}
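
/*
 * Illustrative only (not part of the original file): the userspace side of the
 * worker ioctls handled below looks roughly like this (error handling omitted,
 * fields per the uapi vhost_worker_state/vhost_vring_worker definitions):
 *
 *	struct vhost_worker_state state = {};
 *	struct vhost_vring_worker w;
 *
 *	ioctl(vhost_fd, VHOST_NEW_WORKER, &state);	// create an extra worker
 *	w.index = 1;					// virtqueue index
 *	w.worker_id = state.worker_id;
 *	ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &w);	// move vq 1 onto it
 *
 * VHOST_NEW_WORKER is only available when fork_owner is set (vhost_task mode);
 * see the check in vhost_worker_ioctl().
 */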

/* Caller must have device mutex */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp)
{
	struct vhost_vring_worker ring_worker;
	struct vhost_worker_state state;
	struct vhost_worker *worker;
	struct vhost_virtqueue *vq;
	long ret;
	u32 idx;

	if (!dev->use_worker)
		return -EINVAL;

	if (!vhost_dev_has_owner(dev))
		return -EINVAL;

	ret = vhost_dev_check_owner(dev);
	if (ret)
		return ret;

	switch (ioctl) {
	/* dev worker ioctls */
	case VHOST_NEW_WORKER:
		/*
		 * vhost_tasks will account for worker threads under the parent's
		 * NPROC value but kthreads do not. To avoid userspace overflowing
		 * the system with worker threads, fork_owner must be true.
		 */
		if (!dev->fork_owner)
			return -EFAULT;

		ret = vhost_new_worker(dev, &state);
		if (!ret && copy_to_user(argp, &state, sizeof(state)))
			ret = -EFAULT;
		return ret;
	case VHOST_FREE_WORKER:
		if (copy_from_user(&state, argp, sizeof(state)))
			return -EFAULT;
		return vhost_free_worker(dev, &state);
	/* vring worker ioctls */
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
	if (ret)
		return ret;

	switch (ioctl) {
	case VHOST_ATTACH_VRING_WORKER:
		if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
			ret = -EFAULT;
			break;
		}

		ret = vhost_vq_attach_worker(vq, &ring_worker);
		break;
	case VHOST_GET_VRING_WORKER:
		worker = rcu_dereference_check(vq->worker,
					       lockdep_is_held(&dev->mutex));
		if (!worker) {
			ret = -EINVAL;
			break;
		}

		ring_worker.index = idx;
		ring_worker.worker_id = worker->id;

		if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_worker_ioctl);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	int err, i;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	vhost_attach_mm(dev);

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_iovecs;

	if (dev->use_worker) {
		/*
		 * This should be done last, because vsock can queue work
		 * before VHOST_SET_OWNER so it simplifies the failure path
		 * below since we don't have to worry about vsock queueing
		 * while we free the worker.
		 */
		worker = vhost_worker_create(dev);
		if (!worker) {
			err = -ENOMEM;
			goto err_worker;
		}

		for (i = 0; i < dev->nvqs; i++)
			__vhost_vq_attach_worker(dev->vqs[i], worker);
	}

	return 0;

err_worker:
	vhost_dev_free_iovecs(dev);
err_iovecs:
	vhost_detach_mm(dev);
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->fork_owner = fork_from_owner_default;
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
			vhost_poll_stop(&dev->vqs[i]->poll);
	}

	vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
EXPORT_SYMBOL_GPL(vhost_clear_msg);

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	vhost_workers_free(dev);
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
	if (uaddr > ULONG_MAX || size > ULONG_MAX)
		return true;

	if (!size)
		return false;

	return uaddr > ULONG_MAX - size + 1;
}

/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * memory can be accessed through the iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;		\
	} \
	ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})
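
/*
 * Illustrative only (not part of the original file): the accessors above rely
 * on the vring metadata having been prefetched into vq->meta_iotlb. A kick
 * handler that uses them typically starts out roughly like this (the handler
 * name is made up):
 *
 *	static void my_handle_kick(struct vhost_work *work)
 *	{
 *		...
 *		if (!vq_meta_prefetch(vq))
 *			return;	// an IOTLB miss was reported, retry on update
 *		...		// now vhost_get_avail()/vhost_put_user() are safe
 *	}
 */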

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{
	__virtio16 idx;
	int r;

	r = vhost_get_avail(vq, idx, &vq->avail->idx);
	if (unlikely(r < 0)) {
		vq_err(vq, "Failed to access available index at %p (%d)\n",
		       &vq->avail->idx, r);
		return r;
	}

	/* Check it isn't doing very strange things with available indexes */
	vq->avail_idx = vhost16_to_cpu(vq, idx);
	if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
		vq_err(vq, "Invalid available index change from %u to %u",
		       vq->last_avail_idx, vq->avail_idx);
		return -EINVAL;
	}

	/* We're done if there is nothing new */
	if (vq->avail_idx == vq->last_avail_idx)
		return 0;

	/*
	 * We updated vq->avail_idx so we need a memory barrier between
	 * the index read above and the caller reading avail ring entries.
	 */
	smp_rmb();
	return 1;
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	if (asid != 0)
		return -EINVAL;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
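
/*
 * Illustrative only (not part of the original file): with an IOTLB backend,
 * userspace answers the VHOST_IOTLB_MISS messages it reads from the vhost fd
 * by writing an update back, roughly (v2 message layout, error handling
 * omitted, addresses/sizes are placeholders):
 *
 *	struct vhost_msg_v2 reply = {
 *		.type = VHOST_IOTLB_MSG_V2,
 *		.iotlb = {
 *			.iova  = miss_iova,
 *			.size  = region_size,
 *			.uaddr = (u64)(uintptr_t)mapping,
 *			.perm  = VHOST_ACCESS_RW,
 *			.type  = VHOST_IOTLB_UPDATE,
 *		},
 *	};
 *	write(vhost_fd, &reply, sizeof(reply));
 *
 * vhost_chr_write_iter() below parses such messages and feeds them to
 * vhost_process_iotlb_msg() (or the device's msg_handler).
 */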

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;
	u32 asid = 0;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		if (vhost_backend_has_feature(dev->vqs[0],
					      VHOST_BACKEND_F_IOTLB_ASID)) {
			ret = copy_from_iter(&asid, sizeof(asid), from);
			if (ret != sizeof(asid)) {
				ret = -EINVAL;
				goto done;
			}
			offset = 0;
		} else
			offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, asid, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, asid, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{
	/* If an IOTLB device is present, the vring addresses are
	 * GIOVAs. Access validation occurs at prefetch time. */
	if (vq->iotlb)
		return true;

	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_iotlb_map *map,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(map->perm & access))
		vq->meta_iotlb[type] = map;
}

static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = map->size - addr + map->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{
	/* If an IOTLB device is present, log_addr is a GIOVA that
	 * will never be logged by log_used(). */
	if (vq->iotlb)
		return true;

	return !log_used || log_access_ok(log_base, log_addr,
					  vhost_get_used_size(vq, vq->num));
}

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
	       vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
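
/*
 * Illustrative only (not part of the original file): without an IOTLB,
 * userspace describes guest memory once via VHOST_SET_MEM_TABLE, roughly
 * (one region, error handling omitted; sizes and addresses are made up):
 *
 *	struct vhost_memory *mem;
 *
 *	mem = calloc(1, sizeof(*mem) + sizeof(mem->regions[0]));
 *	mem->nregions = 1;
 *	mem->regions[0].guest_phys_addr = 0;
 *	mem->regions[0].memory_size     = ram_size;
 *	mem->regions[0].userspace_addr  = (u64)(uintptr_t)ram_ptr;
 *	ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
 *
 * vhost_set_memory() below converts these regions into a vhost_iotlb and
 * validates them with memory_access_ok() before installing the new table.
 */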

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(struct_size(newmem, regions, mem.nregions),
			  GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   flex_array_size(newmem, regions, mem.nregions))) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}

static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}

static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	   data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if (!vq_log_used_access_ok(vq, vq->log_base,
				a.flags & (0x1 << VHOST_VRING_F_LOG),
				a.log_guest_addr))
			return -EINVAL;
	}

	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}

static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		BUG();
	}

	mutex_unlock(&vq->mutex);

	return r;
}
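
/*
 * Illustrative only (not part of the original file): for packed virtqueues
 * (VIRTIO_F_RING_PACKED), VHOST_SET_VRING_BASE/VHOST_GET_VRING_BASE below pack
 * two indices into the 32-bit vhost_vring_state.num field:
 *
 *	s.num = last_avail_idx | (last_used_idx << 16);
 *
 * For split rings, s.num simply carries last_avail_idx and must fit in 16 bits.
 */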
*/ 2178 vq->avail_idx = vq->last_avail_idx; 2179 break; 2180 case VHOST_GET_VRING_BASE: 2181 s.index = idx; 2182 if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) 2183 s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); 2184 else 2185 s.num = vq->last_avail_idx; 2186 if (copy_to_user(argp, &s, sizeof s)) 2187 r = -EFAULT; 2188 break; 2189 case VHOST_SET_VRING_KICK: 2190 if (copy_from_user(&f, argp, sizeof f)) { 2191 r = -EFAULT; 2192 break; 2193 } 2194 eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd); 2195 if (IS_ERR(eventfp)) { 2196 r = PTR_ERR(eventfp); 2197 break; 2198 } 2199 if (eventfp != vq->kick) { 2200 pollstop = (filep = vq->kick) != NULL; 2201 pollstart = (vq->kick = eventfp) != NULL; 2202 } else 2203 filep = eventfp; 2204 break; 2205 case VHOST_SET_VRING_CALL: 2206 if (copy_from_user(&f, argp, sizeof f)) { 2207 r = -EFAULT; 2208 break; 2209 } 2210 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); 2211 if (IS_ERR(ctx)) { 2212 r = PTR_ERR(ctx); 2213 break; 2214 } 2215 2216 swap(ctx, vq->call_ctx.ctx); 2217 break; 2218 case VHOST_SET_VRING_ERR: 2219 if (copy_from_user(&f, argp, sizeof f)) { 2220 r = -EFAULT; 2221 break; 2222 } 2223 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); 2224 if (IS_ERR(ctx)) { 2225 r = PTR_ERR(ctx); 2226 break; 2227 } 2228 swap(ctx, vq->error_ctx); 2229 break; 2230 case VHOST_SET_VRING_ENDIAN: 2231 r = vhost_set_vring_endian(vq, argp); 2232 break; 2233 case VHOST_GET_VRING_ENDIAN: 2234 r = vhost_get_vring_endian(vq, idx, argp); 2235 break; 2236 case VHOST_SET_VRING_BUSYLOOP_TIMEOUT: 2237 if (copy_from_user(&s, argp, sizeof(s))) { 2238 r = -EFAULT; 2239 break; 2240 } 2241 vq->busyloop_timeout = s.num; 2242 break; 2243 case VHOST_GET_VRING_BUSYLOOP_TIMEOUT: 2244 s.index = idx; 2245 s.num = vq->busyloop_timeout; 2246 if (copy_to_user(argp, &s, sizeof(s))) 2247 r = -EFAULT; 2248 break; 2249 default: 2250 r = -ENOIOCTLCMD; 2251 } 2252 2253 if (pollstop && vq->handle_kick) 2254 vhost_poll_stop(&vq->poll); 2255 2256 if (!IS_ERR_OR_NULL(ctx)) 2257 eventfd_ctx_put(ctx); 2258 if (filep) 2259 fput(filep); 2260 2261 if (pollstart && vq->handle_kick) 2262 r = vhost_poll_start(&vq->poll, vq->kick); 2263 2264 mutex_unlock(&vq->mutex); 2265 2266 if (pollstop && vq->handle_kick) 2267 vhost_dev_flush(vq->poll.dev); 2268 return r; 2269 } 2270 EXPORT_SYMBOL_GPL(vhost_vring_ioctl); 2271 2272 int vhost_init_device_iotlb(struct vhost_dev *d) 2273 { 2274 struct vhost_iotlb *niotlb, *oiotlb; 2275 int i; 2276 2277 niotlb = iotlb_alloc(); 2278 if (!niotlb) 2279 return -ENOMEM; 2280 2281 oiotlb = d->iotlb; 2282 d->iotlb = niotlb; 2283 2284 for (i = 0; i < d->nvqs; ++i) { 2285 struct vhost_virtqueue *vq = d->vqs[i]; 2286 2287 mutex_lock(&vq->mutex); 2288 vq->iotlb = niotlb; 2289 __vhost_vq_meta_reset(vq); 2290 mutex_unlock(&vq->mutex); 2291 } 2292 2293 vhost_iotlb_free(oiotlb); 2294 2295 return 0; 2296 } 2297 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb); 2298 2299 /* Caller must have device mutex */ 2300 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) 2301 { 2302 struct eventfd_ctx *ctx; 2303 u64 p; 2304 long r; 2305 int i, fd; 2306 2307 /* If you are not the owner, you can become one */ 2308 if (ioctl == VHOST_SET_OWNER) { 2309 r = vhost_dev_set_owner(d); 2310 goto done; 2311 } 2312 2313 #ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL 2314 if (ioctl == VHOST_SET_FORK_FROM_OWNER) { 2315 /* Only allow modification before owner is set */ 2316 if (vhost_dev_has_owner(d)) { 2317 r = -EBUSY; 2318 
goto done; 2319 } 2320 u8 fork_owner_val; 2321 2322 if (get_user(fork_owner_val, (u8 __user *)argp)) { 2323 r = -EFAULT; 2324 goto done; 2325 } 2326 if (fork_owner_val != VHOST_FORK_OWNER_TASK && 2327 fork_owner_val != VHOST_FORK_OWNER_KTHREAD) { 2328 r = -EINVAL; 2329 goto done; 2330 } 2331 d->fork_owner = !!fork_owner_val; 2332 r = 0; 2333 goto done; 2334 } 2335 if (ioctl == VHOST_GET_FORK_FROM_OWNER) { 2336 u8 fork_owner_val = d->fork_owner; 2337 2338 if (fork_owner_val != VHOST_FORK_OWNER_TASK && 2339 fork_owner_val != VHOST_FORK_OWNER_KTHREAD) { 2340 r = -EINVAL; 2341 goto done; 2342 } 2343 if (put_user(fork_owner_val, (u8 __user *)argp)) { 2344 r = -EFAULT; 2345 goto done; 2346 } 2347 r = 0; 2348 goto done; 2349 } 2350 #endif 2351 2352 /* You must be the owner to do anything else */ 2353 r = vhost_dev_check_owner(d); 2354 if (r) 2355 goto done; 2356 2357 switch (ioctl) { 2358 case VHOST_SET_MEM_TABLE: 2359 r = vhost_set_memory(d, argp); 2360 break; 2361 case VHOST_SET_LOG_BASE: 2362 if (copy_from_user(&p, argp, sizeof p)) { 2363 r = -EFAULT; 2364 break; 2365 } 2366 if ((u64)(unsigned long)p != p) { 2367 r = -EFAULT; 2368 break; 2369 } 2370 for (i = 0; i < d->nvqs; ++i) { 2371 struct vhost_virtqueue *vq; 2372 void __user *base = (void __user *)(unsigned long)p; 2373 vq = d->vqs[i]; 2374 mutex_lock(&vq->mutex); 2375 /* If ring is inactive, will check when it's enabled. */ 2376 if (vq->private_data && !vq_log_access_ok(vq, base)) 2377 r = -EFAULT; 2378 else 2379 vq->log_base = base; 2380 mutex_unlock(&vq->mutex); 2381 } 2382 break; 2383 case VHOST_SET_LOG_FD: 2384 r = get_user(fd, (int __user *)argp); 2385 if (r < 0) 2386 break; 2387 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); 2388 if (IS_ERR(ctx)) { 2389 r = PTR_ERR(ctx); 2390 break; 2391 } 2392 swap(ctx, d->log_ctx); 2393 for (i = 0; i < d->nvqs; ++i) { 2394 mutex_lock(&d->vqs[i]->mutex); 2395 d->vqs[i]->log_ctx = d->log_ctx; 2396 mutex_unlock(&d->vqs[i]->mutex); 2397 } 2398 if (ctx) 2399 eventfd_ctx_put(ctx); 2400 break; 2401 default: 2402 r = -ENOIOCTLCMD; 2403 break; 2404 } 2405 done: 2406 return r; 2407 } 2408 EXPORT_SYMBOL_GPL(vhost_dev_ioctl); 2409 2410 /* TODO: This is really inefficient. We need something like get_user() 2411 * (instruction directly accesses the data, with an exception table entry 2412 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst. 
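 * For now set_bit_to_user() below takes the slow path: it pins the page with
 * pin_user_pages_fast(), maps it with kmap_atomic() and sets the bit by hand,
 * paying a page pin/unpin for every bit it logs.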
2413 */
2414 static int set_bit_to_user(int nr, void __user *addr)
2415 {
2416 unsigned long log = (unsigned long)addr;
2417 struct page *page;
2418 void *base;
2419 int bit = nr + (log % PAGE_SIZE) * 8;
2420 int r;
2421
2422 r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
2423 if (r < 0)
2424 return r;
2425 BUG_ON(r != 1);
2426 base = kmap_atomic(page);
2427 set_bit(bit, base);
2428 kunmap_atomic(base);
2429 unpin_user_pages_dirty_lock(&page, 1, true);
2430 return 0;
2431 }
2432
2433 static int log_write(void __user *log_base,
2434 u64 write_address, u64 write_length)
2435 {
2436 u64 write_page = write_address / VHOST_PAGE_SIZE;
2437 int r;
2438
2439 if (!write_length)
2440 return 0;
2441 write_length += write_address % VHOST_PAGE_SIZE;
2442 for (;;) {
2443 u64 base = (u64)(unsigned long)log_base;
2444 u64 log = base + write_page / 8;
2445 int bit = write_page % 8;
2446 if ((u64)(unsigned long)log != log)
2447 return -EFAULT;
2448 r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
2449 if (r < 0)
2450 return r;
2451 if (write_length <= VHOST_PAGE_SIZE)
2452 break;
2453 write_length -= VHOST_PAGE_SIZE;
2454 write_page += 1;
2455 }
2456 return r;
2457 }
2458
2459 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
2460 {
2461 struct vhost_iotlb *umem = vq->umem;
2462 struct vhost_iotlb_map *u;
2463 u64 start, end, l, min;
2464 int r;
2465 bool hit = false;
2466
2467 while (len) {
2468 min = len;
2469 /* More than one GPA can be mapped into a single HVA. So
2470 * iterate all possible umems here to be safe.
2471 */
2472 list_for_each_entry(u, &umem->list, link) {
2473 if (u->addr > hva - 1 + len ||
2474 u->addr - 1 + u->size < hva)
2475 continue;
2476 start = max(u->addr, hva);
2477 end = min(u->addr - 1 + u->size, hva - 1 + len);
2478 l = end - start + 1;
2479 r = log_write(vq->log_base,
2480 u->start + start - u->addr,
2481 l);
2482 if (r < 0)
2483 return r;
2484 hit = true;
2485 min = min(l, min);
2486 }
2487
2488 if (!hit)
2489 return -EFAULT;
2490
2491 len -= min;
2492 hva += min;
2493 }
2494
2495 return 0;
2496 }
2497
2498 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
2499 {
2500 struct iovec *iov = vq->log_iov;
2501 int i, ret;
2502
2503 if (!vq->iotlb)
2504 return log_write(vq->log_base, vq->log_addr + used_offset, len);
2505
2506 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
2507 len, iov, 64, VHOST_ACCESS_WO);
2508 if (ret < 0)
2509 return ret;
2510
2511 for (i = 0; i < ret; i++) {
2512 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
2513 iov[i].iov_len);
2514 if (ret)
2515 return ret;
2516 }
2517
2518 return 0;
2519 }
2520
2521 /*
2522 * vhost_log_write() - Log in dirty page bitmap
2523 * @vq: vhost virtqueue.
2524 * @log: Array of dirty memory in GPA.
2525 * @log_num: Size of vhost_log array.
2526 * @len: The total length of memory buffer to log in the dirty bitmap.
2527 * Some drivers may only partially use pages shared via the last
2528 * vring descriptor (e.g. vhost-net RX buffer).
2529 * Use (len == U64_MAX) to indicate the driver would log all
2530 * pages of vring descriptors.
2531 * @iov: Array of dirty memory in HVA.
2532 * @count: Size of iovec array.
2533 */
2534 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
2535 unsigned int log_num, u64 len, struct iovec *iov, int count)
2536 {
2537 int i, r;
2538
2539 /* Make sure data written is seen before log.
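 * The smp_wmb() below orders the data stores against the dirty-bitmap
 * updates done by log_write()/log_write_hva(), so the consumer of the log
 * never sees a page marked dirty before the new data is visible.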
*/ 2540 smp_wmb(); 2541 2542 if (vq->iotlb) { 2543 for (i = 0; i < count; i++) { 2544 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, 2545 iov[i].iov_len); 2546 if (r < 0) 2547 return r; 2548 } 2549 return 0; 2550 } 2551 2552 for (i = 0; i < log_num; ++i) { 2553 u64 l = min(log[i].len, len); 2554 r = log_write(vq->log_base, log[i].addr, l); 2555 if (r < 0) 2556 return r; 2557 2558 if (len != U64_MAX) 2559 len -= l; 2560 } 2561 2562 if (vq->log_ctx) 2563 eventfd_signal(vq->log_ctx); 2564 2565 return 0; 2566 } 2567 EXPORT_SYMBOL_GPL(vhost_log_write); 2568 2569 static int vhost_update_used_flags(struct vhost_virtqueue *vq) 2570 { 2571 void __user *used; 2572 if (vhost_put_used_flags(vq)) 2573 return -EFAULT; 2574 if (unlikely(vq->log_used)) { 2575 /* Make sure the flag is seen before log. */ 2576 smp_wmb(); 2577 /* Log used flag write. */ 2578 used = &vq->used->flags; 2579 log_used(vq, (used - (void __user *)vq->used), 2580 sizeof vq->used->flags); 2581 if (vq->log_ctx) 2582 eventfd_signal(vq->log_ctx); 2583 } 2584 return 0; 2585 } 2586 2587 static int vhost_update_avail_event(struct vhost_virtqueue *vq) 2588 { 2589 if (vhost_put_avail_event(vq)) 2590 return -EFAULT; 2591 if (unlikely(vq->log_used)) { 2592 void __user *used; 2593 /* Make sure the event is seen before log. */ 2594 smp_wmb(); 2595 /* Log avail event write */ 2596 used = vhost_avail_event(vq); 2597 log_used(vq, (used - (void __user *)vq->used), 2598 sizeof *vhost_avail_event(vq)); 2599 if (vq->log_ctx) 2600 eventfd_signal(vq->log_ctx); 2601 } 2602 return 0; 2603 } 2604 2605 int vhost_vq_init_access(struct vhost_virtqueue *vq) 2606 { 2607 __virtio16 last_used_idx; 2608 int r; 2609 bool is_le = vq->is_le; 2610 2611 if (!vq->private_data) 2612 return 0; 2613 2614 vhost_init_is_le(vq); 2615 2616 r = vhost_update_used_flags(vq); 2617 if (r) 2618 goto err; 2619 vq->signalled_used_valid = false; 2620 if (!vq->iotlb && 2621 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { 2622 r = -EFAULT; 2623 goto err; 2624 } 2625 r = vhost_get_used_idx(vq, &last_used_idx); 2626 if (r) { 2627 vq_err(vq, "Can't access used idx at %p\n", 2628 &vq->used->idx); 2629 goto err; 2630 } 2631 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); 2632 return 0; 2633 2634 err: 2635 vq->is_le = is_le; 2636 return r; 2637 } 2638 EXPORT_SYMBOL_GPL(vhost_vq_init_access); 2639 2640 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, 2641 struct iovec iov[], int iov_size, int access) 2642 { 2643 const struct vhost_iotlb_map *map; 2644 struct vhost_dev *dev = vq->dev; 2645 struct vhost_iotlb *umem = dev->iotlb ? 
dev->iotlb : dev->umem; 2646 struct iovec *_iov; 2647 u64 s = 0, last = addr + len - 1; 2648 int ret = 0; 2649 2650 while ((u64)len > s) { 2651 u64 size; 2652 if (unlikely(ret >= iov_size)) { 2653 ret = -ENOBUFS; 2654 break; 2655 } 2656 2657 map = vhost_iotlb_itree_first(umem, addr, last); 2658 if (map == NULL || map->start > addr) { 2659 if (umem != dev->iotlb) { 2660 ret = -EFAULT; 2661 break; 2662 } 2663 ret = -EAGAIN; 2664 break; 2665 } else if (!(map->perm & access)) { 2666 ret = -EPERM; 2667 break; 2668 } 2669 2670 _iov = iov + ret; 2671 size = map->size - addr + map->start; 2672 _iov->iov_len = min((u64)len - s, size); 2673 _iov->iov_base = (void __user *)(unsigned long) 2674 (map->addr + addr - map->start); 2675 s += size; 2676 addr += size; 2677 ++ret; 2678 } 2679 2680 if (ret == -EAGAIN) 2681 vhost_iotlb_miss(vq, addr, access); 2682 return ret; 2683 } 2684 2685 /* Each buffer in the virtqueues is actually a chain of descriptors. This 2686 * function returns the next descriptor in the chain, 2687 * or -1U if we're at the end. */ 2688 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) 2689 { 2690 unsigned int next; 2691 2692 /* If this descriptor says it doesn't chain, we're done. */ 2693 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) 2694 return -1U; 2695 2696 /* Check they're not leading us off end of descriptors. */ 2697 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); 2698 return next; 2699 } 2700 2701 static int get_indirect(struct vhost_virtqueue *vq, 2702 struct iovec iov[], unsigned int iov_size, 2703 unsigned int *out_num, unsigned int *in_num, 2704 struct vhost_log *log, unsigned int *log_num, 2705 struct vring_desc *indirect) 2706 { 2707 struct vring_desc desc; 2708 unsigned int i = 0, count, found = 0; 2709 u32 len = vhost32_to_cpu(vq, indirect->len); 2710 struct iov_iter from; 2711 int ret, access; 2712 2713 /* Sanity check */ 2714 if (unlikely(len % sizeof desc)) { 2715 vq_err(vq, "Invalid length in indirect descriptor: " 2716 "len 0x%llx not multiple of 0x%zx\n", 2717 (unsigned long long)len, 2718 sizeof desc); 2719 return -EINVAL; 2720 } 2721 2722 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, 2723 UIO_MAXIOV, VHOST_ACCESS_RO); 2724 if (unlikely(ret < 0)) { 2725 if (ret != -EAGAIN) 2726 vq_err(vq, "Translation failure %d in indirect.\n", ret); 2727 return ret; 2728 } 2729 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); 2730 count = len / sizeof desc; 2731 /* Buffers are chained via a 16 bit next field, so 2732 * we can have at most 2^16 of these. 
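 * The check below therefore rejects indirect tables with more than
 * USHRT_MAX + 1 (i.e. 2^16) entries.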
 */
2733 if (unlikely(count > USHRT_MAX + 1)) {
2734 vq_err(vq, "Indirect buffer length too big: %d\n",
2735 indirect->len);
2736 return -E2BIG;
2737 }
2738
2739 do {
2740 unsigned iov_count = *in_num + *out_num;
2741 if (unlikely(++found > count)) {
2742 vq_err(vq, "Loop detected: last one at %u "
2743 "indirect size %u\n",
2744 i, count);
2745 return -EINVAL;
2746 }
2747 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
2748 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
2749 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2750 return -EINVAL;
2751 }
2752 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
2753 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
2754 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
2755 return -EINVAL;
2756 }
2757
2758 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
2759 access = VHOST_ACCESS_WO;
2760 else
2761 access = VHOST_ACCESS_RO;
2762
2763 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
2764 vhost32_to_cpu(vq, desc.len), iov + iov_count,
2765 iov_size - iov_count, access);
2766 if (unlikely(ret < 0)) {
2767 if (ret != -EAGAIN)
2768 vq_err(vq, "Translation failure %d indirect idx %d\n",
2769 ret, i);
2770 return ret;
2771 }
2772 /* If this is an input descriptor, increment that count. */
2773 if (access == VHOST_ACCESS_WO) {
2774 *in_num += ret;
2775 if (unlikely(log && ret)) {
2776 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2777 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2778 ++*log_num;
2779 }
2780 } else {
2781 /* If it's an output descriptor, they're all supposed
2782 * to come before any input descriptors. */
2783 if (unlikely(*in_num)) {
2784 vq_err(vq, "Indirect descriptor "
2785 "has out after in: idx %d\n", i);
2786 return -EINVAL;
2787 }
2788 *out_num += ret;
2789 }
2790 } while ((i = next_desc(vq, &desc)) != -1);
2791 return 0;
2792 }
2793
2794 /* This looks in the virtqueue for the first available buffer and converts
2795 * it to an iovec for convenient access. Since descriptors consist of some
2796 * number of output then some number of input descriptors, it's actually two
2797 * iovecs, but we pack them into one and note how many of each there were.
2798 *
2799 * This function returns the descriptor number found, or vq->num (which is
2800 * never a valid descriptor number) if none was found. A negative code is
2801 * returned on error. */
2802 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2803 struct iovec iov[], unsigned int iov_size,
2804 unsigned int *out_num, unsigned int *in_num,
2805 struct vhost_log *log, unsigned int *log_num)
2806 {
2807 bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
2808 struct vring_desc desc;
2809 unsigned int i, head, found = 0;
2810 u16 last_avail_idx = vq->last_avail_idx;
2811 __virtio16 ring_head;
2812 int ret, access, c = 0;
2813
2814 if (vq->avail_idx == vq->last_avail_idx) {
2815 ret = vhost_get_avail_idx(vq);
2816 if (unlikely(ret < 0))
2817 return ret;
2818
2819 if (!ret)
2820 return vq->num;
2821 }
2822
2823 if (in_order)
2824 head = vq->next_avail_head & (vq->num - 1);
2825 else {
2826 /* Grab the next descriptor number they're
2827 * advertising, and increment the index we've seen.
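 * The head is read from avail->ring[last_avail_idx % vq->num];
 * last_avail_idx itself is only advanced at the bottom of this function,
 * once the whole descriptor chain has been translated.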
*/ 2828 if (unlikely(vhost_get_avail_head(vq, &ring_head, 2829 last_avail_idx))) { 2830 vq_err(vq, "Failed to read head: idx %d address %p\n", 2831 last_avail_idx, 2832 &vq->avail->ring[last_avail_idx % vq->num]); 2833 return -EFAULT; 2834 } 2835 head = vhost16_to_cpu(vq, ring_head); 2836 } 2837 2838 /* If their number is silly, that's an error. */ 2839 if (unlikely(head >= vq->num)) { 2840 vq_err(vq, "Guest says index %u > %u is available", 2841 head, vq->num); 2842 return -EINVAL; 2843 } 2844 2845 /* When we start there are none of either input nor output. */ 2846 *out_num = *in_num = 0; 2847 if (unlikely(log)) 2848 *log_num = 0; 2849 2850 i = head; 2851 do { 2852 unsigned iov_count = *in_num + *out_num; 2853 if (unlikely(i >= vq->num)) { 2854 vq_err(vq, "Desc index is %u > %u, head = %u", 2855 i, vq->num, head); 2856 return -EINVAL; 2857 } 2858 if (unlikely(++found > vq->num)) { 2859 vq_err(vq, "Loop detected: last one at %u " 2860 "vq size %u head %u\n", 2861 i, vq->num, head); 2862 return -EINVAL; 2863 } 2864 ret = vhost_get_desc(vq, &desc, i); 2865 if (unlikely(ret)) { 2866 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", 2867 i, vq->desc + i); 2868 return -EFAULT; 2869 } 2870 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { 2871 ret = get_indirect(vq, iov, iov_size, 2872 out_num, in_num, 2873 log, log_num, &desc); 2874 if (unlikely(ret < 0)) { 2875 if (ret != -EAGAIN) 2876 vq_err(vq, "Failure detected " 2877 "in indirect descriptor at idx %d\n", i); 2878 return ret; 2879 } 2880 ++c; 2881 continue; 2882 } 2883 2884 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) 2885 access = VHOST_ACCESS_WO; 2886 else 2887 access = VHOST_ACCESS_RO; 2888 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), 2889 vhost32_to_cpu(vq, desc.len), iov + iov_count, 2890 iov_size - iov_count, access); 2891 if (unlikely(ret < 0)) { 2892 if (ret != -EAGAIN) 2893 vq_err(vq, "Translation failure %d descriptor idx %d\n", 2894 ret, i); 2895 return ret; 2896 } 2897 if (access == VHOST_ACCESS_WO) { 2898 /* If this is an input descriptor, 2899 * increment that count. */ 2900 *in_num += ret; 2901 if (unlikely(log && ret)) { 2902 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); 2903 log[*log_num].len = vhost32_to_cpu(vq, desc.len); 2904 ++*log_num; 2905 } 2906 } else { 2907 /* If it's an output descriptor, they're all supposed 2908 * to come before any input descriptors. */ 2909 if (unlikely(*in_num)) { 2910 vq_err(vq, "Descriptor has out after in: " 2911 "idx %d\n", i); 2912 return -EINVAL; 2913 } 2914 *out_num += ret; 2915 } 2916 ++c; 2917 } while ((i = next_desc(vq, &desc)) != -1); 2918 2919 /* On success, increment avail index. */ 2920 vq->last_avail_idx++; 2921 vq->next_avail_head += c; 2922 2923 /* Assume notifications from guest are disabled at this point, 2924 * if they aren't we would need to update avail_event index. */ 2925 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); 2926 return head; 2927 } 2928 EXPORT_SYMBOL_GPL(vhost_get_vq_desc); 2929 2930 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ 2931 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) 2932 { 2933 vq->last_avail_idx -= n; 2934 } 2935 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); 2936 2937 /* After we've used one of their buffers, we tell them about it. We'll then 2938 * want to notify the guest, using eventfd. 
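 * vhost_add_used() is the single-buffer wrapper around vhost_add_used_n();
 * it updates the used ring but does not signal, the eventfd signal is sent
 * separately by vhost_signal() (vhost_add_used_and_signal() does both).
 * A typical backend sequence, shown here only as a sketch with illustrative
 * locals (head, out, in, len) rather than code taken from any driver, is:
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num)
 *		return;
 *	... produce or consume the buffer described by vq->iov ...
 *	vhost_add_used_and_signal(vq->dev, vq, head, len);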
*/ 2939 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 2940 { 2941 struct vring_used_elem heads = { 2942 cpu_to_vhost32(vq, head), 2943 cpu_to_vhost32(vq, len) 2944 }; 2945 u16 nheads = 1; 2946 2947 return vhost_add_used_n(vq, &heads, &nheads, 1); 2948 } 2949 EXPORT_SYMBOL_GPL(vhost_add_used); 2950 2951 static int __vhost_add_used_n(struct vhost_virtqueue *vq, 2952 struct vring_used_elem *heads, 2953 unsigned count) 2954 { 2955 vring_used_elem_t __user *used; 2956 u16 old, new; 2957 int start; 2958 2959 start = vq->last_used_idx & (vq->num - 1); 2960 used = vq->used->ring + start; 2961 if (vhost_put_used(vq, heads, start, count)) { 2962 vq_err(vq, "Failed to write used"); 2963 return -EFAULT; 2964 } 2965 if (unlikely(vq->log_used)) { 2966 /* Make sure data is seen before log. */ 2967 smp_wmb(); 2968 /* Log used ring entry write. */ 2969 log_used(vq, ((void __user *)used - (void __user *)vq->used), 2970 count * sizeof *used); 2971 } 2972 old = vq->last_used_idx; 2973 new = (vq->last_used_idx += count); 2974 /* If the driver never bothers to signal in a very long while, 2975 * used index might wrap around. If that happens, invalidate 2976 * signalled_used index we stored. TODO: make sure driver 2977 * signals at least once in 2^16 and remove this. */ 2978 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) 2979 vq->signalled_used_valid = false; 2980 return 0; 2981 } 2982 2983 static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq, 2984 struct vring_used_elem *heads, 2985 unsigned count) 2986 { 2987 int start, n, r; 2988 2989 start = vq->last_used_idx & (vq->num - 1); 2990 n = vq->num - start; 2991 if (n < count) { 2992 r = __vhost_add_used_n(vq, heads, n); 2993 if (r < 0) 2994 return r; 2995 heads += n; 2996 count -= n; 2997 } 2998 return __vhost_add_used_n(vq, heads, count); 2999 } 3000 3001 static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq, 3002 struct vring_used_elem *heads, 3003 const u16 *nheads, 3004 unsigned count) 3005 { 3006 vring_used_elem_t __user *used; 3007 u16 old, new = vq->last_used_idx; 3008 int start, i; 3009 3010 if (!nheads) 3011 return -EINVAL; 3012 3013 start = vq->last_used_idx & (vq->num - 1); 3014 used = vq->used->ring + start; 3015 3016 for (i = 0; i < count; i++) { 3017 if (vhost_put_used(vq, &heads[i], start, 1)) { 3018 vq_err(vq, "Failed to write used"); 3019 return -EFAULT; 3020 } 3021 start += nheads[i]; 3022 new += nheads[i]; 3023 if (start >= vq->num) 3024 start -= vq->num; 3025 } 3026 3027 if (unlikely(vq->log_used)) { 3028 /* Make sure data is seen before log. */ 3029 smp_wmb(); 3030 /* Log used ring entry write. */ 3031 log_used(vq, ((void __user *)used - (void __user *)vq->used), 3032 (vq->num - start) * sizeof *used); 3033 if (start + count > vq->num) 3034 log_used(vq, 0, 3035 (start + count - vq->num) * sizeof *used); 3036 } 3037 3038 old = vq->last_used_idx; 3039 vq->last_used_idx = new; 3040 /* If the driver never bothers to signal in a very long while, 3041 * used index might wrap around. If that happens, invalidate 3042 * signalled_used index we stored. TODO: make sure driver 3043 * signals at least once in 2^16 and remove this. */ 3044 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) 3045 vq->signalled_used_valid = false; 3046 return 0; 3047 } 3048 3049 /* After we've used one of their buffers, we tell them about it. We'll then 3050 * want to notify the guest, using eventfd. 
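 * With VIRTIO_F_IN_ORDER negotiated and a non-NULL nheads array, the entries
 * are written by vhost_add_used_n_in_order(), which advances the used index
 * by nheads[i] descriptors per element; otherwise vhost_add_used_n_ooo()
 * copies the elements in at most two batches to handle used-ring wrap-around.
 * In both cases the used index itself is published (and optionally logged)
 * here, after a write barrier.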
*/ 3051 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, 3052 u16 *nheads, unsigned count) 3053 { 3054 bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER); 3055 int r; 3056 3057 if (!in_order || !nheads) 3058 r = vhost_add_used_n_ooo(vq, heads, count); 3059 else 3060 r = vhost_add_used_n_in_order(vq, heads, nheads, count); 3061 3062 if (r < 0) 3063 return r; 3064 3065 /* Make sure buffer is written before we update index. */ 3066 smp_wmb(); 3067 if (vhost_put_used_idx(vq)) { 3068 vq_err(vq, "Failed to increment used idx"); 3069 return -EFAULT; 3070 } 3071 if (unlikely(vq->log_used)) { 3072 /* Make sure used idx is seen before log. */ 3073 smp_wmb(); 3074 /* Log used index update. */ 3075 log_used(vq, offsetof(struct vring_used, idx), 3076 sizeof vq->used->idx); 3077 if (vq->log_ctx) 3078 eventfd_signal(vq->log_ctx); 3079 } 3080 return r; 3081 } 3082 EXPORT_SYMBOL_GPL(vhost_add_used_n); 3083 3084 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 3085 { 3086 __u16 old, new; 3087 __virtio16 event; 3088 bool v; 3089 /* Flush out used index updates. This is paired 3090 * with the barrier that the Guest executes when enabling 3091 * interrupts. */ 3092 smp_mb(); 3093 3094 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && 3095 unlikely(vq->avail_idx == vq->last_avail_idx)) 3096 return true; 3097 3098 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 3099 __virtio16 flags; 3100 if (vhost_get_avail_flags(vq, &flags)) { 3101 vq_err(vq, "Failed to get flags"); 3102 return true; 3103 } 3104 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); 3105 } 3106 old = vq->signalled_used; 3107 v = vq->signalled_used_valid; 3108 new = vq->signalled_used = vq->last_used_idx; 3109 vq->signalled_used_valid = true; 3110 3111 if (unlikely(!v)) 3112 return true; 3113 3114 if (vhost_get_used_event(vq, &event)) { 3115 vq_err(vq, "Failed to get used event idx"); 3116 return true; 3117 } 3118 return vring_need_event(vhost16_to_cpu(vq, event), new, old); 3119 } 3120 3121 /* This actually signals the guest, using eventfd. */ 3122 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 3123 { 3124 /* Signal the Guest tell them we used something up. */ 3125 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) 3126 eventfd_signal(vq->call_ctx.ctx); 3127 } 3128 EXPORT_SYMBOL_GPL(vhost_signal); 3129 3130 /* And here's the combo meal deal. Supersize me! */ 3131 void vhost_add_used_and_signal(struct vhost_dev *dev, 3132 struct vhost_virtqueue *vq, 3133 unsigned int head, int len) 3134 { 3135 vhost_add_used(vq, head, len); 3136 vhost_signal(dev, vq); 3137 } 3138 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); 3139 3140 /* multi-buffer version of vhost_add_used_and_signal */ 3141 void vhost_add_used_and_signal_n(struct vhost_dev *dev, 3142 struct vhost_virtqueue *vq, 3143 struct vring_used_elem *heads, 3144 u16 *nheads, 3145 unsigned count) 3146 { 3147 vhost_add_used_n(vq, heads, nheads, count); 3148 vhost_signal(dev, vq); 3149 } 3150 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); 3151 3152 /* return true if we're sure that available ring is empty */ 3153 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) 3154 { 3155 int r; 3156 3157 if (vq->avail_idx != vq->last_avail_idx) 3158 return false; 3159 3160 r = vhost_get_avail_idx(vq); 3161 3162 /* Note: we treat error as non-empty here */ 3163 return r == 0; 3164 } 3165 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); 3166 3167 /* OK, now we need to know about added descriptors. 
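 * vhost_enable_notify() clears VRING_USED_F_NO_NOTIFY (or, with
 * VIRTIO_RING_F_EVENT_IDX, publishes a fresh avail event index) and then
 * re-reads the avail index, since the guest may have added buffers while
 * notifications were still disabled; it returns true if buffers may already
 * be available. The usual backend pattern, given here only as a sketch and
 * not copied from any particular driver, is:
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(vq, ...);
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;
 *			}
 *			break;
 *		}
 *		handle the buffer, then vhost_add_used_and_signal(dev, vq, head, len);
 *	}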
*/ 3168 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 3169 { 3170 int r; 3171 3172 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 3173 return false; 3174 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 3175 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 3176 r = vhost_update_used_flags(vq); 3177 if (r) { 3178 vq_err(vq, "Failed to enable notification at %p: %d\n", 3179 &vq->used->flags, r); 3180 return false; 3181 } 3182 } else { 3183 r = vhost_update_avail_event(vq); 3184 if (r) { 3185 vq_err(vq, "Failed to update avail event index at %p: %d\n", 3186 vhost_avail_event(vq), r); 3187 return false; 3188 } 3189 } 3190 /* They could have slipped one in as we were doing that: make 3191 * sure it's written, then check again. */ 3192 smp_mb(); 3193 3194 r = vhost_get_avail_idx(vq); 3195 /* Note: we treat error as empty here */ 3196 if (unlikely(r < 0)) 3197 return false; 3198 3199 return r; 3200 } 3201 EXPORT_SYMBOL_GPL(vhost_enable_notify); 3202 3203 /* We don't need to be notified again. */ 3204 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 3205 { 3206 int r; 3207 3208 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 3209 return; 3210 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 3211 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 3212 r = vhost_update_used_flags(vq); 3213 if (r) 3214 vq_err(vq, "Failed to disable notification at %p: %d\n", 3215 &vq->used->flags, r); 3216 } 3217 } 3218 EXPORT_SYMBOL_GPL(vhost_disable_notify); 3219 3220 /* Create a new message. */ 3221 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) 3222 { 3223 /* Make sure all padding within the structure is initialized. */ 3224 struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL); 3225 if (!node) 3226 return NULL; 3227 3228 node->vq = vq; 3229 node->msg.type = type; 3230 return node; 3231 } 3232 EXPORT_SYMBOL_GPL(vhost_new_msg); 3233 3234 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, 3235 struct vhost_msg_node *node) 3236 { 3237 spin_lock(&dev->iotlb_lock); 3238 list_add_tail(&node->node, head); 3239 spin_unlock(&dev->iotlb_lock); 3240 3241 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); 3242 } 3243 EXPORT_SYMBOL_GPL(vhost_enqueue_msg); 3244 3245 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, 3246 struct list_head *head) 3247 { 3248 struct vhost_msg_node *node = NULL; 3249 3250 spin_lock(&dev->iotlb_lock); 3251 if (!list_empty(head)) { 3252 node = list_first_entry(head, struct vhost_msg_node, 3253 node); 3254 list_del(&node->node); 3255 } 3256 spin_unlock(&dev->iotlb_lock); 3257 3258 return node; 3259 } 3260 EXPORT_SYMBOL_GPL(vhost_dequeue_msg); 3261 3262 void vhost_set_backend_features(struct vhost_dev *dev, u64 features) 3263 { 3264 struct vhost_virtqueue *vq; 3265 int i; 3266 3267 mutex_lock(&dev->mutex); 3268 for (i = 0; i < dev->nvqs; ++i) { 3269 vq = dev->vqs[i]; 3270 mutex_lock(&vq->mutex); 3271 vq->acked_backend_features = features; 3272 mutex_unlock(&vq->mutex); 3273 } 3274 mutex_unlock(&dev->mutex); 3275 } 3276 EXPORT_SYMBOL_GPL(vhost_set_backend_features); 3277 3278 static int __init vhost_init(void) 3279 { 3280 return 0; 3281 } 3282 3283 static void __exit vhost_exit(void) 3284 { 3285 } 3286 3287 module_init(vhost_init); 3288 module_exit(vhost_exit); 3289 3290 MODULE_VERSION("0.0.1"); 3291 MODULE_LICENSE("GPL v2"); 3292 MODULE_AUTHOR("Michael S. Tsirkin"); 3293 MODULE_DESCRIPTION("Host kernel accelerator for virtio"); 3294