1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (C) 2009 Red Hat, Inc. 3 * Copyright (C) 2006 Rusty Russell IBM Corporation 4 * 5 * Author: Michael S. Tsirkin <mst@redhat.com> 6 * 7 * Inspiration, some code, and most witty comments come from 8 * Documentation/virtual/lguest/lguest.c, by Rusty Russell 9 * 10 * Generic code for virtio server in host kernel. 11 */ 12 13 #include <linux/eventfd.h> 14 #include <linux/vhost.h> 15 #include <linux/uio.h> 16 #include <linux/mm.h> 17 #include <linux/miscdevice.h> 18 #include <linux/mutex.h> 19 #include <linux/poll.h> 20 #include <linux/file.h> 21 #include <linux/highmem.h> 22 #include <linux/slab.h> 23 #include <linux/vmalloc.h> 24 #include <linux/kthread.h> 25 #include <linux/cgroup.h> 26 #include <linux/module.h> 27 #include <linux/sort.h> 28 #include <linux/sched/mm.h> 29 #include <linux/sched/signal.h> 30 #include <linux/interval_tree_generic.h> 31 #include <linux/nospec.h> 32 #include <linux/kcov.h> 33 34 #include "vhost.h" 35 36 static ushort max_mem_regions = 64; 37 module_param(max_mem_regions, ushort, 0444); 38 MODULE_PARM_DESC(max_mem_regions, 39 "Maximum number of memory regions in memory map. (default: 64)"); 40 static int max_iotlb_entries = 2048; 41 module_param(max_iotlb_entries, int, 0444); 42 MODULE_PARM_DESC(max_iotlb_entries, 43 "Maximum number of iotlb entries. (default: 2048)"); 44 45 enum { 46 VHOST_MEMORY_F_LOG = 0x1, 47 }; 48 49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) 50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) 51 52 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY 53 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) 54 { 55 vq->user_be = !virtio_legacy_is_little_endian(); 56 } 57 58 static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq) 59 { 60 vq->user_be = true; 61 } 62 63 static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq) 64 { 65 vq->user_be = false; 66 } 67 68 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) 69 { 70 struct vhost_vring_state s; 71 72 if (vq->private_data) 73 return -EBUSY; 74 75 if (copy_from_user(&s, argp, sizeof(s))) 76 return -EFAULT; 77 78 if (s.num != VHOST_VRING_LITTLE_ENDIAN && 79 s.num != VHOST_VRING_BIG_ENDIAN) 80 return -EINVAL; 81 82 if (s.num == VHOST_VRING_BIG_ENDIAN) 83 vhost_enable_cross_endian_big(vq); 84 else 85 vhost_enable_cross_endian_little(vq); 86 87 return 0; 88 } 89 90 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, 91 int __user *argp) 92 { 93 struct vhost_vring_state s = { 94 .index = idx, 95 .num = vq->user_be 96 }; 97 98 if (copy_to_user(argp, &s, sizeof(s))) 99 return -EFAULT; 100 101 return 0; 102 } 103 104 static void vhost_init_is_le(struct vhost_virtqueue *vq) 105 { 106 /* Note for legacy virtio: user_be is initialized at reset time 107 * according to the host endianness. If userspace does not set an 108 * explicit endianness, the default behavior is native endian, as 109 * expected by legacy virtio. 110 */ 111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; 112 } 113 #else 114 static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) 115 { 116 } 117 118 static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp) 119 { 120 return -ENOIOCTLCMD; 121 } 122 123 static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx, 124 int __user *argp) 125 { 126 return -ENOIOCTLCMD; 127 } 128 129 static void vhost_init_is_le(struct vhost_virtqueue *vq) 130 { 131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) 132 || virtio_legacy_is_little_endian(); 133 } 134 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */ 135 136 static void vhost_reset_is_le(struct vhost_virtqueue *vq) 137 { 138 vhost_init_is_le(vq); 139 } 140 141 struct vhost_flush_struct { 142 struct vhost_work work; 143 struct completion wait_event; 144 }; 145 146 static void vhost_flush_work(struct vhost_work *work) 147 { 148 struct vhost_flush_struct *s; 149 150 s = container_of(work, struct vhost_flush_struct, work); 151 complete(&s->wait_event); 152 } 153 154 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh, 155 poll_table *pt) 156 { 157 struct vhost_poll *poll; 158 159 poll = container_of(pt, struct vhost_poll, table); 160 poll->wqh = wqh; 161 add_wait_queue(wqh, &poll->wait); 162 } 163 164 static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, 165 void *key) 166 { 167 struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait); 168 struct vhost_work *work = &poll->work; 169 170 if (!(key_to_poll(key) & poll->mask)) 171 return 0; 172 173 if (!poll->dev->use_worker) 174 work->fn(work); 175 else 176 vhost_poll_queue(poll); 177 178 return 0; 179 } 180 181 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn) 182 { 183 clear_bit(VHOST_WORK_QUEUED, &work->flags); 184 work->fn = fn; 185 } 186 EXPORT_SYMBOL_GPL(vhost_work_init); 187 188 /* Init poll structure */ 189 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn, 190 __poll_t mask, struct vhost_dev *dev) 191 { 192 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); 193 init_poll_funcptr(&poll->table, vhost_poll_func); 194 poll->mask = mask; 195 poll->dev = dev; 196 poll->wqh = NULL; 197 198 vhost_work_init(&poll->work, fn); 199 } 200 EXPORT_SYMBOL_GPL(vhost_poll_init); 201 202 /* Start polling a file. We add ourselves to file's wait queue. The caller must 203 * keep a reference to a file until after vhost_poll_stop is called. */ 204 int vhost_poll_start(struct vhost_poll *poll, struct file *file) 205 { 206 __poll_t mask; 207 208 if (poll->wqh) 209 return 0; 210 211 mask = vfs_poll(file, &poll->table); 212 if (mask) 213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); 214 if (mask & EPOLLERR) { 215 vhost_poll_stop(poll); 216 return -EINVAL; 217 } 218 219 return 0; 220 } 221 EXPORT_SYMBOL_GPL(vhost_poll_start); 222 223 /* Stop polling a file. After this function returns, it becomes safe to drop the 224 * file reference. You must also flush afterwards. */ 225 void vhost_poll_stop(struct vhost_poll *poll) 226 { 227 if (poll->wqh) { 228 remove_wait_queue(poll->wqh, &poll->wait); 229 poll->wqh = NULL; 230 } 231 } 232 EXPORT_SYMBOL_GPL(vhost_poll_stop); 233 234 void vhost_dev_flush(struct vhost_dev *dev) 235 { 236 struct vhost_flush_struct flush; 237 238 if (dev->worker) { 239 init_completion(&flush.wait_event); 240 vhost_work_init(&flush.work, vhost_flush_work); 241 242 vhost_work_queue(dev, &flush.work); 243 wait_for_completion(&flush.wait_event); 244 } 245 } 246 EXPORT_SYMBOL_GPL(vhost_dev_flush); 247 248 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work) 249 { 250 if (!dev->worker) 251 return; 252 253 if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { 254 /* We can only add the work to the list after we're 255 * sure it was not in the list. 256 * test_and_set_bit() implies a memory barrier. 257 */ 258 llist_add(&work->node, &dev->work_list); 259 wake_up_process(dev->worker); 260 } 261 } 262 EXPORT_SYMBOL_GPL(vhost_work_queue); 263 264 /* A lockless hint for busy polling code to exit the loop */ 265 bool vhost_has_work(struct vhost_dev *dev) 266 { 267 return !llist_empty(&dev->work_list); 268 } 269 EXPORT_SYMBOL_GPL(vhost_has_work); 270 271 void vhost_poll_queue(struct vhost_poll *poll) 272 { 273 vhost_work_queue(poll->dev, &poll->work); 274 } 275 EXPORT_SYMBOL_GPL(vhost_poll_queue); 276 277 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq) 278 { 279 int j; 280 281 for (j = 0; j < VHOST_NUM_ADDRS; j++) 282 vq->meta_iotlb[j] = NULL; 283 } 284 285 static void vhost_vq_meta_reset(struct vhost_dev *d) 286 { 287 int i; 288 289 for (i = 0; i < d->nvqs; ++i) 290 __vhost_vq_meta_reset(d->vqs[i]); 291 } 292 293 static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx) 294 { 295 call_ctx->ctx = NULL; 296 memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer)); 297 } 298 299 bool vhost_vq_is_setup(struct vhost_virtqueue *vq) 300 { 301 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); 302 } 303 EXPORT_SYMBOL_GPL(vhost_vq_is_setup); 304 305 static void vhost_vq_reset(struct vhost_dev *dev, 306 struct vhost_virtqueue *vq) 307 { 308 vq->num = 1; 309 vq->desc = NULL; 310 vq->avail = NULL; 311 vq->used = NULL; 312 vq->last_avail_idx = 0; 313 vq->avail_idx = 0; 314 vq->last_used_idx = 0; 315 vq->signalled_used = 0; 316 vq->signalled_used_valid = false; 317 vq->used_flags = 0; 318 vq->log_used = false; 319 vq->log_addr = -1ull; 320 vq->private_data = NULL; 321 vq->acked_features = 0; 322 vq->acked_backend_features = 0; 323 vq->log_base = NULL; 324 vq->error_ctx = NULL; 325 vq->kick = NULL; 326 vq->log_ctx = NULL; 327 vhost_disable_cross_endian(vq); 328 vhost_reset_is_le(vq); 329 vq->busyloop_timeout = 0; 330 vq->umem = NULL; 331 vq->iotlb = NULL; 332 vhost_vring_call_reset(&vq->call_ctx); 333 __vhost_vq_meta_reset(vq); 334 } 335 336 static int vhost_worker(void *data) 337 { 338 struct vhost_dev *dev = data; 339 struct vhost_work *work, *work_next; 340 struct llist_node *node; 341 342 kthread_use_mm(dev->mm); 343 344 for (;;) { 345 /* mb paired w/ kthread_stop */ 346 set_current_state(TASK_INTERRUPTIBLE); 347 348 if (kthread_should_stop()) { 349 __set_current_state(TASK_RUNNING); 350 break; 351 } 352 353 node = llist_del_all(&dev->work_list); 354 if (!node) 355 schedule(); 356 357 node = llist_reverse_order(node); 358 /* make sure flag is seen after deletion */ 359 smp_wmb(); 360 llist_for_each_entry_safe(work, work_next, node, node) { 361 clear_bit(VHOST_WORK_QUEUED, &work->flags); 362 __set_current_state(TASK_RUNNING); 363 kcov_remote_start_common(dev->kcov_handle); 364 work->fn(work); 365 kcov_remote_stop(); 366 if (need_resched()) 367 schedule(); 368 } 369 } 370 kthread_unuse_mm(dev->mm); 371 return 0; 372 } 373 374 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq) 375 { 376 kfree(vq->indirect); 377 vq->indirect = NULL; 378 kfree(vq->log); 379 vq->log = NULL; 380 kfree(vq->heads); 381 vq->heads = NULL; 382 } 383 384 /* Helper to allocate iovec buffers for all vqs. */ 385 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) 386 { 387 struct vhost_virtqueue *vq; 388 int i; 389 390 for (i = 0; i < dev->nvqs; ++i) { 391 vq = dev->vqs[i]; 392 vq->indirect = kmalloc_array(UIO_MAXIOV, 393 sizeof(*vq->indirect), 394 GFP_KERNEL); 395 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), 396 GFP_KERNEL); 397 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), 398 GFP_KERNEL); 399 if (!vq->indirect || !vq->log || !vq->heads) 400 goto err_nomem; 401 } 402 return 0; 403 404 err_nomem: 405 for (; i >= 0; --i) 406 vhost_vq_free_iovecs(dev->vqs[i]); 407 return -ENOMEM; 408 } 409 410 static void vhost_dev_free_iovecs(struct vhost_dev *dev) 411 { 412 int i; 413 414 for (i = 0; i < dev->nvqs; ++i) 415 vhost_vq_free_iovecs(dev->vqs[i]); 416 } 417 418 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, 419 int pkts, int total_len) 420 { 421 struct vhost_dev *dev = vq->dev; 422 423 if ((dev->byte_weight && total_len >= dev->byte_weight) || 424 pkts >= dev->weight) { 425 vhost_poll_queue(&vq->poll); 426 return true; 427 } 428 429 return false; 430 } 431 EXPORT_SYMBOL_GPL(vhost_exceeds_weight); 432 433 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq, 434 unsigned int num) 435 { 436 size_t event __maybe_unused = 437 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 438 439 return size_add(struct_size(vq->avail, ring, num), event); 440 } 441 442 static size_t vhost_get_used_size(struct vhost_virtqueue *vq, 443 unsigned int num) 444 { 445 size_t event __maybe_unused = 446 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0; 447 448 return size_add(struct_size(vq->used, ring, num), event); 449 } 450 451 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, 452 unsigned int num) 453 { 454 return sizeof(*vq->desc) * num; 455 } 456 457 void vhost_dev_init(struct vhost_dev *dev, 458 struct vhost_virtqueue **vqs, int nvqs, 459 int iov_limit, int weight, int byte_weight, 460 bool use_worker, 461 int (*msg_handler)(struct vhost_dev *dev, u32 asid, 462 struct vhost_iotlb_msg *msg)) 463 { 464 struct vhost_virtqueue *vq; 465 int i; 466 467 dev->vqs = vqs; 468 dev->nvqs = nvqs; 469 mutex_init(&dev->mutex); 470 dev->log_ctx = NULL; 471 dev->umem = NULL; 472 dev->iotlb = NULL; 473 dev->mm = NULL; 474 dev->worker = NULL; 475 dev->iov_limit = iov_limit; 476 dev->weight = weight; 477 dev->byte_weight = byte_weight; 478 dev->use_worker = use_worker; 479 dev->msg_handler = msg_handler; 480 init_llist_head(&dev->work_list); 481 init_waitqueue_head(&dev->wait); 482 INIT_LIST_HEAD(&dev->read_list); 483 INIT_LIST_HEAD(&dev->pending_list); 484 spin_lock_init(&dev->iotlb_lock); 485 486 487 for (i = 0; i < dev->nvqs; ++i) { 488 vq = dev->vqs[i]; 489 vq->log = NULL; 490 vq->indirect = NULL; 491 vq->heads = NULL; 492 vq->dev = dev; 493 mutex_init(&vq->mutex); 494 vhost_vq_reset(dev, vq); 495 if (vq->handle_kick) 496 vhost_poll_init(&vq->poll, vq->handle_kick, 497 EPOLLIN, dev); 498 } 499 } 500 EXPORT_SYMBOL_GPL(vhost_dev_init); 501 502 /* Caller should have device mutex */ 503 long vhost_dev_check_owner(struct vhost_dev *dev) 504 { 505 /* Are you the owner? If not, I don't think you mean to do that */ 506 return dev->mm == current->mm ? 0 : -EPERM; 507 } 508 EXPORT_SYMBOL_GPL(vhost_dev_check_owner); 509 510 struct vhost_attach_cgroups_struct { 511 struct vhost_work work; 512 struct task_struct *owner; 513 int ret; 514 }; 515 516 static void vhost_attach_cgroups_work(struct vhost_work *work) 517 { 518 struct vhost_attach_cgroups_struct *s; 519 520 s = container_of(work, struct vhost_attach_cgroups_struct, work); 521 s->ret = cgroup_attach_task_all(s->owner, current); 522 } 523 524 static int vhost_attach_cgroups(struct vhost_dev *dev) 525 { 526 struct vhost_attach_cgroups_struct attach; 527 528 attach.owner = current; 529 vhost_work_init(&attach.work, vhost_attach_cgroups_work); 530 vhost_work_queue(dev, &attach.work); 531 vhost_dev_flush(dev); 532 return attach.ret; 533 } 534 535 /* Caller should have device mutex */ 536 bool vhost_dev_has_owner(struct vhost_dev *dev) 537 { 538 return dev->mm; 539 } 540 EXPORT_SYMBOL_GPL(vhost_dev_has_owner); 541 542 static void vhost_attach_mm(struct vhost_dev *dev) 543 { 544 /* No owner, become one */ 545 if (dev->use_worker) { 546 dev->mm = get_task_mm(current); 547 } else { 548 /* vDPA device does not use worker thead, so there's 549 * no need to hold the address space for mm. This help 550 * to avoid deadlock in the case of mmap() which may 551 * held the refcnt of the file and depends on release 552 * method to remove vma. 553 */ 554 dev->mm = current->mm; 555 mmgrab(dev->mm); 556 } 557 } 558 559 static void vhost_detach_mm(struct vhost_dev *dev) 560 { 561 if (!dev->mm) 562 return; 563 564 if (dev->use_worker) 565 mmput(dev->mm); 566 else 567 mmdrop(dev->mm); 568 569 dev->mm = NULL; 570 } 571 572 /* Caller should have device mutex */ 573 long vhost_dev_set_owner(struct vhost_dev *dev) 574 { 575 struct task_struct *worker; 576 int err; 577 578 /* Is there an owner already? */ 579 if (vhost_dev_has_owner(dev)) { 580 err = -EBUSY; 581 goto err_mm; 582 } 583 584 vhost_attach_mm(dev); 585 586 dev->kcov_handle = kcov_common_handle(); 587 if (dev->use_worker) { 588 worker = kthread_create(vhost_worker, dev, 589 "vhost-%d", current->pid); 590 if (IS_ERR(worker)) { 591 err = PTR_ERR(worker); 592 goto err_worker; 593 } 594 595 dev->worker = worker; 596 wake_up_process(worker); /* avoid contributing to loadavg */ 597 598 err = vhost_attach_cgroups(dev); 599 if (err) 600 goto err_cgroup; 601 } 602 603 err = vhost_dev_alloc_iovecs(dev); 604 if (err) 605 goto err_cgroup; 606 607 return 0; 608 err_cgroup: 609 if (dev->worker) { 610 kthread_stop(dev->worker); 611 dev->worker = NULL; 612 } 613 err_worker: 614 vhost_detach_mm(dev); 615 dev->kcov_handle = 0; 616 err_mm: 617 return err; 618 } 619 EXPORT_SYMBOL_GPL(vhost_dev_set_owner); 620 621 static struct vhost_iotlb *iotlb_alloc(void) 622 { 623 return vhost_iotlb_alloc(max_iotlb_entries, 624 VHOST_IOTLB_FLAG_RETIRE); 625 } 626 627 struct vhost_iotlb *vhost_dev_reset_owner_prepare(void) 628 { 629 return iotlb_alloc(); 630 } 631 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); 632 633 /* Caller should have device mutex */ 634 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem) 635 { 636 int i; 637 638 vhost_dev_cleanup(dev); 639 640 dev->umem = umem; 641 /* We don't need VQ locks below since vhost_dev_cleanup makes sure 642 * VQs aren't running. 643 */ 644 for (i = 0; i < dev->nvqs; ++i) 645 dev->vqs[i]->umem = umem; 646 } 647 EXPORT_SYMBOL_GPL(vhost_dev_reset_owner); 648 649 void vhost_dev_stop(struct vhost_dev *dev) 650 { 651 int i; 652 653 for (i = 0; i < dev->nvqs; ++i) { 654 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) 655 vhost_poll_stop(&dev->vqs[i]->poll); 656 } 657 658 vhost_dev_flush(dev); 659 } 660 EXPORT_SYMBOL_GPL(vhost_dev_stop); 661 662 void vhost_clear_msg(struct vhost_dev *dev) 663 { 664 struct vhost_msg_node *node, *n; 665 666 spin_lock(&dev->iotlb_lock); 667 668 list_for_each_entry_safe(node, n, &dev->read_list, node) { 669 list_del(&node->node); 670 kfree(node); 671 } 672 673 list_for_each_entry_safe(node, n, &dev->pending_list, node) { 674 list_del(&node->node); 675 kfree(node); 676 } 677 678 spin_unlock(&dev->iotlb_lock); 679 } 680 EXPORT_SYMBOL_GPL(vhost_clear_msg); 681 682 void vhost_dev_cleanup(struct vhost_dev *dev) 683 { 684 int i; 685 686 for (i = 0; i < dev->nvqs; ++i) { 687 if (dev->vqs[i]->error_ctx) 688 eventfd_ctx_put(dev->vqs[i]->error_ctx); 689 if (dev->vqs[i]->kick) 690 fput(dev->vqs[i]->kick); 691 if (dev->vqs[i]->call_ctx.ctx) 692 eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx); 693 vhost_vq_reset(dev, dev->vqs[i]); 694 } 695 vhost_dev_free_iovecs(dev); 696 if (dev->log_ctx) 697 eventfd_ctx_put(dev->log_ctx); 698 dev->log_ctx = NULL; 699 /* No one will access memory at this point */ 700 vhost_iotlb_free(dev->umem); 701 dev->umem = NULL; 702 vhost_iotlb_free(dev->iotlb); 703 dev->iotlb = NULL; 704 vhost_clear_msg(dev); 705 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); 706 WARN_ON(!llist_empty(&dev->work_list)); 707 if (dev->worker) { 708 kthread_stop(dev->worker); 709 dev->worker = NULL; 710 dev->kcov_handle = 0; 711 } 712 vhost_detach_mm(dev); 713 } 714 EXPORT_SYMBOL_GPL(vhost_dev_cleanup); 715 716 static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz) 717 { 718 u64 a = addr / VHOST_PAGE_SIZE / 8; 719 720 /* Make sure 64 bit math will not overflow. */ 721 if (a > ULONG_MAX - (unsigned long)log_base || 722 a + (unsigned long)log_base > ULONG_MAX) 723 return false; 724 725 return access_ok(log_base + a, 726 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); 727 } 728 729 /* Make sure 64 bit math will not overflow. */ 730 static bool vhost_overflow(u64 uaddr, u64 size) 731 { 732 if (uaddr > ULONG_MAX || size > ULONG_MAX) 733 return true; 734 735 if (!size) 736 return false; 737 738 return uaddr > ULONG_MAX - size + 1; 739 } 740 741 /* Caller should have vq mutex and device mutex. */ 742 static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem, 743 int log_all) 744 { 745 struct vhost_iotlb_map *map; 746 747 if (!umem) 748 return false; 749 750 list_for_each_entry(map, &umem->list, link) { 751 unsigned long a = map->addr; 752 753 if (vhost_overflow(map->addr, map->size)) 754 return false; 755 756 757 if (!access_ok((void __user *)a, map->size)) 758 return false; 759 else if (log_all && !log_access_ok(log_base, 760 map->start, 761 map->size)) 762 return false; 763 } 764 return true; 765 } 766 767 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, 768 u64 addr, unsigned int size, 769 int type) 770 { 771 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; 772 773 if (!map) 774 return NULL; 775 776 return (void __user *)(uintptr_t)(map->addr + addr - map->start); 777 } 778 779 /* Can we switch to this memory table? */ 780 /* Caller should have device mutex but not vq mutex */ 781 static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem, 782 int log_all) 783 { 784 int i; 785 786 for (i = 0; i < d->nvqs; ++i) { 787 bool ok; 788 bool log; 789 790 mutex_lock(&d->vqs[i]->mutex); 791 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); 792 /* If ring is inactive, will check when it's enabled. */ 793 if (d->vqs[i]->private_data) 794 ok = vq_memory_access_ok(d->vqs[i]->log_base, 795 umem, log); 796 else 797 ok = true; 798 mutex_unlock(&d->vqs[i]->mutex); 799 if (!ok) 800 return false; 801 } 802 return true; 803 } 804 805 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, 806 struct iovec iov[], int iov_size, int access); 807 808 static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to, 809 const void *from, unsigned size) 810 { 811 int ret; 812 813 if (!vq->iotlb) 814 return __copy_to_user(to, from, size); 815 else { 816 /* This function should be called after iotlb 817 * prefetch, which means we're sure that all vq 818 * could be access through iotlb. So -EAGAIN should 819 * not happen in this case. 820 */ 821 struct iov_iter t; 822 void __user *uaddr = vhost_vq_meta_fetch(vq, 823 (u64)(uintptr_t)to, size, 824 VHOST_ADDR_USED); 825 826 if (uaddr) 827 return __copy_to_user(uaddr, from, size); 828 829 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, 830 ARRAY_SIZE(vq->iotlb_iov), 831 VHOST_ACCESS_WO); 832 if (ret < 0) 833 goto out; 834 iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); 835 ret = copy_to_iter(from, size, &t); 836 if (ret == size) 837 ret = 0; 838 } 839 out: 840 return ret; 841 } 842 843 static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to, 844 void __user *from, unsigned size) 845 { 846 int ret; 847 848 if (!vq->iotlb) 849 return __copy_from_user(to, from, size); 850 else { 851 /* This function should be called after iotlb 852 * prefetch, which means we're sure that vq 853 * could be access through iotlb. So -EAGAIN should 854 * not happen in this case. 855 */ 856 void __user *uaddr = vhost_vq_meta_fetch(vq, 857 (u64)(uintptr_t)from, size, 858 VHOST_ADDR_DESC); 859 struct iov_iter f; 860 861 if (uaddr) 862 return __copy_from_user(to, uaddr, size); 863 864 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, 865 ARRAY_SIZE(vq->iotlb_iov), 866 VHOST_ACCESS_RO); 867 if (ret < 0) { 868 vq_err(vq, "IOTLB translation failure: uaddr " 869 "%p size 0x%llx\n", from, 870 (unsigned long long) size); 871 goto out; 872 } 873 iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); 874 ret = copy_from_iter(to, size, &f); 875 if (ret == size) 876 ret = 0; 877 } 878 879 out: 880 return ret; 881 } 882 883 static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq, 884 void __user *addr, unsigned int size, 885 int type) 886 { 887 int ret; 888 889 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, 890 ARRAY_SIZE(vq->iotlb_iov), 891 VHOST_ACCESS_RO); 892 if (ret < 0) { 893 vq_err(vq, "IOTLB translation failure: uaddr " 894 "%p size 0x%llx\n", addr, 895 (unsigned long long) size); 896 return NULL; 897 } 898 899 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { 900 vq_err(vq, "Non atomic userspace memory access: uaddr " 901 "%p size 0x%llx\n", addr, 902 (unsigned long long) size); 903 return NULL; 904 } 905 906 return vq->iotlb_iov[0].iov_base; 907 } 908 909 /* This function should be called after iotlb 910 * prefetch, which means we're sure that vq 911 * could be access through iotlb. So -EAGAIN should 912 * not happen in this case. 913 */ 914 static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq, 915 void __user *addr, unsigned int size, 916 int type) 917 { 918 void __user *uaddr = vhost_vq_meta_fetch(vq, 919 (u64)(uintptr_t)addr, size, type); 920 if (uaddr) 921 return uaddr; 922 923 return __vhost_get_user_slow(vq, addr, size, type); 924 } 925 926 #define vhost_put_user(vq, x, ptr) \ 927 ({ \ 928 int ret; \ 929 if (!vq->iotlb) { \ 930 ret = __put_user(x, ptr); \ 931 } else { \ 932 __typeof__(ptr) to = \ 933 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ 934 sizeof(*ptr), VHOST_ADDR_USED); \ 935 if (to != NULL) \ 936 ret = __put_user(x, to); \ 937 else \ 938 ret = -EFAULT; \ 939 } \ 940 ret; \ 941 }) 942 943 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) 944 { 945 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), 946 vhost_avail_event(vq)); 947 } 948 949 static inline int vhost_put_used(struct vhost_virtqueue *vq, 950 struct vring_used_elem *head, int idx, 951 int count) 952 { 953 return vhost_copy_to_user(vq, vq->used->ring + idx, head, 954 count * sizeof(*head)); 955 } 956 957 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) 958 959 { 960 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), 961 &vq->used->flags); 962 } 963 964 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) 965 966 { 967 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), 968 &vq->used->idx); 969 } 970 971 #define vhost_get_user(vq, x, ptr, type) \ 972 ({ \ 973 int ret; \ 974 if (!vq->iotlb) { \ 975 ret = __get_user(x, ptr); \ 976 } else { \ 977 __typeof__(ptr) from = \ 978 (__typeof__(ptr)) __vhost_get_user(vq, ptr, \ 979 sizeof(*ptr), \ 980 type); \ 981 if (from != NULL) \ 982 ret = __get_user(x, from); \ 983 else \ 984 ret = -EFAULT; \ 985 } \ 986 ret; \ 987 }) 988 989 #define vhost_get_avail(vq, x, ptr) \ 990 vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL) 991 992 #define vhost_get_used(vq, x, ptr) \ 993 vhost_get_user(vq, x, ptr, VHOST_ADDR_USED) 994 995 static void vhost_dev_lock_vqs(struct vhost_dev *d) 996 { 997 int i = 0; 998 for (i = 0; i < d->nvqs; ++i) 999 mutex_lock_nested(&d->vqs[i]->mutex, i); 1000 } 1001 1002 static void vhost_dev_unlock_vqs(struct vhost_dev *d) 1003 { 1004 int i = 0; 1005 for (i = 0; i < d->nvqs; ++i) 1006 mutex_unlock(&d->vqs[i]->mutex); 1007 } 1008 1009 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, 1010 __virtio16 *idx) 1011 { 1012 return vhost_get_avail(vq, *idx, &vq->avail->idx); 1013 } 1014 1015 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, 1016 __virtio16 *head, int idx) 1017 { 1018 return vhost_get_avail(vq, *head, 1019 &vq->avail->ring[idx & (vq->num - 1)]); 1020 } 1021 1022 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, 1023 __virtio16 *flags) 1024 { 1025 return vhost_get_avail(vq, *flags, &vq->avail->flags); 1026 } 1027 1028 static inline int vhost_get_used_event(struct vhost_virtqueue *vq, 1029 __virtio16 *event) 1030 { 1031 return vhost_get_avail(vq, *event, vhost_used_event(vq)); 1032 } 1033 1034 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, 1035 __virtio16 *idx) 1036 { 1037 return vhost_get_used(vq, *idx, &vq->used->idx); 1038 } 1039 1040 static inline int vhost_get_desc(struct vhost_virtqueue *vq, 1041 struct vring_desc *desc, int idx) 1042 { 1043 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); 1044 } 1045 1046 static void vhost_iotlb_notify_vq(struct vhost_dev *d, 1047 struct vhost_iotlb_msg *msg) 1048 { 1049 struct vhost_msg_node *node, *n; 1050 1051 spin_lock(&d->iotlb_lock); 1052 1053 list_for_each_entry_safe(node, n, &d->pending_list, node) { 1054 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; 1055 if (msg->iova <= vq_msg->iova && 1056 msg->iova + msg->size - 1 >= vq_msg->iova && 1057 vq_msg->type == VHOST_IOTLB_MISS) { 1058 vhost_poll_queue(&node->vq->poll); 1059 list_del(&node->node); 1060 kfree(node); 1061 } 1062 } 1063 1064 spin_unlock(&d->iotlb_lock); 1065 } 1066 1067 static bool umem_access_ok(u64 uaddr, u64 size, int access) 1068 { 1069 unsigned long a = uaddr; 1070 1071 /* Make sure 64 bit math will not overflow. */ 1072 if (vhost_overflow(uaddr, size)) 1073 return false; 1074 1075 if ((access & VHOST_ACCESS_RO) && 1076 !access_ok((void __user *)a, size)) 1077 return false; 1078 if ((access & VHOST_ACCESS_WO) && 1079 !access_ok((void __user *)a, size)) 1080 return false; 1081 return true; 1082 } 1083 1084 static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid, 1085 struct vhost_iotlb_msg *msg) 1086 { 1087 int ret = 0; 1088 1089 if (asid != 0) 1090 return -EINVAL; 1091 1092 mutex_lock(&dev->mutex); 1093 vhost_dev_lock_vqs(dev); 1094 switch (msg->type) { 1095 case VHOST_IOTLB_UPDATE: 1096 if (!dev->iotlb) { 1097 ret = -EFAULT; 1098 break; 1099 } 1100 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) { 1101 ret = -EFAULT; 1102 break; 1103 } 1104 vhost_vq_meta_reset(dev); 1105 if (vhost_iotlb_add_range(dev->iotlb, msg->iova, 1106 msg->iova + msg->size - 1, 1107 msg->uaddr, msg->perm)) { 1108 ret = -ENOMEM; 1109 break; 1110 } 1111 vhost_iotlb_notify_vq(dev, msg); 1112 break; 1113 case VHOST_IOTLB_INVALIDATE: 1114 if (!dev->iotlb) { 1115 ret = -EFAULT; 1116 break; 1117 } 1118 vhost_vq_meta_reset(dev); 1119 vhost_iotlb_del_range(dev->iotlb, msg->iova, 1120 msg->iova + msg->size - 1); 1121 break; 1122 default: 1123 ret = -EINVAL; 1124 break; 1125 } 1126 1127 vhost_dev_unlock_vqs(dev); 1128 mutex_unlock(&dev->mutex); 1129 1130 return ret; 1131 } 1132 ssize_t vhost_chr_write_iter(struct vhost_dev *dev, 1133 struct iov_iter *from) 1134 { 1135 struct vhost_iotlb_msg msg; 1136 size_t offset; 1137 int type, ret; 1138 u32 asid = 0; 1139 1140 ret = copy_from_iter(&type, sizeof(type), from); 1141 if (ret != sizeof(type)) { 1142 ret = -EINVAL; 1143 goto done; 1144 } 1145 1146 switch (type) { 1147 case VHOST_IOTLB_MSG: 1148 /* There maybe a hole after type for V1 message type, 1149 * so skip it here. 1150 */ 1151 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); 1152 break; 1153 case VHOST_IOTLB_MSG_V2: 1154 if (vhost_backend_has_feature(dev->vqs[0], 1155 VHOST_BACKEND_F_IOTLB_ASID)) { 1156 ret = copy_from_iter(&asid, sizeof(asid), from); 1157 if (ret != sizeof(asid)) { 1158 ret = -EINVAL; 1159 goto done; 1160 } 1161 offset = 0; 1162 } else 1163 offset = sizeof(__u32); 1164 break; 1165 default: 1166 ret = -EINVAL; 1167 goto done; 1168 } 1169 1170 iov_iter_advance(from, offset); 1171 ret = copy_from_iter(&msg, sizeof(msg), from); 1172 if (ret != sizeof(msg)) { 1173 ret = -EINVAL; 1174 goto done; 1175 } 1176 1177 if ((msg.type == VHOST_IOTLB_UPDATE || 1178 msg.type == VHOST_IOTLB_INVALIDATE) && 1179 msg.size == 0) { 1180 ret = -EINVAL; 1181 goto done; 1182 } 1183 1184 if (dev->msg_handler) 1185 ret = dev->msg_handler(dev, asid, &msg); 1186 else 1187 ret = vhost_process_iotlb_msg(dev, asid, &msg); 1188 if (ret) { 1189 ret = -EFAULT; 1190 goto done; 1191 } 1192 1193 ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) : 1194 sizeof(struct vhost_msg_v2); 1195 done: 1196 return ret; 1197 } 1198 EXPORT_SYMBOL(vhost_chr_write_iter); 1199 1200 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev, 1201 poll_table *wait) 1202 { 1203 __poll_t mask = 0; 1204 1205 poll_wait(file, &dev->wait, wait); 1206 1207 if (!list_empty(&dev->read_list)) 1208 mask |= EPOLLIN | EPOLLRDNORM; 1209 1210 return mask; 1211 } 1212 EXPORT_SYMBOL(vhost_chr_poll); 1213 1214 ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to, 1215 int noblock) 1216 { 1217 DEFINE_WAIT(wait); 1218 struct vhost_msg_node *node; 1219 ssize_t ret = 0; 1220 unsigned size = sizeof(struct vhost_msg); 1221 1222 if (iov_iter_count(to) < size) 1223 return 0; 1224 1225 while (1) { 1226 if (!noblock) 1227 prepare_to_wait(&dev->wait, &wait, 1228 TASK_INTERRUPTIBLE); 1229 1230 node = vhost_dequeue_msg(dev, &dev->read_list); 1231 if (node) 1232 break; 1233 if (noblock) { 1234 ret = -EAGAIN; 1235 break; 1236 } 1237 if (signal_pending(current)) { 1238 ret = -ERESTARTSYS; 1239 break; 1240 } 1241 if (!dev->iotlb) { 1242 ret = -EBADFD; 1243 break; 1244 } 1245 1246 schedule(); 1247 } 1248 1249 if (!noblock) 1250 finish_wait(&dev->wait, &wait); 1251 1252 if (node) { 1253 struct vhost_iotlb_msg *msg; 1254 void *start = &node->msg; 1255 1256 switch (node->msg.type) { 1257 case VHOST_IOTLB_MSG: 1258 size = sizeof(node->msg); 1259 msg = &node->msg.iotlb; 1260 break; 1261 case VHOST_IOTLB_MSG_V2: 1262 size = sizeof(node->msg_v2); 1263 msg = &node->msg_v2.iotlb; 1264 break; 1265 default: 1266 BUG(); 1267 break; 1268 } 1269 1270 ret = copy_to_iter(start, size, to); 1271 if (ret != size || msg->type != VHOST_IOTLB_MISS) { 1272 kfree(node); 1273 return ret; 1274 } 1275 vhost_enqueue_msg(dev, &dev->pending_list, node); 1276 } 1277 1278 return ret; 1279 } 1280 EXPORT_SYMBOL_GPL(vhost_chr_read_iter); 1281 1282 static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access) 1283 { 1284 struct vhost_dev *dev = vq->dev; 1285 struct vhost_msg_node *node; 1286 struct vhost_iotlb_msg *msg; 1287 bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2); 1288 1289 node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG); 1290 if (!node) 1291 return -ENOMEM; 1292 1293 if (v2) { 1294 node->msg_v2.type = VHOST_IOTLB_MSG_V2; 1295 msg = &node->msg_v2.iotlb; 1296 } else { 1297 msg = &node->msg.iotlb; 1298 } 1299 1300 msg->type = VHOST_IOTLB_MISS; 1301 msg->iova = iova; 1302 msg->perm = access; 1303 1304 vhost_enqueue_msg(dev, &dev->read_list, node); 1305 1306 return 0; 1307 } 1308 1309 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, 1310 vring_desc_t __user *desc, 1311 vring_avail_t __user *avail, 1312 vring_used_t __user *used) 1313 1314 { 1315 /* If an IOTLB device is present, the vring addresses are 1316 * GIOVAs. Access validation occurs at prefetch time. */ 1317 if (vq->iotlb) 1318 return true; 1319 1320 return access_ok(desc, vhost_get_desc_size(vq, num)) && 1321 access_ok(avail, vhost_get_avail_size(vq, num)) && 1322 access_ok(used, vhost_get_used_size(vq, num)); 1323 } 1324 1325 static void vhost_vq_meta_update(struct vhost_virtqueue *vq, 1326 const struct vhost_iotlb_map *map, 1327 int type) 1328 { 1329 int access = (type == VHOST_ADDR_USED) ? 1330 VHOST_ACCESS_WO : VHOST_ACCESS_RO; 1331 1332 if (likely(map->perm & access)) 1333 vq->meta_iotlb[type] = map; 1334 } 1335 1336 static bool iotlb_access_ok(struct vhost_virtqueue *vq, 1337 int access, u64 addr, u64 len, int type) 1338 { 1339 const struct vhost_iotlb_map *map; 1340 struct vhost_iotlb *umem = vq->iotlb; 1341 u64 s = 0, size, orig_addr = addr, last = addr + len - 1; 1342 1343 if (vhost_vq_meta_fetch(vq, addr, len, type)) 1344 return true; 1345 1346 while (len > s) { 1347 map = vhost_iotlb_itree_first(umem, addr, last); 1348 if (map == NULL || map->start > addr) { 1349 vhost_iotlb_miss(vq, addr, access); 1350 return false; 1351 } else if (!(map->perm & access)) { 1352 /* Report the possible access violation by 1353 * request another translation from userspace. 1354 */ 1355 return false; 1356 } 1357 1358 size = map->size - addr + map->start; 1359 1360 if (orig_addr == addr && size >= len) 1361 vhost_vq_meta_update(vq, map, type); 1362 1363 s += size; 1364 addr += size; 1365 } 1366 1367 return true; 1368 } 1369 1370 int vq_meta_prefetch(struct vhost_virtqueue *vq) 1371 { 1372 unsigned int num = vq->num; 1373 1374 if (!vq->iotlb) 1375 return 1; 1376 1377 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, 1378 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && 1379 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, 1380 vhost_get_avail_size(vq, num), 1381 VHOST_ADDR_AVAIL) && 1382 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, 1383 vhost_get_used_size(vq, num), VHOST_ADDR_USED); 1384 } 1385 EXPORT_SYMBOL_GPL(vq_meta_prefetch); 1386 1387 /* Can we log writes? */ 1388 /* Caller should have device mutex but not vq mutex */ 1389 bool vhost_log_access_ok(struct vhost_dev *dev) 1390 { 1391 return memory_access_ok(dev, dev->umem, 1); 1392 } 1393 EXPORT_SYMBOL_GPL(vhost_log_access_ok); 1394 1395 static bool vq_log_used_access_ok(struct vhost_virtqueue *vq, 1396 void __user *log_base, 1397 bool log_used, 1398 u64 log_addr) 1399 { 1400 /* If an IOTLB device is present, log_addr is a GIOVA that 1401 * will never be logged by log_used(). */ 1402 if (vq->iotlb) 1403 return true; 1404 1405 return !log_used || log_access_ok(log_base, log_addr, 1406 vhost_get_used_size(vq, vq->num)); 1407 } 1408 1409 /* Verify access for write logging. */ 1410 /* Caller should have vq mutex and device mutex */ 1411 static bool vq_log_access_ok(struct vhost_virtqueue *vq, 1412 void __user *log_base) 1413 { 1414 return vq_memory_access_ok(log_base, vq->umem, 1415 vhost_has_feature(vq, VHOST_F_LOG_ALL)) && 1416 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); 1417 } 1418 1419 /* Can we start vq? */ 1420 /* Caller should have vq mutex and device mutex */ 1421 bool vhost_vq_access_ok(struct vhost_virtqueue *vq) 1422 { 1423 if (!vq_log_access_ok(vq, vq->log_base)) 1424 return false; 1425 1426 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); 1427 } 1428 EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 1429 1430 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 1431 { 1432 struct vhost_memory mem, *newmem; 1433 struct vhost_memory_region *region; 1434 struct vhost_iotlb *newumem, *oldumem; 1435 unsigned long size = offsetof(struct vhost_memory, regions); 1436 int i; 1437 1438 if (copy_from_user(&mem, m, size)) 1439 return -EFAULT; 1440 if (mem.padding) 1441 return -EOPNOTSUPP; 1442 if (mem.nregions > max_mem_regions) 1443 return -E2BIG; 1444 newmem = kvzalloc(struct_size(newmem, regions, mem.nregions), 1445 GFP_KERNEL); 1446 if (!newmem) 1447 return -ENOMEM; 1448 1449 memcpy(newmem, &mem, size); 1450 if (copy_from_user(newmem->regions, m->regions, 1451 flex_array_size(newmem, regions, mem.nregions))) { 1452 kvfree(newmem); 1453 return -EFAULT; 1454 } 1455 1456 newumem = iotlb_alloc(); 1457 if (!newumem) { 1458 kvfree(newmem); 1459 return -ENOMEM; 1460 } 1461 1462 for (region = newmem->regions; 1463 region < newmem->regions + mem.nregions; 1464 region++) { 1465 if (vhost_iotlb_add_range(newumem, 1466 region->guest_phys_addr, 1467 region->guest_phys_addr + 1468 region->memory_size - 1, 1469 region->userspace_addr, 1470 VHOST_MAP_RW)) 1471 goto err; 1472 } 1473 1474 if (!memory_access_ok(d, newumem, 0)) 1475 goto err; 1476 1477 oldumem = d->umem; 1478 d->umem = newumem; 1479 1480 /* All memory accesses are done under some VQ mutex. */ 1481 for (i = 0; i < d->nvqs; ++i) { 1482 mutex_lock(&d->vqs[i]->mutex); 1483 d->vqs[i]->umem = newumem; 1484 mutex_unlock(&d->vqs[i]->mutex); 1485 } 1486 1487 kvfree(newmem); 1488 vhost_iotlb_free(oldumem); 1489 return 0; 1490 1491 err: 1492 vhost_iotlb_free(newumem); 1493 kvfree(newmem); 1494 return -EFAULT; 1495 } 1496 1497 static long vhost_vring_set_num(struct vhost_dev *d, 1498 struct vhost_virtqueue *vq, 1499 void __user *argp) 1500 { 1501 struct vhost_vring_state s; 1502 1503 /* Resizing ring with an active backend? 1504 * You don't want to do that. */ 1505 if (vq->private_data) 1506 return -EBUSY; 1507 1508 if (copy_from_user(&s, argp, sizeof s)) 1509 return -EFAULT; 1510 1511 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) 1512 return -EINVAL; 1513 vq->num = s.num; 1514 1515 return 0; 1516 } 1517 1518 static long vhost_vring_set_addr(struct vhost_dev *d, 1519 struct vhost_virtqueue *vq, 1520 void __user *argp) 1521 { 1522 struct vhost_vring_addr a; 1523 1524 if (copy_from_user(&a, argp, sizeof a)) 1525 return -EFAULT; 1526 if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) 1527 return -EOPNOTSUPP; 1528 1529 /* For 32bit, verify that the top 32bits of the user 1530 data are set to zero. */ 1531 if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || 1532 (u64)(unsigned long)a.used_user_addr != a.used_user_addr || 1533 (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) 1534 return -EFAULT; 1535 1536 /* Make sure it's safe to cast pointers to vring types. */ 1537 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); 1538 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); 1539 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || 1540 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || 1541 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) 1542 return -EINVAL; 1543 1544 /* We only verify access here if backend is configured. 1545 * If it is not, we don't as size might not have been setup. 1546 * We will verify when backend is configured. */ 1547 if (vq->private_data) { 1548 if (!vq_access_ok(vq, vq->num, 1549 (void __user *)(unsigned long)a.desc_user_addr, 1550 (void __user *)(unsigned long)a.avail_user_addr, 1551 (void __user *)(unsigned long)a.used_user_addr)) 1552 return -EINVAL; 1553 1554 /* Also validate log access for used ring if enabled. */ 1555 if (!vq_log_used_access_ok(vq, vq->log_base, 1556 a.flags & (0x1 << VHOST_VRING_F_LOG), 1557 a.log_guest_addr)) 1558 return -EINVAL; 1559 } 1560 1561 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); 1562 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; 1563 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; 1564 vq->log_addr = a.log_guest_addr; 1565 vq->used = (void __user *)(unsigned long)a.used_user_addr; 1566 1567 return 0; 1568 } 1569 1570 static long vhost_vring_set_num_addr(struct vhost_dev *d, 1571 struct vhost_virtqueue *vq, 1572 unsigned int ioctl, 1573 void __user *argp) 1574 { 1575 long r; 1576 1577 mutex_lock(&vq->mutex); 1578 1579 switch (ioctl) { 1580 case VHOST_SET_VRING_NUM: 1581 r = vhost_vring_set_num(d, vq, argp); 1582 break; 1583 case VHOST_SET_VRING_ADDR: 1584 r = vhost_vring_set_addr(d, vq, argp); 1585 break; 1586 default: 1587 BUG(); 1588 } 1589 1590 mutex_unlock(&vq->mutex); 1591 1592 return r; 1593 } 1594 long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) 1595 { 1596 struct file *eventfp, *filep = NULL; 1597 bool pollstart = false, pollstop = false; 1598 struct eventfd_ctx *ctx = NULL; 1599 u32 __user *idxp = argp; 1600 struct vhost_virtqueue *vq; 1601 struct vhost_vring_state s; 1602 struct vhost_vring_file f; 1603 u32 idx; 1604 long r; 1605 1606 r = get_user(idx, idxp); 1607 if (r < 0) 1608 return r; 1609 if (idx >= d->nvqs) 1610 return -ENOBUFS; 1611 1612 idx = array_index_nospec(idx, d->nvqs); 1613 vq = d->vqs[idx]; 1614 1615 if (ioctl == VHOST_SET_VRING_NUM || 1616 ioctl == VHOST_SET_VRING_ADDR) { 1617 return vhost_vring_set_num_addr(d, vq, ioctl, argp); 1618 } 1619 1620 mutex_lock(&vq->mutex); 1621 1622 switch (ioctl) { 1623 case VHOST_SET_VRING_BASE: 1624 /* Moving base with an active backend? 1625 * You don't want to do that. */ 1626 if (vq->private_data) { 1627 r = -EBUSY; 1628 break; 1629 } 1630 if (copy_from_user(&s, argp, sizeof s)) { 1631 r = -EFAULT; 1632 break; 1633 } 1634 if (s.num > 0xffff) { 1635 r = -EINVAL; 1636 break; 1637 } 1638 vq->last_avail_idx = s.num; 1639 /* Forget the cached index value. */ 1640 vq->avail_idx = vq->last_avail_idx; 1641 break; 1642 case VHOST_GET_VRING_BASE: 1643 s.index = idx; 1644 s.num = vq->last_avail_idx; 1645 if (copy_to_user(argp, &s, sizeof s)) 1646 r = -EFAULT; 1647 break; 1648 case VHOST_SET_VRING_KICK: 1649 if (copy_from_user(&f, argp, sizeof f)) { 1650 r = -EFAULT; 1651 break; 1652 } 1653 eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd); 1654 if (IS_ERR(eventfp)) { 1655 r = PTR_ERR(eventfp); 1656 break; 1657 } 1658 if (eventfp != vq->kick) { 1659 pollstop = (filep = vq->kick) != NULL; 1660 pollstart = (vq->kick = eventfp) != NULL; 1661 } else 1662 filep = eventfp; 1663 break; 1664 case VHOST_SET_VRING_CALL: 1665 if (copy_from_user(&f, argp, sizeof f)) { 1666 r = -EFAULT; 1667 break; 1668 } 1669 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); 1670 if (IS_ERR(ctx)) { 1671 r = PTR_ERR(ctx); 1672 break; 1673 } 1674 1675 swap(ctx, vq->call_ctx.ctx); 1676 break; 1677 case VHOST_SET_VRING_ERR: 1678 if (copy_from_user(&f, argp, sizeof f)) { 1679 r = -EFAULT; 1680 break; 1681 } 1682 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); 1683 if (IS_ERR(ctx)) { 1684 r = PTR_ERR(ctx); 1685 break; 1686 } 1687 swap(ctx, vq->error_ctx); 1688 break; 1689 case VHOST_SET_VRING_ENDIAN: 1690 r = vhost_set_vring_endian(vq, argp); 1691 break; 1692 case VHOST_GET_VRING_ENDIAN: 1693 r = vhost_get_vring_endian(vq, idx, argp); 1694 break; 1695 case VHOST_SET_VRING_BUSYLOOP_TIMEOUT: 1696 if (copy_from_user(&s, argp, sizeof(s))) { 1697 r = -EFAULT; 1698 break; 1699 } 1700 vq->busyloop_timeout = s.num; 1701 break; 1702 case VHOST_GET_VRING_BUSYLOOP_TIMEOUT: 1703 s.index = idx; 1704 s.num = vq->busyloop_timeout; 1705 if (copy_to_user(argp, &s, sizeof(s))) 1706 r = -EFAULT; 1707 break; 1708 default: 1709 r = -ENOIOCTLCMD; 1710 } 1711 1712 if (pollstop && vq->handle_kick) 1713 vhost_poll_stop(&vq->poll); 1714 1715 if (!IS_ERR_OR_NULL(ctx)) 1716 eventfd_ctx_put(ctx); 1717 if (filep) 1718 fput(filep); 1719 1720 if (pollstart && vq->handle_kick) 1721 r = vhost_poll_start(&vq->poll, vq->kick); 1722 1723 mutex_unlock(&vq->mutex); 1724 1725 if (pollstop && vq->handle_kick) 1726 vhost_dev_flush(vq->poll.dev); 1727 return r; 1728 } 1729 EXPORT_SYMBOL_GPL(vhost_vring_ioctl); 1730 1731 int vhost_init_device_iotlb(struct vhost_dev *d) 1732 { 1733 struct vhost_iotlb *niotlb, *oiotlb; 1734 int i; 1735 1736 niotlb = iotlb_alloc(); 1737 if (!niotlb) 1738 return -ENOMEM; 1739 1740 oiotlb = d->iotlb; 1741 d->iotlb = niotlb; 1742 1743 for (i = 0; i < d->nvqs; ++i) { 1744 struct vhost_virtqueue *vq = d->vqs[i]; 1745 1746 mutex_lock(&vq->mutex); 1747 vq->iotlb = niotlb; 1748 __vhost_vq_meta_reset(vq); 1749 mutex_unlock(&vq->mutex); 1750 } 1751 1752 vhost_iotlb_free(oiotlb); 1753 1754 return 0; 1755 } 1756 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb); 1757 1758 /* Caller must have device mutex */ 1759 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) 1760 { 1761 struct eventfd_ctx *ctx; 1762 u64 p; 1763 long r; 1764 int i, fd; 1765 1766 /* If you are not the owner, you can become one */ 1767 if (ioctl == VHOST_SET_OWNER) { 1768 r = vhost_dev_set_owner(d); 1769 goto done; 1770 } 1771 1772 /* You must be the owner to do anything else */ 1773 r = vhost_dev_check_owner(d); 1774 if (r) 1775 goto done; 1776 1777 switch (ioctl) { 1778 case VHOST_SET_MEM_TABLE: 1779 r = vhost_set_memory(d, argp); 1780 break; 1781 case VHOST_SET_LOG_BASE: 1782 if (copy_from_user(&p, argp, sizeof p)) { 1783 r = -EFAULT; 1784 break; 1785 } 1786 if ((u64)(unsigned long)p != p) { 1787 r = -EFAULT; 1788 break; 1789 } 1790 for (i = 0; i < d->nvqs; ++i) { 1791 struct vhost_virtqueue *vq; 1792 void __user *base = (void __user *)(unsigned long)p; 1793 vq = d->vqs[i]; 1794 mutex_lock(&vq->mutex); 1795 /* If ring is inactive, will check when it's enabled. */ 1796 if (vq->private_data && !vq_log_access_ok(vq, base)) 1797 r = -EFAULT; 1798 else 1799 vq->log_base = base; 1800 mutex_unlock(&vq->mutex); 1801 } 1802 break; 1803 case VHOST_SET_LOG_FD: 1804 r = get_user(fd, (int __user *)argp); 1805 if (r < 0) 1806 break; 1807 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); 1808 if (IS_ERR(ctx)) { 1809 r = PTR_ERR(ctx); 1810 break; 1811 } 1812 swap(ctx, d->log_ctx); 1813 for (i = 0; i < d->nvqs; ++i) { 1814 mutex_lock(&d->vqs[i]->mutex); 1815 d->vqs[i]->log_ctx = d->log_ctx; 1816 mutex_unlock(&d->vqs[i]->mutex); 1817 } 1818 if (ctx) 1819 eventfd_ctx_put(ctx); 1820 break; 1821 default: 1822 r = -ENOIOCTLCMD; 1823 break; 1824 } 1825 done: 1826 return r; 1827 } 1828 EXPORT_SYMBOL_GPL(vhost_dev_ioctl); 1829 1830 /* TODO: This is really inefficient. We need something like get_user() 1831 * (instruction directly accesses the data, with an exception table entry 1832 * returning -EFAULT). See Documentation/x86/exception-tables.rst. 1833 */ 1834 static int set_bit_to_user(int nr, void __user *addr) 1835 { 1836 unsigned long log = (unsigned long)addr; 1837 struct page *page; 1838 void *base; 1839 int bit = nr + (log % PAGE_SIZE) * 8; 1840 int r; 1841 1842 r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page); 1843 if (r < 0) 1844 return r; 1845 BUG_ON(r != 1); 1846 base = kmap_atomic(page); 1847 set_bit(bit, base); 1848 kunmap_atomic(base); 1849 unpin_user_pages_dirty_lock(&page, 1, true); 1850 return 0; 1851 } 1852 1853 static int log_write(void __user *log_base, 1854 u64 write_address, u64 write_length) 1855 { 1856 u64 write_page = write_address / VHOST_PAGE_SIZE; 1857 int r; 1858 1859 if (!write_length) 1860 return 0; 1861 write_length += write_address % VHOST_PAGE_SIZE; 1862 for (;;) { 1863 u64 base = (u64)(unsigned long)log_base; 1864 u64 log = base + write_page / 8; 1865 int bit = write_page % 8; 1866 if ((u64)(unsigned long)log != log) 1867 return -EFAULT; 1868 r = set_bit_to_user(bit, (void __user *)(unsigned long)log); 1869 if (r < 0) 1870 return r; 1871 if (write_length <= VHOST_PAGE_SIZE) 1872 break; 1873 write_length -= VHOST_PAGE_SIZE; 1874 write_page += 1; 1875 } 1876 return r; 1877 } 1878 1879 static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) 1880 { 1881 struct vhost_iotlb *umem = vq->umem; 1882 struct vhost_iotlb_map *u; 1883 u64 start, end, l, min; 1884 int r; 1885 bool hit = false; 1886 1887 while (len) { 1888 min = len; 1889 /* More than one GPAs can be mapped into a single HVA. So 1890 * iterate all possible umems here to be safe. 1891 */ 1892 list_for_each_entry(u, &umem->list, link) { 1893 if (u->addr > hva - 1 + len || 1894 u->addr - 1 + u->size < hva) 1895 continue; 1896 start = max(u->addr, hva); 1897 end = min(u->addr - 1 + u->size, hva - 1 + len); 1898 l = end - start + 1; 1899 r = log_write(vq->log_base, 1900 u->start + start - u->addr, 1901 l); 1902 if (r < 0) 1903 return r; 1904 hit = true; 1905 min = min(l, min); 1906 } 1907 1908 if (!hit) 1909 return -EFAULT; 1910 1911 len -= min; 1912 hva += min; 1913 } 1914 1915 return 0; 1916 } 1917 1918 static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) 1919 { 1920 struct iovec *iov = vq->log_iov; 1921 int i, ret; 1922 1923 if (!vq->iotlb) 1924 return log_write(vq->log_base, vq->log_addr + used_offset, len); 1925 1926 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, 1927 len, iov, 64, VHOST_ACCESS_WO); 1928 if (ret < 0) 1929 return ret; 1930 1931 for (i = 0; i < ret; i++) { 1932 ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, 1933 iov[i].iov_len); 1934 if (ret) 1935 return ret; 1936 } 1937 1938 return 0; 1939 } 1940 1941 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, 1942 unsigned int log_num, u64 len, struct iovec *iov, int count) 1943 { 1944 int i, r; 1945 1946 /* Make sure data written is seen before log. */ 1947 smp_wmb(); 1948 1949 if (vq->iotlb) { 1950 for (i = 0; i < count; i++) { 1951 r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, 1952 iov[i].iov_len); 1953 if (r < 0) 1954 return r; 1955 } 1956 return 0; 1957 } 1958 1959 for (i = 0; i < log_num; ++i) { 1960 u64 l = min(log[i].len, len); 1961 r = log_write(vq->log_base, log[i].addr, l); 1962 if (r < 0) 1963 return r; 1964 len -= l; 1965 if (!len) { 1966 if (vq->log_ctx) 1967 eventfd_signal(vq->log_ctx, 1); 1968 return 0; 1969 } 1970 } 1971 /* Length written exceeds what we have stored. This is a bug. */ 1972 BUG(); 1973 return 0; 1974 } 1975 EXPORT_SYMBOL_GPL(vhost_log_write); 1976 1977 static int vhost_update_used_flags(struct vhost_virtqueue *vq) 1978 { 1979 void __user *used; 1980 if (vhost_put_used_flags(vq)) 1981 return -EFAULT; 1982 if (unlikely(vq->log_used)) { 1983 /* Make sure the flag is seen before log. */ 1984 smp_wmb(); 1985 /* Log used flag write. */ 1986 used = &vq->used->flags; 1987 log_used(vq, (used - (void __user *)vq->used), 1988 sizeof vq->used->flags); 1989 if (vq->log_ctx) 1990 eventfd_signal(vq->log_ctx, 1); 1991 } 1992 return 0; 1993 } 1994 1995 static int vhost_update_avail_event(struct vhost_virtqueue *vq) 1996 { 1997 if (vhost_put_avail_event(vq)) 1998 return -EFAULT; 1999 if (unlikely(vq->log_used)) { 2000 void __user *used; 2001 /* Make sure the event is seen before log. */ 2002 smp_wmb(); 2003 /* Log avail event write */ 2004 used = vhost_avail_event(vq); 2005 log_used(vq, (used - (void __user *)vq->used), 2006 sizeof *vhost_avail_event(vq)); 2007 if (vq->log_ctx) 2008 eventfd_signal(vq->log_ctx, 1); 2009 } 2010 return 0; 2011 } 2012 2013 int vhost_vq_init_access(struct vhost_virtqueue *vq) 2014 { 2015 __virtio16 last_used_idx; 2016 int r; 2017 bool is_le = vq->is_le; 2018 2019 if (!vq->private_data) 2020 return 0; 2021 2022 vhost_init_is_le(vq); 2023 2024 r = vhost_update_used_flags(vq); 2025 if (r) 2026 goto err; 2027 vq->signalled_used_valid = false; 2028 if (!vq->iotlb && 2029 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { 2030 r = -EFAULT; 2031 goto err; 2032 } 2033 r = vhost_get_used_idx(vq, &last_used_idx); 2034 if (r) { 2035 vq_err(vq, "Can't access used idx at %p\n", 2036 &vq->used->idx); 2037 goto err; 2038 } 2039 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); 2040 return 0; 2041 2042 err: 2043 vq->is_le = is_le; 2044 return r; 2045 } 2046 EXPORT_SYMBOL_GPL(vhost_vq_init_access); 2047 2048 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, 2049 struct iovec iov[], int iov_size, int access) 2050 { 2051 const struct vhost_iotlb_map *map; 2052 struct vhost_dev *dev = vq->dev; 2053 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; 2054 struct iovec *_iov; 2055 u64 s = 0, last = addr + len - 1; 2056 int ret = 0; 2057 2058 while ((u64)len > s) { 2059 u64 size; 2060 if (unlikely(ret >= iov_size)) { 2061 ret = -ENOBUFS; 2062 break; 2063 } 2064 2065 map = vhost_iotlb_itree_first(umem, addr, last); 2066 if (map == NULL || map->start > addr) { 2067 if (umem != dev->iotlb) { 2068 ret = -EFAULT; 2069 break; 2070 } 2071 ret = -EAGAIN; 2072 break; 2073 } else if (!(map->perm & access)) { 2074 ret = -EPERM; 2075 break; 2076 } 2077 2078 _iov = iov + ret; 2079 size = map->size - addr + map->start; 2080 _iov->iov_len = min((u64)len - s, size); 2081 _iov->iov_base = (void __user *)(unsigned long) 2082 (map->addr + addr - map->start); 2083 s += size; 2084 addr += size; 2085 ++ret; 2086 } 2087 2088 if (ret == -EAGAIN) 2089 vhost_iotlb_miss(vq, addr, access); 2090 return ret; 2091 } 2092 2093 /* Each buffer in the virtqueues is actually a chain of descriptors. This 2094 * function returns the next descriptor in the chain, 2095 * or -1U if we're at the end. */ 2096 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) 2097 { 2098 unsigned int next; 2099 2100 /* If this descriptor says it doesn't chain, we're done. */ 2101 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) 2102 return -1U; 2103 2104 /* Check they're not leading us off end of descriptors. */ 2105 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); 2106 return next; 2107 } 2108 2109 static int get_indirect(struct vhost_virtqueue *vq, 2110 struct iovec iov[], unsigned int iov_size, 2111 unsigned int *out_num, unsigned int *in_num, 2112 struct vhost_log *log, unsigned int *log_num, 2113 struct vring_desc *indirect) 2114 { 2115 struct vring_desc desc; 2116 unsigned int i = 0, count, found = 0; 2117 u32 len = vhost32_to_cpu(vq, indirect->len); 2118 struct iov_iter from; 2119 int ret, access; 2120 2121 /* Sanity check */ 2122 if (unlikely(len % sizeof desc)) { 2123 vq_err(vq, "Invalid length in indirect descriptor: " 2124 "len 0x%llx not multiple of 0x%zx\n", 2125 (unsigned long long)len, 2126 sizeof desc); 2127 return -EINVAL; 2128 } 2129 2130 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, 2131 UIO_MAXIOV, VHOST_ACCESS_RO); 2132 if (unlikely(ret < 0)) { 2133 if (ret != -EAGAIN) 2134 vq_err(vq, "Translation failure %d in indirect.\n", ret); 2135 return ret; 2136 } 2137 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); 2138 count = len / sizeof desc; 2139 /* Buffers are chained via a 16 bit next field, so 2140 * we can have at most 2^16 of these. */ 2141 if (unlikely(count > USHRT_MAX + 1)) { 2142 vq_err(vq, "Indirect buffer length too big: %d\n", 2143 indirect->len); 2144 return -E2BIG; 2145 } 2146 2147 do { 2148 unsigned iov_count = *in_num + *out_num; 2149 if (unlikely(++found > count)) { 2150 vq_err(vq, "Loop detected: last one at %u " 2151 "indirect size %u\n", 2152 i, count); 2153 return -EINVAL; 2154 } 2155 if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) { 2156 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", 2157 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); 2158 return -EINVAL; 2159 } 2160 if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { 2161 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", 2162 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); 2163 return -EINVAL; 2164 } 2165 2166 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) 2167 access = VHOST_ACCESS_WO; 2168 else 2169 access = VHOST_ACCESS_RO; 2170 2171 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), 2172 vhost32_to_cpu(vq, desc.len), iov + iov_count, 2173 iov_size - iov_count, access); 2174 if (unlikely(ret < 0)) { 2175 if (ret != -EAGAIN) 2176 vq_err(vq, "Translation failure %d indirect idx %d\n", 2177 ret, i); 2178 return ret; 2179 } 2180 /* If this is an input descriptor, increment that count. */ 2181 if (access == VHOST_ACCESS_WO) { 2182 *in_num += ret; 2183 if (unlikely(log && ret)) { 2184 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); 2185 log[*log_num].len = vhost32_to_cpu(vq, desc.len); 2186 ++*log_num; 2187 } 2188 } else { 2189 /* If it's an output descriptor, they're all supposed 2190 * to come before any input descriptors. */ 2191 if (unlikely(*in_num)) { 2192 vq_err(vq, "Indirect descriptor " 2193 "has out after in: idx %d\n", i); 2194 return -EINVAL; 2195 } 2196 *out_num += ret; 2197 } 2198 } while ((i = next_desc(vq, &desc)) != -1); 2199 return 0; 2200 } 2201 2202 /* This looks in the virtqueue and for the first available buffer, and converts 2203 * it to an iovec for convenient access. Since descriptors consist of some 2204 * number of output then some number of input descriptors, it's actually two 2205 * iovecs, but we pack them into one and note how many of each there were. 2206 * 2207 * This function returns the descriptor number found, or vq->num (which is 2208 * never a valid descriptor number) if none was found. A negative code is 2209 * returned on error. */ 2210 int vhost_get_vq_desc(struct vhost_virtqueue *vq, 2211 struct iovec iov[], unsigned int iov_size, 2212 unsigned int *out_num, unsigned int *in_num, 2213 struct vhost_log *log, unsigned int *log_num) 2214 { 2215 struct vring_desc desc; 2216 unsigned int i, head, found = 0; 2217 u16 last_avail_idx; 2218 __virtio16 avail_idx; 2219 __virtio16 ring_head; 2220 int ret, access; 2221 2222 /* Check it isn't doing very strange things with descriptor numbers. */ 2223 last_avail_idx = vq->last_avail_idx; 2224 2225 if (vq->avail_idx == vq->last_avail_idx) { 2226 if (unlikely(vhost_get_avail_idx(vq, &avail_idx))) { 2227 vq_err(vq, "Failed to access avail idx at %p\n", 2228 &vq->avail->idx); 2229 return -EFAULT; 2230 } 2231 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); 2232 2233 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) { 2234 vq_err(vq, "Guest moved used index from %u to %u", 2235 last_avail_idx, vq->avail_idx); 2236 return -EFAULT; 2237 } 2238 2239 /* If there's nothing new since last we looked, return 2240 * invalid. 2241 */ 2242 if (vq->avail_idx == last_avail_idx) 2243 return vq->num; 2244 2245 /* Only get avail ring entries after they have been 2246 * exposed by guest. 2247 */ 2248 smp_rmb(); 2249 } 2250 2251 /* Grab the next descriptor number they're advertising, and increment 2252 * the index we've seen. */ 2253 if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { 2254 vq_err(vq, "Failed to read head: idx %d address %p\n", 2255 last_avail_idx, 2256 &vq->avail->ring[last_avail_idx % vq->num]); 2257 return -EFAULT; 2258 } 2259 2260 head = vhost16_to_cpu(vq, ring_head); 2261 2262 /* If their number is silly, that's an error. */ 2263 if (unlikely(head >= vq->num)) { 2264 vq_err(vq, "Guest says index %u > %u is available", 2265 head, vq->num); 2266 return -EINVAL; 2267 } 2268 2269 /* When we start there are none of either input nor output. */ 2270 *out_num = *in_num = 0; 2271 if (unlikely(log)) 2272 *log_num = 0; 2273 2274 i = head; 2275 do { 2276 unsigned iov_count = *in_num + *out_num; 2277 if (unlikely(i >= vq->num)) { 2278 vq_err(vq, "Desc index is %u > %u, head = %u", 2279 i, vq->num, head); 2280 return -EINVAL; 2281 } 2282 if (unlikely(++found > vq->num)) { 2283 vq_err(vq, "Loop detected: last one at %u " 2284 "vq size %u head %u\n", 2285 i, vq->num, head); 2286 return -EINVAL; 2287 } 2288 ret = vhost_get_desc(vq, &desc, i); 2289 if (unlikely(ret)) { 2290 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", 2291 i, vq->desc + i); 2292 return -EFAULT; 2293 } 2294 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { 2295 ret = get_indirect(vq, iov, iov_size, 2296 out_num, in_num, 2297 log, log_num, &desc); 2298 if (unlikely(ret < 0)) { 2299 if (ret != -EAGAIN) 2300 vq_err(vq, "Failure detected " 2301 "in indirect descriptor at idx %d\n", i); 2302 return ret; 2303 } 2304 continue; 2305 } 2306 2307 if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) 2308 access = VHOST_ACCESS_WO; 2309 else 2310 access = VHOST_ACCESS_RO; 2311 ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), 2312 vhost32_to_cpu(vq, desc.len), iov + iov_count, 2313 iov_size - iov_count, access); 2314 if (unlikely(ret < 0)) { 2315 if (ret != -EAGAIN) 2316 vq_err(vq, "Translation failure %d descriptor idx %d\n", 2317 ret, i); 2318 return ret; 2319 } 2320 if (access == VHOST_ACCESS_WO) { 2321 /* If this is an input descriptor, 2322 * increment that count. */ 2323 *in_num += ret; 2324 if (unlikely(log && ret)) { 2325 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); 2326 log[*log_num].len = vhost32_to_cpu(vq, desc.len); 2327 ++*log_num; 2328 } 2329 } else { 2330 /* If it's an output descriptor, they're all supposed 2331 * to come before any input descriptors. */ 2332 if (unlikely(*in_num)) { 2333 vq_err(vq, "Descriptor has out after in: " 2334 "idx %d\n", i); 2335 return -EINVAL; 2336 } 2337 *out_num += ret; 2338 } 2339 } while ((i = next_desc(vq, &desc)) != -1); 2340 2341 /* On success, increment avail index. */ 2342 vq->last_avail_idx++; 2343 2344 /* Assume notifications from guest are disabled at this point, 2345 * if they aren't we would need to update avail_event index. */ 2346 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); 2347 return head; 2348 } 2349 EXPORT_SYMBOL_GPL(vhost_get_vq_desc); 2350 2351 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ 2352 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) 2353 { 2354 vq->last_avail_idx -= n; 2355 } 2356 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); 2357 2358 /* After we've used one of their buffers, we tell them about it. We'll then 2359 * want to notify the guest, using eventfd. */ 2360 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 2361 { 2362 struct vring_used_elem heads = { 2363 cpu_to_vhost32(vq, head), 2364 cpu_to_vhost32(vq, len) 2365 }; 2366 2367 return vhost_add_used_n(vq, &heads, 1); 2368 } 2369 EXPORT_SYMBOL_GPL(vhost_add_used); 2370 2371 static int __vhost_add_used_n(struct vhost_virtqueue *vq, 2372 struct vring_used_elem *heads, 2373 unsigned count) 2374 { 2375 vring_used_elem_t __user *used; 2376 u16 old, new; 2377 int start; 2378 2379 start = vq->last_used_idx & (vq->num - 1); 2380 used = vq->used->ring + start; 2381 if (vhost_put_used(vq, heads, start, count)) { 2382 vq_err(vq, "Failed to write used"); 2383 return -EFAULT; 2384 } 2385 if (unlikely(vq->log_used)) { 2386 /* Make sure data is seen before log. */ 2387 smp_wmb(); 2388 /* Log used ring entry write. */ 2389 log_used(vq, ((void __user *)used - (void __user *)vq->used), 2390 count * sizeof *used); 2391 } 2392 old = vq->last_used_idx; 2393 new = (vq->last_used_idx += count); 2394 /* If the driver never bothers to signal in a very long while, 2395 * used index might wrap around. If that happens, invalidate 2396 * signalled_used index we stored. TODO: make sure driver 2397 * signals at least once in 2^16 and remove this. */ 2398 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) 2399 vq->signalled_used_valid = false; 2400 return 0; 2401 } 2402 2403 /* After we've used one of their buffers, we tell them about it. We'll then 2404 * want to notify the guest, using eventfd. */ 2405 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, 2406 unsigned count) 2407 { 2408 int start, n, r; 2409 2410 start = vq->last_used_idx & (vq->num - 1); 2411 n = vq->num - start; 2412 if (n < count) { 2413 r = __vhost_add_used_n(vq, heads, n); 2414 if (r < 0) 2415 return r; 2416 heads += n; 2417 count -= n; 2418 } 2419 r = __vhost_add_used_n(vq, heads, count); 2420 2421 /* Make sure buffer is written before we update index. */ 2422 smp_wmb(); 2423 if (vhost_put_used_idx(vq)) { 2424 vq_err(vq, "Failed to increment used idx"); 2425 return -EFAULT; 2426 } 2427 if (unlikely(vq->log_used)) { 2428 /* Make sure used idx is seen before log. */ 2429 smp_wmb(); 2430 /* Log used index update. */ 2431 log_used(vq, offsetof(struct vring_used, idx), 2432 sizeof vq->used->idx); 2433 if (vq->log_ctx) 2434 eventfd_signal(vq->log_ctx, 1); 2435 } 2436 return r; 2437 } 2438 EXPORT_SYMBOL_GPL(vhost_add_used_n); 2439 2440 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2441 { 2442 __u16 old, new; 2443 __virtio16 event; 2444 bool v; 2445 /* Flush out used index updates. This is paired 2446 * with the barrier that the Guest executes when enabling 2447 * interrupts. */ 2448 smp_mb(); 2449 2450 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && 2451 unlikely(vq->avail_idx == vq->last_avail_idx)) 2452 return true; 2453 2454 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2455 __virtio16 flags; 2456 if (vhost_get_avail_flags(vq, &flags)) { 2457 vq_err(vq, "Failed to get flags"); 2458 return true; 2459 } 2460 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); 2461 } 2462 old = vq->signalled_used; 2463 v = vq->signalled_used_valid; 2464 new = vq->signalled_used = vq->last_used_idx; 2465 vq->signalled_used_valid = true; 2466 2467 if (unlikely(!v)) 2468 return true; 2469 2470 if (vhost_get_used_event(vq, &event)) { 2471 vq_err(vq, "Failed to get used event idx"); 2472 return true; 2473 } 2474 return vring_need_event(vhost16_to_cpu(vq, event), new, old); 2475 } 2476 2477 /* This actually signals the guest, using eventfd. */ 2478 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2479 { 2480 /* Signal the Guest tell them we used something up. */ 2481 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) 2482 eventfd_signal(vq->call_ctx.ctx, 1); 2483 } 2484 EXPORT_SYMBOL_GPL(vhost_signal); 2485 2486 /* And here's the combo meal deal. Supersize me! */ 2487 void vhost_add_used_and_signal(struct vhost_dev *dev, 2488 struct vhost_virtqueue *vq, 2489 unsigned int head, int len) 2490 { 2491 vhost_add_used(vq, head, len); 2492 vhost_signal(dev, vq); 2493 } 2494 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); 2495 2496 /* multi-buffer version of vhost_add_used_and_signal */ 2497 void vhost_add_used_and_signal_n(struct vhost_dev *dev, 2498 struct vhost_virtqueue *vq, 2499 struct vring_used_elem *heads, unsigned count) 2500 { 2501 vhost_add_used_n(vq, heads, count); 2502 vhost_signal(dev, vq); 2503 } 2504 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); 2505 2506 /* return true if we're sure that avaiable ring is empty */ 2507 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2508 { 2509 __virtio16 avail_idx; 2510 int r; 2511 2512 if (vq->avail_idx != vq->last_avail_idx) 2513 return false; 2514 2515 r = vhost_get_avail_idx(vq, &avail_idx); 2516 if (unlikely(r)) 2517 return false; 2518 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); 2519 2520 return vq->avail_idx == vq->last_avail_idx; 2521 } 2522 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); 2523 2524 /* OK, now we need to know about added descriptors. */ 2525 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2526 { 2527 __virtio16 avail_idx; 2528 int r; 2529 2530 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 2531 return false; 2532 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 2533 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2534 r = vhost_update_used_flags(vq); 2535 if (r) { 2536 vq_err(vq, "Failed to enable notification at %p: %d\n", 2537 &vq->used->flags, r); 2538 return false; 2539 } 2540 } else { 2541 r = vhost_update_avail_event(vq); 2542 if (r) { 2543 vq_err(vq, "Failed to update avail event index at %p: %d\n", 2544 vhost_avail_event(vq), r); 2545 return false; 2546 } 2547 } 2548 /* They could have slipped one in as we were doing that: make 2549 * sure it's written, then check again. */ 2550 smp_mb(); 2551 r = vhost_get_avail_idx(vq, &avail_idx); 2552 if (r) { 2553 vq_err(vq, "Failed to check avail idx at %p: %d\n", 2554 &vq->avail->idx, r); 2555 return false; 2556 } 2557 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); 2558 2559 return vq->avail_idx != vq->last_avail_idx; 2560 } 2561 EXPORT_SYMBOL_GPL(vhost_enable_notify); 2562 2563 /* We don't need to be notified again. */ 2564 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2565 { 2566 int r; 2567 2568 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 2569 return; 2570 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 2571 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2572 r = vhost_update_used_flags(vq); 2573 if (r) 2574 vq_err(vq, "Failed to disable notification at %p: %d\n", 2575 &vq->used->flags, r); 2576 } 2577 } 2578 EXPORT_SYMBOL_GPL(vhost_disable_notify); 2579 2580 /* Create a new message. */ 2581 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) 2582 { 2583 struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); 2584 if (!node) 2585 return NULL; 2586 2587 /* Make sure all padding within the structure is initialized. */ 2588 memset(&node->msg, 0, sizeof node->msg); 2589 node->vq = vq; 2590 node->msg.type = type; 2591 return node; 2592 } 2593 EXPORT_SYMBOL_GPL(vhost_new_msg); 2594 2595 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, 2596 struct vhost_msg_node *node) 2597 { 2598 spin_lock(&dev->iotlb_lock); 2599 list_add_tail(&node->node, head); 2600 spin_unlock(&dev->iotlb_lock); 2601 2602 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); 2603 } 2604 EXPORT_SYMBOL_GPL(vhost_enqueue_msg); 2605 2606 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, 2607 struct list_head *head) 2608 { 2609 struct vhost_msg_node *node = NULL; 2610 2611 spin_lock(&dev->iotlb_lock); 2612 if (!list_empty(head)) { 2613 node = list_first_entry(head, struct vhost_msg_node, 2614 node); 2615 list_del(&node->node); 2616 } 2617 spin_unlock(&dev->iotlb_lock); 2618 2619 return node; 2620 } 2621 EXPORT_SYMBOL_GPL(vhost_dequeue_msg); 2622 2623 void vhost_set_backend_features(struct vhost_dev *dev, u64 features) 2624 { 2625 struct vhost_virtqueue *vq; 2626 int i; 2627 2628 mutex_lock(&dev->mutex); 2629 for (i = 0; i < dev->nvqs; ++i) { 2630 vq = dev->vqs[i]; 2631 mutex_lock(&vq->mutex); 2632 vq->acked_backend_features = features; 2633 mutex_unlock(&vq->mutex); 2634 } 2635 mutex_unlock(&dev->mutex); 2636 } 2637 EXPORT_SYMBOL_GPL(vhost_set_backend_features); 2638 2639 static int __init vhost_init(void) 2640 { 2641 return 0; 2642 } 2643 2644 static void __exit vhost_exit(void) 2645 { 2646 } 2647 2648 module_init(vhost_init); 2649 module_exit(vhost_exit); 2650 2651 MODULE_VERSION("0.0.1"); 2652 MODULE_LICENSE("GPL v2"); 2653 MODULE_AUTHOR("Michael S. Tsirkin"); 2654 MODULE_DESCRIPTION("Host kernel accelerator for virtio"); 2655