/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/interval_tree_generic.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

INTERVAL_TREE_DEFINE(struct vhost_umem_node,
		     rb, __u64, __subtree_last,
		     START, LAST, static inline, vhost_umem_interval_tree);

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
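	 * With VIRTIO_F_VERSION_1 negotiated, the ring is always little
	 * endian regardless of user_be, as the assignment below shows.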
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
	init_waitqueue_head(&work->done);
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards.
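 * Typically that means calling vhost_poll_flush() on this poll structure
 * before the file is released.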
 */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	struct vhost_flush_struct flush;

	if (dev->worker) {
		init_completion(&flush.wait_event);
		vhost_work_init(&flush.work, vhost_flush_work);

		vhost_work_queue(dev, &flush.work);
		wait_for_completion(&flush.wait_event);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	if (!dev->worker)
		return;

	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &dev->work_list);
		wake_up_process(dev->worker);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_has_work(struct vhost_dev *dev)
{
	return !llist_empty(&dev->work_list);
}
EXPORT_SYMBOL_GPL(vhost_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vhost_reset_is_le(vq);
	vhost_disable_cross_endian(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	__vhost_vq_meta_reset(vq);
}

static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}

		node = llist_del_all(&dev->work_list);
		if (!node)
			schedule();

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
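			/* QUEUED is cleared before fn() runs, so
			 * vhost_work_queue() can re-queue this work while it
			 * is still executing. */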
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		}
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->worker = NULL;
	init_llist_head(&dev->work_list);
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already?
	 */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

struct vhost_umem *vhost_dev_reset_owner_prepare(void)
{
	return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	INIT_LIST_HEAD(&umem->umem_list);
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

static void vhost_umem_free(struct vhost_umem *umem,
			    struct vhost_umem_node *node)
{
	vhost_umem_interval_tree_remove(node, &umem->umem_tree);
	list_del(&node->link);
	kfree(node);
	umem->numem--;
}

static void vhost_umem_clean(struct vhost_umem *umem)
{
	struct vhost_umem_node *node, *tmp;

	if (!umem)
		return;

	list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
		vhost_umem_free(umem, node);

	kvfree(umem);
}

static void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}

/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	vhost_umem_clean(dev->umem);
	dev->umem = NULL;
	vhost_umem_clean(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM);
	WARN_ON(!llist_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Make sure 64 bit math will not overflow. */
	return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
			       int log_all)
{
	struct vhost_umem_node *node;

	if (!umem)
		return 0;

	list_for_each_entry(node, &umem->umem_list, link) {
		unsigned long a = node->userspace_addr;

		if (vhost_overflow(node->userspace_addr, node->size))
			return 0;

		if (!access_ok(VERIFY_WRITE, (void __user *)a,
			       node->size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   node->start,
						   node->size))
			return 0;
	}
	return 1;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_umem_node *node = vq->meta_iotlb[type];

	if (!node)
		return NULL;

	return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb, so
		 * -EAGAIN should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, WRITE, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that vq
		 * memory can be accessed through the iotlb, so
		 * -EAGAIN should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, READ, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
		       "%p size 0x%llx\n", addr,
		       (unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * memory can be accessed through the iotlb, so
 * -EAGAIN should not happen in this case.
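 * The fast path below relies on vq->meta_iotlb, which vq_iotlb_prefetch()
 * fills in via vhost_vq_meta_update().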
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void *addr, unsigned int size,
					    int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret = -EFAULT; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;
	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}

static int vhost_new_umem_range(struct vhost_umem *umem,
				u64 start, u64 size, u64 end,
				u64 userspace_addr, int perm)
{
	struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return -ENOMEM;

	if (umem->numem == max_iotlb_entries) {
		tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
		vhost_umem_free(umem, tmp);
	}

	node->start = start;
	node->size = size;
	node->last = end;
	node->userspace_addr = userspace_addr;
	node->perm = perm;
	INIT_LIST_HEAD(&node->link);
	list_add_tail(&node->link, &umem->umem_list);
	vhost_umem_interval_tree_insert(node, &umem->umem_tree);
	umem->numem++;

	return 0;
}

static void vhost_del_umem_range(struct vhost_umem *umem,
				 u64 start, u64 end)
{
	struct vhost_umem_node *node;

	while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   start, end)))
		vhost_umem_free(umem, node);
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 > vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static int umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow.
	 */
	if (vhost_overflow(uaddr, size))
		return -EFAULT;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok(VERIFY_READ, (void __user *)a, size))
		return -EFAULT;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok(VERIFY_WRITE, (void __user *)a, size))
		return -EFAULT;
	return 0;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
					 msg->iova + msg->size - 1,
					 msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_del_umem_range(dev->iotlb, msg->iova,
				     msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	return ret;
}

ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_msg_node node;
	unsigned size = sizeof(struct vhost_msg);
	size_t ret;
	int err;

	if (iov_iter_count(from) < size)
		return 0;
	ret = copy_from_iter(&node.msg, size, from);
	if (ret != size)
		goto done;

	switch (node.msg.type) {
	case VHOST_IOTLB_MSG:
		err = vhost_process_iotlb_msg(dev, &node.msg.iotlb);
		if (err)
			ret = err;
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

unsigned int vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		ret = copy_to_iter(&node->msg, size, to);

		if (ret != size || node->msg.type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}

		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;

	node = vhost_new_msg(vq, VHOST_IOTLB_MISS);
	if (!node)
		return -ENOMEM;

	msg = &node->msg.iotlb;
	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)

{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
				 const struct vhost_umem_node *node,
				 int type)
{
	int access = (type == VHOST_ADDR_USED) ?
		     VHOST_ACCESS_WO : VHOST_ACCESS_RO;

	if (likely(node->perm & access))
		vq->meta_iotlb[type] = node;
}

static int iotlb_access_ok(struct vhost_virtqueue *vq,
			   int access, u64 addr, u64 len, int type)
{
	const struct vhost_umem_node *node;
	struct vhost_umem *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr,
							   last);
		if (node == NULL || node->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(node->perm & access)) {
			/* Report the possible access violation by
			 * requesting another translation from userspace.
			 */
			return false;
		}

		size = node->size - addr + node->start;

		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, node, type);

		s += size;
		addr += size;
	}

	return true;
}

int vq_iotlb_prefetch(struct vhost_virtqueue *vq)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	unsigned int num = vq->num;

	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
			       num * sizeof(*vq->desc), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
			       sizeof *vq->avail +
			       num * sizeof(*vq->avail->ring) + s,
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
			       sizeof *vq->used +
			       num * sizeof(*vq->used->ring) + s,
			       VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_iotlb_prefetch);

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (vq->iotlb) {
		/* When a device IOTLB is used, access is
		 * validated during prefetching instead.
		 */
		return 1;
	}
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

static struct vhost_umem *vhost_umem_alloc(void)
{
	struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);

	if (!umem)
		return NULL;

	umem->umem_tree = RB_ROOT_CACHED;
	umem->numem = 0;
	INIT_LIST_HEAD(&umem->umem_list);

	return umem;
}

static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_umem *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc(size + mem.nregions * sizeof(*m->regions), GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = vhost_umem_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_new_umem_range(newumem,
					 region->guest_phys_addr,
					 region->memory_size,
					 region->guest_phys_addr +
					 region->memory_size - 1,
					 region->userspace_addr,
					 VHOST_ACCESS_RW))
			goto err;
	}

	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_umem_clean(oldumem);
	return 0;

err:
	vhost_umem_clean(newumem);
	kvfree(newmem);
	return -EFAULT;
}

long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that.
		 */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		   data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}

		/* Make sure it's safe to cast pointers to vring types. */
		BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
		BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
		if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
		    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
		    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
	struct vhost_umem *niotlb, *oiotlb;
	int i;

	niotlb = vhost_umem_alloc();
	if (!niotlb)
		return -ENOMEM;

	oiotlb = d->iotlb;
	d->iotlb = niotlb;

	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->iotlb = niotlb;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	vhost_umem_clean(oiotlb);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled.
			 */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			d->log_file = eventfp;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			   &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write.
		 */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			   vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;

	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used(vq, last_used_idx, &vq->used->idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_umem_node *node;
	struct vhost_dev *dev = vq->dev;
	struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
							   addr, addr + len - 1);
		if (node == NULL || node->start > addr) {
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(node->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		size = node->size - addr + node->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(node->userspace_addr + addr - node->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = vhost16_to_cpu(vq, desc->next);
	/* Make sure compiler knows to grab that: we don't want it changing!
	 */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, READ, vq->indirect, ret, len);

	/* We will use the result as an address to read from, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}

/* This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.
 * Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function returns the descriptor number found, or vq->num (which is
 * never a valid descriptor number) if none was found. A negative code is
 * returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
		      struct iovec iov[], unsigned int iov_size,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num)
{
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx;
	__virtio16 avail_idx;
	__virtio16 ring_head;
	int ret, access;

	/* Check it isn't doing very strange things with descriptor numbers. */
	last_avail_idx = vq->last_avail_idx;

	if (vq->avail_idx == vq->last_avail_idx) {
		if (unlikely(vhost_get_avail(vq, avail_idx, &vq->avail->idx))) {
			vq_err(vq, "Failed to access avail idx at %p\n",
			       &vq->avail->idx);
			return -EFAULT;
		}
		vq->avail_idx = vhost16_to_cpu(vq, avail_idx);

		if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
			vq_err(vq, "Guest moved used index from %u to %u",
			       last_avail_idx, vq->avail_idx);
			return -EFAULT;
		}

		/* If there's nothing new since last we looked, return
		 * invalid.
		 */
		if (vq->avail_idx == last_avail_idx)
			return vq->num;

		/* Only get avail ring entries after they have been
		 * exposed by guest.
		 */
		smp_rmb();
	}

	/* Grab the next descriptor number they're advertising, and increment
	 * the index we've seen. */
	if (unlikely(vhost_get_avail(vq, ring_head,
		     &vq->avail->ring[last_avail_idx & (vq->num - 1)]))) {
		vq_err(vq, "Failed to read head: idx %d address %p\n",
		       last_avail_idx,
		       &vq->avail->ring[last_avail_idx % vq->num]);
		return -EFAULT;
	}

	head = vhost16_to_cpu(vq, ring_head);

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output.
	 */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_copy_from_user(vq, &desc, vq->desc + i,
					   sizeof desc);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);

/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
	vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);

/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd.
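 * Callers either pair vhost_add_used()/vhost_add_used_n() with a later
 * vhost_signal(), or use the combined vhost_add_used_and_signal() helpers
 * further down.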
*/ 2151 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 2152 { 2153 struct vring_used_elem heads = { 2154 cpu_to_vhost32(vq, head), 2155 cpu_to_vhost32(vq, len) 2156 }; 2157 2158 return vhost_add_used_n(vq, &heads, 1); 2159 } 2160 EXPORT_SYMBOL_GPL(vhost_add_used); 2161 2162 static int __vhost_add_used_n(struct vhost_virtqueue *vq, 2163 struct vring_used_elem *heads, 2164 unsigned count) 2165 { 2166 struct vring_used_elem __user *used; 2167 u16 old, new; 2168 int start; 2169 2170 start = vq->last_used_idx & (vq->num - 1); 2171 used = vq->used->ring + start; 2172 if (count == 1) { 2173 if (vhost_put_user(vq, heads[0].id, &used->id)) { 2174 vq_err(vq, "Failed to write used id"); 2175 return -EFAULT; 2176 } 2177 if (vhost_put_user(vq, heads[0].len, &used->len)) { 2178 vq_err(vq, "Failed to write used len"); 2179 return -EFAULT; 2180 } 2181 } else if (vhost_copy_to_user(vq, used, heads, count * sizeof *used)) { 2182 vq_err(vq, "Failed to write used"); 2183 return -EFAULT; 2184 } 2185 if (unlikely(vq->log_used)) { 2186 /* Make sure data is seen before log. */ 2187 smp_wmb(); 2188 /* Log used ring entry write. */ 2189 log_write(vq->log_base, 2190 vq->log_addr + 2191 ((void __user *)used - (void __user *)vq->used), 2192 count * sizeof *used); 2193 } 2194 old = vq->last_used_idx; 2195 new = (vq->last_used_idx += count); 2196 /* If the driver never bothers to signal in a very long while, 2197 * used index might wrap around. If that happens, invalidate 2198 * signalled_used index we stored. TODO: make sure driver 2199 * signals at least once in 2^16 and remove this. */ 2200 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) 2201 vq->signalled_used_valid = false; 2202 return 0; 2203 } 2204 2205 /* After we've used one of their buffers, we tell them about it. We'll then 2206 * want to notify the guest, using eventfd. */ 2207 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, 2208 unsigned count) 2209 { 2210 int start, n, r; 2211 2212 start = vq->last_used_idx & (vq->num - 1); 2213 n = vq->num - start; 2214 if (n < count) { 2215 r = __vhost_add_used_n(vq, heads, n); 2216 if (r < 0) 2217 return r; 2218 heads += n; 2219 count -= n; 2220 } 2221 r = __vhost_add_used_n(vq, heads, count); 2222 2223 /* Make sure buffer is written before we update index. */ 2224 smp_wmb(); 2225 if (vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), 2226 &vq->used->idx)) { 2227 vq_err(vq, "Failed to increment used idx"); 2228 return -EFAULT; 2229 } 2230 if (unlikely(vq->log_used)) { 2231 /* Log used index update. */ 2232 log_write(vq->log_base, 2233 vq->log_addr + offsetof(struct vring_used, idx), 2234 sizeof vq->used->idx); 2235 if (vq->log_ctx) 2236 eventfd_signal(vq->log_ctx, 1); 2237 } 2238 return r; 2239 } 2240 EXPORT_SYMBOL_GPL(vhost_add_used_n); 2241 2242 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2243 { 2244 __u16 old, new; 2245 __virtio16 event; 2246 bool v; 2247 /* Flush out used index updates. This is paired 2248 * with the barrier that the Guest executes when enabling 2249 * interrupts. 
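 *
 * (Editor's aside, not part of the original comment: when
 * VIRTIO_RING_F_EVENT_IDX has been negotiated, the decision below comes
 * down to vring_need_event() from include/uapi/linux/virtio_ring.h, which
 * is essentially (u16)(new - event - 1) < (u16)(new - old): only signal if
 * the used index has moved past the guest's used_event value since the
 * last signal. Without EVENT_IDX we fall back to checking the
 * VRING_AVAIL_F_NO_INTERRUPT flag.)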
*/ 2250 smp_mb(); 2251 2252 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && 2253 unlikely(vq->avail_idx == vq->last_avail_idx)) 2254 return true; 2255 2256 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2257 __virtio16 flags; 2258 if (vhost_get_avail(vq, flags, &vq->avail->flags)) { 2259 vq_err(vq, "Failed to get flags"); 2260 return true; 2261 } 2262 return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); 2263 } 2264 old = vq->signalled_used; 2265 v = vq->signalled_used_valid; 2266 new = vq->signalled_used = vq->last_used_idx; 2267 vq->signalled_used_valid = true; 2268 2269 if (unlikely(!v)) 2270 return true; 2271 2272 if (vhost_get_avail(vq, event, vhost_used_event(vq))) { 2273 vq_err(vq, "Failed to get used event idx"); 2274 return true; 2275 } 2276 return vring_need_event(vhost16_to_cpu(vq, event), new, old); 2277 } 2278 2279 /* This actually signals the guest, using eventfd. */ 2280 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2281 { 2282 /* Signal the Guest to tell them we used something up. */ 2283 if (vq->call_ctx && vhost_notify(dev, vq)) 2284 eventfd_signal(vq->call_ctx, 1); 2285 } 2286 EXPORT_SYMBOL_GPL(vhost_signal); 2287 2288 /* And here's the combo meal deal. Supersize me! */ 2289 void vhost_add_used_and_signal(struct vhost_dev *dev, 2290 struct vhost_virtqueue *vq, 2291 unsigned int head, int len) 2292 { 2293 vhost_add_used(vq, head, len); 2294 vhost_signal(dev, vq); 2295 } 2296 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); 2297 2298 /* multi-buffer version of vhost_add_used_and_signal */ 2299 void vhost_add_used_and_signal_n(struct vhost_dev *dev, 2300 struct vhost_virtqueue *vq, 2301 struct vring_used_elem *heads, unsigned count) 2302 { 2303 vhost_add_used_n(vq, heads, count); 2304 vhost_signal(dev, vq); 2305 } 2306 EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); 2307 2308 /* return true if we're sure that available ring is empty */ 2309 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2310 { 2311 __virtio16 avail_idx; 2312 int r; 2313 2314 if (vq->avail_idx != vq->last_avail_idx) 2315 return false; 2316 2317 r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); 2318 if (unlikely(r)) 2319 return false; 2320 vq->avail_idx = vhost16_to_cpu(vq, avail_idx); 2321 2322 return vq->avail_idx == vq->last_avail_idx; 2323 } 2324 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); 2325 2326 /* OK, now we need to know about added descriptors. */ 2327 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2328 { 2329 __virtio16 avail_idx; 2330 int r; 2331 2332 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) 2333 return false; 2334 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; 2335 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2336 r = vhost_update_used_flags(vq); 2337 if (r) { 2338 vq_err(vq, "Failed to enable notification at %p: %d\n", 2339 &vq->used->flags, r); 2340 return false; 2341 } 2342 } else { 2343 r = vhost_update_avail_event(vq, vq->avail_idx); 2344 if (r) { 2345 vq_err(vq, "Failed to update avail event index at %p: %d\n", 2346 vhost_avail_event(vq), r); 2347 return false; 2348 } 2349 } 2350 /* They could have slipped one in as we were doing that: make 2351 * sure it's written, then check again.
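 *
 * (Editor's aside, not part of the original comment: a "true" return from
 * this function means buffers may have been added during that window, so
 * callers typically disable notifications again and take another pass over
 * the ring instead of going idle; see the illustrative sketch at the end
 * of this file.)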
*/ 2352 smp_mb(); 2353 r = vhost_get_avail(vq, avail_idx, &vq->avail->idx); 2354 if (r) { 2355 vq_err(vq, "Failed to check avail idx at %p: %d\n", 2356 &vq->avail->idx, r); 2357 return false; 2358 } 2359 2360 return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx; 2361 } 2362 EXPORT_SYMBOL_GPL(vhost_enable_notify); 2363 2364 /* We don't need to be notified again. */ 2365 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) 2366 { 2367 int r; 2368 2369 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) 2370 return; 2371 vq->used_flags |= VRING_USED_F_NO_NOTIFY; 2372 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { 2373 r = vhost_update_used_flags(vq); 2374 if (r) 2375 vq_err(vq, "Failed to disable notification at %p: %d\n", 2376 &vq->used->flags, r); 2377 } 2378 } 2379 EXPORT_SYMBOL_GPL(vhost_disable_notify); 2380 2381 /* Create a new message. */ 2382 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) 2383 { 2384 struct vhost_msg_node *node = kmalloc(sizeof *node, GFP_KERNEL); 2385 if (!node) 2386 return NULL; 2387 node->vq = vq; 2388 node->msg.type = type; 2389 return node; 2390 } 2391 EXPORT_SYMBOL_GPL(vhost_new_msg); 2392 2393 void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, 2394 struct vhost_msg_node *node) 2395 { 2396 spin_lock(&dev->iotlb_lock); 2397 list_add_tail(&node->node, head); 2398 spin_unlock(&dev->iotlb_lock); 2399 2400 wake_up_interruptible_poll(&dev->wait, POLLIN | POLLRDNORM); 2401 } 2402 EXPORT_SYMBOL_GPL(vhost_enqueue_msg); 2403 2404 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, 2405 struct list_head *head) 2406 { 2407 struct vhost_msg_node *node = NULL; 2408 2409 spin_lock(&dev->iotlb_lock); 2410 if (!list_empty(head)) { 2411 node = list_first_entry(head, struct vhost_msg_node, 2412 node); 2413 list_del(&node->node); 2414 } 2415 spin_unlock(&dev->iotlb_lock); 2416 2417 return node; 2418 } 2419 EXPORT_SYMBOL_GPL(vhost_dequeue_msg); 2420 2421 2422 static int __init vhost_init(void) 2423 { 2424 return 0; 2425 } 2426 2427 static void __exit vhost_exit(void) 2428 { 2429 } 2430 2431 module_init(vhost_init); 2432 module_exit(vhost_exit); 2433 2434 MODULE_VERSION("0.0.1"); 2435 MODULE_LICENSE("GPL v2"); 2436 MODULE_AUTHOR("Michael S. Tsirkin"); 2437 MODULE_DESCRIPTION("Host kernel accelerator for virtio"); 2438
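/*
 * Editor's addendum, not part of the original driver: a minimal sketch of
 * how a backend is expected to drive the helpers exported above, modelled
 * on the pattern used by in-tree clients such as drivers/vhost/net.c. The
 * function name example_handle_vq() is hypothetical and the block is kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;		/* vq_err() has already logged it */
		if (head == vq->num) {
			/* Ring looked empty: re-enable notifications and
			 * re-check in case a buffer slipped in meanwhile. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* Consume iov[0..out) and/or fill iov[out..out + in) here,
		 * then report how many bytes were written back. */
		vhost_add_used_and_signal(dev, vq, head, 0 /* bytes written */);
	}

	mutex_unlock(&vq->mutex);
}
#endif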