--- vhost.c (6d5e6aa860a33fdfcd07de658c8108027c06c329)
+++ vhost.c (6ac1afbf6132df0fcb0898f3509371305af7de16)
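This diff readies vhost.c to build as a standalone module: it pulls in <linux/module.h>, makes vhost_work_flush() non-static, tags each public entry point with EXPORT_SYMBOL_GPL(), and appends module init/exit boilerplate so that backends such as vhost-net can use the core from a separate module.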
 /* Copyright (C) 2009 Red Hat, Inc.
  * Copyright (C) 2006 Rusty Russell IBM Corporation
  *
  * Author: Michael S. Tsirkin <mst@redhat.com>
  *
  * Inspiration, some code, and most witty comments come from
  * Documentation/virtual/lguest/lguest.c, by Rusty Russell
  *
--- 11 unchanged lines hidden ---
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
 #include <linux/poll.h>
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
+#include <linux/module.h>
 
 #include "vhost.h"
 
 enum {
 	VHOST_MEMORY_MAX_NREGIONS = 64,
 	VHOST_MEMORY_F_LOG = 0x1,
 };
 
--- 25 unchanged lines hidden ---
 void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
 {
 	INIT_LIST_HEAD(&work->node);
 	work->fn = fn;
 	init_waitqueue_head(&work->done);
 	work->flushing = 0;
 	work->queue_seq = work->done_seq = 0;
 }
+EXPORT_SYMBOL_GPL(vhost_work_init);
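For context on how the newly exported work API is consumed: a backend embeds a struct vhost_work, binds a handler with vhost_work_init(), schedules it with vhost_work_queue(), and waits with vhost_work_flush(). A minimal sketch, with a hypothetical backend struct and handler names (not part of this patch):

#include <linux/kernel.h>
#include "vhost.h"

struct my_backend {				/* hypothetical */
	struct vhost_dev dev;
	struct vhost_work work;
};

static void my_handle_work(struct vhost_work *work)
{
	struct my_backend *b = container_of(work, struct my_backend, work);

	/* Runs on the device's worker kthread. */
	(void)b;
}

static void my_backend_setup(struct my_backend *b)
{
	vhost_work_init(&b->work, my_handle_work);
}

static void my_backend_kick(struct my_backend *b)
{
	/* Queues at most one instance; safe from atomic context,
	 * since work_lock is taken with interrupts saved. */
	vhost_work_queue(&b->dev, &b->work);
}

static void my_backend_drain(struct my_backend *b)
{
	/* Sleeps until any queued instance has finished running. */
	vhost_work_flush(&b->dev, &b->work);
}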
 
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev)
 {
 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
 	poll->wqh = NULL;
 
 	vhost_work_init(&poll->work, fn);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_init);
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
 	unsigned long mask;
 	int ret = 0;
 
--- 6 unchanged lines hidden ---
 	if (mask & POLLERR) {
 		if (poll->wqh)
 			remove_wait_queue(poll->wqh, &poll->wait);
 		ret = -EINVAL;
 	}
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(vhost_poll_start);
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
  * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
 	if (poll->wqh) {
 		remove_wait_queue(poll->wqh, &poll->wait);
 		poll->wqh = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_poll_stop);
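The poll helpers bind such a work item to a file's wait queue. The start/stop discipline follows the comments above; a sketch, with hypothetical wrapper names:

/* Arm polling on a file the backend holds a reference to; the
 * reference must outlive vhost_poll_stop(). */
static int my_arm_poll(struct vhost_poll *poll, struct file *file)
{
	return vhost_poll_start(poll, file);	/* -EINVAL if POLLERR */
}

static void my_disarm_poll(struct vhost_poll *poll)
{
	vhost_poll_stop(poll);
	/* A wakeup may already have queued the handler; flush it
	 * before the file reference is dropped. */
	vhost_poll_flush(poll);
}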
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
 				unsigned seq)
 {
 	int left;
 
 	spin_lock_irq(&dev->work_lock);
 	left = seq - work->done_seq;
 	spin_unlock_irq(&dev->work_lock);
 	return left <= 0;
 }
 
-static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned seq;
 	int flushing;
 
 	spin_lock_irq(&dev->work_lock);
 	seq = work->queue_seq;
 	work->flushing++;
 	spin_unlock_irq(&dev->work_lock);
 	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
 	spin_lock_irq(&dev->work_lock);
 	flushing = --work->flushing;
 	spin_unlock_irq(&dev->work_lock);
 	BUG_ON(flushing < 0);
 }
+EXPORT_SYMBOL_GPL(vhost_work_flush);
 
 /* Flush any work that has been scheduled. When calling this, don't hold any
  * locks that are also used by the callback. */
 void vhost_poll_flush(struct vhost_poll *poll)
 {
 	vhost_work_flush(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_flush);
 
 void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev->work_lock, flags);
 	if (list_empty(&work->node)) {
 		list_add_tail(&work->node, &dev->work_list);
 		work->queue_seq++;
 		wake_up_process(dev->worker);
 	}
 	spin_unlock_irqrestore(&dev->work_lock, flags);
 }
+EXPORT_SYMBOL_GPL(vhost_work_queue);
 
 void vhost_poll_queue(struct vhost_poll *poll)
 {
 	vhost_work_queue(poll->dev, &poll->work);
 }
+EXPORT_SYMBOL_GPL(vhost_poll_queue);
 
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
 	vq->num = 1;
 	vq->desc = NULL;
 	vq->avail = NULL;
 	vq->used = NULL;
--- 132 unchanged lines hidden ---
 		vhost_vq_reset(dev, vq);
 		if (vq->handle_kick)
 			vhost_poll_init(&vq->poll, vq->handle_kick,
 					POLLIN, dev);
 	}
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_init);
 
 /* Caller should have device mutex */
 long vhost_dev_check_owner(struct vhost_dev *dev)
 {
 	/* Are you the owner? If not, I don't think you mean to do that */
 	return dev->mm == current->mm ? 0 : -EPERM;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
 
 struct vhost_attach_cgroups_struct {
 	struct vhost_work work;
 	struct task_struct *owner;
 	int ret;
 };
 
 static void vhost_attach_cgroups_work(struct vhost_work *work)
--- 15 unchanged lines hidden ---
 	return attach.ret;
 }
 
 /* Caller should have device mutex */
 bool vhost_dev_has_owner(struct vhost_dev *dev)
 {
 	return dev->mm;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
 
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
 	struct task_struct *worker;
 	int err;
 
 	/* Is there an owner already? */
--- 27 unchanged lines hidden ---
 	dev->worker = NULL;
 err_worker:
 	if (dev->mm)
 		mmput(dev->mm);
 	dev->mm = NULL;
 err_mm:
 	return err;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
 
 struct vhost_memory *vhost_dev_reset_owner_prepare(void)
 {
 	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
 
 /* Caller should have device mutex */
 void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
 {
 	vhost_dev_cleanup(dev, true);
 
 	/* Restore memory to default empty mapping. */
 	memory->nregions = 0;
 	RCU_INIT_POINTER(dev->memory, memory);
 }
+EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
 
 void vhost_dev_stop(struct vhost_dev *dev)
 {
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
 			vhost_poll_stop(&dev->vqs[i]->poll);
 			vhost_poll_flush(&dev->vqs[i]->poll);
 		}
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_dev_stop);
 
 /* Caller should have device mutex if and only if locked is set */
 void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 {
 	int i;
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		if (dev->vqs[i]->error_ctx)
--- 24 unchanged lines hidden ---
 	if (dev->worker) {
 		kthread_stop(dev->worker);
 		dev->worker = NULL;
 	}
 	if (dev->mm)
 		mmput(dev->mm);
 	dev->mm = NULL;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
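With init, stop, and cleanup all exported, a backend's open/release path can be assembled from these calls alone. A sketch modeled on the vhost-net shape, reusing the hypothetical my_backend from earlier and assuming it carries a vq_ptrs[] array of virtqueue pointers; names and error handling are illustrative only:

#define MY_NVQS 2			/* hypothetical */

static long my_open(struct my_backend *b)
{
	/* b->vq_ptrs[] holds vhost_virtqueue pointers whose
	 * handle_kick callbacks are already set. */
	return vhost_dev_init(&b->dev, b->vq_ptrs, MY_NVQS);
}

static void my_release(struct my_backend *b)
{
	vhost_dev_stop(&b->dev);	   /* unhook kicks, flush handlers */
	vhost_dev_cleanup(&b->dev, false); /* drop fds, worker thread, mm */
}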
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
 	u64 a = addr / VHOST_PAGE_SIZE / 8;
 
 	/* Make sure 64 bit math will not overflow. */
 	if (a > ULONG_MAX - (unsigned long)log_base ||
 	    a + (unsigned long)log_base > ULONG_MAX)
--- 69 unchanged lines hidden ---
 int vhost_log_access_ok(struct vhost_dev *dev)
 {
 	struct vhost_memory *mp;
 
 	mp = rcu_dereference_protected(dev->memory,
 				       lockdep_is_held(&dev->mutex));
 	return memory_access_ok(dev, mp, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_log_access_ok);
 
 /* Verify access for write logging. */
 /* Caller should have vq mutex and device mutex */
 static int vq_log_access_ok(struct vhost_dev *d, struct vhost_virtqueue *vq,
 			    void __user *log_base)
 {
 	struct vhost_memory *mp;
 	size_t s = vhost_has_feature(d, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
--- 9 unchanged lines hidden ---
 
 /* Can we start vq? */
 /* Caller should have vq mutex and device mutex */
 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
 {
 	return vq_access_ok(vq->dev, vq->num, vq->desc, vq->avail, vq->used) &&
 		vq_log_access_ok(vq->dev, vq, vq->log_base);
 }
+EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
 
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
 	struct vhost_memory mem, *newmem, *oldmem;
 	unsigned long size = offsetof(struct vhost_memory, regions);
 
 	if (copy_from_user(&mem, m, size))
 		return -EFAULT;
--- 213 unchanged lines hidden ---
 		r = vhost_poll_start(&vq->poll, vq->kick);
 
 	mutex_unlock(&vq->mutex);
 
 	if (pollstop && vq->handle_kick)
 		vhost_poll_flush(&vq->poll);
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
 
 /* Caller must have device mutex */
 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 {
 	struct file *eventfp, *filep = NULL;
 	struct eventfd_ctx *ctx = NULL;
 	u64 p;
 	long r;
--- 64 unchanged lines hidden ---
 		break;
 	default:
 		r = -ENOIOCTLCMD;
 		break;
 	}
 done:
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
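The -ENOIOCTLCMD default above is the chaining contract: a backend's own ioctl handler consumes its private commands first, then forwards unknown ones to vhost_dev_ioctl(), and on a further -ENOIOCTLCMD to the per-vring handler, roughly as vhost-net does. A sketch:

static long my_ioctl(struct my_backend *b, unsigned int ioctl,
		     void __user *argp)
{
	long r;

	mutex_lock(&b->dev.mutex);
	r = vhost_dev_ioctl(&b->dev, ioctl, argp);
	if (r == -ENOIOCTLCMD)
		/* Not a device-level command; try the vring ones. */
		r = vhost_vring_ioctl(&b->dev, ioctl, argp);
	mutex_unlock(&b->dev.mutex);
	return r;
}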
 
 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 						     __u64 addr, __u32 len)
 {
 	struct vhost_memory_region *reg;
 	int i;
 
 	/* linear search is not brilliant, but we really have on the order of 6
--- 75 unchanged lines hidden ---
 			eventfd_signal(vq->log_ctx, 1);
 			return 0;
 		}
 	}
 	/* Length written exceeds what we have stored. This is a bug. */
 	BUG();
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_log_write);
 
 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
 {
 	void __user *used;
 	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
 		return -EFAULT;
 	if (unlikely(vq->log_used)) {
 		/* Make sure the flag is seen before log. */
--- 35 unchanged lines hidden ---
 		return 0;
 
 	r = vhost_update_used_flags(vq);
 	if (r)
 		return r;
 	vq->signalled_used_valid = false;
 	return get_user(vq->last_used_idx, &vq->used->idx);
 }
+EXPORT_SYMBOL_GPL(vhost_init_used);
 
 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
 			  struct iovec iov[], int iov_size)
 {
 	const struct vhost_memory_region *reg;
 	struct vhost_memory *mem;
 	struct iovec *_iov;
 	u64 s = 0;
--- 260 unchanged lines hidden ---
 	/* On success, increment avail index. */
 	vq->last_avail_idx++;
 
 	/* Assume notifications from guest are disabled at this point,
 	 * if they aren't we would need to update avail_event index. */
 	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
 	return head;
 }
+EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
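vhost_get_vq_desc() hands back the head index of an available descriptor chain, vq->num when the ring is empty, or a negative errno on a translation failure. A kick handler built on the exports therefore takes roughly this shape (buffer handling elided; the function name is hypothetical, but the loop mirrors the vhost-net handlers):

static void my_service_vq(struct vhost_virtqueue *vq)
{
	unsigned out, in;
	int head;

	mutex_lock(&vq->mutex);
	for (;;) {
		head = vhost_get_vq_desc(vq->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;			/* translation error */
		if (head == vq->num)
			break;			/* ring is empty */
		/* ... consume the out + in iovecs in vq->iov ... */
		vhost_add_used_and_signal(vq->dev, vq, head, 0);
		/* len 0: nothing was written back to the guest */
	}
	mutex_unlock(&vq->mutex);
}

If the backend fails partway after pulling several chains, vhost_discard_vq_desc(), just below, rewinds last_avail_idx so those chains are offered again.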
 
 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
 {
 	vq->last_avail_idx -= n;
 }
+EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
 
 /* After we've used one of their buffers, we tell them about it. We'll then
  * want to notify the guest, using eventfd. */
 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
 {
 	struct vring_used_elem __user *used;
 
 	/* The virtqueue contains a ring of used buffers. Get a pointer to the
--- 32 unchanged lines hidden ---
 	/* If the driver never bothers to signal in a very long while,
 	 * used index might wrap around. If that happens, invalidate
 	 * signalled_used index we stored. TODO: make sure driver
 	 * signals at least once in 2^16 and remove this. */
 	if (unlikely(vq->last_used_idx == vq->signalled_used))
 		vq->signalled_used_valid = false;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used);
 
 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			      struct vring_used_elem *heads,
 			      unsigned count)
 {
 	struct vring_used_elem __user *used;
 	u16 old, new;
 	int start;
--- 53 unchanged lines hidden ---
 		log_write(vq->log_base,
 			  vq->log_addr + offsetof(struct vring_used, idx),
 			  sizeof vq->used->idx);
 		if (vq->log_ctx)
 			eventfd_signal(vq->log_ctx, 1);
 	}
 	return r;
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_n);
 
 static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	__u16 old, new, event;
 	bool v;
 	/* Flush out used index updates. This is paired
 	 * with the barrier that the Guest executes when enabling
 	 * interrupts. */
--- 28 unchanged lines hidden ---
 
 /* This actually signals the guest, using eventfd. */
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	/* Signal the Guest tell them we used something up. */
 	if (vq->call_ctx && vhost_notify(dev, vq))
 		eventfd_signal(vq->call_ctx, 1);
 }
+EXPORT_SYMBOL_GPL(vhost_signal);
 
 /* And here's the combo meal deal. Supersize me! */
 void vhost_add_used_and_signal(struct vhost_dev *dev,
 			       struct vhost_virtqueue *vq,
 			       unsigned int head, int len)
 {
 	vhost_add_used(vq, head, len);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
 
 /* multi-buffer version of vhost_add_used_and_signal */
 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
 				 struct vhost_virtqueue *vq,
 				 struct vring_used_elem *heads, unsigned count)
 {
 	vhost_add_used_n(vq, heads, count);
 	vhost_signal(dev, vq);
 }
+EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
 
 /* OK, now we need to know about added descriptors. */
 bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	u16 avail_idx;
 	int r;
 
 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
--- 21 unchanged lines hidden ---
 	if (r) {
 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
 		       &vq->avail->idx, r);
 		return false;
 	}
 
 	return avail_idx != vq->avail_idx;
 }
+EXPORT_SYMBOL_GPL(vhost_enable_notify);
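vhost_enable_notify()'s return value closes the standard drain race: after seeing the ring empty, re-arm guest kicks, then check once more for a buffer that slipped in beforehand. A helper sketching the idiom (vhost_disable_notify() is defined just below; in the service loop sketched earlier, the head == vq->num branch would break only when this returns true, with kicks having been disabled once before entering the loop):

/* Returns true when the ring is confirmed empty and kicks are re-armed;
 * false when a buffer raced in and polling should continue. */
static bool my_vq_drained(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(vhost_enable_notify(dev, vq))) {
		/* The guest added a buffer before we re-armed:
		 * turn kicks back off and keep processing. */
		vhost_disable_notify(dev, vq);
		return false;
	}
	return true;
}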
 
 /* We don't need to be notified again. */
 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
 	int r;
 
 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
 		return;
 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
 	if (!vhost_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
 		r = vhost_update_used_flags(vq);
 		if (r)
 			vq_err(vq, "Failed to enable notification at %p: %d\n",
 			       &vq->used->flags, r);
 	}
 }
+EXPORT_SYMBOL_GPL(vhost_disable_notify);
+
+static int __init vhost_init(void)
+{
+	return 0;
+}
+
+static void __exit vhost_exit(void)
+{
+}
+
+module_init(vhost_init);
+module_exit(vhost_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio");
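The empty init/exit pair is deliberate: built this way, vhost is a pure library module, so its entire useful surface is the EXPORT_SYMBOL_GPL()ed API above, and loading it merely makes those symbols available for backends such as vhost-net and vhost-scsi to bind against. The accompanying Kconfig/Makefile changes are not shown in this diff.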