Lines Matching +full:ctx +full:- +full:asid

1 // SPDX-License-Identifier: GPL-2.0-only
49 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
50 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
55 vq->user_be = !virtio_legacy_is_little_endian(); in vhost_disable_cross_endian()
60 vq->user_be = true; in vhost_enable_cross_endian_big()
65 vq->user_be = false; in vhost_enable_cross_endian_little()
72 if (vq->private_data) in vhost_set_vring_endian()
73 return -EBUSY; in vhost_set_vring_endian()
76 return -EFAULT; in vhost_set_vring_endian()
80 return -EINVAL; in vhost_set_vring_endian()
95 .num = vq->user_be in vhost_get_vring_endian()
99 return -EFAULT; in vhost_get_vring_endian()
111 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be; in vhost_init_is_le()
120 return -ENOIOCTLCMD; in vhost_set_vring_endian()
126 return -ENOIOCTLCMD; in vhost_get_vring_endian()
131 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) in vhost_init_is_le()
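
The vhost_init_is_le() matches above show how the ring byte order is resolved: VIRTIO_F_VERSION_1 forces little endian, and only the legacy cross-endian build consults user_be (line 111). A minimal userspace sketch of the same selection plus the resulting 16-bit accessor; the fake_* names are illustrative, not the kernel's, and the swap assumes a little-endian host:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_vq {
        bool has_version_1;     /* VIRTIO_F_VERSION_1 negotiated */
        bool user_be;           /* legacy cross-endian override */
        bool is_le;             /* resolved ring byte order */
};

static void fake_init_is_le(struct fake_vq *vq)
{
        /* Mirrors the cross-endian vhost_init_is_le(): VERSION_1 forces
         * little endian, otherwise honor the user_be setting. */
        vq->is_le = vq->has_version_1 || !vq->user_be;
}

static uint16_t fake_vhost16_to_cpu(const struct fake_vq *vq, uint16_t v)
{
        /* Swap only when the ring order differs from this (LE) host. */
        return vq->is_le ? v : (uint16_t)(v << 8 | v >> 8);
}

int main(void)
{
        struct fake_vq vq = { .has_version_1 = false, .user_be = true };

        fake_init_is_le(&vq);
        printf("is_le=%d, 0x0102 -> 0x%04x\n", vq.is_le,
               fake_vhost16_to_cpu(&vq, 0x0102));
        return 0;
}
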
151 complete(&s->wait_event); in vhost_flush_work()
160 poll->wqh = wqh; in vhost_poll_func()
161 add_wait_queue(wqh, &poll->wait); in vhost_poll_func()
168 struct vhost_work *work = &poll->work; in vhost_poll_wakeup()
170 if (!(key_to_poll(key) & poll->mask)) in vhost_poll_wakeup()
173 if (!poll->dev->use_worker) in vhost_poll_wakeup()
174 work->fn(work); in vhost_poll_wakeup()
183 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_work_init()
184 work->fn = fn; in vhost_work_init()
193 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup); in vhost_poll_init()
194 init_poll_funcptr(&poll->table, vhost_poll_func); in vhost_poll_init()
195 poll->mask = mask; in vhost_poll_init()
196 poll->dev = dev; in vhost_poll_init()
197 poll->wqh = NULL; in vhost_poll_init()
198 poll->vq = vq; in vhost_poll_init()
200 vhost_work_init(&poll->work, fn); in vhost_poll_init()
210 if (poll->wqh) in vhost_poll_start()
213 mask = vfs_poll(file, &poll->table); in vhost_poll_start()
215 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); in vhost_poll_start()
218 return -EINVAL; in vhost_poll_start()
229 if (poll->wqh) { in vhost_poll_stop()
230 remove_wait_queue(poll->wqh, &poll->wait); in vhost_poll_stop()
231 poll->wqh = NULL; in vhost_poll_stop()
239 if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) { in vhost_worker_queue()
244 llist_add(&work->node, &worker->work_list); in vhost_worker_queue()
245 vhost_task_wake(worker->vtsk); in vhost_worker_queue()
255 worker = rcu_dereference(vq->worker); in vhost_vq_work_queue()
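
vhost_worker_queue() (lines 239-245) pairs a VHOST_WORK_QUEUED test-and-set with a lockless-list push, so a work item can be pending at most once. A self-contained C11 sketch of that pattern under illustrative fake_* names; the real code then wakes the worker task via vhost_task_wake():

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_work {
        atomic_bool queued;             /* VHOST_WORK_QUEUED stand-in */
        struct fake_work *next;         /* intrusive list link */
};

struct fake_worker {
        struct fake_work *_Atomic head; /* LIFO push head, like llist */
};

static bool fake_work_queue(struct fake_worker *w, struct fake_work *work)
{
        /* Like test_and_set_bit(): only the caller that flips 0 -> 1
         * may link the item in. */
        if (atomic_exchange(&work->queued, true))
                return false;           /* already pending */

        struct fake_work *old = atomic_load(&w->head);
        do {
                work->next = old;
        } while (!atomic_compare_exchange_weak(&w->head, &old, work));
        /* Real code now wakes the worker: vhost_task_wake(worker->vtsk). */
        return true;
}
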
267 * __vhost_worker_flush - flush a worker
276 if (!worker->attachment_cnt || worker->killed) in __vhost_worker_flush()
287 mutex_unlock(&worker->mutex); in __vhost_worker_flush()
289 mutex_lock(&worker->mutex); in __vhost_worker_flush()
294 mutex_lock(&worker->mutex); in vhost_worker_flush()
296 mutex_unlock(&worker->mutex); in vhost_worker_flush()
304 xa_for_each(&dev->worker_xa, i, worker) in vhost_dev_flush()
316 worker = rcu_dereference(vq->worker); in vhost_vq_has_work()
317 if (worker && !llist_empty(&worker->work_list)) in vhost_vq_has_work()
327 vhost_vq_work_queue(poll->vq, &poll->work); in vhost_poll_queue()
336 vq->meta_iotlb[j] = NULL; in __vhost_vq_meta_reset()
343 for (i = 0; i < d->nvqs; ++i) in vhost_vq_meta_reset()
344 __vhost_vq_meta_reset(d->vqs[i]); in vhost_vq_meta_reset()
349 call_ctx->ctx = NULL; in vhost_vring_call_reset()
350 memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer)); in vhost_vring_call_reset()
355 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq); in vhost_vq_is_setup()
362 vq->num = 1; in vhost_vq_reset()
363 vq->desc = NULL; in vhost_vq_reset()
364 vq->avail = NULL; in vhost_vq_reset()
365 vq->used = NULL; in vhost_vq_reset()
366 vq->last_avail_idx = 0; in vhost_vq_reset()
367 vq->avail_idx = 0; in vhost_vq_reset()
368 vq->last_used_idx = 0; in vhost_vq_reset()
369 vq->signalled_used = 0; in vhost_vq_reset()
370 vq->signalled_used_valid = false; in vhost_vq_reset()
371 vq->used_flags = 0; in vhost_vq_reset()
372 vq->log_used = false; in vhost_vq_reset()
373 vq->log_addr = -1ull; in vhost_vq_reset()
374 vq->private_data = NULL; in vhost_vq_reset()
375 vq->acked_features = 0; in vhost_vq_reset()
376 vq->acked_backend_features = 0; in vhost_vq_reset()
377 vq->log_base = NULL; in vhost_vq_reset()
378 vq->error_ctx = NULL; in vhost_vq_reset()
379 vq->kick = NULL; in vhost_vq_reset()
380 vq->log_ctx = NULL; in vhost_vq_reset()
383 vq->busyloop_timeout = 0; in vhost_vq_reset()
384 vq->umem = NULL; in vhost_vq_reset()
385 vq->iotlb = NULL; in vhost_vq_reset()
386 rcu_assign_pointer(vq->worker, NULL); in vhost_vq_reset()
387 vhost_vring_call_reset(&vq->call_ctx); in vhost_vq_reset()
397 node = llist_del_all(&worker->work_list); in vhost_run_work_list()
405 clear_bit(VHOST_WORK_QUEUED, &work->flags); in vhost_run_work_list()
406 kcov_remote_start_common(worker->kcov_handle); in vhost_run_work_list()
407 work->fn(work); in vhost_run_work_list()
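
The worker loop (vhost_run_work_list(), lines 397-407) detaches the whole list in one step and clears each item's QUEUED bit before invoking its callback, so a callback may requeue its own work. A self-contained sketch of that drain under illustrative types; the kernel additionally reverses the detached LIFO chain (llist_reverse_order()) for FIFO processing, which this omits:

#include <stdatomic.h>
#include <stddef.h>

struct fake_work {
        atomic_bool queued;
        struct fake_work *next;
        void (*fn)(struct fake_work *work);
};

static void fake_run_work_list(struct fake_work *_Atomic *head)
{
        /* llist_del_all(): detach the whole chain in one atomic step. */
        struct fake_work *node = atomic_exchange(head, NULL);

        while (node) {
                struct fake_work *next = node->next;

                /* Clear QUEUED *before* fn runs so fn may requeue it. */
                atomic_store(&node->queued, false);
                node->fn(node);
                node = next;
        }
}
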
419 struct vhost_dev *dev = worker->dev; in vhost_worker_killed()
423 mutex_lock(&worker->mutex); in vhost_worker_killed()
424 worker->killed = true; in vhost_worker_killed()
426 for (i = 0; i < dev->nvqs; i++) { in vhost_worker_killed()
427 vq = dev->vqs[i]; in vhost_worker_killed()
429 mutex_lock(&vq->mutex); in vhost_worker_killed()
431 rcu_dereference_check(vq->worker, in vhost_worker_killed()
432 lockdep_is_held(&vq->mutex))) { in vhost_worker_killed()
433 rcu_assign_pointer(vq->worker, NULL); in vhost_worker_killed()
436 mutex_unlock(&vq->mutex); in vhost_worker_killed()
439 worker->attachment_cnt -= attach_cnt; in vhost_worker_killed()
447 mutex_unlock(&worker->mutex); in vhost_worker_killed()
452 kfree(vq->indirect); in vhost_vq_free_iovecs()
453 vq->indirect = NULL; in vhost_vq_free_iovecs()
454 kfree(vq->log); in vhost_vq_free_iovecs()
455 vq->log = NULL; in vhost_vq_free_iovecs()
456 kfree(vq->heads); in vhost_vq_free_iovecs()
457 vq->heads = NULL; in vhost_vq_free_iovecs()
466 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_alloc_iovecs()
467 vq = dev->vqs[i]; in vhost_dev_alloc_iovecs()
468 vq->indirect = kmalloc_array(UIO_MAXIOV, in vhost_dev_alloc_iovecs()
469 sizeof(*vq->indirect), in vhost_dev_alloc_iovecs()
471 vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), in vhost_dev_alloc_iovecs()
473 vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), in vhost_dev_alloc_iovecs()
475 if (!vq->indirect || !vq->log || !vq->heads) in vhost_dev_alloc_iovecs()
481 for (; i >= 0; --i) in vhost_dev_alloc_iovecs()
482 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_alloc_iovecs()
483 return -ENOMEM; in vhost_dev_alloc_iovecs()
490 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_free_iovecs()
491 vhost_vq_free_iovecs(dev->vqs[i]); in vhost_dev_free_iovecs()
497 struct vhost_dev *dev = vq->dev; in vhost_exceeds_weight()
499 if ((dev->byte_weight && total_len >= dev->byte_weight) || in vhost_exceeds_weight()
500 pkts >= dev->weight) { in vhost_exceeds_weight()
501 vhost_poll_queue(&vq->poll); in vhost_exceeds_weight()
515 return size_add(struct_size(vq->avail, ring, num), event); in vhost_get_avail_size()
524 return size_add(struct_size(vq->used, ring, num), event); in vhost_get_used_size()
530 return sizeof(*vq->desc) * num; in vhost_get_desc_size()
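
The three size helpers above follow the virtio split-ring layout: 16 bytes per descriptor, a 4-byte header plus 2 bytes per entry for the avail ring, a 4-byte header plus 8 bytes per entry for the used ring, and an optional trailing 2-byte event field when VIRTIO_RING_F_EVENT_IDX is negotiated. A standalone userspace check of the arithmetic, with the struct layouts restated per the spec:

#include <stdint.h>
#include <stdio.h>

struct vring_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t flags;
        uint16_t next;
};
struct vring_avail {
        uint16_t flags;
        uint16_t idx;
        uint16_t ring[];
};
struct vring_used_elem {
        uint32_t id;
        uint32_t len;
};
struct vring_used {
        uint16_t flags;
        uint16_t idx;
        struct vring_used_elem ring[];
};

int main(void)
{
        unsigned int num = 256;
        size_t event = sizeof(uint16_t); /* VIRTIO_RING_F_EVENT_IDX on */

        printf("desc:  %zu\n", sizeof(struct vring_desc) * num); /* 4096 */
        printf("avail: %zu\n", sizeof(struct vring_avail) +
               num * sizeof(uint16_t) + event);                  /* 518 */
        printf("used:  %zu\n", sizeof(struct vring_used) +
               num * sizeof(struct vring_used_elem) + event);    /* 2054 */
        return 0;
}
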
537 int (*msg_handler)(struct vhost_dev *dev, u32 asid, in vhost_dev_init() argument
543 dev->vqs = vqs; in vhost_dev_init()
544 dev->nvqs = nvqs; in vhost_dev_init()
545 mutex_init(&dev->mutex); in vhost_dev_init()
546 dev->log_ctx = NULL; in vhost_dev_init()
547 dev->umem = NULL; in vhost_dev_init()
548 dev->iotlb = NULL; in vhost_dev_init()
549 dev->mm = NULL; in vhost_dev_init()
550 dev->iov_limit = iov_limit; in vhost_dev_init()
551 dev->weight = weight; in vhost_dev_init()
552 dev->byte_weight = byte_weight; in vhost_dev_init()
553 dev->use_worker = use_worker; in vhost_dev_init()
554 dev->msg_handler = msg_handler; in vhost_dev_init()
555 init_waitqueue_head(&dev->wait); in vhost_dev_init()
556 INIT_LIST_HEAD(&dev->read_list); in vhost_dev_init()
557 INIT_LIST_HEAD(&dev->pending_list); in vhost_dev_init()
558 spin_lock_init(&dev->iotlb_lock); in vhost_dev_init()
559 xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC); in vhost_dev_init()
561 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_init()
562 vq = dev->vqs[i]; in vhost_dev_init()
563 vq->log = NULL; in vhost_dev_init()
564 vq->indirect = NULL; in vhost_dev_init()
565 vq->heads = NULL; in vhost_dev_init()
566 vq->dev = dev; in vhost_dev_init()
567 mutex_init(&vq->mutex); in vhost_dev_init()
569 if (vq->handle_kick) in vhost_dev_init()
570 vhost_poll_init(&vq->poll, vq->handle_kick, in vhost_dev_init()
580 return dev->mm == current->mm ? 0 : -EPERM; in vhost_dev_check_owner()
587 return dev->mm; in vhost_dev_has_owner()
594 if (dev->use_worker) { in vhost_attach_mm()
595 dev->mm = get_task_mm(current); in vhost_attach_mm()
603 dev->mm = current->mm; in vhost_attach_mm()
604 mmgrab(dev->mm); in vhost_attach_mm()
610 if (!dev->mm) in vhost_detach_mm()
613 if (dev->use_worker) in vhost_detach_mm()
614 mmput(dev->mm); in vhost_detach_mm()
616 mmdrop(dev->mm); in vhost_detach_mm()
618 dev->mm = NULL; in vhost_detach_mm()
627 WARN_ON(!llist_empty(&worker->work_list)); in vhost_worker_destroy()
628 xa_erase(&dev->worker_xa, worker->id); in vhost_worker_destroy()
629 vhost_task_stop(worker->vtsk); in vhost_worker_destroy()
638 if (!dev->use_worker) in vhost_workers_free()
641 for (i = 0; i < dev->nvqs; i++) in vhost_workers_free()
642 rcu_assign_pointer(dev->vqs[i]->worker, NULL); in vhost_workers_free()
647 xa_for_each(&dev->worker_xa, i, worker) in vhost_workers_free()
649 xa_destroy(&dev->worker_xa); in vhost_workers_free()
664 worker->dev = dev; in vhost_worker_create()
665 snprintf(name, sizeof(name), "vhost-%d", current->pid); in vhost_worker_create()
672 mutex_init(&worker->mutex); in vhost_worker_create()
673 init_llist_head(&worker->work_list); in vhost_worker_create()
674 worker->kcov_handle = kcov_common_handle(); in vhost_worker_create()
675 worker->vtsk = vtsk; in vhost_worker_create()
679 ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL); in vhost_worker_create()
682 worker->id = id; in vhost_worker_create()
699 mutex_lock(&worker->mutex); in __vhost_vq_attach_worker()
700 if (worker->killed) { in __vhost_vq_attach_worker()
701 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
705 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
707 old_worker = rcu_dereference_check(vq->worker, in __vhost_vq_attach_worker()
708 lockdep_is_held(&vq->mutex)); in __vhost_vq_attach_worker()
709 rcu_assign_pointer(vq->worker, worker); in __vhost_vq_attach_worker()
710 worker->attachment_cnt++; in __vhost_vq_attach_worker()
713 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
714 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
717 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
718 mutex_unlock(&worker->mutex); in __vhost_vq_attach_worker()
724 mutex_lock(&old_worker->mutex); in __vhost_vq_attach_worker()
725 if (old_worker->killed) { in __vhost_vq_attach_worker()
726 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
737 mutex_lock(&vq->mutex); in __vhost_vq_attach_worker()
738 if (!vhost_vq_get_backend(vq) && !vq->kick) { in __vhost_vq_attach_worker()
739 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
741 old_worker->attachment_cnt--; in __vhost_vq_attach_worker()
742 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
748 WARN_ON(!old_worker->attachment_cnt && in __vhost_vq_attach_worker()
749 !llist_empty(&old_worker->work_list)); in __vhost_vq_attach_worker()
752 mutex_unlock(&vq->mutex); in __vhost_vq_attach_worker()
758 old_worker->attachment_cnt--; in __vhost_vq_attach_worker()
759 mutex_unlock(&old_worker->mutex); in __vhost_vq_attach_worker()
766 unsigned long index = info->worker_id; in vhost_vq_attach_worker()
767 struct vhost_dev *dev = vq->dev; in vhost_vq_attach_worker()
770 if (!dev->use_worker) in vhost_vq_attach_worker()
771 return -EINVAL; in vhost_vq_attach_worker()
773 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_vq_attach_worker()
774 if (!worker || worker->id != info->worker_id) in vhost_vq_attach_worker()
775 return -ENODEV; in vhost_vq_attach_worker()
789 return -ENOMEM; in vhost_new_worker()
791 info->worker_id = worker->id; in vhost_new_worker()
799 unsigned long index = info->worker_id; in vhost_free_worker()
802 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT); in vhost_free_worker()
803 if (!worker || worker->id != info->worker_id) in vhost_free_worker()
804 return -ENODEV; in vhost_free_worker()
806 mutex_lock(&worker->mutex); in vhost_free_worker()
807 if (worker->attachment_cnt || worker->killed) { in vhost_free_worker()
808 mutex_unlock(&worker->mutex); in vhost_free_worker()
809 return -EBUSY; in vhost_free_worker()
817 mutex_unlock(&worker->mutex); in vhost_free_worker()
834 if (idx >= dev->nvqs) in vhost_get_vq_from_user()
835 return -ENOBUFS; in vhost_get_vq_from_user()
837 idx = array_index_nospec(idx, dev->nvqs); in vhost_get_vq_from_user()
839 *vq = dev->vqs[idx]; in vhost_get_vq_from_user()
855 if (!dev->use_worker) in vhost_worker_ioctl()
856 return -EINVAL; in vhost_worker_ioctl()
859 return -EINVAL; in vhost_worker_ioctl()
870 ret = -EFAULT; in vhost_worker_ioctl()
874 return -EFAULT; in vhost_worker_ioctl()
881 return -ENOIOCTLCMD; in vhost_worker_ioctl()
891 ret = -EFAULT; in vhost_worker_ioctl()
898 worker = rcu_dereference_check(vq->worker, in vhost_worker_ioctl()
899 lockdep_is_held(&dev->mutex)); in vhost_worker_ioctl()
901 ret = -EINVAL; in vhost_worker_ioctl()
906 ring_worker.worker_id = worker->id; in vhost_worker_ioctl()
909 ret = -EFAULT; in vhost_worker_ioctl()
912 ret = -ENOIOCTLCMD; in vhost_worker_ioctl()
928 err = -EBUSY; in vhost_dev_set_owner()
938 if (dev->use_worker) { in vhost_dev_set_owner()
947 err = -ENOMEM; in vhost_dev_set_owner()
951 for (i = 0; i < dev->nvqs; i++) in vhost_dev_set_owner()
952 __vhost_vq_attach_worker(dev->vqs[i], worker); in vhost_dev_set_owner()
985 dev->umem = umem; in vhost_dev_reset_owner()
989 for (i = 0; i < dev->nvqs; ++i) in vhost_dev_reset_owner()
990 dev->vqs[i]->umem = umem; in vhost_dev_reset_owner()
998 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_stop()
999 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) in vhost_dev_stop()
1000 vhost_poll_stop(&dev->vqs[i]->poll); in vhost_dev_stop()
1011 spin_lock(&dev->iotlb_lock); in vhost_clear_msg()
1013 list_for_each_entry_safe(node, n, &dev->read_list, node) { in vhost_clear_msg()
1014 list_del(&node->node); in vhost_clear_msg()
1018 list_for_each_entry_safe(node, n, &dev->pending_list, node) { in vhost_clear_msg()
1019 list_del(&node->node); in vhost_clear_msg()
1023 spin_unlock(&dev->iotlb_lock); in vhost_clear_msg()
1031 for (i = 0; i < dev->nvqs; ++i) { in vhost_dev_cleanup()
1032 if (dev->vqs[i]->error_ctx) in vhost_dev_cleanup()
1033 eventfd_ctx_put(dev->vqs[i]->error_ctx); in vhost_dev_cleanup()
1034 if (dev->vqs[i]->kick) in vhost_dev_cleanup()
1035 fput(dev->vqs[i]->kick); in vhost_dev_cleanup()
1036 if (dev->vqs[i]->call_ctx.ctx) in vhost_dev_cleanup()
1037 eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx); in vhost_dev_cleanup()
1038 vhost_vq_reset(dev, dev->vqs[i]); in vhost_dev_cleanup()
1041 if (dev->log_ctx) in vhost_dev_cleanup()
1042 eventfd_ctx_put(dev->log_ctx); in vhost_dev_cleanup()
1043 dev->log_ctx = NULL; in vhost_dev_cleanup()
1045 vhost_iotlb_free(dev->umem); in vhost_dev_cleanup()
1046 dev->umem = NULL; in vhost_dev_cleanup()
1047 vhost_iotlb_free(dev->iotlb); in vhost_dev_cleanup()
1048 dev->iotlb = NULL; in vhost_dev_cleanup()
1050 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_dev_cleanup()
1061 if (a > ULONG_MAX - (unsigned long)log_base || in log_access_ok()
1066 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); in log_access_ok()
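
In log_access_ok() (line 1066), the dirty log is a bitmap with one bit per VHOST_PAGE_SIZE page, so the expression rounds sz bytes of guest memory up to whole bitmap bytes. A standalone restatement of the arithmetic; 1 GiB of guest memory at 4 KiB pages needs a 32 KiB log:

#include <stdio.h>

#define VHOST_PAGE_SIZE 4096ULL

int main(void)
{
        unsigned long long sz = 1ULL << 30;     /* 1 GiB of guest memory */
        unsigned long long log_bytes =
                (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8;

        printf("%llu bytes of log for %llu bytes of memory\n",
               log_bytes, sz);                  /* 32768 for 1 GiB */
        return 0;
}
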
1078 return uaddr > ULONG_MAX - size + 1; in vhost_overflow()
1090 list_for_each_entry(map, &umem->list, link) { in vq_memory_access_ok()
1091 unsigned long a = map->addr; in vq_memory_access_ok()
1093 if (vhost_overflow(map->addr, map->size)) in vq_memory_access_ok()
1097 if (!access_ok((void __user *)a, map->size)) in vq_memory_access_ok()
1100 map->start, in vq_memory_access_ok()
1101 map->size)) in vq_memory_access_ok()
1111 const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; in vhost_vq_meta_fetch()
1116 return (void __user *)(uintptr_t)(map->addr + addr - map->start); in vhost_vq_meta_fetch()
1126 for (i = 0; i < d->nvqs; ++i) { in memory_access_ok()
1130 mutex_lock(&d->vqs[i]->mutex); in memory_access_ok()
1131 log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL); in memory_access_ok()
1133 if (d->vqs[i]->private_data) in memory_access_ok()
1134 ok = vq_memory_access_ok(d->vqs[i]->log_base, in memory_access_ok()
1138 mutex_unlock(&d->vqs[i]->mutex); in memory_access_ok()
1153 if (!vq->iotlb) in vhost_copy_to_user()
1158 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_to_user()
1169 ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov, in vhost_copy_to_user()
1170 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_to_user()
1174 iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size); in vhost_copy_to_user()
1188 if (!vq->iotlb) in vhost_copy_from_user()
1193 * could be accessed through iotlb. So -EAGAIN should in vhost_copy_from_user()
1204 ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov, in vhost_copy_from_user()
1205 ARRAY_SIZE(vq->iotlb_iov), in vhost_copy_from_user()
1213 iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size); in vhost_copy_from_user()
1229 ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov, in __vhost_get_user_slow()
1230 ARRAY_SIZE(vq->iotlb_iov), in __vhost_get_user_slow()
1239 if (ret != 1 || vq->iotlb_iov[0].iov_len != size) { in __vhost_get_user_slow()
1246 return vq->iotlb_iov[0].iov_base; in __vhost_get_user_slow()
1251 * could be accessed through iotlb. So -EAGAIN should
1269 if (!vq->iotlb) { \
1278 ret = -EFAULT; \
1285 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), in vhost_put_avail_event()
1293 return vhost_copy_to_user(vq, vq->used->ring + idx, head, in vhost_put_used()
1300 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), in vhost_put_used_flags()
1301 &vq->used->flags); in vhost_put_used_flags()
1307 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), in vhost_put_used_idx()
1308 &vq->used->idx); in vhost_put_used_idx()
1314 if (!vq->iotlb) { \
1324 ret = -EFAULT; \
1338 for (i = 0; i < d->nvqs; ++i) in vhost_dev_lock_vqs()
1339 mutex_lock_nested(&d->vqs[i]->mutex, i); in vhost_dev_lock_vqs()
1345 for (i = 0; i < d->nvqs; ++i) in vhost_dev_unlock_vqs()
1346 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_unlock_vqs()
1354 r = vhost_get_avail(vq, idx, &vq->avail->idx); in vhost_get_avail_idx()
1357 &vq->avail->idx, r); in vhost_get_avail_idx()
1362 vq->avail_idx = vhost16_to_cpu(vq, idx); in vhost_get_avail_idx()
1363 if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) { in vhost_get_avail_idx()
1365 vq->last_avail_idx, vq->avail_idx); in vhost_get_avail_idx()
1366 return -EINVAL; in vhost_get_avail_idx()
1370 if (vq->avail_idx == vq->last_avail_idx) in vhost_get_avail_idx()
1374 * We updated vq->avail_idx so we need a memory barrier between in vhost_get_avail_idx()
1385 &vq->avail->ring[idx & (vq->num - 1)]); in vhost_get_avail_head()
1391 return vhost_get_avail(vq, *flags, &vq->avail->flags); in vhost_get_avail_flags()
1403 return vhost_get_used(vq, *idx, &vq->used->idx); in vhost_get_used_idx()
1409 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); in vhost_get_desc()
1417 spin_lock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
1419 list_for_each_entry_safe(node, n, &d->pending_list, node) { in vhost_iotlb_notify_vq()
1420 struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb; in vhost_iotlb_notify_vq()
1421 if (msg->iova <= vq_msg->iova && in vhost_iotlb_notify_vq()
1422 msg->iova + msg->size - 1 >= vq_msg->iova && in vhost_iotlb_notify_vq()
1423 vq_msg->type == VHOST_IOTLB_MISS) { in vhost_iotlb_notify_vq()
1424 vhost_poll_queue(&node->vq->poll); in vhost_iotlb_notify_vq()
1425 list_del(&node->node); in vhost_iotlb_notify_vq()
1430 spin_unlock(&d->iotlb_lock); in vhost_iotlb_notify_vq()
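
The wakeup test in vhost_iotlb_notify_vq() (lines 1421-1422) treats an IOTLB update as satisfying a pending miss when the missed IOVA falls inside the closed interval [iova, iova + size - 1]. The same predicate, restated standalone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool update_covers_miss(uint64_t upd_iova, uint64_t upd_size,
                               uint64_t miss_iova)
{
        /* Closed interval [upd_iova, upd_iova + upd_size - 1]. */
        return upd_iova <= miss_iova &&
               upd_iova + upd_size - 1 >= miss_iova;
}

int main(void)
{
        printf("%d\n", update_covers_miss(0x1000, 0x1000, 0x1fff)); /* 1 */
        printf("%d\n", update_covers_miss(0x1000, 0x1000, 0x2000)); /* 0 */
        return 0;
}
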
1450 static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid, in vhost_process_iotlb_msg() argument
1455 if (asid != 0) in vhost_process_iotlb_msg()
1456 return -EINVAL; in vhost_process_iotlb_msg()
1458 mutex_lock(&dev->mutex); in vhost_process_iotlb_msg()
1460 switch (msg->type) { in vhost_process_iotlb_msg()
1462 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1463 ret = -EFAULT; in vhost_process_iotlb_msg()
1466 if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) { in vhost_process_iotlb_msg()
1467 ret = -EFAULT; in vhost_process_iotlb_msg()
1471 if (vhost_iotlb_add_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1472 msg->iova + msg->size - 1, in vhost_process_iotlb_msg()
1473 msg->uaddr, msg->perm)) { in vhost_process_iotlb_msg()
1474 ret = -ENOMEM; in vhost_process_iotlb_msg()
1480 if (!dev->iotlb) { in vhost_process_iotlb_msg()
1481 ret = -EFAULT; in vhost_process_iotlb_msg()
1485 vhost_iotlb_del_range(dev->iotlb, msg->iova, in vhost_process_iotlb_msg()
1486 msg->iova + msg->size - 1); in vhost_process_iotlb_msg()
1489 ret = -EINVAL; in vhost_process_iotlb_msg()
1494 mutex_unlock(&dev->mutex); in vhost_process_iotlb_msg()
1504 u32 asid = 0; in vhost_chr_write_iter() local
1508 ret = -EINVAL; in vhost_chr_write_iter()
1517 offset = offsetof(struct vhost_msg, iotlb) - sizeof(int); in vhost_chr_write_iter()
1520 if (vhost_backend_has_feature(dev->vqs[0], in vhost_chr_write_iter()
1522 ret = copy_from_iter(&asid, sizeof(asid), from); in vhost_chr_write_iter()
1523 if (ret != sizeof(asid)) { in vhost_chr_write_iter()
1524 ret = -EINVAL; in vhost_chr_write_iter()
1532 ret = -EINVAL; in vhost_chr_write_iter()
1539 ret = -EINVAL; in vhost_chr_write_iter()
1544 ret = -EINVAL; in vhost_chr_write_iter()
1548 if (dev->msg_handler) in vhost_chr_write_iter()
1549 ret = dev->msg_handler(dev, asid, &msg); in vhost_chr_write_iter()
1551 ret = vhost_process_iotlb_msg(dev, asid, &msg); in vhost_chr_write_iter()
1553 ret = -EFAULT; in vhost_chr_write_iter()
1569 poll_wait(file, &dev->wait, wait); in vhost_chr_poll()
1571 if (!list_empty(&dev->read_list)) in vhost_chr_poll()
1591 prepare_to_wait(&dev->wait, &wait, in vhost_chr_read_iter()
1594 node = vhost_dequeue_msg(dev, &dev->read_list); in vhost_chr_read_iter()
1598 ret = -EAGAIN; in vhost_chr_read_iter()
1602 ret = -ERESTARTSYS; in vhost_chr_read_iter()
1605 if (!dev->iotlb) { in vhost_chr_read_iter()
1606 ret = -EBADFD; in vhost_chr_read_iter()
1614 finish_wait(&dev->wait, &wait); in vhost_chr_read_iter()
1618 void *start = &node->msg; in vhost_chr_read_iter()
1620 switch (node->msg.type) { in vhost_chr_read_iter()
1622 size = sizeof(node->msg); in vhost_chr_read_iter()
1623 msg = &node->msg.iotlb; in vhost_chr_read_iter()
1626 size = sizeof(node->msg_v2); in vhost_chr_read_iter()
1627 msg = &node->msg_v2.iotlb; in vhost_chr_read_iter()
1635 if (ret != size || msg->type != VHOST_IOTLB_MISS) { in vhost_chr_read_iter()
1639 vhost_enqueue_msg(dev, &dev->pending_list, node); in vhost_chr_read_iter()
1648 struct vhost_dev *dev = vq->dev; in vhost_iotlb_miss()
1655 return -ENOMEM; in vhost_iotlb_miss()
1658 node->msg_v2.type = VHOST_IOTLB_MSG_V2; in vhost_iotlb_miss()
1659 msg = &node->msg_v2.iotlb; in vhost_iotlb_miss()
1661 msg = &node->msg.iotlb; in vhost_iotlb_miss()
1664 msg->type = VHOST_IOTLB_MISS; in vhost_iotlb_miss()
1665 msg->iova = iova; in vhost_iotlb_miss()
1666 msg->perm = access; in vhost_iotlb_miss()
1668 vhost_enqueue_msg(dev, &dev->read_list, node); in vhost_iotlb_miss()
1681 if (vq->iotlb) in vq_access_ok()
1696 if (likely(map->perm & access)) in vhost_vq_meta_update()
1697 vq->meta_iotlb[type] = map; in vhost_vq_meta_update()
1704 struct vhost_iotlb *umem = vq->iotlb; in iotlb_access_ok()
1705 u64 s = 0, size, orig_addr = addr, last = addr + len - 1; in iotlb_access_ok()
1712 if (map == NULL || map->start > addr) { in iotlb_access_ok()
1715 } else if (!(map->perm & access)) { in iotlb_access_ok()
1722 size = map->size - addr + map->start; in iotlb_access_ok()
1736 unsigned int num = vq->num; in vq_meta_prefetch()
1738 if (!vq->iotlb) in vq_meta_prefetch()
1741 return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, in vq_meta_prefetch()
1743 iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, in vq_meta_prefetch()
1746 iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, in vq_meta_prefetch()
1755 return memory_access_ok(dev, dev->umem, 1); in vhost_log_access_ok()
1766 if (vq->iotlb) in vq_log_used_access_ok()
1770 vhost_get_used_size(vq, vq->num)); in vq_log_used_access_ok()
1778 return vq_memory_access_ok(log_base, vq->umem, in vq_log_access_ok()
1780 vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); in vq_log_access_ok()
1787 if (!vq_log_access_ok(vq, vq->log_base)) in vhost_vq_access_ok()
1790 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); in vhost_vq_access_ok()
1803 return -EFAULT; in vhost_set_memory()
1805 return -EOPNOTSUPP; in vhost_set_memory()
1807 return -E2BIG; in vhost_set_memory()
1811 return -ENOMEM; in vhost_set_memory()
1814 if (copy_from_user(newmem->regions, m->regions, in vhost_set_memory()
1817 return -EFAULT; in vhost_set_memory()
1823 return -ENOMEM; in vhost_set_memory()
1826 for (region = newmem->regions; in vhost_set_memory()
1827 region < newmem->regions + mem.nregions; in vhost_set_memory()
1830 region->guest_phys_addr, in vhost_set_memory()
1831 region->guest_phys_addr + in vhost_set_memory()
1832 region->memory_size - 1, in vhost_set_memory()
1833 region->userspace_addr, in vhost_set_memory()
1841 oldumem = d->umem; in vhost_set_memory()
1842 d->umem = newumem; in vhost_set_memory()
1845 for (i = 0; i < d->nvqs; ++i) { in vhost_set_memory()
1846 mutex_lock(&d->vqs[i]->mutex); in vhost_set_memory()
1847 d->vqs[i]->umem = newumem; in vhost_set_memory()
1848 mutex_unlock(&d->vqs[i]->mutex); in vhost_set_memory()
1858 return -EFAULT; in vhost_set_memory()
1869 if (vq->private_data) in vhost_vring_set_num()
1870 return -EBUSY; in vhost_vring_set_num()
1873 return -EFAULT; in vhost_vring_set_num()
1875 if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) in vhost_vring_set_num()
1876 return -EINVAL; in vhost_vring_set_num()
1877 vq->num = s.num; in vhost_vring_set_num()
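
vhost_vring_set_num() (line 1875) rejects zero, oversized, and non-power-of-two ring sizes because ring indices elsewhere are reduced with idx & (vq->num - 1) (see lines 1385 and 2724), which only equals idx % num when num is a power of two. A quick standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t num = 256;             /* passes (num & (num - 1)) == 0 */

        for (uint32_t idx = 0; idx < 4 * num; idx++)
                assert((idx & (num - 1)) == idx % num);
        return 0;
}
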
1889 return -EFAULT; in vhost_vring_set_addr()
1891 return -EOPNOTSUPP; in vhost_vring_set_addr()
1898 return -EFAULT; in vhost_vring_set_addr()
1901 BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); in vhost_vring_set_addr()
1902 BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); in vhost_vring_set_addr()
1903 if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1904 (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || in vhost_vring_set_addr()
1905 (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) in vhost_vring_set_addr()
1906 return -EINVAL; in vhost_vring_set_addr()
1911 if (vq->private_data) { in vhost_vring_set_addr()
1912 if (!vq_access_ok(vq, vq->num, in vhost_vring_set_addr()
1916 return -EINVAL; in vhost_vring_set_addr()
1919 if (!vq_log_used_access_ok(vq, vq->log_base, in vhost_vring_set_addr()
1922 return -EINVAL; in vhost_vring_set_addr()
1925 vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); in vhost_vring_set_addr()
1926 vq->desc = (void __user *)(unsigned long)a.desc_user_addr; in vhost_vring_set_addr()
1927 vq->avail = (void __user *)(unsigned long)a.avail_user_addr; in vhost_vring_set_addr()
1928 vq->log_addr = a.log_guest_addr; in vhost_vring_set_addr()
1929 vq->used = (void __user *)(unsigned long)a.used_user_addr; in vhost_vring_set_addr()
1941 mutex_lock(&vq->mutex); in vhost_vring_set_num_addr()
1954 mutex_unlock(&vq->mutex); in vhost_vring_set_num_addr()
1962 struct eventfd_ctx *ctx = NULL; in vhost_vring_ioctl() local
1978 mutex_lock(&vq->mutex); in vhost_vring_ioctl()
1984 if (vq->private_data) { in vhost_vring_ioctl()
1985 r = -EBUSY; in vhost_vring_ioctl()
1989 r = -EFAULT; in vhost_vring_ioctl()
1993 vq->last_avail_idx = s.num & 0xffff; in vhost_vring_ioctl()
1994 vq->last_used_idx = (s.num >> 16) & 0xffff; in vhost_vring_ioctl()
1997 r = -EINVAL; in vhost_vring_ioctl()
2000 vq->last_avail_idx = s.num; in vhost_vring_ioctl()
2003 vq->avail_idx = vq->last_avail_idx; in vhost_vring_ioctl()
2008 s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); in vhost_vring_ioctl()
2010 s.num = vq->last_avail_idx; in vhost_vring_ioctl()
2012 r = -EFAULT; in vhost_vring_ioctl()
2016 r = -EFAULT; in vhost_vring_ioctl()
2024 if (eventfp != vq->kick) { in vhost_vring_ioctl()
2025 pollstop = (filep = vq->kick) != NULL; in vhost_vring_ioctl()
2026 pollstart = (vq->kick = eventfp) != NULL; in vhost_vring_ioctl()
2032 r = -EFAULT; in vhost_vring_ioctl()
2035 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); in vhost_vring_ioctl()
2036 if (IS_ERR(ctx)) { in vhost_vring_ioctl()
2037 r = PTR_ERR(ctx); in vhost_vring_ioctl()
2041 swap(ctx, vq->call_ctx.ctx); in vhost_vring_ioctl()
2045 r = -EFAULT; in vhost_vring_ioctl()
2048 ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); in vhost_vring_ioctl()
2049 if (IS_ERR(ctx)) { in vhost_vring_ioctl()
2050 r = PTR_ERR(ctx); in vhost_vring_ioctl()
2053 swap(ctx, vq->error_ctx); in vhost_vring_ioctl()
2063 r = -EFAULT; in vhost_vring_ioctl()
2066 vq->busyloop_timeout = s.num; in vhost_vring_ioctl()
2070 s.num = vq->busyloop_timeout; in vhost_vring_ioctl()
2072 r = -EFAULT; in vhost_vring_ioctl()
2075 r = -ENOIOCTLCMD; in vhost_vring_ioctl()
2078 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2079 vhost_poll_stop(&vq->poll); in vhost_vring_ioctl()
2081 if (!IS_ERR_OR_NULL(ctx)) in vhost_vring_ioctl()
2082 eventfd_ctx_put(ctx); in vhost_vring_ioctl()
2086 if (pollstart && vq->handle_kick) in vhost_vring_ioctl()
2087 r = vhost_poll_start(&vq->poll, vq->kick); in vhost_vring_ioctl()
2089 mutex_unlock(&vq->mutex); in vhost_vring_ioctl()
2091 if (pollstop && vq->handle_kick) in vhost_vring_ioctl()
2092 vhost_dev_flush(vq->poll.dev); in vhost_vring_ioctl()
2104 return -ENOMEM; in vhost_init_device_iotlb()
2106 oiotlb = d->iotlb; in vhost_init_device_iotlb()
2107 d->iotlb = niotlb; in vhost_init_device_iotlb()
2109 for (i = 0; i < d->nvqs; ++i) { in vhost_init_device_iotlb()
2110 struct vhost_virtqueue *vq = d->vqs[i]; in vhost_init_device_iotlb()
2112 mutex_lock(&vq->mutex); in vhost_init_device_iotlb()
2113 vq->iotlb = niotlb; in vhost_init_device_iotlb()
2115 mutex_unlock(&vq->mutex); in vhost_init_device_iotlb()
2127 struct eventfd_ctx *ctx; in vhost_dev_ioctl() local
2149 r = -EFAULT; in vhost_dev_ioctl()
2153 r = -EFAULT; in vhost_dev_ioctl()
2156 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2159 vq = d->vqs[i]; in vhost_dev_ioctl()
2160 mutex_lock(&vq->mutex); in vhost_dev_ioctl()
2162 if (vq->private_data && !vq_log_access_ok(vq, base)) in vhost_dev_ioctl()
2163 r = -EFAULT; in vhost_dev_ioctl()
2165 vq->log_base = base; in vhost_dev_ioctl()
2166 mutex_unlock(&vq->mutex); in vhost_dev_ioctl()
2173 ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); in vhost_dev_ioctl()
2174 if (IS_ERR(ctx)) { in vhost_dev_ioctl()
2175 r = PTR_ERR(ctx); in vhost_dev_ioctl()
2178 swap(ctx, d->log_ctx); in vhost_dev_ioctl()
2179 for (i = 0; i < d->nvqs; ++i) { in vhost_dev_ioctl()
2180 mutex_lock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2181 d->vqs[i]->log_ctx = d->log_ctx; in vhost_dev_ioctl()
2182 mutex_unlock(&d->vqs[i]->mutex); in vhost_dev_ioctl()
2184 if (ctx) in vhost_dev_ioctl()
2185 eventfd_ctx_put(ctx); in vhost_dev_ioctl()
2188 r = -ENOIOCTLCMD; in vhost_dev_ioctl()
2198 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
2233 return -EFAULT; in log_write()
2239 write_length -= VHOST_PAGE_SIZE; in log_write()
2247 struct vhost_iotlb *umem = vq->umem; in log_write_hva()
2258 list_for_each_entry(u, &umem->list, link) { in log_write_hva()
2259 if (u->addr > hva - 1 + len || in log_write_hva()
2260 u->addr - 1 + u->size < hva) in log_write_hva()
2262 start = max(u->addr, hva); in log_write_hva()
2263 end = min(u->addr - 1 + u->size, hva - 1 + len); in log_write_hva()
2264 l = end - start + 1; in log_write_hva()
2265 r = log_write(vq->log_base, in log_write_hva()
2266 u->start + start - u->addr, in log_write_hva()
2275 return -EFAULT; in log_write_hva()
2277 len -= min; in log_write_hva()
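
log_write_hva() (lines 2258-2266) clips the dirtied HVA range against each memory region using inclusive interval endpoints before logging the overlap. The same intersection restated standalone, with the region simplified to an addr/size pair:

#include <stdint.h>
#include <stdio.h>

struct region { uint64_t addr, size; };

static uint64_t overlap_len(const struct region *u, uint64_t hva,
                            uint64_t len)
{
        uint64_t start, end;

        /* Same disjointness test as the matched lines. */
        if (u->addr > hva - 1 + len || u->addr - 1 + u->size < hva)
                return 0;
        start = u->addr > hva ? u->addr : hva;
        end = (u->addr - 1 + u->size < hva - 1 + len) ?
              u->addr - 1 + u->size : hva - 1 + len;
        return end - start + 1;
}

int main(void)
{
        struct region u = { .addr = 0x2000, .size = 0x1000 };

        /* Range 0x1800..0x27ff overlaps the region in 0x2000..0x27ff. */
        printf("0x%llx\n",
               (unsigned long long)overlap_len(&u, 0x1800, 0x1000)); /* 0x800 */
        return 0;
}
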
2286 struct iovec *iov = vq->log_iov; in log_used()
2289 if (!vq->iotlb) in log_used()
2290 return log_write(vq->log_base, vq->log_addr + used_offset, len); in log_used()
2292 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, in log_used()
2315 if (vq->iotlb) { in vhost_log_write()
2327 r = log_write(vq->log_base, log[i].addr, l); in vhost_log_write()
2330 len -= l; in vhost_log_write()
2332 if (vq->log_ctx) in vhost_log_write()
2333 eventfd_signal(vq->log_ctx); in vhost_log_write()
2347 return -EFAULT; in vhost_update_used_flags()
2348 if (unlikely(vq->log_used)) { in vhost_update_used_flags()
2352 used = &vq->used->flags; in vhost_update_used_flags()
2353 log_used(vq, (used - (void __user *)vq->used), in vhost_update_used_flags()
2354 sizeof vq->used->flags); in vhost_update_used_flags()
2355 if (vq->log_ctx) in vhost_update_used_flags()
2356 eventfd_signal(vq->log_ctx); in vhost_update_used_flags()
2364 return -EFAULT; in vhost_update_avail_event()
2365 if (unlikely(vq->log_used)) { in vhost_update_avail_event()
2371 log_used(vq, (used - (void __user *)vq->used), in vhost_update_avail_event()
2373 if (vq->log_ctx) in vhost_update_avail_event()
2374 eventfd_signal(vq->log_ctx); in vhost_update_avail_event()
2383 bool is_le = vq->is_le; in vhost_vq_init_access()
2385 if (!vq->private_data) in vhost_vq_init_access()
2393 vq->signalled_used_valid = false; in vhost_vq_init_access()
2394 if (!vq->iotlb && in vhost_vq_init_access()
2395 !access_ok(&vq->used->idx, sizeof vq->used->idx)) { in vhost_vq_init_access()
2396 r = -EFAULT; in vhost_vq_init_access()
2402 &vq->used->idx); in vhost_vq_init_access()
2405 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); in vhost_vq_init_access()
2409 vq->is_le = is_le; in vhost_vq_init_access()
2418 struct vhost_dev *dev = vq->dev; in translate_desc()
2419 struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; in translate_desc()
2421 u64 s = 0, last = addr + len - 1; in translate_desc()
2427 ret = -ENOBUFS; in translate_desc()
2432 if (map == NULL || map->start > addr) { in translate_desc()
2433 if (umem != dev->iotlb) { in translate_desc()
2434 ret = -EFAULT; in translate_desc()
2437 ret = -EAGAIN; in translate_desc()
2439 } else if (!(map->perm & access)) { in translate_desc()
2440 ret = -EPERM; in translate_desc()
2445 size = map->size - addr + map->start; in translate_desc()
2446 _iov->iov_len = min((u64)len - s, size); in translate_desc()
2447 _iov->iov_base = (void __user *)(unsigned long) in translate_desc()
2448 (map->addr + addr - map->start); in translate_desc()
2454 if (ret == -EAGAIN) in translate_desc()
2461 * or -1U if we're at the end. */
2467 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) in next_desc()
2468 return -1U; in next_desc()
2471 next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); in next_desc()
2483 u32 len = vhost32_to_cpu(vq, indirect->len); in get_indirect()
2493 return -EINVAL; in get_indirect()
2496 ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, in get_indirect()
2499 if (ret != -EAGAIN) in get_indirect()
2503 iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); in get_indirect()
2509 indirect->len); in get_indirect()
2510 return -E2BIG; in get_indirect()
2519 return -EINVAL; in get_indirect()
2523 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2524 return -EINVAL; in get_indirect()
2528 i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); in get_indirect()
2529 return -EINVAL; in get_indirect()
2539 iov_size - iov_count, access); in get_indirect()
2541 if (ret != -EAGAIN) in get_indirect()
2560 return -EINVAL; in get_indirect()
2564 } while ((i = next_desc(vq, &desc)) != -1); in get_indirect()
2573 * This function returns the descriptor number found, or vq->num (which is
2583 u16 last_avail_idx = vq->last_avail_idx; in vhost_get_vq_desc()
2587 if (vq->avail_idx == vq->last_avail_idx) { in vhost_get_vq_desc()
2593 return vq->num; in vhost_get_vq_desc()
2601 &vq->avail->ring[last_avail_idx % vq->num]); in vhost_get_vq_desc()
2602 return -EFAULT; in vhost_get_vq_desc()
2608 if (unlikely(head >= vq->num)) { in vhost_get_vq_desc()
2610 head, vq->num); in vhost_get_vq_desc()
2611 return -EINVAL; in vhost_get_vq_desc()
2622 if (unlikely(i >= vq->num)) { in vhost_get_vq_desc()
2624 i, vq->num, head); in vhost_get_vq_desc()
2625 return -EINVAL; in vhost_get_vq_desc()
2627 if (unlikely(++found > vq->num)) { in vhost_get_vq_desc()
2630 i, vq->num, head); in vhost_get_vq_desc()
2631 return -EINVAL; in vhost_get_vq_desc()
2636 i, vq->desc + i); in vhost_get_vq_desc()
2637 return -EFAULT; in vhost_get_vq_desc()
2644 if (ret != -EAGAIN) in vhost_get_vq_desc()
2658 iov_size - iov_count, access); in vhost_get_vq_desc()
2660 if (ret != -EAGAIN) in vhost_get_vq_desc()
2680 return -EINVAL; in vhost_get_vq_desc()
2684 } while ((i = next_desc(vq, &desc)) != -1); in vhost_get_vq_desc()
2687 vq->last_avail_idx++; in vhost_get_vq_desc()
2691 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); in vhost_get_vq_desc()
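
vhost_get_vq_desc() (lines 2608-2637) walks a descriptor chain by following VRING_DESC_F_NEXT links, bounding both the index and the iteration count so a corrupted or malicious ring cannot send the host into an endless loop. A simplified standalone walk over an in-memory table; address translation and the indirect-descriptor path are omitted:

#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_NEXT 1

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags, next; };

static int walk_chain(const struct vring_desc *desc, unsigned int num,
                      unsigned int head)
{
        unsigned int i = head, found = 0;

        for (;;) {
                if (i >= num)
                        return -1;      /* -EINVAL: index out of range */
                if (++found > num)
                        return -1;      /* -EINVAL: cycle in the chain */
                printf("desc %u: addr=0x%llx len=%u\n", i,
                       (unsigned long long)desc[i].addr, desc[i].len);
                if (!(desc[i].flags & VRING_DESC_F_NEXT))
                        return 0;
                i = desc[i].next;
        }
}

int main(void)
{
        struct vring_desc ring[4] = {
                [0] = { .addr = 0x1000, .len = 64,
                        .flags = VRING_DESC_F_NEXT, .next = 2 },
                [2] = { .addr = 0x2000, .len = 128 },
        };

        return walk_chain(ring, 4, 0) ? 1 : 0;
}
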
2699 vq->last_avail_idx -= n; in vhost_discard_vq_desc()
2724 start = vq->last_used_idx & (vq->num - 1); in __vhost_add_used_n()
2725 used = vq->used->ring + start; in __vhost_add_used_n()
2728 return -EFAULT; in __vhost_add_used_n()
2730 if (unlikely(vq->log_used)) { in __vhost_add_used_n()
2734 log_used(vq, ((void __user *)used - (void __user *)vq->used), in __vhost_add_used_n()
2737 old = vq->last_used_idx; in __vhost_add_used_n()
2738 new = (vq->last_used_idx += count); in __vhost_add_used_n()
2743 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) in __vhost_add_used_n()
2744 vq->signalled_used_valid = false; in __vhost_add_used_n()
2755 start = vq->last_used_idx & (vq->num - 1); in vhost_add_used_n()
2756 n = vq->num - start; in vhost_add_used_n()
2762 count -= n; in vhost_add_used_n()
2770 return -EFAULT; in vhost_add_used_n()
2772 if (unlikely(vq->log_used)) { in vhost_add_used_n()
2777 sizeof vq->used->idx); in vhost_add_used_n()
2778 if (vq->log_ctx) in vhost_add_used_n()
2779 eventfd_signal(vq->log_ctx); in vhost_add_used_n()
2796 unlikely(vq->avail_idx == vq->last_avail_idx)) in vhost_notify()
2807 old = vq->signalled_used; in vhost_notify()
2808 v = vq->signalled_used_valid; in vhost_notify()
2809 new = vq->signalled_used = vq->last_used_idx; in vhost_notify()
2810 vq->signalled_used_valid = true; in vhost_notify()
2826 if (vq->call_ctx.ctx && vhost_notify(dev, vq)) in vhost_signal()
2827 eventfd_signal(vq->call_ctx.ctx); in vhost_signal()
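
vhost_notify() (lines 2796-2810) implements VIRTIO_RING_F_EVENT_IDX interrupt suppression: the guest is signalled only when the new used index has moved past the guest's used_event, tested with wrap-safe 16-bit arithmetic. The spec's vring_need_event() test restated standalone, with a wraparound example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool need_event(uint16_t event, uint16_t new_idx, uint16_t old_idx)
{
        /* True iff event lies in the window (old_idx, new_idx], computed
         * with modulo-2^16 arithmetic so index wraparound is harmless. */
        return (uint16_t)(new_idx - event - 1) < (uint16_t)(new_idx - old_idx);
}

int main(void)
{
        /* Ring wrapped past 0xffff: old=0xfffe, new=0x0002. */
        printf("%d\n", need_event(0x0000, 0x0002, 0xfffe)); /* 1: signal */
        printf("%d\n", need_event(0x0005, 0x0002, 0xfffe)); /* 0: suppress */
        return 0;
}
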
2841 /* multi-buffer version of vhost_add_used_and_signal */
2856 if (vq->avail_idx != vq->last_avail_idx) in vhost_vq_avail_empty()
2861 /* Note: we treat error as non-empty here */ in vhost_vq_avail_empty()
2871 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) in vhost_enable_notify()
2873 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; in vhost_enable_notify()
2878 &vq->used->flags, r); in vhost_enable_notify()
2907 if (vq->used_flags & VRING_USED_F_NO_NOTIFY) in vhost_disable_notify()
2909 vq->used_flags |= VRING_USED_F_NO_NOTIFY; in vhost_disable_notify()
2914 &vq->used->flags, r); in vhost_disable_notify()
2927 node->vq = vq; in vhost_new_msg()
2928 node->msg.type = type; in vhost_new_msg()
2936 spin_lock(&dev->iotlb_lock); in vhost_enqueue_msg()
2937 list_add_tail(&node->node, head); in vhost_enqueue_msg()
2938 spin_unlock(&dev->iotlb_lock); in vhost_enqueue_msg()
2940 wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); in vhost_enqueue_msg()
2949 spin_lock(&dev->iotlb_lock); in vhost_dequeue_msg()
2953 list_del(&node->node); in vhost_dequeue_msg()
2955 spin_unlock(&dev->iotlb_lock); in vhost_dequeue_msg()
2966 mutex_lock(&dev->mutex); in vhost_set_backend_features()
2967 for (i = 0; i < dev->nvqs; ++i) { in vhost_set_backend_features()
2968 vq = dev->vqs[i]; in vhost_set_backend_features()
2969 mutex_lock(&vq->mutex); in vhost_set_backend_features()
2970 vq->acked_backend_features = features; in vhost_set_backend_features()
2971 mutex_unlock(&vq->mutex); in vhost_set_backend_features()
2973 mutex_unlock(&dev->mutex); in vhost_set_backend_features()