1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2009 Red Hat, Inc.
3 * Copyright (C) 2006 Rusty Russell IBM Corporation
4 *
5 * Author: Michael S. Tsirkin <mst@redhat.com>
6 *
7 * Inspiration, some code, and most witty comments come from
8 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
9 *
10 * Generic code for virtio server in host kernel.
11 */
12
13 #include <linux/eventfd.h>
14 #include <linux/vhost.h>
15 #include <linux/uio.h>
16 #include <linux/mm.h>
17 #include <linux/miscdevice.h>
18 #include <linux/mutex.h>
19 #include <linux/poll.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/slab.h>
23 #include <linux/vmalloc.h>
24 #include <linux/kthread.h>
25 #include <linux/cgroup.h>
26 #include <linux/module.h>
27 #include <linux/sort.h>
28 #include <linux/sched/mm.h>
29 #include <linux/sched/signal.h>
30 #include <linux/sched/vhost_task.h>
31 #include <linux/interval_tree_generic.h>
32 #include <linux/nospec.h>
33 #include <linux/kcov.h>
34
35 #include "vhost.h"
36
/* Read-only module parameters; fixed for the lifetime of the module. */
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");
/*
 * Default worker mode for new devices: when true, workers run as
 * vhost_tasks forked from the owner; when false, as kthreads
 * (see vhost_worker_create()).
 */
static bool fork_from_owner_default = VHOST_FORK_OWNER_TASK;

#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
module_param(fork_from_owner_default, bool, 0444);
MODULE_PARM_DESC(fork_from_owner_default,
		 "Set task mode as the default(default: Y)");
#endif

enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

/*
 * With VIRTIO_RING_F_EVENT_IDX, an extra __virtio16 event word sits
 * directly past each ring (see vhost_get_avail_size()/vhost_get_used_size(),
 * which add 2 bytes for it).
 */
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
59
60 #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
/* Reset cross-endian state: ring byte order follows host endianness. */
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}
65
/* Userspace asked for a big-endian legacy ring. */
static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}
70
/* Userspace asked for a little-endian legacy ring. */
static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}
75
/*
 * VHOST_SET_VRING_ENDIAN handler: record the legacy ring byte order
 * requested by userspace.
 *
 * Must be called before a backend is attached (-EBUSY otherwise), since
 * the byte-order choice has to be fixed before the ring is used.
 * Returns 0 on success, -EFAULT on bad user pointer, -EINVAL on a value
 * that is neither VHOST_VRING_LITTLE_ENDIAN nor VHOST_VRING_BIG_ENDIAN.
 */
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	switch (s.num) {
	case VHOST_VRING_BIG_ENDIAN:
		vhost_enable_cross_endian_big(vq);
		return 0;
	case VHOST_VRING_LITTLE_ENDIAN:
		vhost_enable_cross_endian_little(vq);
		return 0;
	default:
		return -EINVAL;
	}
}
97
/*
 * VHOST_GET_VRING_ENDIAN handler: report the current legacy byte-order
 * setting (vq->user_be) for ring @idx back to userspace.
 */
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	return copy_to_user(argp, &s, sizeof(s)) ? -EFAULT : 0;
}
111
vhost_init_is_le(struct vhost_virtqueue * vq)112 static void vhost_init_is_le(struct vhost_virtqueue *vq)
113 {
114 /* Note for legacy virtio: user_be is initialized at reset time
115 * according to the host endianness. If userspace does not set an
116 * explicit endianness, the default behavior is native endian, as
117 * expected by legacy virtio.
118 */
119 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
120 }
121 #else
/* No-op: cross-endian legacy support compiled out. */
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}
125
/* Cross-endian support compiled out: the ioctl is not implemented. */
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}
130
/* Cross-endian support compiled out: the ioctl is not implemented. */
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}
136
vhost_init_is_le(struct vhost_virtqueue * vq)137 static void vhost_init_is_le(struct vhost_virtqueue *vq)
138 {
139 vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
140 || virtio_legacy_is_little_endian();
141 }
142 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
143
/* Recompute is_le on device reset; same rule as initialization. */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}
148
/*
 * On-stack flush token: a work item plus the completion the flusher
 * waits on (see __vhost_worker_flush()/vhost_flush_work()).
 */
struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};
153
vhost_flush_work(struct vhost_work * work)154 static void vhost_flush_work(struct vhost_work *work)
155 {
156 struct vhost_flush_struct *s;
157
158 s = container_of(work, struct vhost_flush_struct, work);
159 complete(&s->wait_event);
160 }
161
/*
 * poll_table callback used by vfs_poll(): remember the waitqueue head
 * and hook our wait entry onto it so vhost_poll_wakeup() fires on events.
 */
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll = container_of(pt, struct vhost_poll, table);

	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}
171
/*
 * Waitqueue wakeup callback: if the event matches our mask, hand the
 * work to the worker thread, or run it inline for devices that opted
 * out of workers (dev->use_worker == false).
 */
static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (key_to_poll(key) & poll->mask) {
		if (poll->dev->use_worker)
			vhost_poll_queue(poll);
		else
			poll->work.fn(&poll->work);
	}

	return 0;
}
188
/* Prepare a work item: set its callback and mark it as not queued. */
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	work->fn = fn;
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
}
EXPORT_SYMBOL_GPL(vhost_work_init);
195
196 /* Init poll structure */
vhost_poll_init(struct vhost_poll * poll,vhost_work_fn_t fn,__poll_t mask,struct vhost_dev * dev,struct vhost_virtqueue * vq)197 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
198 __poll_t mask, struct vhost_dev *dev,
199 struct vhost_virtqueue *vq)
200 {
201 init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
202 init_poll_funcptr(&poll->table, vhost_poll_func);
203 poll->mask = mask;
204 poll->dev = dev;
205 poll->wqh = NULL;
206 poll->vq = vq;
207
208 vhost_work_init(&poll->work, fn);
209 }
210 EXPORT_SYMBOL_GPL(vhost_poll_init);
211
212 /* Start polling a file. We add ourselves to file's wait queue. The caller must
213 * keep a reference to a file until after vhost_poll_stop is called. */
vhost_poll_start(struct vhost_poll * poll,struct file * file)214 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
215 {
216 __poll_t mask;
217
218 if (poll->wqh)
219 return 0;
220
221 mask = vfs_poll(file, &poll->table);
222 if (mask)
223 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
224 if (mask & EPOLLERR) {
225 vhost_poll_stop(poll);
226 return -EINVAL;
227 }
228
229 return 0;
230 }
231 EXPORT_SYMBOL_GPL(vhost_poll_start);
232
233 /* Stop polling a file. After this function returns, it becomes safe to drop the
234 * file reference. You must also flush afterwards. */
vhost_poll_stop(struct vhost_poll * poll)235 void vhost_poll_stop(struct vhost_poll *poll)
236 {
237 if (poll->wqh) {
238 remove_wait_queue(poll->wqh, &poll->wait);
239 poll->wqh = NULL;
240 }
241 }
242 EXPORT_SYMBOL_GPL(vhost_poll_stop);
243
/*
 * Queue @work on @worker unless it is already pending.
 *
 * VHOST_WORK_QUEUED serializes concurrent queueing: only the caller that
 * wins the test_and_set_bit() race links the node into the lockless list
 * and wakes the worker, so a work item is on at most one list at a time.
 */
static void vhost_worker_queue(struct vhost_worker *worker,
			       struct vhost_work *work)
{
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &worker->work_list);
		worker->ops->wakeup(worker);
	}
}
256
vhost_vq_work_queue(struct vhost_virtqueue * vq,struct vhost_work * work)257 bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
258 {
259 struct vhost_worker *worker;
260 bool queued = false;
261
262 rcu_read_lock();
263 worker = rcu_dereference(vq->worker);
264 if (worker) {
265 queued = true;
266 vhost_worker_queue(worker, work);
267 }
268 rcu_read_unlock();
269
270 return queued;
271 }
272 EXPORT_SYMBOL_GPL(vhost_vq_work_queue);
273
/**
 * __vhost_worker_flush - flush a worker
 * @worker: worker to flush
 *
 * The worker's flush_mutex must be held.
 *
 * Queues a flush token and waits until the worker has run it, which
 * guarantees all previously queued works have completed. Skipped when
 * nothing is attached or the worker was killed (its list is drained by
 * vhost_worker_killed() instead). Note the mutex is dropped across the
 * wait, so worker state may change while we sleep.
 */
static void __vhost_worker_flush(struct vhost_worker *worker)
{
	struct vhost_flush_struct flush;

	if (!worker->attachment_cnt || worker->killed)
		return;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);

	vhost_worker_queue(worker, &flush.work);
	/*
	 * Drop mutex in case our worker is killed and it needs to take the
	 * mutex to force cleanup.
	 */
	mutex_unlock(&worker->mutex);
	wait_for_completion(&flush.wait_event);
	mutex_lock(&worker->mutex);
}
299
/* Flush @worker, taking its mutex around the wait. */
static void vhost_worker_flush(struct vhost_worker *worker)
{
	mutex_lock(&worker->mutex);
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);
}
306
vhost_dev_flush(struct vhost_dev * dev)307 void vhost_dev_flush(struct vhost_dev *dev)
308 {
309 struct vhost_worker *worker;
310 unsigned long i;
311
312 xa_for_each(&dev->worker_xa, i, worker)
313 vhost_worker_flush(worker);
314 }
315 EXPORT_SYMBOL_GPL(vhost_dev_flush);
316
317 /* A lockless hint for busy polling code to exit the loop */
vhost_vq_has_work(struct vhost_virtqueue * vq)318 bool vhost_vq_has_work(struct vhost_virtqueue *vq)
319 {
320 struct vhost_worker *worker;
321 bool has_work = false;
322
323 rcu_read_lock();
324 worker = rcu_dereference(vq->worker);
325 if (worker && !llist_empty(&worker->work_list))
326 has_work = true;
327 rcu_read_unlock();
328
329 return has_work;
330 }
331 EXPORT_SYMBOL_GPL(vhost_vq_has_work);
332
/* Queue the poll's work on the worker of its associated virtqueue. */
void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
338
__vhost_vq_meta_reset(struct vhost_virtqueue * vq)339 static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
340 {
341 int j;
342
343 for (j = 0; j < VHOST_NUM_ADDRS; j++)
344 vq->meta_iotlb[j] = NULL;
345 }
346
vhost_vq_meta_reset(struct vhost_dev * d)347 static void vhost_vq_meta_reset(struct vhost_dev *d)
348 {
349 int i;
350
351 for (i = 0; i < d->nvqs; ++i)
352 __vhost_vq_meta_reset(d->vqs[i]);
353 }
354
vhost_vring_call_reset(struct vhost_vring_call * call_ctx)355 static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
356 {
357 call_ctx->ctx = NULL;
358 memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
359 }
360
vhost_vq_is_setup(struct vhost_virtqueue * vq)361 bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
362 {
363 return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
364 }
365 EXPORT_SYMBOL_GPL(vhost_vq_is_setup);
366
/*
 * Return a virtqueue to its pristine, unconfigured state: no ring
 * addresses, no backend, no eventfds, no worker, default endianness,
 * and all cached metadata translations dropped.
 */
static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->next_avail_head = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	virtio_features_zero(vq->acked_features_array);
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->log_ctx = NULL;
	vhost_disable_cross_endian(vq);
	vhost_reset_is_le(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	rcu_assign_pointer(vq->worker, NULL);
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}
399
/*
 * Main loop of a kthread-backed worker: adopt the owner's mm, then
 * repeatedly drain the lockless work list, sleeping when it is empty,
 * until kthread_stop() is requested.
 */
static int vhost_run_work_kthread_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct vhost_dev *dev = worker->dev;
	struct llist_node *node;

	/* Work callbacks touch guest memory, so run with the owner's mm. */
	kthread_use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		}
		node = llist_del_all(&worker->work_list);
		if (!node)
			schedule();

		/*
		 * llist_del_all() returns items newest-first; reverse so
		 * works run in queueing order. A NULL node (we just woke
		 * from schedule()) is handled fine by both helpers below.
		 */
		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			__set_current_state(TASK_RUNNING);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}
	kthread_unuse_mm(dev->mm);

	return 0;
}
437
/*
 * One iteration of a vhost_task-backed worker: drain whatever is on the
 * work list and run it in queueing order.
 *
 * Returns true if any work was found (the vhost_task core uses this to
 * decide whether to sleep). Also called directly by vhost_worker_killed()
 * to drain stragglers during teardown.
 */
static bool vhost_run_work_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	node = llist_del_all(&worker->work_list);
	if (node) {
		__set_current_state(TASK_RUNNING);

		/* llist_del_all() yields newest-first; restore FIFO order. */
		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}

	return !!node;
}
462
/*
 * Called when a vhost_task worker is killed (e.g. by a fatal signal to
 * the owner): mark the worker dead, detach it from every vq that still
 * points at it, then drain any remaining queued work so flushers waiting
 * in __vhost_worker_flush() are released.
 *
 * Lock order: worker->mutex outside vq->mutex, matching
 * __vhost_vq_attach_worker().
 */
static void vhost_worker_killed(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_dev *dev = worker->dev;
	struct vhost_virtqueue *vq;
	int i, attach_cnt = 0;

	mutex_lock(&worker->mutex);
	worker->killed = true;

	for (i = 0; i < dev->nvqs; i++) {
		vq = dev->vqs[i];

		mutex_lock(&vq->mutex);
		if (worker ==
		    rcu_dereference_check(vq->worker,
					  lockdep_is_held(&vq->mutex))) {
			rcu_assign_pointer(vq->worker, NULL);
			attach_cnt++;
		}
		mutex_unlock(&vq->mutex);
	}

	worker->attachment_cnt -= attach_cnt;
	/* Wait for in-flight RCU readers that may still queue to us. */
	if (attach_cnt)
		synchronize_rcu();
	/*
	 * Finish vhost_worker_flush calls and any other works that snuck in
	 * before the synchronize_rcu.
	 */
	vhost_run_work_list(worker);
	mutex_unlock(&worker->mutex);
}
496
vhost_vq_free_iovecs(struct vhost_virtqueue * vq)497 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
498 {
499 kfree(vq->indirect);
500 vq->indirect = NULL;
501 kfree(vq->log);
502 vq->log = NULL;
503 kfree(vq->heads);
504 vq->heads = NULL;
505 kfree(vq->nheads);
506 vq->nheads = NULL;
507 }
508
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_objs(*vq->indirect, UIO_MAXIOV);
		vq->log = kmalloc_objs(*vq->log, dev->iov_limit);
		vq->heads = kmalloc_objs(*vq->heads, dev->iov_limit);
		vq->nheads = kmalloc_array(dev->iov_limit, sizeof(*vq->nheads),
					   GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads || !vq->nheads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	/*
	 * Unwind from the failing vq (inclusive - it may hold partial
	 * allocations) back to vq 0; vhost_vq_free_iovecs() tolerates
	 * NULL pointers.
	 */
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}
532
vhost_dev_free_iovecs(struct vhost_dev * dev)533 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
534 {
535 int i;
536
537 for (i = 0; i < dev->nvqs; ++i)
538 vhost_vq_free_iovecs(dev->vqs[i]);
539 }
540
vhost_exceeds_weight(struct vhost_virtqueue * vq,int pkts,int total_len)541 bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
542 int pkts, int total_len)
543 {
544 struct vhost_dev *dev = vq->dev;
545
546 if ((dev->byte_weight && total_len >= dev->byte_weight) ||
547 pkts >= dev->weight) {
548 vhost_poll_queue(&vq->poll);
549 return true;
550 }
551
552 return false;
553 }
554 EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
555
vhost_get_avail_size(struct vhost_virtqueue * vq,unsigned int num)556 static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
557 unsigned int num)
558 {
559 size_t event __maybe_unused =
560 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
561
562 return size_add(struct_size(vq->avail, ring, num), event);
563 }
564
vhost_get_used_size(struct vhost_virtqueue * vq,unsigned int num)565 static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
566 unsigned int num)
567 {
568 size_t event __maybe_unused =
569 vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
570
571 return size_add(struct_size(vq->used, ring, num), event);
572 }
573
vhost_get_desc_size(struct vhost_virtqueue * vq,unsigned int num)574 static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
575 unsigned int num)
576 {
577 return sizeof(*vq->desc) * num;
578 }
579
/*
 * One-time initialization of a vhost device.
 *
 * @vqs/@nvqs: caller-owned virtqueue array; each vq is reset and, when
 *             it has a handle_kick callback, wired to an EPOLLIN poll.
 * @iov_limit/@weight/@byte_weight: per-run resource limits (see
 *             vhost_exceeds_weight()).
 * @use_worker: whether work runs on worker threads or inline from wakeups.
 * @msg_handler: optional driver hook for IOTLB messages.
 *
 * No owner/mm is bound here - that happens in vhost_dev_set_owner().
 */
void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	/* Worker flavor default; may be overridden per-device later. */
	dev->fork_owner = fork_from_owner_default;
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);
	xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->nheads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev, vq);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
624
625 /* Caller should have device mutex */
vhost_dev_check_owner(struct vhost_dev * dev)626 long vhost_dev_check_owner(struct vhost_dev *dev)
627 {
628 /* Are you the owner? If not, I don't think you mean to do that */
629 return dev->mm == current->mm ? 0 : -EPERM;
630 }
631 EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
632
/*
 * On-stack token for attaching a worker to the owner's cgroups: the work
 * item, the owning task whose cgroups to join, and the attach result.
 */
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};
638
vhost_attach_cgroups_work(struct vhost_work * work)639 static void vhost_attach_cgroups_work(struct vhost_work *work)
640 {
641 struct vhost_attach_cgroups_struct *s;
642
643 s = container_of(work, struct vhost_attach_cgroups_struct, work);
644 s->ret = cgroup_attach_task_all(s->owner, current);
645 }
646
/*
 * Queue a cgroup-attach work on @worker and wait for it to run, so the
 * worker thread inherits the caller's cgroups. Returns the result of
 * cgroup_attach_task_all() as reported by the work.
 */
static int vhost_attach_task_to_cgroups(struct vhost_worker *worker)
{
	struct vhost_attach_cgroups_struct attach;
	int saved_cnt;

	attach.owner = current;

	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_worker_queue(worker, &attach.work);

	mutex_lock(&worker->mutex);

	/*
	 * Bypass attachment_cnt check in __vhost_worker_flush:
	 * Temporarily change it to INT_MAX to bypass the check
	 * (this worker has no vqs attached yet, so the count is 0
	 * and the flush would otherwise return without waiting).
	 */
	saved_cnt = worker->attachment_cnt;
	worker->attachment_cnt = INT_MAX;
	__vhost_worker_flush(worker);
	worker->attachment_cnt = saved_cnt;

	mutex_unlock(&worker->mutex);

	return attach.ret;
}
672
673 /* Caller should have device mutex */
vhost_dev_has_owner(struct vhost_dev * dev)674 bool vhost_dev_has_owner(struct vhost_dev *dev)
675 {
676 return dev->mm;
677 }
678 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
679
vhost_attach_mm(struct vhost_dev * dev)680 static void vhost_attach_mm(struct vhost_dev *dev)
681 {
682 /* No owner, become one */
683 if (dev->use_worker) {
684 dev->mm = get_task_mm(current);
685 } else {
686 /* vDPA device does not use worker thread, so there's
687 * no need to hold the address space for mm. This helps
688 * to avoid deadlock in the case of mmap() which may
689 * hold the refcnt of the file and depends on release
690 * method to remove vma.
691 */
692 dev->mm = current->mm;
693 mmgrab(dev->mm);
694 }
695 }
696
vhost_detach_mm(struct vhost_dev * dev)697 static void vhost_detach_mm(struct vhost_dev *dev)
698 {
699 if (!dev->mm)
700 return;
701
702 if (dev->use_worker)
703 mmput(dev->mm);
704 else
705 mmdrop(dev->mm);
706
707 dev->mm = NULL;
708 }
709
/*
 * Tear down one worker: unpublish it from the device's xarray, stop its
 * backing task/kthread, then free it. The work list must already be
 * empty - nothing may queue to it anymore.
 */
static void vhost_worker_destroy(struct vhost_dev *dev,
				 struct vhost_worker *worker)
{
	if (!worker)
		return;

	WARN_ON(!llist_empty(&worker->work_list));
	xa_erase(&dev->worker_xa, worker->id);
	worker->ops->stop(worker);
	kfree(worker);
}
721
/*
 * Device teardown: detach every vq from its worker, then destroy all
 * workers still registered in the xarray. No-op for devices that never
 * use workers.
 */
static void vhost_workers_free(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	if (!dev->use_worker)
		return;

	for (i = 0; i < dev->nvqs; i++)
		rcu_assign_pointer(dev->vqs[i]->worker, NULL);
	/*
	 * Free the default worker we created and cleanup workers userspace
	 * created but couldn't clean up (it forgot or crashed).
	 */
	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_destroy(dev, worker);
	xa_destroy(&dev->worker_xa);
}
740
vhost_task_wakeup(struct vhost_worker * worker)741 static void vhost_task_wakeup(struct vhost_worker *worker)
742 {
743 return vhost_task_wake(worker->vtsk);
744 }
745
/* ops->wakeup for kthread-backed workers. */
static void vhost_kthread_wakeup(struct vhost_worker *worker)
{
	wake_up_process(worker->kthread_task);
}
750
vhost_task_do_stop(struct vhost_worker * worker)751 static void vhost_task_do_stop(struct vhost_worker *worker)
752 {
753 return vhost_task_stop(worker->vtsk);
754 }
755
/* ops->stop for kthread-backed workers. */
static void vhost_kthread_do_stop(struct vhost_worker *worker)
{
	kthread_stop(worker->kthread_task);
}
760
/*
 * ops->create for vhost_task workers: spawn the task, start it, then
 * publish the worker in the device xarray and record the allocated id.
 * On xa_alloc failure the just-started task is stopped again.
 */
static int vhost_task_worker_create(struct vhost_worker *worker,
				    struct vhost_dev *dev, const char *name)
{
	struct vhost_task *vtsk;
	u32 id;
	int ret;

	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
				 worker, name);
	if (IS_ERR(vtsk))
		return PTR_ERR(vtsk);

	worker->vtsk = vtsk;
	vhost_task_start(vtsk);
	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0) {
		vhost_task_do_stop(worker);
		return ret;
	}
	worker->id = id;
	return 0;
}
783
/*
 * ops->create for kthread workers: spawn and wake the kthread, publish
 * it in the device xarray, then migrate it into the owner's cgroups.
 * Unwinds with goto-cleanup on any failure.
 */
static int vhost_kthread_worker_create(struct vhost_worker *worker,
				       struct vhost_dev *dev, const char *name)
{
	struct task_struct *task;
	u32 id;
	int ret;

	task = kthread_create(vhost_run_work_kthread_list, worker, "%s", name);
	if (IS_ERR(task))
		return PTR_ERR(task);

	worker->kthread_task = task;
	wake_up_process(task);
	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0)
		goto stop_worker;

	ret = vhost_attach_task_to_cgroups(worker);
	if (ret)
		goto free_id;

	worker->id = id;
	return 0;

free_id:
	xa_erase(&dev->worker_xa, id);
stop_worker:
	vhost_kthread_do_stop(worker);
	return ret;
}
814
/* Worker backend using plain kthreads (legacy fork_owner=false mode). */
static const struct vhost_worker_ops kthread_ops = {
	.create = vhost_kthread_worker_create,
	.stop = vhost_kthread_do_stop,
	.wakeup = vhost_kthread_wakeup,
};

/* Worker backend using vhost_tasks forked from the owner (default). */
static const struct vhost_worker_ops vhost_task_ops = {
	.create = vhost_task_worker_create,
	.stop = vhost_task_do_stop,
	.wakeup = vhost_task_wakeup,
};
826
/*
 * Allocate and start a new worker for @dev, picking the vhost_task or
 * kthread backend based on dev->fork_owner. Returns the worker, or NULL
 * on any failure (allocation or backend create).
 */
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	char name[TASK_COMM_LEN];
	int ret;
	const struct vhost_worker_ops *ops = dev->fork_owner ? &vhost_task_ops :
							       &kthread_ops;

	worker = kzalloc_obj(*worker, GFP_KERNEL_ACCOUNT);
	if (!worker)
		return NULL;

	worker->dev = dev;
	worker->ops = ops;
	/* Thread is named after the owning process, e.g. "vhost-1234". */
	snprintf(name, sizeof(name), "vhost-%d", current->pid);

	mutex_init(&worker->mutex);
	init_llist_head(&worker->work_list);
	worker->kcov_handle = kcov_common_handle();
	ret = ops->create(worker, dev, name);
	if (ret < 0)
		goto free_worker;

	return worker;

free_worker:
	kfree(worker);
	return NULL;
}
856
/* Caller must have device mutex */
/*
 * Switch @vq to @worker, flushing the previously attached worker when
 * needed so that no work queued to it is lost.
 *
 * Lock order: worker->mutex is taken before vq->mutex, mirroring
 * vhost_worker_killed(). A killed worker is never attached to (or
 * flushed) - its teardown path owns the cleanup.
 */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				     struct vhost_worker *worker)
{
	struct vhost_worker *old_worker;

	mutex_lock(&worker->mutex);
	if (worker->killed) {
		mutex_unlock(&worker->mutex);
		return;
	}

	mutex_lock(&vq->mutex);

	old_worker = rcu_dereference_check(vq->worker,
					   lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->worker, worker);
	worker->attachment_cnt++;

	if (!old_worker) {
		/* First attachment: nothing to hand over. */
		mutex_unlock(&vq->mutex);
		mutex_unlock(&worker->mutex);
		return;
	}
	mutex_unlock(&vq->mutex);
	mutex_unlock(&worker->mutex);

	/*
	 * Take the worker mutex to make sure we see the work queued from
	 * device wide flushes which doesn't use RCU for execution.
	 */
	mutex_lock(&old_worker->mutex);
	if (old_worker->killed) {
		mutex_unlock(&old_worker->mutex);
		return;
	}

	/*
	 * We don't want to call synchronize_rcu for every vq during setup
	 * because it will slow down VM startup. If we haven't done
	 * VHOST_SET_VRING_KICK and not done the driver specific
	 * SET_ENDPOINT/RUNNING then we can skip the sync since there will
	 * not be any works queued for scsi and net.
	 */
	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq) && !vq->kick) {
		mutex_unlock(&vq->mutex);

		old_worker->attachment_cnt--;
		mutex_unlock(&old_worker->mutex);
		/*
		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
		 * Warn if it adds support for multiple workers but forgets to
		 * handle the early queueing case.
		 */
		WARN_ON(!old_worker->attachment_cnt &&
			!llist_empty(&old_worker->work_list));
		return;
	}
	mutex_unlock(&vq->mutex);

	/* Make sure new vq queue/flush/poll calls see the new worker */
	synchronize_rcu();
	/* Make sure whatever was queued gets run */
	__vhost_worker_flush(old_worker);
	old_worker->attachment_cnt--;
	mutex_unlock(&old_worker->mutex);
}
925
926 /* Caller must have device mutex */
vhost_vq_attach_worker(struct vhost_virtqueue * vq,struct vhost_vring_worker * info)927 static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
928 struct vhost_vring_worker *info)
929 {
930 unsigned long index = info->worker_id;
931 struct vhost_dev *dev = vq->dev;
932 struct vhost_worker *worker;
933
934 if (!dev->use_worker)
935 return -EINVAL;
936
937 worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
938 if (!worker || worker->id != info->worker_id)
939 return -ENODEV;
940
941 __vhost_vq_attach_worker(vq, worker);
942 return 0;
943 }
944
945 /* Caller must have device mutex */
vhost_new_worker(struct vhost_dev * dev,struct vhost_worker_state * info)946 static int vhost_new_worker(struct vhost_dev *dev,
947 struct vhost_worker_state *info)
948 {
949 struct vhost_worker *worker;
950
951 worker = vhost_worker_create(dev);
952 if (!worker)
953 return -ENOMEM;
954
955 info->worker_id = worker->id;
956 return 0;
957 }
958
/* Caller must have device mutex */
/*
 * VHOST_FREE_WORKER helper: destroy the worker named by info->worker_id.
 * Fails with -ENODEV if it doesn't exist and -EBUSY while any vq is
 * still attached to it (or it has been killed - its own teardown path
 * cleans up then).
 */
static int vhost_free_worker(struct vhost_dev *dev,
			     struct vhost_worker_state *info)
{
	unsigned long index = info->worker_id;
	struct vhost_worker *worker;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	mutex_lock(&worker->mutex);
	if (worker->attachment_cnt || worker->killed) {
		mutex_unlock(&worker->mutex);
		return -EBUSY;
	}
	/*
	 * A flush might have raced and snuck in before attachment_cnt was set
	 * to zero. Make sure flushes are flushed from the queue before
	 * freeing.
	 */
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);

	vhost_worker_destroy(dev, worker);
	return 0;
}
986
/*
 * Read a virtqueue index from userspace and resolve it to a vq.
 * Returns 0 with *vq/*id filled in, -EFAULT on a bad pointer, or
 * -ENOBUFS when the index is out of range.
 */
static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
				  struct vhost_virtqueue **vq, u32 *id)
{
	u32 __user *user_idx = argp;
	long err;
	u32 idx;

	err = get_user(idx, user_idx);
	if (err < 0)
		return err;

	if (idx >= dev->nvqs)
		return -ENOBUFS;

	/* Clamp under speculation before using idx as an array index. */
	idx = array_index_nospec(idx, dev->nvqs);

	*vq = dev->vqs[idx];
	*id = idx;
	return 0;
}
1007
/* Caller must have device mutex */
/*
 * Dispatch the worker-management ioctls. Device-level ioctls
 * (NEW/FREE_WORKER) are handled directly; vring-level ioctls
 * (ATTACH/GET_VRING_WORKER) first resolve the vq index supplied by
 * userspace. All of them require an established owner.
 */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp)
{
	struct vhost_vring_worker ring_worker;
	struct vhost_worker_state state;
	struct vhost_worker *worker;
	struct vhost_virtqueue *vq;
	long ret;
	u32 idx;

	if (!dev->use_worker)
		return -EINVAL;

	if (!vhost_dev_has_owner(dev))
		return -EINVAL;

	ret = vhost_dev_check_owner(dev);
	if (ret)
		return ret;

	switch (ioctl) {
	/* dev worker ioctls */
	case VHOST_NEW_WORKER:
		/*
		 * vhost_tasks will account for worker threads under the parent's
		 * NPROC value but kthreads do not. To avoid userspace overflowing
		 * the system with worker threads fork_owner must be true.
		 */
		if (!dev->fork_owner)
			return -EFAULT;

		ret = vhost_new_worker(dev, &state);
		if (!ret && copy_to_user(argp, &state, sizeof(state)))
			ret = -EFAULT;
		return ret;
	case VHOST_FREE_WORKER:
		if (copy_from_user(&state, argp, sizeof(state)))
			return -EFAULT;
		return vhost_free_worker(dev, &state);
	/* vring worker ioctls */
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
	if (ret)
		return ret;

	switch (ioctl) {
	case VHOST_ATTACH_VRING_WORKER:
		if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
			ret = -EFAULT;
			break;
		}

		ret = vhost_vq_attach_worker(vq, &ring_worker);
		break;
	case VHOST_GET_VRING_WORKER:
		/* dev->mutex keeps the worker pointer stable here. */
		worker = rcu_dereference_check(vq->worker,
					       lockdep_is_held(&dev->mutex));
		if (!worker) {
			ret = -EINVAL;
			break;
		}

		ring_worker.index = idx;
		ring_worker.worker_id = worker->id;

		if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
1091
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	int err, i;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* Bind the caller's mm to the device for future user accesses. */
	vhost_attach_mm(dev);

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_iovecs;

	if (dev->use_worker) {
		/*
		 * This should be done last, because vsock can queue work
		 * before VHOST_SET_OWNER so it simplifies the failure path
		 * below since we don't have to worry about vsock queueing
		 * while we free the worker.
		 */
		worker = vhost_worker_create(dev);
		if (!worker) {
			err = -ENOMEM;
			goto err_worker;
		}

		/* All vqs start out sharing the single default worker. */
		for (i = 0; i < dev->nvqs; i++)
			__vhost_vq_attach_worker(dev->vqs[i], worker);
	}

	return 0;

	/* Unwind in reverse order of setup above. */
err_worker:
	vhost_dev_free_iovecs(dev);
err_iovecs:
	vhost_detach_mm(dev);
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
1137
/* Allocate an IOTLB bounded by the module parameter; once the limit is
 * reached the oldest entries are retired (VHOST_IOTLB_FLAG_RETIRE). */
static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}
1143
/* Pre-allocate the replacement memory table for VHOST_RESET_OWNER so
 * the reset step itself cannot fail with -ENOMEM. */
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
1149
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	/* Release the old owner's worker, mm, iovecs and tables. */
	vhost_dev_cleanup(dev);

	/* Back to the module-default worker forking policy. */
	dev->fork_owner = fork_from_owner_default;
	/* Install the table pre-allocated by ..._reset_owner_prepare(). */
	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
1166
vhost_dev_stop(struct vhost_dev * dev)1167 void vhost_dev_stop(struct vhost_dev *dev)
1168 {
1169 int i;
1170
1171 for (i = 0; i < dev->nvqs; ++i) {
1172 if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
1173 vhost_poll_stop(&dev->vqs[i]->poll);
1174 }
1175
1176 vhost_dev_flush(dev);
1177 }
1178 EXPORT_SYMBOL_GPL(vhost_dev_stop);
1179
/* Free every queued IOTLB message, both unread (read_list) and awaiting
 * a reply (pending_list). Used on cleanup and backend feature changes. */
void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	/* iotlb_lock protects both message lists. */
	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
EXPORT_SYMBOL_GPL(vhost_clear_msg);
1199
/* Release everything acquired since VHOST_SET_OWNER: per-vq eventfds,
 * iovecs, memory tables, queued messages, workers and the owner mm.
 * Caller should hold the device mutex; vqs must be stopped. */
void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	/* Wake any reader blocked in vhost_chr_read_iter(). */
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	/* Workers last: nothing above may queue work any more. */
	vhost_workers_free(dev);
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
1228
/* Is the dirty-log bitmap at @log_base addressable for guest addresses
 * [@addr, @addr + @sz)? One log bit covers VHOST_PAGE_SIZE bytes. */
static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	/* Byte offset into the bitmap of the bit covering @addr. */
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	/* Size of the bitmap slice covering @sz bytes, rounded up. */
	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
1241
/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
	/* Values must fit in an unsigned long (matters on 32-bit). */
	if (uaddr > ULONG_MAX || size > ULONG_MAX)
		return true;

	/* An empty range cannot overflow. */
	if (!size)
		return false;

	/* True iff uaddr + size - 1 would exceed ULONG_MAX. */
	return uaddr > ULONG_MAX - size + 1;
}
1253
1254 /* Caller should have vq mutex and device mutex. */
vq_memory_access_ok(void __user * log_base,struct vhost_iotlb * umem,int log_all)1255 static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
1256 int log_all)
1257 {
1258 struct vhost_iotlb_map *map;
1259
1260 if (!umem)
1261 return false;
1262
1263 list_for_each_entry(map, &umem->list, link) {
1264 unsigned long a = map->addr;
1265
1266 if (vhost_overflow(map->addr, map->size))
1267 return false;
1268
1269
1270 if (!access_ok((void __user *)a, map->size))
1271 return false;
1272 else if (log_all && !log_access_ok(log_base,
1273 map->start,
1274 map->size))
1275 return false;
1276 }
1277 return true;
1278 }
1279
vhost_vq_meta_fetch(struct vhost_virtqueue * vq,u64 addr,unsigned int size,int type)1280 static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
1281 u64 addr, unsigned int size,
1282 int type)
1283 {
1284 const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
1285
1286 if (!map)
1287 return NULL;
1288
1289 return (void __user *)(uintptr_t)(map->addr + addr - map->start);
1290 }
1291
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		/* Take each vq mutex in turn: private_data and log_base
		 * are protected by it. */
		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}
1317
1318 static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
1319 struct iovec iov[], int iov_size, int access);
1320
/* Copy @size bytes from kernel @from to vq memory @to, translating
 * through the IOTLB when one is in use. Returns 0 on success; on the
 * IOTLB path a short copy_to_iter() result is returned as a positive
 * byte count and translation failures as a negative errno. */
static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * could be access through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		struct iov_iter t;
		/* Fast path: the used-ring metadata mapping is cached. */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}
1355
/* Copy @size bytes from vq memory @from to kernel @to, translating
 * through the IOTLB when one is in use. Mirror of vhost_copy_to_user()
 * with the descriptor-table metadata cache and read access. */
static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that vq
		 * could be access through iotlb. So -EAGAIN should
		 * not happen in this case.
		 */
		/* Fast path: the desc-table metadata mapping is cached. */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size, vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr "
			       "%p size 0x%llx\n", from,
			       (unsigned long long) size);
			goto out;
		}
		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}
1395
/* Slow path of __vhost_get_user(): translate @addr through the IOTLB
 * and return the host user address, or NULL on failure. The mapping
 * must be a single contiguous region of exactly @size bytes so the
 * caller's get_user/put_user access stays atomic. */
static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	/* Refuse mappings split across regions: the access would not
	 * be a single atomic user access. */
	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr "
			"%p size 0x%llx\n", addr,
			(unsigned long long) size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}
1421
/* This function should be called after iotlb
 * prefetch, which means we're sure that vq
 * could be access through iotlb. So -EAGAIN should
 * not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr, unsigned int size,
					    int type)
{
	/* Try the cached metadata mapping first, fall back to a full
	 * IOTLB translation. */
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}
1438
/*
 * Store @x into the vq field at user address @ptr. Without an IOTLB
 * this is a plain put_user(); with one, @ptr is first translated via
 * __vhost_get_user() (the mapping must be resident — see
 * vq_meta_prefetch()). Evaluates to 0 or -EFAULT.
 */
#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})
1455
/* Publish the current avail index into the avail-event slot (the entry
 * past the used ring) for VIRTIO_RING_F_EVENT_IDX notification control. */
static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}
1461
/* Write @count used-ring elements starting at ring slot @idx. */
static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}
1469
/* Publish vq->used_flags (e.g. VRING_USED_F_NO_NOTIFY) to the guest. */
static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)

{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}
1476
/* Publish the last used index so the guest can reap completions. */
static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)

{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}
1483
/*
 * Read the vq field at user address @ptr into @x. Counterpart of
 * vhost_put_user(): plain get_user() without an IOTLB, otherwise the
 * address is translated via the @type metadata cache/IOTLB.
 * Evaluates to 0 or -EFAULT.
 */
#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

/* Convenience wrappers selecting the metadata region being read. */
#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
1507
vhost_dev_lock_vqs(struct vhost_dev * d)1508 static void vhost_dev_lock_vqs(struct vhost_dev *d)
1509 {
1510 int i = 0;
1511 for (i = 0; i < d->nvqs; ++i)
1512 mutex_lock_nested(&d->vqs[i]->mutex, i);
1513 }
1514
vhost_dev_unlock_vqs(struct vhost_dev * d)1515 static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1516 {
1517 int i = 0;
1518 for (i = 0; i < d->nvqs; ++i)
1519 mutex_unlock(&d->vqs[i]->mutex);
1520 }
1521
/* Refresh vq->avail_idx from the guest's avail ring.
 * Returns 1 if new entries are available, 0 if the ring is unchanged,
 * negative errno on access failure or a bogus index. */
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{
	__virtio16 idx;
	int r;

	r = vhost_get_avail(vq, idx, &vq->avail->idx);
	if (unlikely(r < 0)) {
		vq_err(vq, "Failed to access available index at %p (%d)\n",
		       &vq->avail->idx, r);
		return r;
	}

	/* Check it isn't doing very strange thing with available indexes */
	vq->avail_idx = vhost16_to_cpu(vq, idx);
	if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
		vq_err(vq, "Invalid available index change from %u to %u",
		       vq->last_avail_idx, vq->avail_idx);
		return -EINVAL;
	}

	/* We're done if there is nothing new */
	if (vq->avail_idx == vq->last_avail_idx)
		return 0;

	/*
	 * We updated vq->avail_idx so we need a memory barrier between
	 * the index read above and the caller reading avail ring entries.
	 */
	smp_rmb();
	return 1;
}
1553
/* Read the descriptor head the guest placed at avail ring slot @idx.
 * The ring size is a power of two, so masking wraps the index. */
static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}
1560
/* Read the guest's avail flags (e.g. VRING_AVAIL_F_NO_INTERRUPT). */
static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}
1566
/* Read the used-event index (stored past the avail ring) used for
 * VIRTIO_RING_F_EVENT_IDX interrupt suppression. */
static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}
1572
/* Read back the used index from the used ring. */
static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}
1578
/* Copy descriptor @idx out of the descriptor table. */
static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
1584
/* An IOTLB update arrived: wake every vq whose pending MISS request is
 * satisfied by the new [iova, iova + size) mapping. */
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
		/* Requeue the vq if the update covers the missed iova. */
		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}
1605
umem_access_ok(u64 uaddr,u64 size,int access)1606 static bool umem_access_ok(u64 uaddr, u64 size, int access)
1607 {
1608 unsigned long a = uaddr;
1609
1610 /* Make sure 64 bit math will not overflow. */
1611 if (vhost_overflow(uaddr, size))
1612 return false;
1613
1614 if ((access & VHOST_ACCESS_RO) &&
1615 !access_ok((void __user *)a, size))
1616 return false;
1617 if ((access & VHOST_ACCESS_WO) &&
1618 !access_ok((void __user *)a, size))
1619 return false;
1620 return true;
1621 }
1622
/* Default handler for userspace IOTLB messages (UPDATE/INVALIDATE).
 * Only address space 0 is supported here; devices with multiple address
 * spaces install their own dev->msg_handler. Takes the device mutex and
 * all vq mutexes so the metadata caches can be reset safely. */
static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	if (asid != 0)
		return -EINVAL;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		/* Cached metadata translations may now be stale. */
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		/* Wake vqs whose MISS this update resolves. */
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
/* Parse an IOTLB message written to the vhost chardev (V1 or V2 layout,
 * optionally carrying an ASID when the backend supports it) and hand it
 * to the device's message handler. Returns the full message size on
 * success or a negative errno. */
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;
	u32 asid = 0;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There maybe a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		if (vhost_backend_has_feature(dev->vqs[0],
					      VHOST_BACKEND_F_IOTLB_ASID)) {
			/* V2 with ASID support: an asid field follows
			 * the type directly. */
			ret = copy_from_iter(&asid, sizeof(asid), from);
			if (ret != sizeof(asid)) {
				ret = -EINVAL;
				goto done;
			}
			offset = 0;
		} else
			offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	/* Zero-sized updates are rejected outright. */
	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, asid, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, asid, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);
1736
vhost_chr_poll(struct file * file,struct vhost_dev * dev,poll_table * wait)1737 __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
1738 poll_table *wait)
1739 {
1740 __poll_t mask = 0;
1741
1742 poll_wait(file, &dev->wait, wait);
1743
1744 if (!list_empty(&dev->read_list))
1745 mask |= EPOLLIN | EPOLLRDNORM;
1746
1747 return mask;
1748 }
1749 EXPORT_SYMBOL(vhost_chr_poll);
1750
/* Deliver the next queued IOTLB message to userspace. Blocks unless
 * @noblock; MISS messages are moved to pending_list afterwards so the
 * vq can be re-queued when the reply (an IOTLB update) arrives. */
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Device torn down while we waited. */
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		/* Copy size depends on the negotiated message format. */
		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		/* Drop the node on short copy or non-MISS messages;
		 * a MISS stays pending until userspace replies. */
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);
1818
/* Queue an IOTLB MISS message for @iova/@access so userspace (read via
 * vhost_chr_read_iter()) can supply the missing translation. Uses the
 * V2 message layout when the backend negotiated it. */
static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}
1845
vq_access_ok(struct vhost_virtqueue * vq,unsigned int num,vring_desc_t __user * desc,vring_avail_t __user * avail,vring_used_t __user * used)1846 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
1847 vring_desc_t __user *desc,
1848 vring_avail_t __user *avail,
1849 vring_used_t __user *used)
1850
1851 {
1852 /* If an IOTLB device is present, the vring addresses are
1853 * GIOVAs. Access validation occurs at prefetch time. */
1854 if (vq->iotlb)
1855 return true;
1856
1857 return access_ok(desc, vhost_get_desc_size(vq, num)) &&
1858 access_ok(avail, vhost_get_avail_size(vq, num)) &&
1859 access_ok(used, vhost_get_used_size(vq, num));
1860 }
1861
vhost_vq_meta_update(struct vhost_virtqueue * vq,const struct vhost_iotlb_map * map,int type)1862 static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
1863 const struct vhost_iotlb_map *map,
1864 int type)
1865 {
1866 int access = (type == VHOST_ADDR_USED) ?
1867 VHOST_ACCESS_WO : VHOST_ACCESS_RO;
1868
1869 if (likely(map->perm & access))
1870 vq->meta_iotlb[type] = map;
1871 }
1872
/* Check that [addr, addr + len) is fully translated by the IOTLB with
 * @access permission. On a hole, queue a MISS and return false; if a
 * single mapping covers the whole range, cache it for @type. */
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
			    int access, u64 addr, u64 len, int type)
{
	const struct vhost_iotlb_map *map;
	struct vhost_iotlb *umem = vq->iotlb;
	u64 s = 0, size, orig_addr = addr, last = addr + len - 1;

	/* Already covered by the cached metadata mapping. */
	if (vhost_vq_meta_fetch(vq, addr, len, type))
		return true;

	while (len > s) {
		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			vhost_iotlb_miss(vq, addr, access);
			return false;
		} else if (!(map->perm & access)) {
			/* Report the possible access violation by
			 * request another translation from userspace.
			 */
			return false;
		}

		/* Bytes this mapping contributes from @addr onwards. */
		size = map->size - addr + map->start;

		/* One mapping spanning the entire range: cache it. */
		if (orig_addr == addr && size >= len)
			vhost_vq_meta_update(vq, map, type);

		s += size;
		addr += size;
	}

	return true;
}
1906
/* Ensure the desc, avail and used rings are all translated (and cached)
 * before fast-path metadata accesses. Returns 1 when ready, 0 when a
 * MISS was queued and the caller must wait for an IOTLB update. */
int vq_meta_prefetch(struct vhost_virtqueue *vq)
{
	unsigned int num = vq->num;

	/* No IOTLB: addresses were validated at setup time. */
	if (!vq->iotlb)
		return 1;

	return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
	       iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
			       vhost_get_avail_size(vq, num),
			       VHOST_ADDR_AVAIL) &&
	       iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
			       vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
1923
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
bool vhost_log_access_ok(struct vhost_dev *dev)
{
	/* log_all=1: every region must have an addressable log bitmap. */
	return memory_access_ok(dev, dev->umem, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
1931
/* When used-ring logging is enabled, verify the log bitmap covers the
 * whole used ring at @log_addr. */
static bool vq_log_used_access_ok(struct vhost_virtqueue *vq,
				  void __user *log_base,
				  bool log_used,
				  u64 log_addr)
{
	/* If an IOTLB device is present, log_addr is a GIOVA that
	 * will never be logged by log_used(). */
	if (vq->iotlb)
		return true;

	return !log_used || log_access_ok(log_base, log_addr,
					  vhost_get_used_size(vq, vq->num));
}
1945
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static bool vq_log_access_ok(struct vhost_virtqueue *vq,
			     void __user *log_base)
{
	/* Both the memory-table logging and used-ring logging paths
	 * must be addressable. */
	return vq_memory_access_ok(log_base, vq->umem,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr);
}
1955
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	if (!vq_log_access_ok(vq, vq->log_base))
		return false;

	/* Ring regions themselves must be accessible too. */
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
1966
/* VHOST_SET_MEM_TABLE: build a fresh IOTLB from the userspace region
 * list, validate it against every active vq, then swap it in under the
 * vq mutexes. Caller holds the device mutex. */
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem;
	struct vhost_memory_region *region;
	struct vhost_iotlb *newumem, *oldumem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	/* Copy the fixed-size header first to learn nregions. */
	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > max_mem_regions)
		return -E2BIG;
	newmem = kvzalloc_flex(*newmem, regions, mem.nregions);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	/* Then the flexible region array. */
	if (copy_from_user(newmem->regions, m->regions,
			   flex_array_size(newmem, regions, mem.nregions))) {
		kvfree(newmem);
		return -EFAULT;
	}

	newumem = iotlb_alloc();
	if (!newumem) {
		kvfree(newmem);
		return -ENOMEM;
	}

	for (region = newmem->regions;
	     region < newmem->regions + mem.nregions;
	     region++) {
		if (vhost_iotlb_add_range(newumem,
					  region->guest_phys_addr,
					  region->guest_phys_addr +
					  region->memory_size - 1,
					  region->userspace_addr,
					  VHOST_MAP_RW))
			goto err;
	}

	/* Reject tables that active vqs could not access. */
	if (!memory_access_ok(d, newumem, 0))
		goto err;

	oldumem = d->umem;
	d->umem = newumem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->umem = newumem;
		mutex_unlock(&d->vqs[i]->mutex);
	}

	kvfree(newmem);
	vhost_iotlb_free(oldumem);
	return 0;

err:
	vhost_iotlb_free(newumem);
	kvfree(newmem);
	return -EFAULT;
}
2032
/* VHOST_SET_VRING_NUM: set the ring size. Caller holds the vq mutex
 * (see vhost_vring_set_num_addr()). */
static long vhost_vring_set_num(struct vhost_dev *d,
				struct vhost_virtqueue *vq,
				void __user *argp)
{
	struct vhost_vring_state s;

	/* Resizing ring with an active backend?
	 * You don't want to do that. */
	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof s))
		return -EFAULT;

	/* Ring size must be a non-zero power of two and fit in 16 bits. */
	if (!s.num || s.num > 0xffff || (s.num & (s.num - 1)))
		return -EINVAL;
	vq->num = s.num;

	return 0;
}
2053
/* VHOST_SET_VRING_ADDR: set the desc/avail/used ring addresses and the
 * used-ring log address. Caller holds the vq mutex. */
static long vhost_vring_set_addr(struct vhost_dev *d,
				 struct vhost_virtqueue *vq,
				 void __user *argp)
{
	struct vhost_vring_addr a;

	if (copy_from_user(&a, argp, sizeof a))
		return -EFAULT;
	if (a.flags & ~(0x1 << VHOST_VRING_F_LOG))
		return -EOPNOTSUPP;

	/* For 32bit, verify that the top 32bits of the user
	   data are set to zero. */
	if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
	    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
	    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr)
		return -EFAULT;

	/* Make sure it's safe to cast pointers to vring types. */
	BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
	BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
	if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
	    (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
	    (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1)))
		return -EINVAL;

	/* We only verify access here if backend is configured.
	 * If it is not, we don't as size might not have been setup.
	 * We will verify when backend is configured. */
	if (vq->private_data) {
		if (!vq_access_ok(vq, vq->num,
			(void __user *)(unsigned long)a.desc_user_addr,
			(void __user *)(unsigned long)a.avail_user_addr,
			(void __user *)(unsigned long)a.used_user_addr))
			return -EINVAL;

		/* Also validate log access for used ring if enabled. */
		if (!vq_log_used_access_ok(vq, vq->log_base,
				a.flags & (0x1 << VHOST_VRING_F_LOG),
				a.log_guest_addr))
			return -EINVAL;
	}

	/* All checks passed: commit the new addresses. */
	vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
	vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
	vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
	vq->log_addr = a.log_guest_addr;
	vq->used = (void __user *)(unsigned long)a.used_user_addr;

	return 0;
}
2105
/* Dispatch the two vring-layout ioctls under the vq mutex. Only called
 * for VHOST_SET_VRING_NUM and VHOST_SET_VRING_ADDR, hence the BUG()
 * default. */
static long vhost_vring_set_num_addr(struct vhost_dev *d,
				     struct vhost_virtqueue *vq,
				     unsigned int ioctl,
				     void __user *argp)
{
	long r;

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		r = vhost_vring_set_num(d, vq, argp);
		break;
	case VHOST_SET_VRING_ADDR:
		r = vhost_vring_set_addr(d, vq, argp);
		break;
	default:
		BUG();
	}

	mutex_unlock(&vq->mutex);

	return r;
}
/* Dispatch a per-virtqueue ioctl. Looks up the target queue from the
 * index userspace passed at *argp, then handles the request under
 * vq->mutex. Returns 0 or a negative errno; -ENOIOCTLCMD when the
 * request is not a vring ioctl handled here.
 */
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	u32 idx;
	long r;

	r = vhost_get_vq_from_user(d, argp, &vq, &idx);
	if (r < 0)
		return r;

	/* NUM/ADDR need extra validation against an attached backend and
	 * take vq->mutex in their own helper. */
	if (ioctl == VHOST_SET_VRING_NUM ||
	    ioctl == VHOST_SET_VRING_ADDR) {
		return vhost_vring_set_num_addr(d, vq, ioctl, argp);
	}

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			/* Packed ring: low 16 bits carry the avail index,
			 * high 16 bits the used index. */
			vq->next_avail_head = vq->last_avail_idx =
					      s.num & 0xffff;
			vq->last_used_idx = (s.num >> 16) & 0xffff;
		} else {
			if (s.num > 0xffff) {
				r = -EINVAL;
				break;
			}
			vq->next_avail_head = vq->last_avail_idx = s.num;
		}
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
			s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
		else
			s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		/* VHOST_FILE_UNBIND detaches the current kick eventfd. */
		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			/* Remember old file in filep (fput below) and flag
			 * the poll stop/start performed after the switch. */
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}

		/* Old ctx ends up in "ctx" and is released below. */
		swap(ctx, vq->call_ctx.ctx);
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		swap(ctx, vq->error_ctx);
		break;
	case VHOST_SET_VRING_ENDIAN:
		r = vhost_set_vring_endian(vq, argp);
		break;
	case VHOST_GET_VRING_ENDIAN:
		r = vhost_get_vring_endian(vq, idx, argp);
		break;
	case VHOST_SET_VRING_BUSYLOOP_TIMEOUT:
		if (copy_from_user(&s, argp, sizeof(s))) {
			r = -EFAULT;
			break;
		}
		vq->busyloop_timeout = s.num;
		break;
	case VHOST_GET_VRING_BUSYLOOP_TIMEOUT:
		s.index = idx;
		s.num = vq->busyloop_timeout;
		if (copy_to_user(argp, &s, sizeof(s)))
			r = -EFAULT;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	/* Drop whatever eventfd ctx was swapped out above. */
	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	/* Make sure no queued work still references the old kick file. */
	if (pollstop && vq->handle_kick)
		vhost_dev_flush(vq->poll.dev);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
2269
vhost_init_device_iotlb(struct vhost_dev * d)2270 int vhost_init_device_iotlb(struct vhost_dev *d)
2271 {
2272 struct vhost_iotlb *niotlb, *oiotlb;
2273 int i;
2274
2275 niotlb = iotlb_alloc();
2276 if (!niotlb)
2277 return -ENOMEM;
2278
2279 oiotlb = d->iotlb;
2280 d->iotlb = niotlb;
2281
2282 for (i = 0; i < d->nvqs; ++i) {
2283 struct vhost_virtqueue *vq = d->vqs[i];
2284
2285 mutex_lock(&vq->mutex);
2286 vq->iotlb = niotlb;
2287 __vhost_vq_meta_reset(vq);
2288 mutex_unlock(&vq->mutex);
2289 }
2290
2291 vhost_iotlb_free(oiotlb);
2292
2293 return 0;
2294 }
2295 EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
2296
2297 /* Caller must have device mutex */
/* Device-level ioctl handler: ownership, fork-owner policy, memory table
 * and dirty-log setup. Per-vring requests are handled separately by
 * vhost_vring_ioctl(). Returns 0 or a negative errno.
 */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct eventfd_ctx *ctx;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

#ifdef CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL
	if (ioctl == VHOST_SET_FORK_FROM_OWNER) {
		/* Only allow modification before owner is set */
		if (vhost_dev_has_owner(d)) {
			r = -EBUSY;
			goto done;
		}
		u8 fork_owner_val;

		if (get_user(fork_owner_val, (u8 __user *)argp)) {
			r = -EFAULT;
			goto done;
		}
		/* Only the two defined policies are accepted. */
		if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
		    fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
			r = -EINVAL;
			goto done;
		}
		d->fork_owner = !!fork_owner_val;
		r = 0;
		goto done;
	}
	if (ioctl == VHOST_GET_FORK_FROM_OWNER) {
		u8 fork_owner_val = d->fork_owner;

		if (fork_owner_val != VHOST_FORK_OWNER_TASK &&
		    fork_owner_val != VHOST_FORK_OWNER_KTHREAD) {
			r = -EINVAL;
			goto done;
		}
		if (put_user(fork_owner_val, (u8 __user *)argp)) {
			r = -EFAULT;
			goto done;
		}
		r = 0;
		goto done;
	}
#endif

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		/* Reject log bases that don't fit in an unsigned long. */
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
		if (IS_ERR(ctx)) {
			r = PTR_ERR(ctx);
			break;
		}
		/* Old ctx lands in "ctx"; propagate the new one per-vq. */
		swap(ctx, d->log_ctx);
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
2407
2408 /* TODO: This is really inefficient. We need something like get_user()
2409 * (instruction directly accesses the data, with an exception table entry
2410 * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst.
2411 */
/* Set bit nr of the byte at user address addr in the dirty-log bitmap.
 * Pins the backing page, maps it, sets the bit (bit index is recomputed
 * relative to the page), then marks the page dirty and unpins it.
 * Returns 0 on success or the negative errno from pin_user_pages_fast().
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	/* Bit offset within the whole page: byte offset * 8 + bit in byte. */
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	/* kmap_local_page() replaces the deprecated kmap_atomic(): the
	 * mapping is thread-local and set_bit() does not require
	 * preemption or pagefaults to be disabled. */
	base = kmap_local_page(page);
	set_bit(bit, base);
	kunmap_local(base);
	unpin_user_pages_dirty_lock(&page, 1, true);
	return 0;
}
2430
/* Mark the dirty-log bits covering [write_address, write_address +
 * write_length) in the userspace bitmap at log_base. One bit covers
 * VHOST_PAGE_SIZE bytes. Returns 0 for zero length, otherwise 0 or a
 * negative errno from set_bit_to_user().
 */
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	/* Extend length so the loop also covers a leading partial page. */
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		/* Byte of the bitmap that holds this page's bit... */
		u64 log = base + write_page / 8;
		/* ...and the bit index within that byte. */
		int bit = write_page % 8;
		/* Bail out if the byte address overflows a pointer. */
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}
2456
/* Log a write of len bytes at host virtual address hva. The same HVA may
 * be mapped by several guest-physical ranges, so every umem entry that
 * intersects [hva, hva + len) gets its overlap logged in GPA terms.
 * Returns 0 on success, -EFAULT when no mapping covers part of the
 * range, or an error propagated from log_write().
 */
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
	struct vhost_iotlb *umem = vq->umem;
	struct vhost_iotlb_map *u;
	u64 start, end, l, min;
	int r;
	bool hit = false;

	while (len) {
		min = len;
		/* More than one GPAs can be mapped into a single HVA. So
		 * iterate all possible umems here to be safe.
		 */
		list_for_each_entry(u, &umem->list, link) {
			/* "- 1 +" form avoids overflow at the top of the
			 * address space. */
			if (u->addr > hva - 1 + len ||
			    u->addr - 1 + u->size < hva)
				continue;
			start = max(u->addr, hva);
			end = min(u->addr - 1 + u->size, hva - 1 + len);
			l = end - start + 1;
			/* Convert the overlap back to guest-physical. */
			r = log_write(vq->log_base,
				      u->start + start - u->addr,
				      l);
			if (r < 0)
				return r;
			hit = true;
			/* Advance by the smallest overlap so no mapping
			 * later in the range gets skipped. */
			min = min(l, min);
		}

		if (!hit)
			return -EFAULT;

		len -= min;
		hva += min;
	}

	return 0;
}
2495
/* Log a write of len bytes at offset used_offset into the used ring.
 * Without an IOTLB the guest address is known directly (vq->log_addr);
 * with one, the user address is first translated back into (possibly
 * several) host ranges, each logged via log_write_hva().
 */
static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
{
	struct iovec *iov = vq->log_iov;
	int i, ret;

	if (!vq->iotlb)
		return log_write(vq->log_base, vq->log_addr + used_offset, len);

	ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
			     len, iov, 64, VHOST_ACCESS_WO);
	if (ret < 0)
		return ret;

	/* ret iovecs were filled; log each translated chunk. */
	for (i = 0; i < ret; i++) {
		ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
				    iov[i].iov_len);
		if (ret)
			return ret;
	}

	return 0;
}
2518
2519 /*
2520 * vhost_log_write() - Log in dirty page bitmap
2521 * @vq: vhost virtqueue.
2522 * @log: Array of dirty memory in GPA.
 * @log_num: Size of vhost_log array.
2524 * @len: The total length of memory buffer to log in the dirty bitmap.
2525 * Some drivers may only partially use pages shared via the last
2526 * vring descriptor (i.e. vhost-net RX buffer).
2527 * Use (len == U64_MAX) to indicate the driver would log all
2528 * pages of vring descriptors.
2529 * @iov: Array of dirty memory in HVA.
2530 * @count: Size of iovec array.
2531 */
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len, struct iovec *iov, int count)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();

	if (vq->iotlb) {
		/* IOTLB case: iov carries HVAs; log each via translation. */
		for (i = 0; i < count; i++) {
			r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
					  iov[i].iov_len);
			if (r < 0)
				return r;
		}
		return 0;
	}

	/* Non-IOTLB case: log[] carries GPA ranges; clamp each entry to
	 * the remaining length unless len == U64_MAX (log everything). */
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;

		if (len != U64_MAX)
			len -= l;
	}

	/* Tell userspace new dirty-log data is available. */
	if (vq->log_ctx)
		eventfd_signal(vq->log_ctx);

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
2566
vhost_update_used_flags(struct vhost_virtqueue * vq)2567 static int vhost_update_used_flags(struct vhost_virtqueue *vq)
2568 {
2569 void __user *used;
2570 if (vhost_put_used_flags(vq))
2571 return -EFAULT;
2572 if (unlikely(vq->log_used)) {
2573 /* Make sure the flag is seen before log. */
2574 smp_wmb();
2575 /* Log used flag write. */
2576 used = &vq->used->flags;
2577 log_used(vq, (used - (void __user *)vq->used),
2578 sizeof vq->used->flags);
2579 if (vq->log_ctx)
2580 eventfd_signal(vq->log_ctx);
2581 }
2582 return 0;
2583 }
2584
/* Publish the avail event index (EVENT_IDX notification scheme) to the
 * guest and log the write when dirty logging is on. Returns 0 or
 * -EFAULT if the user write failed.
 */
static int vhost_update_avail_event(struct vhost_virtqueue *vq)
{
	if (vhost_put_avail_event(vq))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_used(vq, (used - (void __user *)vq->used),
			 sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx);
	}
	return 0;
}
2602
/* Prepare a virtqueue for use once a backend is attached: choose ring
 * endianness, push out used flags, validate access to the used index and
 * load its current value. On failure, the previous endianness choice is
 * restored. Returns 0 (also when no backend is set) or a negative errno.
 */
int vhost_vq_init_access(struct vhost_virtqueue *vq)
{
	__virtio16 last_used_idx;
	int r;
	bool is_le = vq->is_le;	/* saved for rollback on error */

	/* Nothing to initialize until a backend is attached. */
	if (!vq->private_data)
		return 0;

	vhost_init_is_le(vq);

	r = vhost_update_used_flags(vq);
	if (r)
		goto err;
	vq->signalled_used_valid = false;
	if (!vq->iotlb &&
	    !access_ok(&vq->used->idx, sizeof vq->used->idx)) {
		r = -EFAULT;
		goto err;
	}
	r = vhost_get_used_idx(vq, &last_used_idx);
	if (r) {
		vq_err(vq, "Can't access used idx at %p\n",
		       &vq->used->idx);
		goto err;
	}
	vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
	return 0;

err:
	vq->is_le = is_le;
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vq_init_access);
2637
/* Translate the guest range [addr, addr + len) into host-user iovecs,
 * using the device IOTLB when present, else the memory-table umem.
 * Returns the number of iovecs filled, or a negative errno: -ENOBUFS if
 * iov_size is exceeded, -EPERM on an access-mode mismatch, -EFAULT on a
 * hole in the memory table, or -EAGAIN after queueing an IOTLB miss
 * (the caller is expected to retry once userspace resolves it).
 */
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access)
{
	const struct vhost_iotlb_map *map;
	struct vhost_dev *dev = vq->dev;
	struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
	struct iovec *_iov;
	u64 s = 0, last = addr + len - 1;
	int ret = 0;

	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(umem, addr, last);
		if (map == NULL || map->start > addr) {
			/* A hole is fatal for the memory table, but just
			 * a miss to be reported for the IOTLB. */
			if (umem != dev->iotlb) {
				ret = -EFAULT;
				break;
			}
			ret = -EAGAIN;
			break;
		} else if (!(map->perm & access)) {
			ret = -EPERM;
			break;
		}

		_iov = iov + ret;
		/* Bytes this mapping covers from addr onwards. */
		size = map->size - addr + map->start;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
				 (map->addr + addr - map->start);
		s += size;
		addr += size;
		++ret;
	}

	if (ret == -EAGAIN)
		vhost_iotlb_miss(vq, addr, access);
	return ret;
}
2682
2683 /* Each buffer in the virtqueues is actually a chain of descriptors. This
2684 * function returns the next descriptor in the chain,
2685 * or -1U if we're at the end. */
next_desc(struct vhost_virtqueue * vq,struct vring_desc * desc)2686 static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
2687 {
2688 unsigned int next;
2689
2690 /* If this descriptor says it doesn't chain, we're done. */
2691 if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
2692 return -1U;
2693
2694 /* Check they're not leading us off end of descriptors. */
2695 next = vhost16_to_cpu(vq, READ_ONCE(desc->next));
2696 return next;
2697 }
2698
/* Walk an indirect descriptor table: translate the table itself into
 * vq->indirect, then iterate its entries, appending translated segments
 * to iov and updating *out_num / *in_num (and log/log_num for writable
 * segments). Returns 0 on success or a negative errno (-EAGAIN means an
 * IOTLB miss was queued and the caller should retry later).
 */
static int get_indirect(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			struct vring_desc *indirect)
{
	struct vring_desc desc;
	unsigned int i = 0, count, found = 0;
	u32 len = vhost32_to_cpu(vq, indirect->len);
	struct iov_iter from;
	int ret, access;

	/* Sanity check */
	if (unlikely(len % sizeof desc)) {
		vq_err(vq, "Invalid length in indirect descriptor: "
		       "len 0x%llx not multiple of 0x%zx\n",
		       (unsigned long long)len,
		       sizeof desc);
		return -EINVAL;
	}

	/* Map the indirect table itself so it can be copied in below. */
	ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
			     UIO_MAXIOV, VHOST_ACCESS_RO);
	if (unlikely(ret < 0)) {
		if (ret != -EAGAIN)
			vq_err(vq, "Translation failure %d in indirect.\n", ret);
		return ret;
	}
	iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len);
	count = len / sizeof desc;
	/* Buffers are chained via a 16 bit next field, so
	 * we can have at most 2^16 of these. */
	if (unlikely(count > USHRT_MAX + 1)) {
		vq_err(vq, "Indirect buffer length too big: %d\n",
		       indirect->len);
		return -E2BIG;
	}

	do {
		unsigned iov_count = *in_num + *out_num;
		/* More links than table entries means a cycle. */
		if (unlikely(++found > count)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "indirect size %u\n",
			       i, count);
			return -EINVAL;
		}
		if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) {
			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}
		/* Indirect tables must not nest. */
		if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
			       i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
			return -EINVAL;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;

		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d indirect idx %d\n",
				       ret, i);
			return ret;
		}
		/* If this is an input descriptor, increment that count. */
		if (access == VHOST_ACCESS_WO) {
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Indirect descriptor "
				       "has out after in: idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
	} while ((i = next_desc(vq, &desc)) != -1);
	return 0;
}
2791
2792 /**
2793 * vhost_get_vq_desc_n - Fetch the next available descriptor chain and build iovecs
2794 * @vq: target virtqueue
2795 * @iov: array that receives the scatter/gather segments
2796 * @iov_size: capacity of @iov in elements
2797 * @out_num: the number of output segments
2798 * @in_num: the number of input segments
2799 * @log: optional array to record addr/len for each writable segment; NULL if unused
2800 * @log_num: optional output; number of entries written to @log when provided
2801 * @ndesc: optional output; number of descriptors consumed from the available ring
2802 * (useful for rollback via vhost_discard_vq_desc)
2803 *
2804 * Extracts one available descriptor chain from @vq and translates guest addresses
2805 * into host iovecs.
2806 *
2807 * On success, advances @vq->last_avail_idx by 1 and @vq->next_avail_head by the
2808 * number of descriptors consumed (also stored via @ndesc when non-NULL).
2809 *
2810 * Return:
2811 * - head index in [0, @vq->num) on success;
2812 * - @vq->num if no descriptor is currently available;
2813 * - negative errno on failure
2814 */
int vhost_get_vq_desc_n(struct vhost_virtqueue *vq,
			struct iovec iov[], unsigned int iov_size,
			unsigned int *out_num, unsigned int *in_num,
			struct vhost_log *log, unsigned int *log_num,
			unsigned int *ndesc)
{
	bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
	struct vring_desc desc;
	unsigned int i, head, found = 0;
	u16 last_avail_idx = vq->last_avail_idx;
	__virtio16 ring_head;
	int ret, access, c = 0;	/* c counts descriptors consumed */

	/* Cached avail index exhausted: re-read it from the guest. */
	if (vq->avail_idx == vq->last_avail_idx) {
		ret = vhost_get_avail_idx(vq);
		if (unlikely(ret < 0))
			return ret;

		/* Nothing new became available. */
		if (!ret)
			return vq->num;
	}

	if (in_order)
		/* In-order: the next head follows from how many
		 * descriptors we consumed so far. */
		head = vq->next_avail_head & (vq->num - 1);
	else {
		/* Grab the next descriptor number they're
		 * advertising, and increment the index we've seen. */
		if (unlikely(vhost_get_avail_head(vq, &ring_head,
						  last_avail_idx))) {
			vq_err(vq, "Failed to read head: idx %d address %p\n",
			       last_avail_idx,
			       &vq->avail->ring[last_avail_idx % vq->num]);
			return -EFAULT;
		}
		head = vhost16_to_cpu(vq, ring_head);
	}

	/* If their number is silly, that's an error. */
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Guest says index %u > %u is available",
		       head, vq->num);
		return -EINVAL;
	}

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;
	if (unlikely(log))
		*log_num = 0;

	i = head;
	do {
		unsigned iov_count = *in_num + *out_num;
		if (unlikely(i >= vq->num)) {
			vq_err(vq, "Desc index is %u > %u, head = %u",
			       i, vq->num, head);
			return -EINVAL;
		}
		/* A chain longer than the ring means a cycle. */
		if (unlikely(++found > vq->num)) {
			vq_err(vq, "Loop detected: last one at %u "
			       "vq size %u head %u\n",
			       i, vq->num, head);
			return -EINVAL;
		}
		ret = vhost_get_desc(vq, &desc, i);
		if (unlikely(ret)) {
			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
			       i, vq->desc + i);
			return -EFAULT;
		}
		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
			ret = get_indirect(vq, iov, iov_size,
					   out_num, in_num,
					   log, log_num, &desc);
			if (unlikely(ret < 0)) {
				if (ret != -EAGAIN)
					vq_err(vq, "Failure detected "
					       "in indirect descriptor at idx %d\n", i);
				return ret;
			}
			++c;
			continue;
		}

		if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE))
			access = VHOST_ACCESS_WO;
		else
			access = VHOST_ACCESS_RO;
		ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
				     vhost32_to_cpu(vq, desc.len), iov + iov_count,
				     iov_size - iov_count, access);
		if (unlikely(ret < 0)) {
			if (ret != -EAGAIN)
				vq_err(vq, "Translation failure %d descriptor idx %d\n",
				       ret, i);
			return ret;
		}
		if (access == VHOST_ACCESS_WO) {
			/* If this is an input descriptor,
			 * increment that count. */
			*in_num += ret;
			if (unlikely(log && ret)) {
				log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
				log[*log_num].len = vhost32_to_cpu(vq, desc.len);
				++*log_num;
			}
		} else {
			/* If it's an output descriptor, they're all supposed
			 * to come before any input descriptors. */
			if (unlikely(*in_num)) {
				vq_err(vq, "Descriptor has out after in: "
				       "idx %d\n", i);
				return -EINVAL;
			}
			*out_num += ret;
		}
		++c;
	} while ((i = next_desc(vq, &desc)) != -1);

	/* On success, increment avail index. */
	vq->last_avail_idx++;
	vq->next_avail_head += c;

	if (ndesc)
		*ndesc = c;

	/* Assume notifications from guest are disabled at this point,
	 * if they aren't we would need to update avail_event index. */
	BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
	return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc_n);
2946
2947 /* This looks in the virtqueue and for the first available buffer, and converts
2948 * it to an iovec for convenient access. Since descriptors consist of some
2949 * number of output then some number of input descriptors, it's actually two
2950 * iovecs, but we pack them into one and note how many of each there were.
2951 *
2952 * This function returns the descriptor number found, or vq->num (which is
2953 * never a valid descriptor number) if none was found. A negative code is
2954 * returned on error.
2955 */
vhost_get_vq_desc(struct vhost_virtqueue * vq,struct iovec iov[],unsigned int iov_size,unsigned int * out_num,unsigned int * in_num,struct vhost_log * log,unsigned int * log_num)2956 int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2957 struct iovec iov[], unsigned int iov_size,
2958 unsigned int *out_num, unsigned int *in_num,
2959 struct vhost_log *log, unsigned int *log_num)
2960 {
2961 return vhost_get_vq_desc_n(vq, iov, iov_size, out_num, in_num,
2962 log, log_num, NULL);
2963 }
2964 EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
2965
2966 /**
2967 * vhost_discard_vq_desc - Reverse the effect of vhost_get_vq_desc_n()
2968 * @vq: target virtqueue
2969 * @nbufs: number of buffers to roll back
2970 * @ndesc: number of descriptors to roll back
2971 *
2972 * Rewinds the internal consumer cursors after a failed attempt to use buffers
2973 * returned by vhost_get_vq_desc_n().
2974 */
vhost_discard_vq_desc(struct vhost_virtqueue * vq,int nbufs,unsigned int ndesc)2975 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int nbufs,
2976 unsigned int ndesc)
2977 {
2978 vq->next_avail_head -= ndesc;
2979 vq->last_avail_idx -= nbufs;
2980 }
2981 EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
2982
2983 /* After we've used one of their buffers, we tell them about it. We'll then
2984 * want to notify the guest, using eventfd. */
vhost_add_used(struct vhost_virtqueue * vq,unsigned int head,int len)2985 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
2986 {
2987 struct vring_used_elem heads = {
2988 cpu_to_vhost32(vq, head),
2989 cpu_to_vhost32(vq, len)
2990 };
2991 u16 nheads = 1;
2992
2993 return vhost_add_used_n(vq, &heads, &nheads, 1);
2994 }
2995 EXPORT_SYMBOL_GPL(vhost_add_used);
2996
/* Copy count used elements into the used ring starting at last_used_idx
 * (caller guarantees the batch does not wrap), log the write if dirty
 * logging is on, then advance last_used_idx. Returns 0 or -EFAULT.
 */
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
			      struct vring_used_elem *heads,
			      unsigned count)
{
	vring_used_elem_t __user *used;
	u16 old, new;
	int start;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;
	if (vhost_put_used(vq, heads, start, count)) {
		vq_err(vq, "Failed to write used");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 count * sizeof *used);
	}
	old = vq->last_used_idx;
	new = (vq->last_used_idx += count);
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
3028
vhost_add_used_n_ooo(struct vhost_virtqueue * vq,struct vring_used_elem * heads,unsigned count)3029 static int vhost_add_used_n_ooo(struct vhost_virtqueue *vq,
3030 struct vring_used_elem *heads,
3031 unsigned count)
3032 {
3033 int start, n, r;
3034
3035 start = vq->last_used_idx & (vq->num - 1);
3036 n = vq->num - start;
3037 if (n < count) {
3038 r = __vhost_add_used_n(vq, heads, n);
3039 if (r < 0)
3040 return r;
3041 heads += n;
3042 count -= n;
3043 }
3044 return __vhost_add_used_n(vq, heads, count);
3045 }
3046
/* In-order variant (VIRTIO_F_IN_ORDER): heads[i] is written at the slot
 * where its buffer's chain began, so slots advance by nheads[i] per
 * element rather than by one. Returns 0, -EINVAL when nheads is missing,
 * or -EFAULT on a failed user write.
 */
static int vhost_add_used_n_in_order(struct vhost_virtqueue *vq,
				     struct vring_used_elem *heads,
				     const u16 *nheads,
				     unsigned count)
{
	vring_used_elem_t __user *used;
	u16 old, new = vq->last_used_idx;
	int start, i;

	if (!nheads)
		return -EINVAL;

	start = vq->last_used_idx & (vq->num - 1);
	used = vq->used->ring + start;

	for (i = 0; i < count; i++) {
		if (vhost_put_used(vq, &heads[i], start, 1)) {
			vq_err(vq, "Failed to write used");
			return -EFAULT;
		}
		start += nheads[i];
		new += nheads[i];
		if (start >= vq->num)
			start -= vq->num;
	}

	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write.
		 * NOTE(review): the ranges below use the post-loop value of
		 * "start" together with "count" — verify they cover exactly
		 * the slots written above. */
		log_used(vq, ((void __user *)used - (void __user *)vq->used),
			 (vq->num - start) * sizeof *used);
		if (start + count > vq->num)
			log_used(vq, 0,
				 (start + count - vq->num) * sizeof *used);
	}

	old = vq->last_used_idx;
	vq->last_used_idx = new;
	/* If the driver never bothers to signal in a very long while,
	 * used index might wrap around. If that happens, invalidate
	 * signalled_used index we stored. TODO: make sure driver
	 * signals at least once in 2^16 and remove this. */
	if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
		vq->signalled_used_valid = false;
	return 0;
}
3094
3095 /* After we've used one of their buffers, we tell them about it. We'll then
3096 * want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
		     u16 *nheads, unsigned count)
{
	bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
	int r;

	/* Use the in-order layout only when the feature was negotiated
	 * AND per-buffer descriptor counts were supplied. */
	if (!in_order || !nheads)
		r = vhost_add_used_n_ooo(vq, heads, count);
	else
		r = vhost_add_used_n_in_order(vq, heads, nheads, count);

	if (r < 0)
		return r;

	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (vhost_put_used_idx(vq)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	if (unlikely(vq->log_used)) {
		/* Make sure used idx is seen before log. */
		smp_wmb();
		/* Log used index update. */
		log_used(vq, offsetof(struct vring_used, idx),
			 sizeof vq->used->idx);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx);
	}
	return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
3129
/* Decide whether the guest should be interrupted about the used-ring
 * updates published so far. Handles VIRTIO_F_NOTIFY_ON_EMPTY, the legacy
 * NO_INTERRUPT avail flag, and the EVENT_IDX protocol. Failures to read
 * guest memory conservatively return true ("signal needed").
 */
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	__u16 old, new;
	__virtio16 event;
	bool v;
	/* Flush out used index updates. This is paired
	 * with the barrier that the Guest executes when enabling
	 * interrupts. */
	smp_mb();

	if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
	    unlikely(vq->avail_idx == vq->last_avail_idx))
		return true;

	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		__virtio16 flags;
		if (vhost_get_avail_flags(vq, &flags)) {
			vq_err(vq, "Failed to get flags");
			return true;
		}
		return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
	}
	old = vq->signalled_used;
	v = vq->signalled_used_valid;
	new = vq->signalled_used = vq->last_used_idx;
	vq->signalled_used_valid = true;

	/* First update after invalidation: always signal. */
	if (unlikely(!v))
		return true;

	if (vhost_get_used_event(vq, &event)) {
		vq_err(vq, "Failed to get used event idx");
		return true;
	}
	return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
3166
3167 /* This actually signals the guest, using eventfd. */
vhost_signal(struct vhost_dev * dev,struct vhost_virtqueue * vq)3168 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3169 {
3170 /* Signal the Guest tell them we used something up. */
3171 if (vq->call_ctx.ctx && vhost_notify(dev, vq))
3172 eventfd_signal(vq->call_ctx.ctx);
3173 }
3174 EXPORT_SYMBOL_GPL(vhost_signal);
3175
3176 /* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
			       struct vhost_virtqueue *vq,
			       unsigned int head, int len)
{
	/* Publish one used element, then raise the call eventfd if needed. */
	vhost_add_used(vq, head, len);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
3185
/* multi-buffer version of vhost_add_used_and_signal */
/*
 * Push @count used elements from @heads (with per-batch counts in
 * @nheads) onto the used ring, then signal the guest if required.
 */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
				 struct vhost_virtqueue *vq,
				 struct vring_used_elem *heads,
				 u16 *nheads,
				 unsigned count)
{
	vhost_add_used_n(vq, heads, nheads, count);
	vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
3197
3198 /* return true if we're sure that available ring is empty */
vhost_vq_avail_empty(struct vhost_dev * dev,struct vhost_virtqueue * vq)3199 bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3200 {
3201 int r;
3202
3203 if (vq->avail_idx != vq->last_avail_idx)
3204 return false;
3205
3206 r = vhost_get_avail_idx(vq);
3207
3208 /* Note: we treat error as non-empty here */
3209 return r == 0;
3210 }
3211 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
3212
/* OK, now we need to know about added descriptors. */
/*
 * Re-enable guest->host notifications (kicks) on @vq, then re-check the
 * avail ring for buffers the guest may have added while notifications
 * were disabled.
 *
 * Returns true if new avail entries are pending (caller should process
 * them rather than wait for a kick), false if empty or on error.
 */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	int r;

	/* Notifications already enabled: nothing to do. */
	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
		return false;
	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
	if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
		/* Legacy: publish the cleared NO_NOTIFY bit in used->flags. */
		r = vhost_update_used_flags(vq);
		if (r) {
			vq_err(vq, "Failed to enable notification at %p: %d\n",
			       &vq->used->flags, r);
			return false;
		}
	} else {
		/* Event idx: publish which avail entry we want a kick for. */
		r = vhost_update_avail_event(vq);
		if (r) {
			vq_err(vq, "Failed to update avail event index at %p: %d\n",
			       vhost_avail_event(vq), r);
			return false;
		}
	}
	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	smp_mb();

	/* Presumably returns >0 when new avail entries exist; see
	 * vhost_vq_avail_empty(), which treats 0 as "empty". */
	r = vhost_get_avail_idx(vq);
	/* Note: we treat error as empty here */
	if (unlikely(r < 0))
		return false;

	return r;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
3248
3249 /* We don't need to be notified again. */
vhost_disable_notify(struct vhost_dev * dev,struct vhost_virtqueue * vq)3250 void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
3251 {
3252 int r;
3253
3254 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
3255 return;
3256 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
3257 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
3258 r = vhost_update_used_flags(vq);
3259 if (r)
3260 vq_err(vq, "Failed to disable notification at %p: %d\n",
3261 &vq->used->flags, r);
3262 }
3263 }
3264 EXPORT_SYMBOL_GPL(vhost_disable_notify);
3265
3266 /* Create a new message. */
vhost_new_msg(struct vhost_virtqueue * vq,int type)3267 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type)
3268 {
3269 /* Make sure all padding within the structure is initialized. */
3270 struct vhost_msg_node *node = kzalloc_obj(*node);
3271 if (!node)
3272 return NULL;
3273
3274 node->vq = vq;
3275 node->msg.type = type;
3276 return node;
3277 }
3278 EXPORT_SYMBOL_GPL(vhost_new_msg);
3279
/*
 * Append @node to the message list @head under the iotlb lock, then
 * wake anyone polling dev->wait for readable data (EPOLLIN).
 */
void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head,
		       struct vhost_msg_node *node)
{
	spin_lock(&dev->iotlb_lock);
	list_add_tail(&node->node, head);
	spin_unlock(&dev->iotlb_lock);

	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL_GPL(vhost_enqueue_msg);
3290
vhost_dequeue_msg(struct vhost_dev * dev,struct list_head * head)3291 struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
3292 struct list_head *head)
3293 {
3294 struct vhost_msg_node *node = NULL;
3295
3296 spin_lock(&dev->iotlb_lock);
3297 if (!list_empty(head)) {
3298 node = list_first_entry(head, struct vhost_msg_node,
3299 node);
3300 list_del(&node->node);
3301 }
3302 spin_unlock(&dev->iotlb_lock);
3303
3304 return node;
3305 }
3306 EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
3307
/*
 * Record @features as acknowledged on every virtqueue of @dev.
 * Takes the device mutex, and each vq mutex in turn while updating.
 */
void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
{
	int idx;

	mutex_lock(&dev->mutex);
	for (idx = 0; idx < dev->nvqs; ++idx) {
		struct vhost_virtqueue *vq = dev->vqs[idx];

		mutex_lock(&vq->mutex);
		vq->acked_backend_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(vhost_set_backend_features);
3323
/* No global setup required; always succeeds. */
static int __init vhost_init(void)
{
	return 0;
}
3328
/* No global state to tear down. */
static void __exit vhost_exit(void)
{
}
3332
/* Module registration and metadata. */
module_init(vhost_init);
module_exit(vhost_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");