xref: /linux/drivers/vhost/vhost.c (revision 4e8cec269dd9e823804141f25ce37c23e72d3c12)
1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Copyright (C) 2006 Rusty Russell IBM Corporation
3  *
4  * Author: Michael S. Tsirkin <mst@redhat.com>
5  *
6  * Inspiration, some code, and most witty comments come from
7  * Documentation/lguest/lguest.c, by Rusty Russell
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.
10  *
11  * Generic code for virtio server in host kernel.
12  */
13 
14 #include <linux/eventfd.h>
15 #include <linux/vhost.h>
16 #include <linux/virtio_net.h>
17 #include <linux/mm.h>
18 #include <linux/miscdevice.h>
19 #include <linux/mutex.h>
20 #include <linux/rcupdate.h>
21 #include <linux/poll.h>
22 #include <linux/file.h>
23 #include <linux/highmem.h>
24 #include <linux/slab.h>
25 #include <linux/kthread.h>
26 #include <linux/cgroup.h>
27 
28 #include <linux/net.h>
29 #include <linux/if_packet.h>
30 #include <linux/if_arp.h>
31 
32 #include <net/sock.h>
33 
34 #include "vhost.h"
35 
36 enum {
37 	VHOST_MEMORY_MAX_NREGIONS = 64,
38 	VHOST_MEMORY_F_LOG = 0x1,
39 };
40 
41 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
42 			    poll_table *pt)
43 {
44 	struct vhost_poll *poll;
45 	poll = container_of(pt, struct vhost_poll, table);
46 
47 	poll->wqh = wqh;
48 	add_wait_queue(wqh, &poll->wait);
49 }
50 
51 static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
52 			     void *key)
53 {
54 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
55 
56 	if (!((unsigned long)key & poll->mask))
57 		return 0;
58 
59 	vhost_poll_queue(poll);
60 	return 0;
61 }
62 
63 static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
64 {
65 	INIT_LIST_HEAD(&work->node);
66 	work->fn = fn;
67 	init_waitqueue_head(&work->done);
68 	work->flushing = 0;
69 	work->queue_seq = work->done_seq = 0;
70 }
71 
72 /* Init poll structure */
73 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
74 		     unsigned long mask, struct vhost_dev *dev)
75 {
76 	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
77 	init_poll_funcptr(&poll->table, vhost_poll_func);
78 	poll->mask = mask;
79 	poll->dev = dev;
80 
81 	vhost_work_init(&poll->work, fn);
82 }
83 
84 /* Start polling a file. We add ourselves to file's wait queue. The caller must
85  * keep a reference to the file until after vhost_poll_stop is called. */
86 void vhost_poll_start(struct vhost_poll *poll, struct file *file)
87 {
88 	unsigned long mask;
89 	mask = file->f_op->poll(file, &poll->table);
90 	if (mask)
91 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
92 }
93 
94 /* Stop polling a file. After this function returns, it becomes safe to drop the
95  * file reference. You must also flush afterwards. */
96 void vhost_poll_stop(struct vhost_poll *poll)
97 {
98 	remove_wait_queue(poll->wqh, &poll->wait);
99 }
100 
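/* Flushing works by sequence numbers: each queued work item records
 * queue_seq, the worker copies it to done_seq once the item has run, and
 * flushers sleep on work->done until done_seq catches up with the
 * queue_seq value they sampled under work_lock. */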
101 static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
102 {
103 	unsigned seq;
104 	int left;
105 	int flushing;
106 
107 	spin_lock_irq(&dev->work_lock);
108 	seq = work->queue_seq;
109 	work->flushing++;
110 	spin_unlock_irq(&dev->work_lock);
111 	wait_event(work->done, ({
112 		   spin_lock_irq(&dev->work_lock);
113 		   left = seq - work->done_seq <= 0;
114 		   spin_unlock_irq(&dev->work_lock);
115 		   left;
116 	}));
117 	spin_lock_irq(&dev->work_lock);
118 	flushing = --work->flushing;
119 	spin_unlock_irq(&dev->work_lock);
120 	BUG_ON(flushing < 0);
121 }
122 
123 /* Flush any work that has been scheduled. When calling this, don't hold any
124  * locks that are also used by the callback. */
125 void vhost_poll_flush(struct vhost_poll *poll)
126 {
127 	vhost_work_flush(poll->dev, &poll->work);
128 }
129 
130 static inline void vhost_work_queue(struct vhost_dev *dev,
131 				    struct vhost_work *work)
132 {
133 	unsigned long flags;
134 
135 	spin_lock_irqsave(&dev->work_lock, flags);
136 	if (list_empty(&work->node)) {
137 		list_add_tail(&work->node, &dev->work_list);
138 		work->queue_seq++;
139 		wake_up_process(dev->worker);
140 	}
141 	spin_unlock_irqrestore(&dev->work_lock, flags);
142 }
143 
144 void vhost_poll_queue(struct vhost_poll *poll)
145 {
146 	vhost_work_queue(poll->dev, &poll->work);
147 }
148 
149 static void vhost_vq_reset(struct vhost_dev *dev,
150 			   struct vhost_virtqueue *vq)
151 {
152 	vq->num = 1;
153 	vq->desc = NULL;
154 	vq->avail = NULL;
155 	vq->used = NULL;
156 	vq->last_avail_idx = 0;
157 	vq->avail_idx = 0;
158 	vq->last_used_idx = 0;
159 	vq->used_flags = 0;
161 	vq->log_used = false;
162 	vq->log_addr = -1ull;
163 	vq->vhost_hlen = 0;
164 	vq->sock_hlen = 0;
165 	vq->private_data = NULL;
166 	vq->log_base = NULL;
167 	vq->error_ctx = NULL;
168 	vq->error = NULL;
169 	vq->kick = NULL;
170 	vq->call_ctx = NULL;
171 	vq->call = NULL;
172 	vq->log_ctx = NULL;
173 }
174 
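/* The per-device worker thread: report completion of the previous work
 * item (waking any flushers), honour kthread_stop, then either run the
 * next queued work item or sleep until vhost_work_queue wakes us. */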
175 static int vhost_worker(void *data)
176 {
177 	struct vhost_dev *dev = data;
178 	struct vhost_work *work = NULL;
179 	unsigned uninitialized_var(seq);
180 
181 	for (;;) {
182 		/* mb paired w/ kthread_stop */
183 		set_current_state(TASK_INTERRUPTIBLE);
184 
185 		spin_lock_irq(&dev->work_lock);
186 		if (work) {
187 			work->done_seq = seq;
188 			if (work->flushing)
189 				wake_up_all(&work->done);
190 		}
191 
192 		if (kthread_should_stop()) {
193 			spin_unlock_irq(&dev->work_lock);
194 			__set_current_state(TASK_RUNNING);
195 			return 0;
196 		}
197 		if (!list_empty(&dev->work_list)) {
198 			work = list_first_entry(&dev->work_list,
199 						struct vhost_work, node);
200 			list_del_init(&work->node);
201 			seq = work->queue_seq;
202 		} else
203 			work = NULL;
204 		spin_unlock_irq(&dev->work_lock);
205 
206 		if (work) {
207 			__set_current_state(TASK_RUNNING);
208 			work->fn(work);
209 		} else
210 			schedule();
211 
212 	}
213 }
214 
215 long vhost_dev_init(struct vhost_dev *dev,
216 		    struct vhost_virtqueue *vqs, int nvqs)
217 {
218 	int i;
219 
220 	dev->vqs = vqs;
221 	dev->nvqs = nvqs;
222 	mutex_init(&dev->mutex);
223 	dev->log_ctx = NULL;
224 	dev->log_file = NULL;
225 	dev->memory = NULL;
226 	dev->mm = NULL;
227 	spin_lock_init(&dev->work_lock);
228 	INIT_LIST_HEAD(&dev->work_list);
229 	dev->worker = NULL;
230 
231 	for (i = 0; i < dev->nvqs; ++i) {
232 		dev->vqs[i].dev = dev;
233 		mutex_init(&dev->vqs[i].mutex);
234 		vhost_vq_reset(dev, dev->vqs + i);
235 		if (dev->vqs[i].handle_kick)
236 			vhost_poll_init(&dev->vqs[i].poll,
237 					dev->vqs[i].handle_kick, POLLIN, dev);
238 	}
239 
240 	return 0;
241 }
242 
243 /* Caller should have device mutex */
244 long vhost_dev_check_owner(struct vhost_dev *dev)
245 {
246 	/* Are you the owner? If not, I don't think you mean to do that */
247 	return dev->mm == current->mm ? 0 : -EPERM;
248 }
249 
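/* Cgroup attachment runs as a work item so that it executes in the worker
 * thread itself: cgroup_attach_task_all() then moves the worker into all
 * cgroups of the owner task. */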
250 struct vhost_attach_cgroups_struct {
251 	struct vhost_work work;
252 	struct task_struct *owner;
253 	int ret;
254 };
255 
256 static void vhost_attach_cgroups_work(struct vhost_work *work)
257 {
258 	struct vhost_attach_cgroups_struct *s;
259 	s = container_of(work, struct vhost_attach_cgroups_struct, work);
260 	s->ret = cgroup_attach_task_all(s->owner, current);
261 }
262 
263 static int vhost_attach_cgroups(struct vhost_dev *dev)
264 {
265 	struct vhost_attach_cgroups_struct attach;
266 	attach.owner = current;
267 	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
268 	vhost_work_queue(dev, &attach.work);
269 	vhost_work_flush(dev, &attach.work);
270 	return attach.ret;
271 }
272 
273 /* Caller should have device mutex */
274 static long vhost_dev_set_owner(struct vhost_dev *dev)
275 {
276 	struct task_struct *worker;
277 	int err;
278 	/* Is there an owner already? */
279 	if (dev->mm) {
280 		err = -EBUSY;
281 		goto err_mm;
282 	}
283 	/* No owner, become one */
284 	dev->mm = get_task_mm(current);
285 	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
286 	if (IS_ERR(worker)) {
287 		err = PTR_ERR(worker);
288 		goto err_worker;
289 	}
290 
291 	dev->worker = worker;
292 	wake_up_process(worker);	/* avoid contributing to loadavg */
293 
294 	err = vhost_attach_cgroups(dev);
295 	if (err)
296 		goto err_cgroup;
297 
298 	return 0;
299 err_cgroup:
300 	kthread_stop(worker);
301 	dev->worker = NULL;
302 err_worker:
303 	if (dev->mm)
304 		mmput(dev->mm);
305 	dev->mm = NULL;
306 err_mm:
307 	return err;
308 }
309 
310 /* Caller should have device mutex */
311 long vhost_dev_reset_owner(struct vhost_dev *dev)
312 {
313 	struct vhost_memory *memory;
314 
315 	/* Restore memory to default empty mapping. */
316 	memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
317 	if (!memory)
318 		return -ENOMEM;
319 
320 	vhost_dev_cleanup(dev);
321 
322 	memory->nregions = 0;
323 	dev->memory = memory;
324 	return 0;
325 }
326 
327 /* Caller should have device mutex */
328 void vhost_dev_cleanup(struct vhost_dev *dev)
329 {
330 	int i;
331 	for (i = 0; i < dev->nvqs; ++i) {
332 		if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
333 			vhost_poll_stop(&dev->vqs[i].poll);
334 			vhost_poll_flush(&dev->vqs[i].poll);
335 		}
336 		if (dev->vqs[i].error_ctx)
337 			eventfd_ctx_put(dev->vqs[i].error_ctx);
338 		if (dev->vqs[i].error)
339 			fput(dev->vqs[i].error);
340 		if (dev->vqs[i].kick)
341 			fput(dev->vqs[i].kick);
342 		if (dev->vqs[i].call_ctx)
343 			eventfd_ctx_put(dev->vqs[i].call_ctx);
344 		if (dev->vqs[i].call)
345 			fput(dev->vqs[i].call);
346 		vhost_vq_reset(dev, dev->vqs + i);
347 	}
348 	if (dev->log_ctx)
349 		eventfd_ctx_put(dev->log_ctx);
350 	dev->log_ctx = NULL;
351 	if (dev->log_file)
352 		fput(dev->log_file);
353 	dev->log_file = NULL;
354 	/* No one will access memory at this point */
355 	kfree(dev->memory);
356 	dev->memory = NULL;
357 	if (dev->mm)
358 		mmput(dev->mm);
359 	dev->mm = NULL;
360 
361 	WARN_ON(!list_empty(&dev->work_list));
362 	if (dev->worker) {
363 		kthread_stop(dev->worker);
364 		dev->worker = NULL;
365 	}
366 }
367 
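/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE
 * page of guest memory; check that the bytes covering [addr, addr + sz)
 * are writable without overflowing the log_base pointer. */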
368 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
369 {
370 	u64 a = addr / VHOST_PAGE_SIZE / 8;
371 	/* Make sure 64 bit math will not overflow. */
372 	if (a > ULONG_MAX - (unsigned long)log_base ||
373 	    a + (unsigned long)log_base > ULONG_MAX)
374 		return 0;
375 
376 	return access_ok(VERIFY_WRITE, log_base + a,
377 			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
378 }
379 
380 /* Caller should have vq mutex and device mutex. */
381 static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
382 			       int log_all)
383 {
384 	int i;
385 
386 	if (!mem)
387 		return 0;
388 
389 	for (i = 0; i < mem->nregions; ++i) {
390 		struct vhost_memory_region *m = mem->regions + i;
391 		unsigned long a = m->userspace_addr;
392 		if (m->memory_size > ULONG_MAX)
393 			return 0;
394 		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
395 				    m->memory_size))
396 			return 0;
397 		else if (log_all && !log_access_ok(log_base,
398 						   m->guest_phys_addr,
399 						   m->memory_size))
400 			return 0;
401 	}
402 	return 1;
403 }
404 
405 /* Can we switch to this memory table? */
406 /* Caller should have device mutex but not vq mutex */
407 static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
408 			    int log_all)
409 {
410 	int i;
411 	for (i = 0; i < d->nvqs; ++i) {
412 		int ok;
413 		mutex_lock(&d->vqs[i].mutex);
414 		/* If ring is inactive, will check when it's enabled. */
415 		if (d->vqs[i].private_data)
416 			ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
417 						 log_all);
418 		else
419 			ok = 1;
420 		mutex_unlock(&d->vqs[i].mutex);
421 		if (!ok)
422 			return 0;
423 	}
424 	return 1;
425 }
426 
427 static int vq_access_ok(unsigned int num,
428 			struct vring_desc __user *desc,
429 			struct vring_avail __user *avail,
430 			struct vring_used __user *used)
431 {
432 	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
433 	       access_ok(VERIFY_READ, avail,
434 			 sizeof *avail + num * sizeof *avail->ring) &&
435 	       access_ok(VERIFY_WRITE, used,
436 			sizeof *used + num * sizeof *used->ring);
437 }
438 
439 /* Can we log writes? */
440 /* Caller should have device mutex but not vq mutex */
441 int vhost_log_access_ok(struct vhost_dev *dev)
442 {
443 	return memory_access_ok(dev, dev->memory, 1);
444 }
445 
446 /* Verify access for write logging. */
447 /* Caller should have vq mutex and device mutex */
448 static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
449 {
450 	return vq_memory_access_ok(log_base, vq->dev->memory,
451 			    vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
452 		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
453 					sizeof *vq->used +
454 					vq->num * sizeof *vq->used->ring));
455 }
456 
457 /* Can we start vq? */
458 /* Caller should have vq mutex and device mutex */
459 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
460 {
461 	return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
462 		vq_log_access_ok(vq, vq->log_base);
463 }
464 
465 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
466 {
467 	struct vhost_memory mem, *newmem, *oldmem;
468 	unsigned long size = offsetof(struct vhost_memory, regions);
469 	if (copy_from_user(&mem, m, size))
470 		return -EFAULT;
471 	if (mem.padding)
472 		return -EOPNOTSUPP;
473 	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
474 		return -E2BIG;
475 	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
476 	if (!newmem)
477 		return -ENOMEM;
478 
479 	memcpy(newmem, &mem, size);
480 	if (copy_from_user(newmem->regions, m->regions,
481 			   mem.nregions * sizeof *m->regions)) {
482 		kfree(newmem);
483 		return -EFAULT;
484 	}
485 
486 	if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
487 		kfree(newmem);
488 		return -EFAULT;
489 	}
490 	oldmem = d->memory;
491 	rcu_assign_pointer(d->memory, newmem);
492 	synchronize_rcu();
493 	kfree(oldmem);
494 	return 0;
495 }
496 
497 static int init_used(struct vhost_virtqueue *vq,
498 		     struct vring_used __user *used)
499 {
500 	int r = put_user(vq->used_flags, &used->flags);
501 	if (r)
502 		return r;
503 	return get_user(vq->last_used_idx, &used->idx);
504 }
505 
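/* Per-virtqueue ioctls: read the vring index first to pick the queue,
 * then copy and handle the ioctl-specific argument under that queue's
 * mutex. */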
506 static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
507 {
508 	struct file *eventfp, *filep = NULL,
509 		    *pollstart = NULL, *pollstop = NULL;
510 	struct eventfd_ctx *ctx = NULL;
511 	u32 __user *idxp = argp;
512 	struct vhost_virtqueue *vq;
513 	struct vhost_vring_state s;
514 	struct vhost_vring_file f;
515 	struct vhost_vring_addr a;
516 	u32 idx;
517 	long r;
518 
519 	r = get_user(idx, idxp);
520 	if (r < 0)
521 		return r;
522 	if (idx >= d->nvqs)
523 		return -ENOBUFS;
524 
525 	vq = d->vqs + idx;
526 
527 	mutex_lock(&vq->mutex);
528 
529 	switch (ioctl) {
530 	case VHOST_SET_VRING_NUM:
531 		/* Resizing ring with an active backend?
532 		 * You don't want to do that. */
533 		if (vq->private_data) {
534 			r = -EBUSY;
535 			break;
536 		}
537 		if (copy_from_user(&s, argp, sizeof s)) {
538 			r = -EFAULT;
539 			break;
540 		}
541 		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
542 			r = -EINVAL;
543 			break;
544 		}
545 		vq->num = s.num;
546 		break;
547 	case VHOST_SET_VRING_BASE:
548 		/* Moving base with an active backend?
549 		 * You don't want to do that. */
550 		if (vq->private_data) {
551 			r = -EBUSY;
552 			break;
553 		}
554 		if (copy_from_user(&s, argp, sizeof s)) {
555 			r = -EFAULT;
556 			break;
557 		}
558 		if (s.num > 0xffff) {
559 			r = -EINVAL;
560 			break;
561 		}
562 		vq->last_avail_idx = s.num;
563 		/* Forget the cached index value. */
564 		vq->avail_idx = vq->last_avail_idx;
565 		break;
566 	case VHOST_GET_VRING_BASE:
567 		s.index = idx;
568 		s.num = vq->last_avail_idx;
569 		if (copy_to_user(argp, &s, sizeof s))
570 			r = -EFAULT;
571 		break;
572 	case VHOST_SET_VRING_ADDR:
573 		if (copy_from_user(&a, argp, sizeof a)) {
574 			r = -EFAULT;
575 			break;
576 		}
577 		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
578 			r = -EOPNOTSUPP;
579 			break;
580 		}
581 		/* For 32bit, verify that the top 32bits of the user
582 		   data are set to zero. */
583 		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
584 		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
585 		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
586 			r = -EFAULT;
587 			break;
588 		}
589 		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
590 		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
591 		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
592 			r = -EINVAL;
593 			break;
594 		}
595 
596 		/* We only verify access here if backend is configured.
597 		 * If it is not, we skip the check, as the size might not have
598 		 * been set up yet. We will verify when backend is configured. */
599 		if (vq->private_data) {
600 			if (!vq_access_ok(vq->num,
601 				(void __user *)(unsigned long)a.desc_user_addr,
602 				(void __user *)(unsigned long)a.avail_user_addr,
603 				(void __user *)(unsigned long)a.used_user_addr)) {
604 				r = -EINVAL;
605 				break;
606 			}
607 
608 			/* Also validate log access for used ring if enabled. */
609 			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
610 			    !log_access_ok(vq->log_base, a.log_guest_addr,
611 					   sizeof *vq->used +
612 					   vq->num * sizeof *vq->used->ring)) {
613 				r = -EINVAL;
614 				break;
615 			}
616 		}
617 
618 		r = init_used(vq, (struct vring_used __user *)(unsigned long)
619 			      a.used_user_addr);
620 		if (r)
621 			break;
622 		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
623 		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
624 		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
625 		vq->log_addr = a.log_guest_addr;
626 		vq->used = (void __user *)(unsigned long)a.used_user_addr;
627 		break;
628 	case VHOST_SET_VRING_KICK:
629 		if (copy_from_user(&f, argp, sizeof f)) {
630 			r = -EFAULT;
631 			break;
632 		}
633 		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
634 		if (IS_ERR(eventfp)) {
635 			r = PTR_ERR(eventfp);
636 			break;
637 		}
638 		if (eventfp != vq->kick) {
639 			pollstop = filep = vq->kick;
640 			pollstart = vq->kick = eventfp;
641 		} else
642 			filep = eventfp;
643 		break;
644 	case VHOST_SET_VRING_CALL:
645 		if (copy_from_user(&f, argp, sizeof f)) {
646 			r = -EFAULT;
647 			break;
648 		}
649 		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
650 		if (IS_ERR(eventfp)) {
651 			r = PTR_ERR(eventfp);
652 			break;
653 		}
654 		if (eventfp != vq->call) {
655 			filep = vq->call;
656 			ctx = vq->call_ctx;
657 			vq->call = eventfp;
658 			vq->call_ctx = eventfp ?
659 				eventfd_ctx_fileget(eventfp) : NULL;
660 		} else
661 			filep = eventfp;
662 		break;
663 	case VHOST_SET_VRING_ERR:
664 		if (copy_from_user(&f, argp, sizeof f)) {
665 			r = -EFAULT;
666 			break;
667 		}
668 		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
669 		if (IS_ERR(eventfp)) {
670 			r = PTR_ERR(eventfp);
671 			break;
672 		}
673 		if (eventfp != vq->error) {
674 			filep = vq->error;
675 			vq->error = eventfp;
676 			ctx = vq->error_ctx;
677 			vq->error_ctx = eventfp ?
678 				eventfd_ctx_fileget(eventfp) : NULL;
679 		} else
680 			filep = eventfp;
681 		break;
682 	default:
683 		r = -ENOIOCTLCMD;
684 	}
685 
686 	if (pollstop && vq->handle_kick)
687 		vhost_poll_stop(&vq->poll);
688 
689 	if (ctx)
690 		eventfd_ctx_put(ctx);
691 	if (filep)
692 		fput(filep);
693 
694 	if (pollstart && vq->handle_kick)
695 		vhost_poll_start(&vq->poll, vq->kick);
696 
697 	mutex_unlock(&vq->mutex);
698 
699 	if (pollstop && vq->handle_kick)
700 		vhost_poll_flush(&vq->poll);
701 	return r;
702 }
703 
704 /* Caller must have device mutex */
705 long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
706 {
707 	void __user *argp = (void __user *)arg;
708 	struct file *eventfp, *filep = NULL;
709 	struct eventfd_ctx *ctx = NULL;
710 	u64 p;
711 	long r;
712 	int i, fd;
713 
714 	/* If you are not the owner, you can become one */
715 	if (ioctl == VHOST_SET_OWNER) {
716 		r = vhost_dev_set_owner(d);
717 		goto done;
718 	}
719 
720 	/* You must be the owner to do anything else */
721 	r = vhost_dev_check_owner(d);
722 	if (r)
723 		goto done;
724 
725 	switch (ioctl) {
726 	case VHOST_SET_MEM_TABLE:
727 		r = vhost_set_memory(d, argp);
728 		break;
729 	case VHOST_SET_LOG_BASE:
730 		if (copy_from_user(&p, argp, sizeof p)) {
731 			r = -EFAULT;
732 			break;
733 		}
734 		if ((u64)(unsigned long)p != p) {
735 			r = -EFAULT;
736 			break;
737 		}
738 		for (i = 0; i < d->nvqs; ++i) {
739 			struct vhost_virtqueue *vq;
740 			void __user *base = (void __user *)(unsigned long)p;
741 			vq = d->vqs + i;
742 			mutex_lock(&vq->mutex);
743 			/* If ring is inactive, will check when it's enabled. */
744 			if (vq->private_data && !vq_log_access_ok(vq, base))
745 				r = -EFAULT;
746 			else
747 				vq->log_base = base;
748 			mutex_unlock(&vq->mutex);
749 		}
750 		break;
751 	case VHOST_SET_LOG_FD:
752 		r = get_user(fd, (int __user *)argp);
753 		if (r < 0)
754 			break;
755 		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
756 		if (IS_ERR(eventfp)) {
757 			r = PTR_ERR(eventfp);
758 			break;
759 		}
760 		if (eventfp != d->log_file) {
761 			filep = d->log_file;
			d->log_file = eventfp;
762 			ctx = d->log_ctx;
763 			d->log_ctx = eventfp ?
764 				eventfd_ctx_fileget(eventfp) : NULL;
765 		} else
766 			filep = eventfp;
767 		for (i = 0; i < d->nvqs; ++i) {
768 			mutex_lock(&d->vqs[i].mutex);
769 			d->vqs[i].log_ctx = d->log_ctx;
770 			mutex_unlock(&d->vqs[i].mutex);
771 		}
772 		if (ctx)
773 			eventfd_ctx_put(ctx);
774 		if (filep)
775 			fput(filep);
776 		break;
777 	default:
778 		r = vhost_set_vring(d, ioctl, argp);
779 		break;
780 	}
781 done:
782 	return r;
783 }
784 
785 static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
786 						     __u64 addr, __u32 len)
787 {
788 	struct vhost_memory_region *reg;
789 	int i;
790 	/* linear search is not brilliant, but we really have on the order of 6
791 	 * regions in practice */
792 	for (i = 0; i < mem->nregions; ++i) {
793 		reg = mem->regions + i;
794 		if (reg->guest_phys_addr <= addr &&
795 		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
796 			return reg;
797 	}
798 	return NULL;
799 }
800 
801 /* TODO: This is really inefficient.  We need something like get_user()
802  * (instruction directly accesses the data, with an exception table entry
803  * returning -EFAULT). See Documentation/x86/exception-tables.txt.
804  */
805 static int set_bit_to_user(int nr, void __user *addr)
806 {
807 	unsigned long log = (unsigned long)addr;
808 	struct page *page;
809 	void *base;
810 	int bit = nr + (log % PAGE_SIZE) * 8;
811 	int r;
812 	r = get_user_pages_fast(log, 1, 1, &page);
813 	if (r < 0)
814 		return r;
815 	BUG_ON(r != 1);
816 	base = kmap_atomic(page, KM_USER0);
817 	set_bit(bit, base);
818 	kunmap_atomic(base, KM_USER0);
819 	set_page_dirty_lock(page);
820 	put_page(page);
821 	return 0;
822 }
823 
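/* Mark the pages touched by a guest-visible write as dirty in the log
 * bitmap, one bit per VHOST_PAGE_SIZE page starting at write_address. */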
824 static int log_write(void __user *log_base,
825 		     u64 write_address, u64 write_length)
826 {
827 	int r;
828 	if (!write_length)
829 		return 0;
830 	write_address /= VHOST_PAGE_SIZE;
831 	for (;;) {
832 		u64 base = (u64)(unsigned long)log_base;
833 		u64 log = base + write_address / 8;
834 		int bit = write_address % 8;
835 		if ((u64)(unsigned long)log != log)
836 			return -EFAULT;
837 		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
838 		if (r < 0)
839 			return r;
840 		if (write_length <= VHOST_PAGE_SIZE)
841 			break;
842 		write_length -= VHOST_PAGE_SIZE;
843 		write_address += 1; /* write_address counts pages here */
844 	}
845 	return r;
846 }
847 
848 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
849 		    unsigned int log_num, u64 len)
850 {
851 	int i, r;
852 
853 	/* Make sure data written is seen before log. */
854 	smp_wmb();
855 	for (i = 0; i < log_num; ++i) {
856 		u64 l = min(log[i].len, len);
857 		r = log_write(vq->log_base, log[i].addr, l);
858 		if (r < 0)
859 			return r;
860 		len -= l;
861 		if (!len)
862 			return 0;
863 	}
864 	if (vq->log_ctx)
865 		eventfd_signal(vq->log_ctx, 1);
866 	/* Length written exceeds what we have stored. This is a bug. */
867 	BUG();
868 	return 0;
869 }
870 
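/* Translate a guest-physical address range into userspace iovecs using
 * the memory table; the table is read under RCU so it can be swapped by
 * VHOST_SET_MEM_TABLE while requests are in flight. */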
871 static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
872 			  struct iovec iov[], int iov_size)
873 {
874 	const struct vhost_memory_region *reg;
875 	struct vhost_memory *mem;
876 	struct iovec *_iov;
877 	u64 s = 0;
878 	int ret = 0;
879 
880 	rcu_read_lock();
881 
882 	mem = rcu_dereference(dev->memory);
883 	while ((u64)len > s) {
884 		u64 size;
885 		if (unlikely(ret >= iov_size)) {
886 			ret = -ENOBUFS;
887 			break;
888 		}
889 		reg = find_region(mem, addr, len);
890 		if (unlikely(!reg)) {
891 			ret = -EFAULT;
892 			break;
893 		}
894 		_iov = iov + ret;
895 		size = reg->memory_size - addr + reg->guest_phys_addr;
896 		_iov->iov_len = min((u64)len - s, size);
897 		_iov->iov_base = (void __user *)(unsigned long)
898 			(reg->userspace_addr + addr - reg->guest_phys_addr);
899 		s += size;
900 		addr += size;
901 		++ret;
902 	}
903 
904 	rcu_read_unlock();
905 	return ret;
906 }
907 
908 /* Each buffer in the virtqueues is actually a chain of descriptors.  This
909  * function returns the next descriptor in the chain,
910  * or -1U if we're at the end. */
911 static unsigned next_desc(struct vring_desc *desc)
912 {
913 	unsigned int next;
914 
915 	/* If this descriptor says it doesn't chain, we're done. */
916 	if (!(desc->flags & VRING_DESC_F_NEXT))
917 		return -1U;
918 
919 	/* Check they're not leading us off end of descriptors. */
920 	next = desc->next;
921 	/* Make sure compiler knows to grab that: we don't want it changing! */
922 	/* We will use the result as an index in an array, so most
923 	 * architectures only need a compiler barrier here. */
924 	read_barrier_depends();
925 
926 	return next;
927 }
928 
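/* An indirect descriptor points at a table of descriptors in guest memory;
 * translate that table, then walk it like a normal chain, adding each entry
 * to the iovec (and the dirty log for writable entries). */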
929 static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
930 			struct iovec iov[], unsigned int iov_size,
931 			unsigned int *out_num, unsigned int *in_num,
932 			struct vhost_log *log, unsigned int *log_num,
933 			struct vring_desc *indirect)
934 {
935 	struct vring_desc desc;
936 	unsigned int i = 0, count, found = 0;
937 	int ret;
938 
939 	/* Sanity check */
940 	if (unlikely(indirect->len % sizeof desc)) {
941 		vq_err(vq, "Invalid length in indirect descriptor: "
942 		       "len 0x%llx not multiple of 0x%zx\n",
943 		       (unsigned long long)indirect->len,
944 		       sizeof desc);
945 		return -EINVAL;
946 	}
947 
948 	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
949 			     ARRAY_SIZE(vq->indirect));
950 	if (unlikely(ret < 0)) {
951 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
952 		return ret;
953 	}
954 
955 	/* We will use the result as an address to read from, so most
956 	 * architectures only need a compiler barrier here. */
957 	read_barrier_depends();
958 
959 	count = indirect->len / sizeof desc;
960 	/* Buffers are chained via a 16 bit next field, so
961 	 * we can have at most 2^16 of these. */
962 	if (unlikely(count > USHRT_MAX + 1)) {
963 		vq_err(vq, "Indirect buffer length too big: %d\n",
964 		       indirect->len);
965 		return -E2BIG;
966 	}
967 
968 	do {
969 		unsigned iov_count = *in_num + *out_num;
970 		if (unlikely(++found > count)) {
971 			vq_err(vq, "Loop detected: last one at %u "
972 			       "indirect size %u\n",
973 			       i, count);
974 			return -EINVAL;
975 		}
976 		if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
977 					      sizeof desc))) {
978 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
979 			       i, (size_t)indirect->addr + i * sizeof desc);
980 			return -EINVAL;
981 		}
982 		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
983 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
984 			       i, (size_t)indirect->addr + i * sizeof desc);
985 			return -EINVAL;
986 		}
987 
988 		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
989 				     iov_size - iov_count);
990 		if (unlikely(ret < 0)) {
991 			vq_err(vq, "Translation failure %d indirect idx %d\n",
992 			       ret, i);
993 			return ret;
994 		}
995 		/* If this is an input descriptor, increment that count. */
996 		if (desc.flags & VRING_DESC_F_WRITE) {
997 			*in_num += ret;
998 			if (unlikely(log)) {
999 				log[*log_num].addr = desc.addr;
1000 				log[*log_num].len = desc.len;
1001 				++*log_num;
1002 			}
1003 		} else {
1004 			/* If it's an output descriptor, they're all supposed
1005 			 * to come before any input descriptors. */
1006 			if (unlikely(*in_num)) {
1007 				vq_err(vq, "Indirect descriptor "
1008 				       "has out after in: idx %d\n", i);
1009 				return -EINVAL;
1010 			}
1011 			*out_num += ret;
1012 		}
1013 	} while ((i = next_desc(&desc)) != -1);
1014 	return 0;
1015 }
1016 
1017 /* This looks in the virtqueue for the first available buffer, and converts
1018  * it to an iovec for convenient access.  Since descriptors consist of some
1019  * number of output then some number of input descriptors, it's actually two
1020  * iovecs, but we pack them into one and note how many of each there were.
1021  *
1022  * This function returns the descriptor number found, or vq->num (which is
1023  * never a valid descriptor number) if none was found.  A negative code is
1024  * returned on error. */
1025 int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
1026 		      struct iovec iov[], unsigned int iov_size,
1027 		      unsigned int *out_num, unsigned int *in_num,
1028 		      struct vhost_log *log, unsigned int *log_num)
1029 {
1030 	struct vring_desc desc;
1031 	unsigned int i, head, found = 0;
1032 	u16 last_avail_idx;
1033 	int ret;
1034 
1035 	/* Check it isn't doing very strange things with descriptor numbers. */
1036 	last_avail_idx = vq->last_avail_idx;
1037 	if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
1038 		vq_err(vq, "Failed to access avail idx at %p\n",
1039 		       &vq->avail->idx);
1040 		return -EFAULT;
1041 	}
1042 
1043 	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
1044 		vq_err(vq, "Guest moved used index from %u to %u",
1045 		       last_avail_idx, vq->avail_idx);
1046 		return -EFAULT;
1047 	}
1048 
1049 	/* If there's nothing new since last we looked, return invalid. */
1050 	if (vq->avail_idx == last_avail_idx)
1051 		return vq->num;
1052 
1053 	/* Only get avail ring entries after they have been exposed by guest. */
1054 	smp_rmb();
1055 
1056 	/* Grab the next descriptor number they're advertising, and increment
1057 	 * the index we've seen. */
1058 	if (unlikely(get_user(head,
1059 			      &vq->avail->ring[last_avail_idx % vq->num]))) {
1060 		vq_err(vq, "Failed to read head: idx %d address %p\n",
1061 		       last_avail_idx,
1062 		       &vq->avail->ring[last_avail_idx % vq->num]);
1063 		return -EFAULT;
1064 	}
1065 
1066 	/* If their number is silly, that's an error. */
1067 	if (unlikely(head >= vq->num)) {
1068 		vq_err(vq, "Guest says index %u > %u is available",
1069 		       head, vq->num);
1070 		return -EINVAL;
1071 	}
1072 
1073 	/* When we start there are no input or output descriptors. */
1074 	*out_num = *in_num = 0;
1075 	if (unlikely(log))
1076 		*log_num = 0;
1077 
1078 	i = head;
1079 	do {
1080 		unsigned iov_count = *in_num + *out_num;
1081 		if (unlikely(i >= vq->num)) {
1082 			vq_err(vq, "Desc index is %u > %u, head = %u",
1083 			       i, vq->num, head);
1084 			return -EINVAL;
1085 		}
1086 		if (unlikely(++found > vq->num)) {
1087 			vq_err(vq, "Loop detected: last one at %u "
1088 			       "vq size %u head %u\n",
1089 			       i, vq->num, head);
1090 			return -EINVAL;
1091 		}
1092 		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
1093 		if (unlikely(ret)) {
1094 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1095 			       i, vq->desc + i);
1096 			return -EFAULT;
1097 		}
1098 		if (desc.flags & VRING_DESC_F_INDIRECT) {
1099 			ret = get_indirect(dev, vq, iov, iov_size,
1100 					   out_num, in_num,
1101 					   log, log_num, &desc);
1102 			if (unlikely(ret < 0)) {
1103 				vq_err(vq, "Failure detected "
1104 				       "in indirect descriptor at idx %d\n", i);
1105 				return ret;
1106 			}
1107 			continue;
1108 		}
1109 
1110 		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
1111 				     iov_size - iov_count);
1112 		if (unlikely(ret < 0)) {
1113 			vq_err(vq, "Translation failure %d descriptor idx %d\n",
1114 			       ret, i);
1115 			return ret;
1116 		}
1117 		if (desc.flags & VRING_DESC_F_WRITE) {
1118 			/* If this is an input descriptor,
1119 			 * increment that count. */
1120 			*in_num += ret;
1121 			if (unlikely(log)) {
1122 				log[*log_num].addr = desc.addr;
1123 				log[*log_num].len = desc.len;
1124 				++*log_num;
1125 			}
1126 		} else {
1127 			/* If it's an output descriptor, they're all supposed
1128 			 * to come before any input descriptors. */
1129 			if (unlikely(*in_num)) {
1130 				vq_err(vq, "Descriptor has out after in: "
1131 				       "idx %d\n", i);
1132 				return -EINVAL;
1133 			}
1134 			*out_num += ret;
1135 		}
1136 	} while ((i = next_desc(&desc)) != -1);
1137 
1138 	/* On success, increment avail index. */
1139 	vq->last_avail_idx++;
1140 	return head;
1141 }
1142 
1143 /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
1144 void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
1145 {
1146 	vq->last_avail_idx -= n;
1147 }
1148 
1149 /* After we've used one of their buffers, we tell them about it.  We'll then
1150  * want to notify the guest, using eventfd. */
1151 int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1152 {
1153 	struct vring_used_elem __user *used;
1154 
1155 	/* The virtqueue contains a ring of used buffers.  Get a pointer to the
1156 	 * next entry in that used ring. */
1157 	used = &vq->used->ring[vq->last_used_idx % vq->num];
1158 	if (put_user(head, &used->id)) {
1159 		vq_err(vq, "Failed to write used id");
1160 		return -EFAULT;
1161 	}
1162 	if (put_user(len, &used->len)) {
1163 		vq_err(vq, "Failed to write used len");
1164 		return -EFAULT;
1165 	}
1166 	/* Make sure buffer is written before we update index. */
1167 	smp_wmb();
1168 	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
1169 		vq_err(vq, "Failed to increment used idx");
1170 		return -EFAULT;
1171 	}
1172 	if (unlikely(vq->log_used)) {
1173 		/* Make sure data is seen before log. */
1174 		smp_wmb();
1175 		/* Log used ring entry write. */
1176 		log_write(vq->log_base,
1177 			  vq->log_addr +
1178 			   ((void __user *)used - (void __user *)vq->used),
1179 			  sizeof *used);
1180 		/* Log used index update. */
1181 		log_write(vq->log_base,
1182 			  vq->log_addr + offsetof(struct vring_used, idx),
1183 			  sizeof vq->used->idx);
1184 		if (vq->log_ctx)
1185 			eventfd_signal(vq->log_ctx, 1);
1186 	}
1187 	vq->last_used_idx++;
1188 	return 0;
1189 }
1190 
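/* Write a batch of used elements starting at last_used_idx; the caller
 * (vhost_add_used_n) splits the batch so this never wraps past the end of
 * the used ring. */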
1191 static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1192 			    struct vring_used_elem *heads,
1193 			    unsigned count)
1194 {
1195 	struct vring_used_elem __user *used;
1196 	int start;
1197 
1198 	start = vq->last_used_idx % vq->num;
1199 	used = vq->used->ring + start;
1200 	if (copy_to_user(used, heads, count * sizeof *used)) {
1201 		vq_err(vq, "Failed to write used");
1202 		return -EFAULT;
1203 	}
1204 	if (unlikely(vq->log_used)) {
1205 		/* Make sure data is seen before log. */
1206 		smp_wmb();
1207 		/* Log used ring entry write. */
1208 		log_write(vq->log_base,
1209 			  vq->log_addr +
1210 			   ((void __user *)used - (void __user *)vq->used),
1211 			  count * sizeof *used);
1212 	}
1213 	vq->last_used_idx += count;
1214 	return 0;
1215 }
1216 
1217 /* After we've used one of their buffers, we tell them about it.  We'll then
1218  * want to notify the guest, using eventfd. */
1219 int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1220 		     unsigned count)
1221 {
1222 	int start, n, r;
1223 
1224 	start = vq->last_used_idx % vq->num;
1225 	n = vq->num - start;
1226 	if (n < count) {
1227 		r = __vhost_add_used_n(vq, heads, n);
1228 		if (r < 0)
1229 			return r;
1230 		heads += n;
1231 		count -= n;
1232 	}
1233 	r = __vhost_add_used_n(vq, heads, count);
1234 
1235 	/* Make sure buffer is written before we update index. */
1236 	smp_wmb();
1237 	if (put_user(vq->last_used_idx, &vq->used->idx)) {
1238 		vq_err(vq, "Failed to increment used idx");
1239 		return -EFAULT;
1240 	}
1241 	if (unlikely(vq->log_used)) {
1242 		/* Log used index update. */
1243 		log_write(vq->log_base,
1244 			  vq->log_addr + offsetof(struct vring_used, idx),
1245 			  sizeof vq->used->idx);
1246 		if (vq->log_ctx)
1247 			eventfd_signal(vq->log_ctx, 1);
1248 	}
1249 	return r;
1250 }
1251 
1252 /* This actually signals the guest, using eventfd. */
1253 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1254 {
1255 	__u16 flags;
1256 	/* Flush out used index updates. This is paired
1257 	 * with the barrier that the Guest executes when enabling
1258 	 * interrupts. */
1259 	smp_mb();
1260 
1261 	if (get_user(flags, &vq->avail->flags)) {
1262 		vq_err(vq, "Failed to get flags");
1263 		return;
1264 	}
1265 
1266 	/* If they don't want an interrupt, don't signal, unless empty. */
1267 	if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
1268 	    (vq->avail_idx != vq->last_avail_idx ||
1269 	     !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
1270 		return;
1271 
1272 	/* Signal the Guest to tell them we used something up. */
1273 	if (vq->call_ctx)
1274 		eventfd_signal(vq->call_ctx, 1);
1275 }
1276 
1277 /* And here's the combo meal deal.  Supersize me! */
1278 void vhost_add_used_and_signal(struct vhost_dev *dev,
1279 			       struct vhost_virtqueue *vq,
1280 			       unsigned int head, int len)
1281 {
1282 	vhost_add_used(vq, head, len);
1283 	vhost_signal(dev, vq);
1284 }
1285 
1286 /* multi-buffer version of vhost_add_used_and_signal */
1287 void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1288 				 struct vhost_virtqueue *vq,
1289 				 struct vring_used_elem *heads, unsigned count)
1290 {
1291 	vhost_add_used_n(vq, heads, count);
1292 	vhost_signal(dev, vq);
1293 }
1294 
1295 /* OK, now we need to know about added descriptors. */
1296 bool vhost_enable_notify(struct vhost_virtqueue *vq)
1297 {
1298 	u16 avail_idx;
1299 	int r;
1300 	if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1301 		return false;
1302 	vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
1303 	r = put_user(vq->used_flags, &vq->used->flags);
1304 	if (r) {
1305 		vq_err(vq, "Failed to enable notification at %p: %d\n",
1306 		       &vq->used->flags, r);
1307 		return false;
1308 	}
1309 	/* They could have slipped one in as we were doing that: make
1310 	 * sure it's written, then check again. */
1311 	smp_mb();
1312 	r = get_user(avail_idx, &vq->avail->idx);
1313 	if (r) {
1314 		vq_err(vq, "Failed to check avail idx at %p: %d\n",
1315 		       &vq->avail->idx, r);
1316 		return false;
1317 	}
1318 
1319 	return avail_idx != vq->avail_idx;
1320 }
1321 
1322 /* We don't need to be notified again. */
1323 void vhost_disable_notify(struct vhost_virtqueue *vq)
1324 {
1325 	int r;
1326 	if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1327 		return;
1328 	vq->used_flags |= VRING_USED_F_NO_NOTIFY;
1329 	r = put_user(vq->used_flags, &vq->used->flags);
1330 	if (r)
1331 		vq_err(vq, "Failed to disable notification at %p: %d\n",
1332 		       &vq->used->flags, r);
1333 }
1334