/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Implements the virtqueue interface, essentially as described
 * in the original VirtIO paper.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sglist.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/atomic.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/virtio_ring.h>

#include "virtio_bus_if.h"

struct virtqueue {
	device_t		 vq_dev;
	char			 vq_name[VIRTQUEUE_MAX_NAME_SZ];
	uint16_t		 vq_queue_index;
	uint16_t		 vq_nentries;
	uint32_t		 vq_flags;
#define	VIRTQUEUE_FLAG_INDIRECT	 0x0001

	int			 vq_alignment;
	int			 vq_ring_size;
	void			*vq_ring_mem;
	int			 vq_max_indirect_size;
	int			 vq_indirect_mem_size;
	virtqueue_intr_t	*vq_intrhand;
	void			*vq_intrhand_arg;

	struct vring		 vq_ring;
	uint16_t		 vq_free_cnt;
	uint16_t		 vq_queued_cnt;
	/*
	 * Head of the free chain in the descriptor table. If
	 * there are no free descriptors, this will be set to
	 * VQ_RING_DESC_CHAIN_END.
	 */
	uint16_t		 vq_desc_head_idx;
	/*
	 * Last consumed descriptor in the used table,
	 * trails vq_ring.used->idx.
	 */
	uint16_t		 vq_used_cons_idx;

	struct vq_desc_extra {
		void		  *cookie;
		struct vring_desc *indirect;
		vm_paddr_t	   indirect_paddr;
		uint16_t	   ndescs;
	} vq_descx[0];
};

/*
 * The maximum virtqueue size is 2^15. Use that value as the end of
 * descriptor chain terminator since it will never be a valid index
 * in the descriptor table. This is used to verify we are correctly
 * handling vq_free_cnt.
 */
#define VQ_RING_DESC_CHAIN_END 32768

#define VQASSERT(_vq, _exp, _msg, ...)				\
    KASSERT((_exp), ("%s: %s - "_msg, __func__, (_vq)->vq_name,	\
	##__VA_ARGS__))

#define VQ_RING_ASSERT_VALID_IDX(_vq, _idx)			\
    VQASSERT((_vq), (_idx) < (_vq)->vq_nentries,		\
	"invalid ring index: %d, max: %d", (_idx),		\
	(_vq)->vq_nentries)

#define VQ_RING_ASSERT_CHAIN_TERM(_vq)				\
    VQASSERT((_vq), (_vq)->vq_desc_head_idx ==			\
	VQ_RING_DESC_CHAIN_END, "full ring terminated "		\
	"incorrectly: head idx: %d", (_vq)->vq_desc_head_idx)

static int	virtqueue_init_indirect(struct virtqueue *vq, int);
static void	virtqueue_free_indirect(struct virtqueue *vq);
static void	virtqueue_init_indirect_list(struct virtqueue *,
		    struct vring_desc *);

static void	vq_ring_init(struct virtqueue *);
static void	vq_ring_update_avail(struct virtqueue *, uint16_t);
static uint16_t	vq_ring_enqueue_segments(struct virtqueue *,
		    struct vring_desc *, uint16_t, struct sglist *, int, int);
static int	vq_ring_use_indirect(struct virtqueue *, int);
static void	vq_ring_enqueue_indirect(struct virtqueue *, void *,
		    struct sglist *, int, int);
static void	vq_ring_notify_host(struct virtqueue *, int);
static void	vq_ring_free_chain(struct virtqueue *, uint16_t);

uint64_t
virtqueue_filter_features(uint64_t features)
{
	uint64_t mask;

	/* Use a 64-bit constant to match the width of the feature mask. */
	mask = (1ULL << VIRTIO_TRANSPORT_F_START) - 1;
	mask |= VIRTIO_RING_F_INDIRECT_DESC;

	return (features & mask);
}
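
/*
 * Example (hypothetical transport sketch; names outside the virtqueue
 * API are assumptions): a transport would filter the features read from
 * the device before offering them to the device-specific driver:
 *
 *	uint64_t host_features, features;
 *
 *	host_features = ...;	(read from device config space)
 *	features = virtqueue_filter_features(host_features);
 *
 * Afterwards, 'features' contains only the device-independent bits below
 * VIRTIO_TRANSPORT_F_START plus VIRTIO_RING_F_INDIRECT_DESC, the one
 * ring feature this file implements.
 */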

int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = malloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_NOWAIT | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
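
/*
 * Example (hypothetical transport/driver sketch; 'sc', 'my_vq_intr',
 * 'size' and 'align' are assumptions): allocating queue 0 with up to 32
 * indirect descriptors per request, using the VQ_ALLOC_INFO_INIT()
 * helper from virtqueue.h:
 *
 *	struct vq_alloc_info info;
 *	struct virtqueue *vq;
 *	int error;
 *
 *	VQ_ALLOC_INFO_INIT(&info, 32, my_vq_intr, sc, &vq,
 *	    "%s request", device_get_nameunit(dev));
 *	error = virtqueue_alloc(dev, 0, size, align, BUS_SPACE_MAXADDR,
 *	    &info, &vq);
 *
 * 'size' and 'align' would come from the transport, e.g. the queue size
 * register and ring alignment of the PCI transport.
 */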

static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = malloc(size, M_DEVBUF, M_NOWAIT);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}

static void
virtqueue_free_indirect(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	int i;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		if (dxp->indirect == NULL)
			break;

		free(dxp->indirect, M_DEVBUF);
		dxp->indirect = NULL;
		dxp->indirect_paddr = 0;
	}

	vq->vq_flags &= ~VIRTQUEUE_FLAG_INDIRECT;
	vq->vq_indirect_mem_size = 0;
}

static void
virtqueue_init_indirect_list(struct virtqueue *vq,
    struct vring_desc *indirect)
{
	int i;

	bzero(indirect, vq->vq_indirect_mem_size);

	for (i = 0; i < vq->vq_max_indirect_size - 1; i++)
		indirect[i].next = i + 1;
	indirect[i].next = VQ_RING_DESC_CHAIN_END;
}

int
virtqueue_reinit(struct virtqueue *vq, uint16_t size)
{
	struct vq_desc_extra *dxp;
	int i;

	if (vq->vq_nentries != size) {
		device_printf(vq->vq_dev,
		    "%s: '%s' changed size; old=%hu, new=%hu\n",
		    __func__, vq->vq_name, vq->vq_nentries, size);
		return (EINVAL);
	}

	/* Warn if the virtqueue was not properly cleaned up. */
	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev,
		    "%s: warning, '%s' virtqueue not empty, "
		    "leaking %d entries\n", __func__, vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	vq->vq_desc_head_idx = 0;
	vq->vq_used_cons_idx = 0;
	vq->vq_queued_cnt = 0;
	vq->vq_free_cnt = vq->vq_nentries;

	/* To be safe, reset all our allocated memory. */
	bzero(vq->vq_ring_mem, vq->vq_ring_size);
	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];
		dxp->cookie = NULL;
		dxp->ndescs = 0;
		if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
			virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	return (0);
}

void
virtqueue_free(struct virtqueue *vq)
{

	if (vq->vq_free_cnt != vq->vq_nentries) {
		device_printf(vq->vq_dev, "%s: freeing non-empty virtqueue, "
		    "leaking %d entries\n", vq->vq_name,
		    vq->vq_nentries - vq->vq_free_cnt);
	}

	if (vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT)
		virtqueue_free_indirect(vq);

	if (vq->vq_ring_mem != NULL) {
		contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
		vq->vq_ring_size = 0;
		vq->vq_ring_mem = NULL;
	}

	free(vq, M_DEVBUF);
}

vm_paddr_t
virtqueue_paddr(struct virtqueue *vq)
{

	return (vtophys(vq->vq_ring_mem));
}

int
virtqueue_size(struct virtqueue *vq)
{

	return (vq->vq_nentries);
}

int
virtqueue_empty(struct virtqueue *vq)
{

	return (vq->vq_nentries == vq->vq_free_cnt);
}

int
virtqueue_full(struct virtqueue *vq)
{

	return (vq->vq_free_cnt == 0);
}

void
virtqueue_notify(struct virtqueue *vq)
{

	vq->vq_queued_cnt = 0;
	vq_ring_notify_host(vq, 0);
}

int
virtqueue_nused(struct virtqueue *vq)
{
	uint16_t used_idx, nused;

	used_idx = vq->vq_ring.used->idx;
	if (used_idx >= vq->vq_used_cons_idx)
		nused = used_idx - vq->vq_used_cons_idx;
	else
		nused = UINT16_MAX - vq->vq_used_cons_idx +
		    used_idx + 1;
	VQASSERT(vq, nused <= vq->vq_nentries, "used more than available");

	return (nused);
}
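
/*
 * Worked example of the wrap-around branch above (values are
 * illustrative): if used->idx has wrapped around to 2 while
 * vq_used_cons_idx is still 65534, then
 * nused = 65535 - 65534 + 2 + 1 = 4, which matches the 16-bit
 * modular difference (uint16_t)(2 - 65534) = 4.
 */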

int
virtqueue_intr(struct virtqueue *vq)
{

	if (vq->vq_intrhand == NULL ||
	    vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (0);

	vq->vq_intrhand(vq->vq_intrhand_arg);

	return (1);
}

int
virtqueue_enable_intr(struct virtqueue *vq)
{

	/*
	 * Enable interrupts, making sure we get the latest
	 * index of what's already been consumed.
	 */
	vq->vq_ring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;

	mb();

	/*
	 * Additional items may have been consumed between the time we
	 * last checked and when interrupts were enabled above. Let our
	 * caller know so it processes the new entries.
	 */
	if (vq->vq_used_cons_idx != vq->vq_ring.used->idx)
		return (1);

	return (0);
}
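
/*
 * Example (hypothetical interrupt-handler sketch; 'sc' and 'process'
 * are assumptions): because virtqueue_enable_intr() can report entries
 * that raced with re-enabling, callers typically loop until it
 * returns 0:
 *
 *	again:
 *		while ((cookie = virtqueue_dequeue(vq, &len)) != NULL)
 *			process(sc, cookie, len);
 *		if (virtqueue_enable_intr(vq) != 0) {
 *			virtqueue_disable_intr(vq);
 *			goto again;
 *		}
 */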

void
virtqueue_disable_intr(struct virtqueue *vq)
{

	/*
	 * Note this is only considered a hint to the host.
	 */
	vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

int
virtqueue_enqueue(struct virtqueue *vq, void *cookie, struct sglist *sg,
    int readable, int writable)
{
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx, idx;

	needed = readable + writable;

	VQASSERT(vq, cookie != NULL, "enqueuing with no cookie");
	VQASSERT(vq, needed == sg->sg_nseg,
	    "segment count mismatch, %d, %d", needed, sg->sg_nseg);
	VQASSERT(vq,
	    needed <= vq->vq_nentries || needed <= vq->vq_max_indirect_size,
	    "too many segments to enqueue: %d, %d/%d", needed,
	    vq->vq_nentries, vq->vq_max_indirect_size);

	if (needed < 1)
		return (EINVAL);
	if (vq->vq_free_cnt == 0)
		return (ENOSPC);

	if (vq_ring_use_indirect(vq, needed)) {
		vq_ring_enqueue_indirect(vq, cookie, sg, readable, writable);
		return (0);
	} else if (vq->vq_free_cnt < needed)
		return (EMSGSIZE);

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = needed;

	idx = vq_ring_enqueue_segments(vq, vq->vq_ring.desc, head_idx,
	    sg, readable, writable);

	vq->vq_desc_head_idx = idx;
	vq->vq_free_cnt -= needed;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, idx);

	vq_ring_update_avail(vq, head_idx);

	return (0);
}
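
/*
 * Example (hypothetical driver sketch; 'req' and its fields are
 * assumptions): enqueuing a request with one readable header segment
 * and one writable status segment, then notifying the host:
 *
 *	struct sglist sg;
 *	struct sglist_seg segs[2];
 *	int error;
 *
 *	sglist_init(&sg, 2, segs);
 *	error = sglist_append(&sg, &req->hdr, sizeof(req->hdr));
 *	error |= sglist_append(&sg, &req->status, sizeof(req->status));
 *	if (error == 0)
 *		error = virtqueue_enqueue(vq, req, &sg, 1, 1);
 *	if (error == 0)
 *		virtqueue_notify(vq);
 */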

void *
virtqueue_dequeue(struct virtqueue *vq, uint32_t *len)
{
	struct vring_used_elem *uep;
	void *cookie;
	uint16_t used_idx, desc_idx;

	if (vq->vq_used_cons_idx == vq->vq_ring.used->idx)
		return (NULL);

	used_idx = vq->vq_used_cons_idx++ & (vq->vq_nentries - 1);
	uep = &vq->vq_ring.used->ring[used_idx];

	mb();
	desc_idx = (uint16_t) uep->id;
	if (len != NULL)
		*len = uep->len;

	vq_ring_free_chain(vq, desc_idx);

	cookie = vq->vq_descx[desc_idx].cookie;
	VQASSERT(vq, cookie != NULL, "no cookie for index %d", desc_idx);
	vq->vq_descx[desc_idx].cookie = NULL;

	return (cookie);
}

void *
virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
	void *cookie;

	while ((cookie = virtqueue_dequeue(vq, len)) == NULL)
		cpu_spinwait();

	return (cookie);
}

void *
virtqueue_drain(struct virtqueue *vq, int *last)
{
	void *cookie;
	int idx;

	cookie = NULL;
	idx = *last;

	while (idx < vq->vq_nentries && cookie == NULL) {
		if ((cookie = vq->vq_descx[idx].cookie) != NULL) {
			vq->vq_descx[idx].cookie = NULL;
			/* Free chain to keep free count consistent. */
			vq_ring_free_chain(vq, idx);
		}
		idx++;
	}

	*last = idx;

	return (cookie);
}
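
/*
 * Example (hypothetical detach sketch; 'my_request_free' and 'sc' are
 * assumptions): returning every outstanding cookie before the queue is
 * freed:
 *
 *	void *cookie;
 *	int last = 0;
 *
 *	while ((cookie = virtqueue_drain(vq, &last)) != NULL)
 *		my_request_free(sc, cookie);
 *	virtqueue_free(vq);
 */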

void
virtqueue_dump(struct virtqueue *vq)
{

	if (vq == NULL)
		return;

	printf("VQ: %s - size=%d; free=%d; used=%d; queued=%d; "
	    "desc_head_idx=%d; avail.idx=%d; used_cons_idx=%d; "
	    "used.idx=%d; avail.flags=0x%x; used.flags=0x%x\n",
	    vq->vq_name, vq->vq_nentries, vq->vq_free_cnt,
	    virtqueue_nused(vq), vq->vq_queued_cnt, vq->vq_desc_head_idx,
	    vq->vq_ring.avail->idx, vq->vq_used_cons_idx,
	    vq->vq_ring.used->idx, vq->vq_ring.avail->flags,
	    vq->vq_ring.used->flags);
}

static void
vq_ring_init(struct virtqueue *vq)
{
	struct vring *vr;
	char *ring_mem;
	int i, size;

	ring_mem = vq->vq_ring_mem;
	size = vq->vq_nentries;
	vr = &vq->vq_ring;

	vring_init(vr, size, ring_mem, vq->vq_alignment);

	for (i = 0; i < size - 1; i++)
		vr->desc[i].next = i + 1;
	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
}

static void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
	uint16_t avail_idx;

	/*
	 * Place the head of the descriptor chain into the next slot and make
	 * it usable to the host. The chain is made available now rather than
	 * deferring to virtqueue_notify() in the hopes that if the host is
	 * currently running on another CPU, we can keep it processing the new
	 * descriptor.
	 */
	avail_idx = vq->vq_ring.avail->idx & (vq->vq_nentries - 1);
	vq->vq_ring.avail->ring[avail_idx] = desc_idx;

	mb();
	vq->vq_ring.avail->idx++;

	/* Keep pending count until virtqueue_notify() for debugging. */
	vq->vq_queued_cnt++;
}

static uint16_t
vq_ring_enqueue_segments(struct virtqueue *vq, struct vring_desc *desc,
    uint16_t head_idx, struct sglist *sg, int readable, int writable)
{
	struct sglist_seg *seg;
	struct vring_desc *dp;
	int i, needed;
	uint16_t idx;

	needed = readable + writable;

	for (i = 0, idx = head_idx, seg = sg->sg_segs;
	     i < needed;
	     i++, idx = dp->next, seg++) {
		VQASSERT(vq, idx != VQ_RING_DESC_CHAIN_END,
		    "premature end of free desc chain");

		dp = &desc[idx];
		dp->addr = seg->ss_paddr;
		dp->len = seg->ss_len;
		dp->flags = 0;

		if (i < needed - 1)
			dp->flags |= VRING_DESC_F_NEXT;
		if (i >= readable)
			dp->flags |= VRING_DESC_F_WRITE;
	}

	return (idx);
}

static int
vq_ring_use_indirect(struct virtqueue *vq, int needed)
{

	if ((vq->vq_flags & VIRTQUEUE_FLAG_INDIRECT) == 0)
		return (0);

	if (vq->vq_max_indirect_size < needed)
		return (0);

	if (needed < 2)
		return (0);

	return (1);
}

static void
vq_ring_enqueue_indirect(struct virtqueue *vq, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;
	int needed;
	uint16_t head_idx;

	needed = readable + writable;
	VQASSERT(vq, needed <= vq->vq_max_indirect_size,
	    "enqueuing too many indirect descriptors");

	head_idx = vq->vq_desc_head_idx;
	VQ_RING_ASSERT_VALID_IDX(vq, head_idx);
	dp = &vq->vq_ring.desc[head_idx];
	dxp = &vq->vq_descx[head_idx];

	VQASSERT(vq, dxp->cookie == NULL,
	    "cookie already exists for index %d", head_idx);
	dxp->cookie = cookie;
	dxp->ndescs = 1;

	dp->addr = dxp->indirect_paddr;
	dp->len = needed * sizeof(struct vring_desc);
	dp->flags = VRING_DESC_F_INDIRECT;

	vq_ring_enqueue_segments(vq, dxp->indirect, 0,
	    sg, readable, writable);

	vq->vq_desc_head_idx = dp->next;
	vq->vq_free_cnt--;
	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);
	else
		VQ_RING_ASSERT_VALID_IDX(vq, vq->vq_desc_head_idx);

	vq_ring_update_avail(vq, head_idx);
}

static void
vq_ring_notify_host(struct virtqueue *vq, int force)
{

	mb();

	if (force ||
	    (vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY) == 0)
		VIRTIO_BUS_NOTIFY_VQ(vq->vq_dev, vq->vq_queue_index);
}

static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
	struct vring_desc *dp;
	struct vq_desc_extra *dxp;

	VQ_RING_ASSERT_VALID_IDX(vq, desc_idx);
	dp = &vq->vq_ring.desc[desc_idx];
	dxp = &vq->vq_descx[desc_idx];

	if (vq->vq_free_cnt == 0)
		VQ_RING_ASSERT_CHAIN_TERM(vq);

	vq->vq_free_cnt += dxp->ndescs;
	dxp->ndescs--;

	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
		while (dp->flags & VRING_DESC_F_NEXT) {
			VQ_RING_ASSERT_VALID_IDX(vq, dp->next);
			dp = &vq->vq_ring.desc[dp->next];
			dxp->ndescs--;
		}
	}
	VQASSERT(vq, dxp->ndescs == 0, "failed to free entire desc chain");

	/*
	 * We must append the existing free chain, if any, to the end of
	 * the newly freed chain. If the virtqueue was completely used,
	 * then the head would be VQ_RING_DESC_CHAIN_END (asserted above).
	 */
	dp->next = vq->vq_desc_head_idx;
	vq->vq_desc_head_idx = desc_idx;
}