/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;	/* device-readable request header */
	struct bio			*vbr_bp;	/* bio this request services */
	uint8_t				 vbr_ack;	/* status byte, written by the host */

	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_DUMPING	0x0010
#define VTBLK_FLAG_BARRIER	0x0020
#define VTBLK_FLAG_WC_CONFIG	0x0040

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_startio(struct vtblk_softc *);
static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
static int	vtblk_execute_request(struct vtblk_softc *,
		    struct vtblk_request *);

static void	vtblk_vq_intr(void *);

static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_get_ident(struct vtblk_softc *);
static void	vtblk_prepare_dump(struct vtblk_softc *);
static int	vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_flush_dump(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);

static void	vtblk_finish_completed(struct vtblk_softc *);
static void	vtblk_drain_vq(struct vtblk_softc *, int);
static void	vtblk_drain(struct vtblk_softc *);

static int	vtblk_alloc_requests(struct vtblk_softc *);
static void	vtblk_free_requests(struct vtblk_softc *);
static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
static void	vtblk_enqueue_request(struct vtblk_softc *,
		    struct vtblk_request *);

static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
static void	vtblk_enqueue_ready(struct vtblk_softc *,
		    struct vtblk_request *);

static int	vtblk_request_error(struct vtblk_request *);
static void	vtblk_finish_bio(struct bio *, int);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
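/*
 * For example (hypothetical values), these may be set from loader.conf(5):
 *
 *	hw.vtblk.no_ident="1"		# skip the GET_ID request at attach
 *	hw.vtblk.writecache_mode="0"	# force writethrough
 *
 * A per-device variant, hw.vtblk.<unit>.<knob>, is consulted first; see
 * vtblk_tunable_int() below.
 */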

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2
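/*
 * A sketch of the resulting descriptor chain for a single-segment write
 * (see vtblk_execute_request() below); for a read, the data segment is
 * device-writable instead:
 *
 *	[0] vbr_hdr	- device-readable struct virtio_blk_outhdr
 *	[1] data	- device-readable data buffer
 *	[2] vbr_ack	- device-writable status byte
 */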

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Block Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;

	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));

	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	virtio_set_feature_desc(dev, vtblk_feature_desc);
	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;

	vtblk_setup_sysctl(sc);

	/* Get local copy of config. */
	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}
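	/*
	 * Note: MAXPHYS is typically 128 KB unless changed in the kernel
	 * configuration, so the check above only rejects hosts whose
	 * advertised maximum segment size is smaller than that.
	 */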

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_alloc_requests(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

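/*
 * Dumper entry point, installed as d_dump. dump(9) invokes this repeatedly
 * with length > 0 to write out memory, then once more with a NULL buffer
 * and zero offset to signal completion, which the code below treats as a
 * cache flush.
 */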
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
		vtblk_prepare_dump(sc);
		sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
	}

	if (length > 0)
		error = vtblk_write_dump(sc, virtual, offset, length);
	else if (virtual == NULL && offset == 0)
		error = vtblk_flush_dump(sc);
	else {
		error = EINVAL;
		sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
	}

	VTBLK_UNLOCK(sc);

	return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_finish_bio(bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if the device is read-only. Unfortunately, there
	 * does not seem to be a better way to report our read-only status
	 * to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_finish_bio(bp, EROFS);
		return;
	}

#ifdef INVARIANTS
	/*
	 * Prevent read/write buffers spanning too many segments from
	 * getting into the queue. This should only trip if d_maxsize
	 * was incorrectly set.
	 */
	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		int nsegs, max_nsegs;

		nsegs = sglist_count(bp->bio_data, bp->bio_bcount);
		max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS;

		KASSERT(nsegs <= max_nsegs,
		    ("%s: bio %p spanned too many segments: %d, max: %d",
		    __func__, bp, nsegs, max_nsegs));
	}
#endif

	VTBLK_LOCK(sc);
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		vtblk_finish_bio(bp, ENXIO);
	else {
		bioq_disksort(&sc->vtblk_bioq, bp);

		if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
			vtblk_startio(sc);
	}
	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

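/*
 * Worked example with assumed values: with 4 KB pages and the common
 * 128 KB MAXPHYS, MAXPHYS / PAGE_SIZE + 1 is 33, so a host advertising
 * seg_max >= 33 yields 2 + 33 = 35 segments per request (header and
 * status plus up to 33 data segments).
 */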
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of
	 * non-page-aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}
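	/*
	 * Topology example with assumed values: a host reporting 512-byte
	 * logical sectors, physical_block_exp = 3, and alignment_offset = 0
	 * yields d_stripesize = 512 * (1 << 3) = 4096 and d_stripeoffset = 0,
	 * i.e., 4 KB physical blocks starting at LBA 0.
	 */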

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	/*
	 * Retrieving the identification string must be done after the
	 * virtqueue interrupt is set up; otherwise it will hang.
	 */
	vtblk_get_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	error = 0;

	VTBLK_LOCK_ASSERT(sc);

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	vq = sc->vtblk_vq;
	enq = 0;

	VTBLK_LOCK_ASSERT(sc);

	while (!virtqueue_full(vq)) {
		/* Prefer previously deferred requests over new bios. */
		if ((req = vtblk_dequeue_ready(sc)) == NULL)
			req = vtblk_bio_request(sc);
		if (req == NULL)
			break;

		if (vtblk_execute_request(sc, req) != 0) {
			vtblk_enqueue_ready(sc, req);
			break;
		}

		enq++;
	}

	/* Notify the host once for the entire batch. */
	if (enq > 0)
		virtqueue_notify(vq);
}

static struct vtblk_request *
vtblk_bio_request(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_dequeue_request(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	return (req);
}

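/*
 * VirtIO requires all device-readable descriptors in a chain to precede
 * the device-writable ones, so requests are laid out as: header (readable),
 * data (readable for writes, writable for reads), then the ack byte
 * (writable). The readable/writable counts passed to virtqueue_enqueue()
 * below follow from that layout.
 */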
static int
vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	VTBLK_LOCK_ASSERT(sc);

	/*
	 * Wait until the ordered request completes before
	 * executing subsequent requests.
	 */
	if (sc->vtblk_req_ordered != NULL)
		return (EBUSY);

	if (bp->bio_flags & BIO_ORDERED) {
		if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
			/*
			 * This request will be executed once all
			 * the in-flight requests are completed.
			 */
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
		} else
			req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		/* A full sglist leaves no room for the ack segment below. */
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: data buffer too big bio:%p error:%d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	/* The ack byte is always device-writable. */
	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;

	sc = xsc;
	vq = sc->vtblk_vq;

again:
	VTBLK_LOCK(sc);
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		return;
	}

	vtblk_finish_completed(sc);

	if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
		vtblk_startio(sc);
	else
		wakeup(&sc->vtblk_vq);

	/*
	 * A non-zero return means completions arrived while interrupts
	 * were being re-enabled, so process them before returning.
	 */
	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		VTBLK_UNLOCK(sc);
		goto again;
	}

	VTBLK_UNLOCK(sc);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_get_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_dequeue_request(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_enqueue_request(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

static void
vtblk_prepare_dump(struct vtblk_softc *sc)
{
	device_t dev;
	struct virtqueue *vq;

	dev = sc->vtblk_dev;
	vq = sc->vtblk_vq;

	vtblk_stop(sc);

	/*
	 * Drain all requests caught in-flight in the virtqueue,
	 * skipping biodone(). When dumping, only one request is
	 * outstanding at a time, and we just poll the virtqueue
	 * for the response.
	 */
	vtblk_drain_vq(sc, 1);

	if (virtio_reinit(dev, sc->vtblk_features) != 0) {
		panic("%s: cannot reinit VirtIO block device during dump",
		    device_get_nameunit(dev));
	}

	virtqueue_disable_intr(vq);
	virtio_reinit_complete(dev);
}

static int
vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_flush_dump(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

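/*
 * Synchronously execute a single request and busy-wait for its completion
 * via virtqueue_poll(). This is the path used for dumps and the GET_ID
 * request, where sleeping on an interrupt is either unavailable (dumping)
 * or undesirable.
 */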
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_execute_request(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static void
vtblk_finish_completed(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	struct bio *bp;
	int error;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		bp = req->vbr_bp;

		if (sc->vtblk_req_ordered != NULL) {
			/* This should be the only outstanding request. */
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		error = vtblk_request_error(req);
		if (error)
			disk_err(bp, "hard error", -1, 1);

		vtblk_finish_bio(bp, error);
		vtblk_enqueue_request(sc, req);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		if (!skip_done)
			vtblk_finish_bio(req->vbr_bp, ENXIO);

		vtblk_enqueue_request(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (sc->vtblk_vq != NULL) {
		vtblk_finish_completed(sc);
		vtblk_drain_vq(sc, 0);
	}

	while ((req = vtblk_dequeue_ready(sc)) != NULL) {
		vtblk_finish_bio(req->vbr_bp, ENXIO);
		vtblk_enqueue_request(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_finish_bio(bp, ENXIO);
	}

	vtblk_free_requests(sc);
}

#ifdef INVARIANTS
static void
vtblk_request_invariants(struct vtblk_request *req)
{
	int hdr_nsegs, ack_nsegs;

	hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr));
	ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack));

	KASSERT(hdr_nsegs == 1, ("request header crossed page boundary"));
	KASSERT(ack_nsegs == 1, ("request ack crossed page boundary"));
}
#endif

static int
vtblk_alloc_requests(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;
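	/*
	 * For example (assumed queue size): a 128-entry virtqueue gives 128
	 * preallocated requests with indirect descriptors, where each request
	 * occupies a single ring slot, but only 64 without them.
	 */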

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

#ifdef INVARIANTS
		vtblk_request_invariants(req);
#endif

		sc->vtblk_request_count++;
		vtblk_enqueue_request(sc, req);
	}

	return (0);
}

static void
vtblk_free_requests(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready),
	    ("%s: ready requests left on queue", __func__));

	while ((req = vtblk_dequeue_request(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_dequeue_request(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);

	return (req);
}

static void
vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
{

	bzero(req, sizeof(struct vtblk_request));
	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_dequeue_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

static void
vtblk_finish_bio(struct bio *bp, int error)
{

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}

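/*
 * Expose the write cache mode as dev.vtblk.<unit>.writecache_mode. For
 * example, on a host that negotiated ConfigWCE, writethrough mode could be
 * selected at runtime with (hypothetical unit number):
 *
 *	sysctl dev.vtblk.0.writecache_mode=0
 */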
static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}