/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

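/*
 * Each request carries one bio, bracketed in the descriptor chain by the
 * virtio block header and the status byte the host writes on completion.
 */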
struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
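/*
 * hw.vtblk.no_ident: skip the VIRTIO_BLK_T_GET_ID request at attach.
 * hw.vtblk.writecache_mode: 0 for writethrough, 1 for writeback, or
 * -1 to use the mode reported by the device.
 */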
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Block Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size because adjacent
	 * segments are coalesced. For now, just require the host's
	 * maximum segment size to be at least the maximum supported
	 * transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

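/*
 * Disk dump entry point: quiesce any in-flight requests, then write the
 * dump data with polled requests. A call with a NULL virtual address and
 * zero offset signals the end of the dump and triggers a final flush.
 */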
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}

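/*
 * GEOM strategy entry point: queue the bio and attempt to start I/O.
 * Writes are failed immediately if the device is read-only.
 */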
static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if the device is read-only. Unfortunately, there
	 * does not seem to be a better way to report our read-only status
	 * to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

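/*
 * Compute the maximum number of segments per request: VTBLK_MIN_SEGMENTS
 * for the header and status byte, plus the data segments, bounded by the
 * negotiated seg_max (a single segment without VIRTIO_BLK_F_SEG_MAX) and
 * the indirect descriptor limit.
 */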
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

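/* Allocate the single request virtqueue used for all block I/O. */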
static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of
	 * non-page-aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors, so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}

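/*
 * Enqueue a request into the virtqueue. The descriptor chain consists of
 * the device-readable header, the bio's data segments (device-writable
 * for reads), and finally the device-writable status byte.
 */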
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

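/*
 * Collect completed requests from the virtqueue, saving each bio's error
 * status and deferring biodone() to vtblk_done_completed(), which must be
 * called without the lock held.
 */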
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

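/*
 * Fail all in-flight and queued bios with ENXIO and free the preallocated
 * requests; called when the device is detached.
 */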
static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}

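/*
 * Read a device config field only if the corresponding feature was
 * negotiated.
 */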
#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

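/*
 * Execute a single request synchronously, busy-waiting on the virtqueue
 * for completion instead of using the interrupt; the virtqueue must be
 * empty beforehand.
 */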
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

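/*
 * Virtqueue interrupt handler: collect completions and restart I/O until
 * the interrupt can be re-enabled without racing a new completion, then
 * biodone the collected bios after dropping the lock.
 */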
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	bzero(&buf, sizeof(struct bio));

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}