/* xref: /freebsd/sys/dev/virtio/block/virtio_blk.c (revision 7d536dc855c85c15bf45f033d108a61b1f3cecc3) */
/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2
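
/*
 * For example (assuming the usual 4 KiB PAGE_SIZE), a single-page BIO_READ
 * consumes three descriptors: the header, one data segment, and the status
 * byte. A BIO_FLUSH carries no data and consumes exactly VTBLK_MIN_SEGMENTS
 * descriptors.
 */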

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{

	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Block Adapter");

	return (BUS_PROBE_DEFAULT);
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

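/*
 * Kernel dump entry point, installed as d_dump in vtblk_alloc_disk(). The
 * kernel dump path runs with the system effectively single-threaded, so
 * requests are issued synchronously through vtblk_poll_request() rather
 * than relying on virtqueue interrupts.
 */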
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if the device is read-only. Unfortunately,
	 * there does not seem to be a better way to report our
	 * read-only status to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}

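/*
 * A sketch of the arithmetic below, assuming MAXPHYS is 128 KiB and
 * PAGE_SIZE is 4 KiB: the data portion is capped at MAXPHYS / PAGE_SIZE + 1
 * = 33 segments (the extra one covers a non-page-aligned buffer), so a host
 * advertising a seg_max of 128 yields 2 + MIN(128, 33) = 35 total segments
 * per request.
 */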
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of
	 * non-page-aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */
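
	/*
	 * Continuing the sketch from vtblk_maximum_segments() (35 segments
	 * under the assumed 128 KiB MAXPHYS and 4 KiB pages), this works
	 * out to (35 - 2 - 1) * 4 KiB = 128 KiB, i.e. the full MAXPHYS.
	 */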

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}
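
	/*
	 * For example, a host reporting a physical_block_exp of 3 with
	 * 512-byte logical sectors yields a 4096-byte stripe size, letting
	 * GEOM align partitions to the physical block boundary.
	 */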

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

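/*
 * For example, with a 128-entry virtqueue and indirect descriptors
 * negotiated, 128 requests are preallocated; without indirect descriptors,
 * each request occupies at least VTBLK_MIN_SEGMENTS ring slots, so only
 * 128 / 2 = 64 are preallocated.
 */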
static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors, so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}

static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

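	/*
	 * Lay out the request in the order the VirtIO block protocol
	 * expects: the device-readable header first, then the data pages
	 * (device-readable for writes, device-writable for reads), and
	 * the device-writable status byte last:
	 *
	 *   [vbr_hdr][data segment(s) ...][vbr_ack]
	 */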
	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}
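
/*
 * For example, VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size,
 * blkcfg) expands to a feature check followed by a read of blkcfg->blk_size
 * from its offset in struct virtio_blk_config, so the field is only read
 * when the BlockSize feature was negotiated.
 */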

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

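/*
 * Issue a single request and busy-wait for its completion via
 * virtqueue_poll(). This is used by the ident and dump paths, where
 * sleeping on the virtqueue interrupt is not an option.
 */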
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

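/*
 * Virtqueue interrupt handler. The enable/recheck loop below closes the
 * race where a completion arrives between draining the ring and re-enabling
 * the interrupt: virtqueue_enable_intr() returns nonzero if entries are
 * already pending, in which case the queue is processed again.
 */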
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

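/*
 * Example usage (unit number hypothetical): when the host negotiated
 * ConfigWCE, the cache mode can be changed at runtime:
 *
 *   sysctl dev.vtblk.0.writecache_mode=1
 */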
static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
	    "I", "Write cache mode (writethrough (0) or writeback (1))");
}

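/*
 * Fetch a per-device tunable, e.g. "hw.vtblk.0.no_ident" for unit 0,
 * falling back to the driver-wide default passed in def.
 */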
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}