xref: /freebsd/sys/dev/virtio/block/virtio_blk.c (revision dcf58f92e2c19a32fc171f763698e711c719badc)
1 /*-
2  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 /* Driver for VirtIO block devices. */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/bio.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/sglist.h>
39 #include <sys/sysctl.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/queue.h>
43 
44 #include <geom/geom_disk.h>
45 
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <sys/bus.h>
49 #include <sys/rman.h>
50 
51 #include <dev/virtio/virtio.h>
52 #include <dev/virtio/virtqueue.h>
53 #include <dev/virtio/block/virtio_blk.h>
54 
55 #include "virtio_if.h"
56 
57 struct vtblk_request {
58 	struct virtio_blk_outhdr	 vbr_hdr;
59 	struct bio			*vbr_bp;
60 	uint8_t				 vbr_ack;
61 
62 	TAILQ_ENTRY(vtblk_request)	 vbr_link;
63 };
64 
65 enum vtblk_cache_mode {
66 	VTBLK_CACHE_WRITETHROUGH,
67 	VTBLK_CACHE_WRITEBACK,
68 	VTBLK_CACHE_MAX
69 };
70 
71 struct vtblk_softc {
72 	device_t		 vtblk_dev;
73 	struct mtx		 vtblk_mtx;
74 	uint64_t		 vtblk_features;
75 	uint32_t		 vtblk_flags;
76 #define VTBLK_FLAG_INDIRECT	0x0001
77 #define VTBLK_FLAG_READONLY	0x0002
78 #define VTBLK_FLAG_DETACH	0x0004
79 #define VTBLK_FLAG_SUSPEND	0x0008
80 #define VTBLK_FLAG_DUMPING	0x0010
81 #define VTBLK_FLAG_BARRIER	0x0020
82 #define VTBLK_FLAG_WC_CONFIG	0x0040
83 
84 	struct virtqueue	*vtblk_vq;
85 	struct sglist		*vtblk_sglist;
86 	struct disk		*vtblk_disk;
87 
88 	struct bio_queue_head	 vtblk_bioq;
89 	TAILQ_HEAD(, vtblk_request)
90 				 vtblk_req_free;
91 	TAILQ_HEAD(, vtblk_request)
92 				 vtblk_req_ready;
93 	struct vtblk_request	*vtblk_req_ordered;
94 
95 	int			 vtblk_max_nsegs;
96 	int			 vtblk_request_count;
97 	enum vtblk_cache_mode	 vtblk_write_cache;
98 
99 	struct vtblk_request	 vtblk_dump_request;
100 };
101 
102 static struct virtio_feature_desc vtblk_feature_desc[] = {
103 	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
104 	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
105 	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
106 	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
107 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
108 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
109 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
110 	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
111 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
112 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
113 
114 	{ 0, NULL }
115 };
116 
117 static int	vtblk_modevent(module_t, int, void *);
118 
119 static int	vtblk_probe(device_t);
120 static int	vtblk_attach(device_t);
121 static int	vtblk_detach(device_t);
122 static int	vtblk_suspend(device_t);
123 static int	vtblk_resume(device_t);
124 static int	vtblk_shutdown(device_t);
125 static int	vtblk_config_change(device_t);
126 
127 static int	vtblk_open(struct disk *);
128 static int	vtblk_close(struct disk *);
129 static int	vtblk_ioctl(struct disk *, u_long, void *, int,
130 		    struct thread *);
131 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
132 static void	vtblk_strategy(struct bio *);
133 
134 static void	vtblk_negotiate_features(struct vtblk_softc *);
135 static int	vtblk_maximum_segments(struct vtblk_softc *,
136 		    struct virtio_blk_config *);
137 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
138 static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
139 static void	vtblk_set_write_cache(struct vtblk_softc *, int);
140 static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
141 		    struct virtio_blk_config *);
142 static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
143 static void	vtblk_alloc_disk(struct vtblk_softc *,
144 		    struct virtio_blk_config *);
145 static void	vtblk_create_disk(struct vtblk_softc *);
146 
147 static int	vtblk_quiesce(struct vtblk_softc *);
148 static void	vtblk_startio(struct vtblk_softc *);
149 static struct vtblk_request * vtblk_bio_request(struct vtblk_softc *);
150 static int	vtblk_execute_request(struct vtblk_softc *,
151 		    struct vtblk_request *);
152 
153 static void	vtblk_vq_intr(void *);
154 
155 static void	vtblk_stop(struct vtblk_softc *);
156 
157 static void	vtblk_read_config(struct vtblk_softc *,
158 		    struct virtio_blk_config *);
159 static void	vtblk_get_ident(struct vtblk_softc *);
160 static void	vtblk_prepare_dump(struct vtblk_softc *);
161 static int	vtblk_write_dump(struct vtblk_softc *, void *, off_t, size_t);
162 static int	vtblk_flush_dump(struct vtblk_softc *);
163 static int	vtblk_poll_request(struct vtblk_softc *,
164 		    struct vtblk_request *);
165 
166 static void	vtblk_finish_completed(struct vtblk_softc *);
167 static void	vtblk_drain_vq(struct vtblk_softc *, int);
168 static void	vtblk_drain(struct vtblk_softc *);
169 
170 static int	vtblk_alloc_requests(struct vtblk_softc *);
171 static void	vtblk_free_requests(struct vtblk_softc *);
172 static struct vtblk_request * vtblk_dequeue_request(struct vtblk_softc *);
173 static void	vtblk_enqueue_request(struct vtblk_softc *,
174 		    struct vtblk_request *);
175 
176 static struct vtblk_request * vtblk_dequeue_ready(struct vtblk_softc *);
177 static void	vtblk_enqueue_ready(struct vtblk_softc *,
178 		    struct vtblk_request *);
179 
180 static int	vtblk_request_error(struct vtblk_request *);
181 static void	vtblk_finish_bio(struct bio *, int);
182 
183 static void	vtblk_setup_sysctl(struct vtblk_softc *);
184 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
185 
186 /* Tunables. */
187 static int vtblk_no_ident = 0;
188 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
189 static int vtblk_writecache_mode = -1;
190 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
191 
192 /* Features desired/implemented by this driver. */
193 #define VTBLK_FEATURES \
194     (VIRTIO_BLK_F_BARRIER		| \
195      VIRTIO_BLK_F_SIZE_MAX		| \
196      VIRTIO_BLK_F_SEG_MAX		| \
197      VIRTIO_BLK_F_GEOMETRY		| \
198      VIRTIO_BLK_F_RO			| \
199      VIRTIO_BLK_F_BLK_SIZE		| \
200      VIRTIO_BLK_F_WCE			| \
201      VIRTIO_BLK_F_CONFIG_WCE		| \
202      VIRTIO_RING_F_INDIRECT_DESC)
203 
204 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
205 #define VTBLK_LOCK_INIT(_sc, _name) \
206 				mtx_init(VTBLK_MTX((_sc)), (_name), \
207 				    "VirtIO Block Lock", MTX_DEF)
208 #define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
209 #define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
210 #define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
211 #define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
212 #define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
213 				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
214 
215 #define VTBLK_DISK_NAME		"vtbd"
216 #define VTBLK_QUIESCE_TIMEOUT	(30 * hz)
217 
218 /*
219  * Each block request uses at least two segments - one for the header
220  * and one for the status.
221  */
222 #define VTBLK_MIN_SEGMENTS	2
223 
224 static device_method_t vtblk_methods[] = {
225 	/* Device methods. */
226 	DEVMETHOD(device_probe,		vtblk_probe),
227 	DEVMETHOD(device_attach,	vtblk_attach),
228 	DEVMETHOD(device_detach,	vtblk_detach),
229 	DEVMETHOD(device_suspend,	vtblk_suspend),
230 	DEVMETHOD(device_resume,	vtblk_resume),
231 	DEVMETHOD(device_shutdown,	vtblk_shutdown),
232 
233 	/* VirtIO methods. */
234 	DEVMETHOD(virtio_config_change,	vtblk_config_change),
235 
236 	DEVMETHOD_END
237 };
238 
239 static driver_t vtblk_driver = {
240 	"vtblk",
241 	vtblk_methods,
242 	sizeof(struct vtblk_softc)
243 };
244 static devclass_t vtblk_devclass;
245 
246 DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
247     vtblk_modevent, 0);
248 DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
249     vtblk_modevent, 0);
250 MODULE_VERSION(virtio_blk, 1);
251 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
252 
253 static int
254 vtblk_modevent(module_t mod, int type, void *unused)
255 {
256 	int error;
257 
258 	error = 0;
259 
260 	switch (type) {
261 	case MOD_LOAD:
262 	case MOD_QUIESCE:
263 	case MOD_UNLOAD:
264 	case MOD_SHUTDOWN:
265 		break;
266 	default:
267 		error = EOPNOTSUPP;
268 		break;
269 	}
270 
271 	return (error);
272 }
273 
274 static int
275 vtblk_probe(device_t dev)
276 {
277 
278 	if (virtio_get_device_type(dev) != VIRTIO_ID_BLOCK)
279 		return (ENXIO);
280 
281 	device_set_desc(dev, "VirtIO Block Adapter");
282 
283 	return (BUS_PROBE_DEFAULT);
284 }
285 
286 static int
287 vtblk_attach(device_t dev)
288 {
289 	struct vtblk_softc *sc;
290 	struct virtio_blk_config blkcfg;
291 	int error;
292 
293 	sc = device_get_softc(dev);
294 	sc->vtblk_dev = dev;
295 
296 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
297 
298 	bioq_init(&sc->vtblk_bioq);
299 	TAILQ_INIT(&sc->vtblk_req_free);
300 	TAILQ_INIT(&sc->vtblk_req_ready);
301 
302 	virtio_set_feature_desc(dev, vtblk_feature_desc);
303 	vtblk_negotiate_features(sc);
304 
305 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
306 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
307 	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
308 		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
309 	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
310 		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
311 	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
312 		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
313 
314 	vtblk_setup_sysctl(sc);
315 
316 	/* Get local copy of config. */
317 	vtblk_read_config(sc, &blkcfg);
318 
319 	/*
320 	 * With the current sglist(9) implementation, it is not easy
321 	 * for us to support a maximum segment size as adjacent
322 	 * segments are coalesced. For now, just make sure it's larger
323 	 * than the maximum supported transfer size.
324 	 */
325 	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
326 		if (blkcfg.size_max < MAXPHYS) {
327 			error = ENOTSUP;
328 			device_printf(dev, "host requires unsupported "
329 			    "maximum segment size feature\n");
330 			goto fail;
331 		}
332 	}
333 
334 	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
335 	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
336 		error = EINVAL;
337 		device_printf(dev, "fewer than minimum number of segments "
338 		    "allowed: %d\n", sc->vtblk_max_nsegs);
339 		goto fail;
340 	}
341 
342 	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
343 	if (sc->vtblk_sglist == NULL) {
344 		error = ENOMEM;
345 		device_printf(dev, "cannot allocate sglist\n");
346 		goto fail;
347 	}
348 
349 	error = vtblk_alloc_virtqueue(sc);
350 	if (error) {
351 		device_printf(dev, "cannot allocate virtqueue\n");
352 		goto fail;
353 	}
354 
355 	error = vtblk_alloc_requests(sc);
356 	if (error) {
357 		device_printf(dev, "cannot preallocate requests\n");
358 		goto fail;
359 	}
360 
361 	vtblk_alloc_disk(sc, &blkcfg);
362 
363 	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
364 	if (error) {
365 		device_printf(dev, "cannot setup virtqueue interrupt\n");
366 		goto fail;
367 	}
368 
369 	vtblk_create_disk(sc);
370 
371 	virtqueue_enable_intr(sc->vtblk_vq);
372 
373 fail:
374 	if (error)
375 		vtblk_detach(dev);
376 
377 	return (error);
378 }
379 
380 static int
381 vtblk_detach(device_t dev)
382 {
383 	struct vtblk_softc *sc;
384 
385 	sc = device_get_softc(dev);
386 
387 	VTBLK_LOCK(sc);
388 	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
389 	if (device_is_attached(dev))
390 		vtblk_stop(sc);
391 	VTBLK_UNLOCK(sc);
392 
393 	vtblk_drain(sc);
394 
395 	if (sc->vtblk_disk != NULL) {
396 		disk_destroy(sc->vtblk_disk);
397 		sc->vtblk_disk = NULL;
398 	}
399 
400 	if (sc->vtblk_sglist != NULL) {
401 		sglist_free(sc->vtblk_sglist);
402 		sc->vtblk_sglist = NULL;
403 	}
404 
405 	VTBLK_LOCK_DESTROY(sc);
406 
407 	return (0);
408 }
409 
410 static int
411 vtblk_suspend(device_t dev)
412 {
413 	struct vtblk_softc *sc;
414 	int error;
415 
416 	sc = device_get_softc(dev);
417 
418 	VTBLK_LOCK(sc);
419 	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
420 	/* XXX BMV: virtio_stop(), etc needed here? */
421 	error = vtblk_quiesce(sc);
422 	if (error)
423 		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
424 	VTBLK_UNLOCK(sc);
425 
426 	return (error);
427 }
428 
429 static int
430 vtblk_resume(device_t dev)
431 {
432 	struct vtblk_softc *sc;
433 
434 	sc = device_get_softc(dev);
435 
436 	VTBLK_LOCK(sc);
437 	/* XXX BMV: virtio_reinit(), etc needed here? */
438 	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
439 	vtblk_startio(sc);
440 	VTBLK_UNLOCK(sc);
441 
442 	return (0);
443 }
444 
445 static int
446 vtblk_shutdown(device_t dev)
447 {
448 
449 	return (0);
450 }
451 
452 static int
453 vtblk_config_change(device_t dev)
454 {
455 	struct vtblk_softc *sc;
456 	struct virtio_blk_config blkcfg;
457 	uint64_t capacity;
458 
459 	sc = device_get_softc(dev);
460 
461 	vtblk_read_config(sc, &blkcfg);
462 
463 	/* Capacity is always in 512-byte units. */
464 	capacity = blkcfg.capacity * 512;
465 
466 	if (sc->vtblk_disk->d_mediasize != capacity)
467 		vtblk_resize_disk(sc, capacity);
468 
469 	return (0);
470 }
471 
472 static int
473 vtblk_open(struct disk *dp)
474 {
475 	struct vtblk_softc *sc;
476 
477 	if ((sc = dp->d_drv1) == NULL)
478 		return (ENXIO);
479 
480 	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
481 }
482 
483 static int
484 vtblk_close(struct disk *dp)
485 {
486 	struct vtblk_softc *sc;
487 
488 	if ((sc = dp->d_drv1) == NULL)
489 		return (ENXIO);
490 
491 	return (0);
492 }
493 
494 static int
495 vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
496     struct thread *td)
497 {
498 	struct vtblk_softc *sc;
499 
500 	if ((sc = dp->d_drv1) == NULL)
501 		return (ENXIO);
502 
503 	return (ENOTTY);
504 }
505 
506 static int
507 vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
508     size_t length)
509 {
510 	struct disk *dp;
511 	struct vtblk_softc *sc;
512 	int error;
513 
514 	dp = arg;
515 
516 	if ((sc = dp->d_drv1) == NULL)
517 		return (ENXIO);
518 
519 	VTBLK_LOCK(sc);
520 
521 	if ((sc->vtblk_flags & VTBLK_FLAG_DUMPING) == 0) {
522 		vtblk_prepare_dump(sc);
523 		sc->vtblk_flags |= VTBLK_FLAG_DUMPING;
524 	}
525 
526 	if (length > 0)
527 		error = vtblk_write_dump(sc, virtual, offset, length);
528 	else if (virtual == NULL && offset == 0)
529 		error = vtblk_flush_dump(sc);
530 	else {
531 		error = EINVAL;
532 		sc->vtblk_flags &= ~VTBLK_FLAG_DUMPING;
533 	}
534 
535 	VTBLK_UNLOCK(sc);
536 
537 	return (error);
538 }
539 
540 static void
541 vtblk_strategy(struct bio *bp)
542 {
543 	struct vtblk_softc *sc;
544 
545 	if ((sc = bp->bio_disk->d_drv1) == NULL) {
546 		vtblk_finish_bio(bp, EINVAL);
547 		return;
548 	}
549 
550 	/*
551 	 * Fail any write if RO. Unfortunately, there does not seem to
552 	 * be a better way to report our readonly'ness to GEOM above.
553 	 */
554 	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
555 	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
556 		vtblk_finish_bio(bp, EROFS);
557 		return;
558 	}
559 
560 #ifdef INVARIANTS
561 	/*
562 	 * Prevent read/write buffers spanning too many segments from
563 	 * getting into the queue. This should only trip if d_maxsize
564 	 * was incorrectly set.
565 	 */
566 	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
567 		int nsegs, max_nsegs;
568 
569 		nsegs = sglist_count(bp->bio_data, bp->bio_bcount);
570 		max_nsegs = sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS;
571 
572 		KASSERT(nsegs <= max_nsegs,
573 		    ("%s: bio %p spanned too many segments: %d, max: %d",
574 		    __func__, bp, nsegs, max_nsegs));
575 	}
576 #endif
577 
578 	VTBLK_LOCK(sc);
579 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
580 		vtblk_finish_bio(bp, ENXIO);
581 	else {
582 		bioq_insert_tail(&sc->vtblk_bioq, bp);
583 
584 		if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
585 			vtblk_startio(sc);
586 	}
587 	VTBLK_UNLOCK(sc);
588 }
589 
590 static void
591 vtblk_negotiate_features(struct vtblk_softc *sc)
592 {
593 	device_t dev;
594 	uint64_t features;
595 
596 	dev = sc->vtblk_dev;
597 	features = VTBLK_FEATURES;
598 
599 	sc->vtblk_features = virtio_negotiate_features(dev, features);
600 }
601 
602 static int
603 vtblk_maximum_segments(struct vtblk_softc *sc,
604     struct virtio_blk_config *blkcfg)
605 {
606 	device_t dev;
607 	int nsegs;
608 
609 	dev = sc->vtblk_dev;
610 	nsegs = VTBLK_MIN_SEGMENTS;
611 
612 	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
613 		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
614 		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
615 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
616 	} else
617 		nsegs += 1;
618 
619 	return (nsegs);
620 }
621 
622 static int
623 vtblk_alloc_virtqueue(struct vtblk_softc *sc)
624 {
625 	device_t dev;
626 	struct vq_alloc_info vq_info;
627 
628 	dev = sc->vtblk_dev;
629 
630 	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
631 	    vtblk_vq_intr, sc, &sc->vtblk_vq,
632 	    "%s request", device_get_nameunit(dev));
633 
634 	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
635 }
636 
637 static void
638 vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
639 {
640 	device_t dev;
641 	struct disk *dp;
642 	int error;
643 
644 	dev = sc->vtblk_dev;
645 	dp = sc->vtblk_disk;
646 
647 	dp->d_mediasize = new_capacity;
648 	if (bootverbose) {
649 		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
650 		    (uintmax_t) dp->d_mediasize >> 20,
651 		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
652 		    dp->d_sectorsize);
653 	}
654 
655 	error = disk_resize(dp, M_NOWAIT);
656 	if (error) {
657 		device_printf(dev,
658 		    "disk_resize(9) failed, error: %d\n", error);
659 	}
660 }
661 
662 static void
663 vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
664 {
665 
666 	/* Set either writeback (1) or writethrough (0) mode. */
667 	virtio_write_dev_config_1(sc->vtblk_dev,
668 	    offsetof(struct virtio_blk_config, writeback), wc);
669 }
670 
671 static int
672 vtblk_write_cache_enabled(struct vtblk_softc *sc,
673     struct virtio_blk_config *blkcfg)
674 {
675 	int wc;
676 
677 	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
678 		wc = vtblk_tunable_int(sc, "writecache_mode",
679 		    vtblk_writecache_mode);
680 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
681 			vtblk_set_write_cache(sc, wc);
682 		else
683 			wc = blkcfg->writeback;
684 	} else
685 		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);
686 
687 	return (wc);
688 }
689 
690 static int
691 vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
692 {
693 	struct vtblk_softc *sc;
694 	int wc, error;
695 
696 	sc = oidp->oid_arg1;
697 	wc = sc->vtblk_write_cache;
698 
699 	error = sysctl_handle_int(oidp, &wc, 0, req);
700 	if (error || req->newptr == NULL)
701 		return (error);
702 	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
703 		return (EPERM);
704 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
705 		return (EINVAL);
706 
707 	VTBLK_LOCK(sc);
708 	sc->vtblk_write_cache = wc;
709 	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
710 	VTBLK_UNLOCK(sc);
711 
712 	return (0);
713 }
714 
715 static void
716 vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
717 {
718 	device_t dev;
719 	struct disk *dp;
720 
721 	dev = sc->vtblk_dev;
722 
723 	sc->vtblk_disk = dp = disk_alloc();
724 	dp->d_open = vtblk_open;
725 	dp->d_close = vtblk_close;
726 	dp->d_ioctl = vtblk_ioctl;
727 	dp->d_strategy = vtblk_strategy;
728 	dp->d_name = VTBLK_DISK_NAME;
729 	dp->d_unit = device_get_unit(dev);
730 	dp->d_drv1 = sc;
731 	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO;
732 	dp->d_hba_vendor = virtio_get_vendor(dev);
733 	dp->d_hba_device = virtio_get_device(dev);
734 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
735 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
736 
737 	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
738 		dp->d_dump = vtblk_dump;
739 
740 	/* Capacity is always in 512-byte units. */
741 	dp->d_mediasize = blkcfg->capacity * 512;
742 
743 	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
744 		dp->d_sectorsize = blkcfg->blk_size;
745 	else
746 		dp->d_sectorsize = 512;
747 
748 	/*
749 	 * The VirtIO maximum I/O size is given in terms of segments.
750 	 * However, FreeBSD limits I/O size by logical buffer size, not
751 	 * by physically contiguous pages. Therefore, we have to assume
752 	 * no pages are contiguous. This may impose an artificially low
753 	 * maximum I/O size. But in practice, since QEMU advertises 128
754 	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
755 	 * which is typically greater than MAXPHYS. Eventually we should
756 	 * just advertise MAXPHYS and split buffers that are too big.
757 	 *
758 	 * Note we must subtract one additional segment in case of non
759 	 * page aligned buffers.
760 	 */
761 	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
762 	    PAGE_SIZE;
763 	if (dp->d_maxsize < PAGE_SIZE)
764 		dp->d_maxsize = PAGE_SIZE; /* XXX */
765 
766 	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
767 		dp->d_fwsectors = blkcfg->geometry.sectors;
768 		dp->d_fwheads = blkcfg->geometry.heads;
769 	}
770 
771 	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY)) {
772 		dp->d_stripesize = dp->d_sectorsize *
773 		    (1 << blkcfg->topology.physical_block_exp);
774 		dp->d_stripeoffset = (dp->d_stripesize -
775 		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
776 		    dp->d_stripesize;
777 	}
778 
779 	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
780 		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
781 	else
782 		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
783 }
784 
785 static void
786 vtblk_create_disk(struct vtblk_softc *sc)
787 {
788 	struct disk *dp;
789 
790 	dp = sc->vtblk_disk;
791 
792 	/*
793 	 * Retrieving the identification string must be done after
794 	 * the virtqueue interrupt is setup otherwise it will hang.
795 	 */
796 	vtblk_get_ident(sc);
797 
798 	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
799 	    (uintmax_t) dp->d_mediasize >> 20,
800 	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
801 	    dp->d_sectorsize);
802 
803 	disk_create(dp, DISK_VERSION);
804 }
805 
806 static int
807 vtblk_quiesce(struct vtblk_softc *sc)
808 {
809 	int error;
810 
811 	error = 0;
812 
813 	VTBLK_LOCK_ASSERT(sc);
814 
815 	while (!virtqueue_empty(sc->vtblk_vq)) {
816 		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
817 		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
818 			error = EBUSY;
819 			break;
820 		}
821 	}
822 
823 	return (error);
824 }
825 
826 static void
827 vtblk_startio(struct vtblk_softc *sc)
828 {
829 	struct virtqueue *vq;
830 	struct vtblk_request *req;
831 	int enq;
832 
833 	vq = sc->vtblk_vq;
834 	enq = 0;
835 
836 	VTBLK_LOCK_ASSERT(sc);
837 
838 	while (!virtqueue_full(vq)) {
839 		if ((req = vtblk_dequeue_ready(sc)) == NULL)
840 			req = vtblk_bio_request(sc);
841 		if (req == NULL)
842 			break;
843 
844 		if (vtblk_execute_request(sc, req) != 0) {
845 			vtblk_enqueue_ready(sc, req);
846 			break;
847 		}
848 
849 		enq++;
850 	}
851 
852 	if (enq > 0)
853 		virtqueue_notify(vq);
854 }
855 
856 static struct vtblk_request *
857 vtblk_bio_request(struct vtblk_softc *sc)
858 {
859 	struct bio_queue_head *bioq;
860 	struct vtblk_request *req;
861 	struct bio *bp;
862 
863 	bioq = &sc->vtblk_bioq;
864 
865 	if (bioq_first(bioq) == NULL)
866 		return (NULL);
867 
868 	req = vtblk_dequeue_request(sc);
869 	if (req == NULL)
870 		return (NULL);
871 
872 	bp = bioq_takefirst(bioq);
873 	req->vbr_bp = bp;
874 	req->vbr_ack = -1;
875 	req->vbr_hdr.ioprio = 1;
876 
877 	switch (bp->bio_cmd) {
878 	case BIO_FLUSH:
879 		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
880 		break;
881 	case BIO_READ:
882 		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
883 		req->vbr_hdr.sector = bp->bio_offset / 512;
884 		break;
885 	case BIO_WRITE:
886 		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
887 		req->vbr_hdr.sector = bp->bio_offset / 512;
888 		break;
889 	default:
890 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
891 	}
892 
893 	return (req);
894 }
895 
896 static int
897 vtblk_execute_request(struct vtblk_softc *sc, struct vtblk_request *req)
898 {
899 	struct virtqueue *vq;
900 	struct sglist *sg;
901 	struct bio *bp;
902 	int ordered, readable, writable, error;
903 
904 	vq = sc->vtblk_vq;
905 	sg = sc->vtblk_sglist;
906 	bp = req->vbr_bp;
907 	ordered = 0;
908 	writable = 0;
909 
910 	VTBLK_LOCK_ASSERT(sc);
911 
912 	/*
913 	 * Wait until the ordered request completes before
914 	 * executing subsequent requests.
915 	 */
916 	if (sc->vtblk_req_ordered != NULL)
917 		return (EBUSY);
918 
919 	if (bp->bio_flags & BIO_ORDERED) {
920 		if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
921 			/*
922 			 * This request will be executed once all
923 			 * the in-flight requests are completed.
924 			 */
925 			if (!virtqueue_empty(vq))
926 				return (EBUSY);
927 			ordered = 1;
928 		} else
929 			req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;
930 	}
931 
932 	sglist_reset(sg);
933 	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));
934 
935 	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
936 		error = sglist_append_bio(sg, bp);
937 		if (error || sg->sg_nseg == sg->sg_maxseg) {
938 			panic("%s: data buffer too big bio:%p error:%d",
939 			    __func__, bp, error);
940 		}
941 
942 		/* BIO_READ means the host writes into our buffer. */
943 		if (bp->bio_cmd == BIO_READ)
944 			writable = sg->sg_nseg - 1;
945 	}
946 
947 	writable++;
948 	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
949 	readable = sg->sg_nseg - writable;
950 
951 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
952 	if (error == 0 && ordered)
953 		sc->vtblk_req_ordered = req;
954 
955 	return (error);
956 }
957 
958 static void
959 vtblk_vq_intr(void *xsc)
960 {
961 	struct vtblk_softc *sc;
962 	struct virtqueue *vq;
963 
964 	sc = xsc;
965 	vq = sc->vtblk_vq;
966 
967 again:
968 	VTBLK_LOCK(sc);
969 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
970 		VTBLK_UNLOCK(sc);
971 		return;
972 	}
973 
974 	vtblk_finish_completed(sc);
975 
976 	if ((sc->vtblk_flags & VTBLK_FLAG_SUSPEND) == 0)
977 		vtblk_startio(sc);
978 	else
979 		wakeup(&sc->vtblk_vq);
980 
981 	if (virtqueue_enable_intr(vq) != 0) {
982 		virtqueue_disable_intr(vq);
983 		VTBLK_UNLOCK(sc);
984 		goto again;
985 	}
986 
987 	VTBLK_UNLOCK(sc);
988 }
989 
990 static void
991 vtblk_stop(struct vtblk_softc *sc)
992 {
993 
994 	virtqueue_disable_intr(sc->vtblk_vq);
995 	virtio_stop(sc->vtblk_dev);
996 }
997 
998 #define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
999 	if (virtio_with_feature(_dev, _feature)) {			\
1000 		virtio_read_device_config(_dev,				\
1001 		    offsetof(struct virtio_blk_config, _field),		\
1002 		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
1003 	}
1004 
1005 static void
1006 vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
1007 {
1008 	device_t dev;
1009 
1010 	dev = sc->vtblk_dev;
1011 
1012 	bzero(blkcfg, sizeof(struct virtio_blk_config));
1013 
1014 	/* The capacity is always available. */
1015 	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
1016 	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));
1017 
1018 	/* Read the configuration if the feature was negotiated. */
1019 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
1020 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
1021 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
1022 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
1023 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
1024 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
1025 }
1026 
1027 #undef VTBLK_GET_CONFIG
1028 
1029 static void
1030 vtblk_get_ident(struct vtblk_softc *sc)
1031 {
1032 	struct bio buf;
1033 	struct disk *dp;
1034 	struct vtblk_request *req;
1035 	int len, error;
1036 
1037 	dp = sc->vtblk_disk;
1038 	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);
1039 
1040 	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
1041 		return;
1042 
1043 	req = vtblk_dequeue_request(sc);
1044 	if (req == NULL)
1045 		return;
1046 
1047 	req->vbr_ack = -1;
1048 	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
1049 	req->vbr_hdr.ioprio = 1;
1050 	req->vbr_hdr.sector = 0;
1051 
1052 	req->vbr_bp = &buf;
1053 	bzero(&buf, sizeof(struct bio));
1054 
1055 	buf.bio_cmd = BIO_READ;
1056 	buf.bio_data = dp->d_ident;
1057 	buf.bio_bcount = len;
1058 
1059 	VTBLK_LOCK(sc);
1060 	error = vtblk_poll_request(sc, req);
1061 	VTBLK_UNLOCK(sc);
1062 
1063 	vtblk_enqueue_request(sc, req);
1064 
1065 	if (error) {
1066 		device_printf(sc->vtblk_dev,
1067 		    "error getting device identifier: %d\n", error);
1068 	}
1069 }
1070 
1071 static void
1072 vtblk_prepare_dump(struct vtblk_softc *sc)
1073 {
1074 	device_t dev;
1075 	struct virtqueue *vq;
1076 
1077 	dev = sc->vtblk_dev;
1078 	vq = sc->vtblk_vq;
1079 
1080 	vtblk_stop(sc);
1081 
1082 	/*
1083 	 * Drain all requests caught in-flight in the virtqueue,
1084 	 * skipping biodone(). When dumping, only one request is
1085 	 * outstanding at a time, and we just poll the virtqueue
1086 	 * for the response.
1087 	 */
1088 	vtblk_drain_vq(sc, 1);
1089 
1090 	if (virtio_reinit(dev, sc->vtblk_features) != 0) {
1091 		panic("%s: cannot reinit VirtIO block device during dump",
1092 		    device_get_nameunit(dev));
1093 	}
1094 
1095 	virtqueue_disable_intr(vq);
1096 	virtio_reinit_complete(dev);
1097 }
1098 
1099 static int
1100 vtblk_write_dump(struct vtblk_softc *sc, void *virtual, off_t offset,
1101     size_t length)
1102 {
1103 	struct bio buf;
1104 	struct vtblk_request *req;
1105 
1106 	req = &sc->vtblk_dump_request;
1107 	req->vbr_ack = -1;
1108 	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
1109 	req->vbr_hdr.ioprio = 1;
1110 	req->vbr_hdr.sector = offset / 512;
1111 
1112 	req->vbr_bp = &buf;
1113 	bzero(&buf, sizeof(struct bio));
1114 
1115 	buf.bio_cmd = BIO_WRITE;
1116 	buf.bio_data = virtual;
1117 	buf.bio_bcount = length;
1118 
1119 	return (vtblk_poll_request(sc, req));
1120 }
1121 
1122 static int
1123 vtblk_flush_dump(struct vtblk_softc *sc)
1124 {
1125 	struct bio buf;
1126 	struct vtblk_request *req;
1127 
1128 	req = &sc->vtblk_dump_request;
1129 	req->vbr_ack = -1;
1130 	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
1131 	req->vbr_hdr.ioprio = 1;
1132 	req->vbr_hdr.sector = 0;
1133 
1134 	req->vbr_bp = &buf;
1135 	bzero(&buf, sizeof(struct bio));
1136 
1137 	buf.bio_cmd = BIO_FLUSH;
1138 
1139 	return (vtblk_poll_request(sc, req));
1140 }
1141 
1142 static int
1143 vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
1144 {
1145 	struct virtqueue *vq;
1146 	int error;
1147 
1148 	vq = sc->vtblk_vq;
1149 
1150 	if (!virtqueue_empty(vq))
1151 		return (EBUSY);
1152 
1153 	error = vtblk_execute_request(sc, req);
1154 	if (error)
1155 		return (error);
1156 
1157 	virtqueue_notify(vq);
1158 	virtqueue_poll(vq, NULL);
1159 
1160 	error = vtblk_request_error(req);
1161 	if (error && bootverbose) {
1162 		device_printf(sc->vtblk_dev,
1163 		    "%s: IO error: %d\n", __func__, error);
1164 	}
1165 
1166 	return (error);
1167 }
1168 
1169 static void
1170 vtblk_finish_completed(struct vtblk_softc *sc)
1171 {
1172 	struct vtblk_request *req;
1173 	struct bio *bp;
1174 	int error;
1175 
1176 	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
1177 		bp = req->vbr_bp;
1178 
1179 		if (sc->vtblk_req_ordered != NULL) {
1180 			/* This should be the only outstanding request. */
1181 			MPASS(sc->vtblk_req_ordered == req);
1182 			sc->vtblk_req_ordered = NULL;
1183 		}
1184 
1185 		error = vtblk_request_error(req);
1186 		if (error)
1187 			disk_err(bp, "hard error", -1, 1);
1188 
1189 		vtblk_finish_bio(bp, error);
1190 		vtblk_enqueue_request(sc, req);
1191 	}
1192 }
1193 
1194 static void
1195 vtblk_drain_vq(struct vtblk_softc *sc, int skip_done)
1196 {
1197 	struct virtqueue *vq;
1198 	struct vtblk_request *req;
1199 	int last;
1200 
1201 	vq = sc->vtblk_vq;
1202 	last = 0;
1203 
1204 	while ((req = virtqueue_drain(vq, &last)) != NULL) {
1205 		if (!skip_done)
1206 			vtblk_finish_bio(req->vbr_bp, ENXIO);
1207 
1208 		vtblk_enqueue_request(sc, req);
1209 	}
1210 
1211 	sc->vtblk_req_ordered = NULL;
1212 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1213 }
1214 
1215 static void
1216 vtblk_drain(struct vtblk_softc *sc)
1217 {
1218 	struct bio_queue_head *bioq;
1219 	struct vtblk_request *req;
1220 	struct bio *bp;
1221 
1222 	bioq = &sc->vtblk_bioq;
1223 
1224 	if (sc->vtblk_vq != NULL) {
1225 		vtblk_finish_completed(sc);
1226 		vtblk_drain_vq(sc, 0);
1227 	}
1228 
1229 	while ((req = vtblk_dequeue_ready(sc)) != NULL) {
1230 		vtblk_finish_bio(req->vbr_bp, ENXIO);
1231 		vtblk_enqueue_request(sc, req);
1232 	}
1233 
1234 	while (bioq_first(bioq) != NULL) {
1235 		bp = bioq_takefirst(bioq);
1236 		vtblk_finish_bio(bp, ENXIO);
1237 	}
1238 
1239 	vtblk_free_requests(sc);
1240 }
1241 
1242 #ifdef INVARIANTS
1243 static void
1244 vtblk_request_invariants(struct vtblk_request *req)
1245 {
1246 	int hdr_nsegs, ack_nsegs;
1247 
1248 	hdr_nsegs = sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr));
1249 	ack_nsegs = sglist_count(&req->vbr_ack, sizeof(req->vbr_ack));
1250 
1251 	KASSERT(hdr_nsegs == 1, ("request header crossed page boundary"));
1252 	KASSERT(ack_nsegs == 1, ("request ack crossed page boundary"));
1253 }
1254 #endif
1255 
1256 static int
1257 vtblk_alloc_requests(struct vtblk_softc *sc)
1258 {
1259 	struct vtblk_request *req;
1260 	int i, nreqs;
1261 
1262 	nreqs = virtqueue_size(sc->vtblk_vq);
1263 
1264 	/*
1265 	 * Preallocate sufficient requests to keep the virtqueue full. Each
1266 	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
1267 	 * the number allocated when indirect descriptors are not available.
1268 	 */
1269 	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
1270 		nreqs /= VTBLK_MIN_SEGMENTS;
1271 
1272 	for (i = 0; i < nreqs; i++) {
1273 		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
1274 		if (req == NULL)
1275 			return (ENOMEM);
1276 
1277 #ifdef INVARIANTS
1278 		vtblk_request_invariants(req);
1279 #endif
1280 
1281 		sc->vtblk_request_count++;
1282 		vtblk_enqueue_request(sc, req);
1283 	}
1284 
1285 	return (0);
1286 }
1287 
1288 static void
1289 vtblk_free_requests(struct vtblk_softc *sc)
1290 {
1291 	struct vtblk_request *req;
1292 
1293 	KASSERT(TAILQ_EMPTY(&sc->vtblk_req_ready),
1294 	    ("%s: ready requests left on queue", __func__));
1295 
1296 	while ((req = vtblk_dequeue_request(sc)) != NULL) {
1297 		sc->vtblk_request_count--;
1298 		free(req, M_DEVBUF);
1299 	}
1300 
1301 	KASSERT(sc->vtblk_request_count == 0,
1302 	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
1303 }
1304 
1305 static struct vtblk_request *
1306 vtblk_dequeue_request(struct vtblk_softc *sc)
1307 {
1308 	struct vtblk_request *req;
1309 
1310 	req = TAILQ_FIRST(&sc->vtblk_req_free);
1311 	if (req != NULL)
1312 		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
1313 
1314 	return (req);
1315 }
1316 
1317 static void
1318 vtblk_enqueue_request(struct vtblk_softc *sc, struct vtblk_request *req)
1319 {
1320 
1321 	bzero(req, sizeof(struct vtblk_request));
1322 	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
1323 }
1324 
1325 static struct vtblk_request *
1326 vtblk_dequeue_ready(struct vtblk_softc *sc)
1327 {
1328 	struct vtblk_request *req;
1329 
1330 	req = TAILQ_FIRST(&sc->vtblk_req_ready);
1331 	if (req != NULL)
1332 		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);
1333 
1334 	return (req);
1335 }
1336 
1337 static void
1338 vtblk_enqueue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
1339 {
1340 
1341 	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
1342 }
1343 
1344 static int
1345 vtblk_request_error(struct vtblk_request *req)
1346 {
1347 	int error;
1348 
1349 	switch (req->vbr_ack) {
1350 	case VIRTIO_BLK_S_OK:
1351 		error = 0;
1352 		break;
1353 	case VIRTIO_BLK_S_UNSUPP:
1354 		error = ENOTSUP;
1355 		break;
1356 	default:
1357 		error = EIO;
1358 		break;
1359 	}
1360 
1361 	return (error);
1362 }
1363 
1364 static void
1365 vtblk_finish_bio(struct bio *bp, int error)
1366 {
1367 
1368 	if (error) {
1369 		bp->bio_resid = bp->bio_bcount;
1370 		bp->bio_error = error;
1371 		bp->bio_flags |= BIO_ERROR;
1372 	}
1373 
1374 	biodone(bp);
1375 }
1376 
1377 static void
1378 vtblk_setup_sysctl(struct vtblk_softc *sc)
1379 {
1380 	device_t dev;
1381 	struct sysctl_ctx_list *ctx;
1382 	struct sysctl_oid *tree;
1383 	struct sysctl_oid_list *child;
1384 
1385 	dev = sc->vtblk_dev;
1386 	ctx = device_get_sysctl_ctx(dev);
1387 	tree = device_get_sysctl_tree(dev);
1388 	child = SYSCTL_CHILDREN(tree);
1389 
1390 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
1391 	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, vtblk_write_cache_sysctl,
1392 	    "I", "Write cache mode (writethrough (0) or writeback (1))");
1393 }
1394 
1395 static int
1396 vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
1397 {
1398 	char path[64];
1399 
1400 	snprintf(path, sizeof(path),
1401 	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
1402 	TUNABLE_INT_FETCH(path, &def);
1403 
1404 	return (def);
1405 }
1406