xref: /freebsd/sys/dev/virtio/block/virtio_blk.c (revision cc1a53bc1aea0675d64e9547cdca241612906592)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* Driver for VirtIO block devices. */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/bio.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/msan.h>
41 #include <sys/sglist.h>
42 #include <sys/sysctl.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/queue.h>
46 
47 #include <geom/geom.h>
48 #include <geom/geom_disk.h>
49 
50 #include <machine/bus.h>
51 #include <machine/resource.h>
52 #include <sys/bus.h>
53 #include <sys/rman.h>
54 
55 #include <dev/virtio/virtio.h>
56 #include <dev/virtio/virtqueue.h>
57 #include <dev/virtio/block/virtio_blk.h>
58 
59 #include "virtio_if.h"
60 
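/*
 * Per-request state. The header occupies the first (device-readable)
 * descriptor of the chain and the ack byte the last (device-writable)
 * one; vbr_bp points at the bio being serviced.
 */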
61 struct vtblk_request {
62 	struct virtio_blk_outhdr	 vbr_hdr;
63 	struct bio			*vbr_bp;
64 	uint8_t				 vbr_ack;
65 	TAILQ_ENTRY(vtblk_request)	 vbr_link;
66 };
67 
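/* Write cache modes; the values match the "wce" field in the config space. */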
68 enum vtblk_cache_mode {
69 	VTBLK_CACHE_WRITETHROUGH,
70 	VTBLK_CACHE_WRITEBACK,
71 	VTBLK_CACHE_MAX
72 };
73 
74 struct vtblk_softc {
75 	device_t		 vtblk_dev;
76 	struct mtx		 vtblk_mtx;
77 	uint64_t		 vtblk_features;
78 	uint32_t		 vtblk_flags;
79 #define VTBLK_FLAG_INDIRECT	0x0001
80 #define VTBLK_FLAG_DETACH	0x0002
81 #define VTBLK_FLAG_SUSPEND	0x0004
82 #define VTBLK_FLAG_BARRIER	0x0008
83 #define VTBLK_FLAG_WCE_CONFIG	0x0010
84 
85 	struct virtqueue	*vtblk_vq;
86 	struct sglist		*vtblk_sglist;
87 	struct disk		*vtblk_disk;
88 
89 	struct bio_queue_head	 vtblk_bioq;
90 	TAILQ_HEAD(, vtblk_request)
91 				 vtblk_req_free;
92 	TAILQ_HEAD(, vtblk_request)
93 				 vtblk_req_ready;
94 	struct vtblk_request	*vtblk_req_ordered;
95 
96 	int			 vtblk_max_nsegs;
97 	int			 vtblk_request_count;
98 	enum vtblk_cache_mode	 vtblk_write_cache;
99 
100 	struct bio_queue	 vtblk_dump_queue;
101 	struct vtblk_request	 vtblk_dump_request;
102 };
103 
104 static struct virtio_feature_desc vtblk_feature_desc[] = {
105 	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
106 	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
107 	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
108 	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
109 	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
110 	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
111 	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
112 	{ VIRTIO_BLK_F_FLUSH,		"FlushCmd"	},
113 	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
114 	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
115 	{ VIRTIO_BLK_F_MQ,		"Multiqueue"	},
116 	{ VIRTIO_BLK_F_DISCARD,		"Discard"	},
117 	{ VIRTIO_BLK_F_WRITE_ZEROES,	"WriteZeros"	},
118 
119 	{ 0, NULL }
120 };
121 
122 static int	vtblk_modevent(module_t, int, void *);
123 
124 static int	vtblk_probe(device_t);
125 static int	vtblk_attach(device_t);
126 static int	vtblk_detach(device_t);
127 static int	vtblk_suspend(device_t);
128 static int	vtblk_resume(device_t);
129 static int	vtblk_shutdown(device_t);
130 static int	vtblk_attach_completed(device_t);
131 static int	vtblk_config_change(device_t);
132 
133 static int	vtblk_open(struct disk *);
134 static int	vtblk_close(struct disk *);
135 static int	vtblk_ioctl(struct disk *, u_long, void *, int,
136 		    struct thread *);
137 static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
138 static void	vtblk_strategy(struct bio *);
139 
140 static int	vtblk_negotiate_features(struct vtblk_softc *);
141 static int	vtblk_setup_features(struct vtblk_softc *);
142 static int	vtblk_maximum_segments(struct vtblk_softc *,
143 		    struct virtio_blk_config *);
144 static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
145 static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
146 static void	vtblk_alloc_disk(struct vtblk_softc *,
147 		    struct virtio_blk_config *);
148 static void	vtblk_create_disk(struct vtblk_softc *);
149 
150 static int	vtblk_request_prealloc(struct vtblk_softc *);
151 static void	vtblk_request_free(struct vtblk_softc *);
152 static struct vtblk_request *
153 		vtblk_request_dequeue(struct vtblk_softc *);
154 static void	vtblk_request_enqueue(struct vtblk_softc *,
155 		    struct vtblk_request *);
156 static struct vtblk_request *
157 		vtblk_request_next_ready(struct vtblk_softc *);
158 static void	vtblk_request_requeue_ready(struct vtblk_softc *,
159 		    struct vtblk_request *);
160 static struct vtblk_request *
161 		vtblk_request_next(struct vtblk_softc *);
162 static struct vtblk_request *
163 		vtblk_request_bio(struct vtblk_softc *);
164 static int	vtblk_request_execute(struct vtblk_softc *,
165 		    struct vtblk_request *);
166 static int	vtblk_request_error(struct vtblk_request *);
167 
168 static void	vtblk_queue_completed(struct vtblk_softc *,
169 		    struct bio_queue *);
170 static void	vtblk_done_completed(struct vtblk_softc *,
171 		    struct bio_queue *);
172 static void	vtblk_drain_vq(struct vtblk_softc *);
173 static void	vtblk_drain(struct vtblk_softc *);
174 
175 static void	vtblk_startio(struct vtblk_softc *);
176 static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);
177 
178 static void	vtblk_read_config(struct vtblk_softc *,
179 		    struct virtio_blk_config *);
180 static void	vtblk_ident(struct vtblk_softc *);
181 static int	vtblk_poll_request(struct vtblk_softc *,
182 		    struct vtblk_request *);
183 static int	vtblk_quiesce(struct vtblk_softc *);
184 static void	vtblk_vq_intr(void *);
185 static void	vtblk_stop(struct vtblk_softc *);
186 
187 static void	vtblk_dump_quiesce(struct vtblk_softc *);
188 static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
189 static int	vtblk_dump_flush(struct vtblk_softc *);
190 static void	vtblk_dump_complete(struct vtblk_softc *);
191 
192 static void	vtblk_set_write_cache(struct vtblk_softc *, int);
193 static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
194 		    struct virtio_blk_config *);
195 static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);
196 
197 static void	vtblk_setup_sysctl(struct vtblk_softc *);
198 static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);
199 
200 #define vtblk_modern(_sc) (((_sc)->vtblk_features & VIRTIO_F_VERSION_1) != 0)
201 #define vtblk_htog16(_sc, _val)	virtio_htog16(vtblk_modern(_sc), _val)
202 #define vtblk_htog32(_sc, _val)	virtio_htog32(vtblk_modern(_sc), _val)
203 #define vtblk_htog64(_sc, _val)	virtio_htog64(vtblk_modern(_sc), _val)
204 #define vtblk_gtoh16(_sc, _val)	virtio_gtoh16(vtblk_modern(_sc), _val)
205 #define vtblk_gtoh32(_sc, _val)	virtio_gtoh32(vtblk_modern(_sc), _val)
206 #define vtblk_gtoh64(_sc, _val)	virtio_gtoh64(vtblk_modern(_sc), _val)
207 
208 /* Tunables. */
209 static int vtblk_no_ident = 0;
210 TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
211 static int vtblk_writecache_mode = -1;
212 TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
213 
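/*
 * Features offered to the host during negotiation. Legacy devices may
 * additionally accept the deprecated barrier feature.
 */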
214 #define VTBLK_COMMON_FEATURES \
215     (VIRTIO_BLK_F_SIZE_MAX		| \
216      VIRTIO_BLK_F_SEG_MAX		| \
217      VIRTIO_BLK_F_GEOMETRY		| \
218      VIRTIO_BLK_F_RO			| \
219      VIRTIO_BLK_F_BLK_SIZE		| \
220      VIRTIO_BLK_F_FLUSH			| \
221      VIRTIO_BLK_F_TOPOLOGY		| \
222      VIRTIO_BLK_F_CONFIG_WCE		| \
223      VIRTIO_BLK_F_DISCARD		| \
224      VIRTIO_RING_F_INDIRECT_DESC)
225 
226 #define VTBLK_MODERN_FEATURES	(VTBLK_COMMON_FEATURES)
227 #define VTBLK_LEGACY_FEATURES	(VIRTIO_BLK_F_BARRIER | VTBLK_COMMON_FEATURES)
228 
229 #define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
230 #define VTBLK_LOCK_INIT(_sc, _name) \
231 				mtx_init(VTBLK_MTX((_sc)), (_name), \
232 				    "VirtIO Block Lock", MTX_DEF)
233 #define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
234 #define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
235 #define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
236 #define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
237 #define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
238 				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)
239 
240 #define VTBLK_DISK_NAME		"vtbd"
241 #define VTBLK_QUIESCE_TIMEOUT	(30 * hz)
242 #define VTBLK_BSIZE		512
243 
244 /*
245  * Each block request uses at least two segments - one for the header
246  * and one for the status.
247  */
248 #define VTBLK_MIN_SEGMENTS	2
249 
250 static device_method_t vtblk_methods[] = {
251 	/* Device methods. */
252 	DEVMETHOD(device_probe,		vtblk_probe),
253 	DEVMETHOD(device_attach,	vtblk_attach),
254 	DEVMETHOD(device_detach,	vtblk_detach),
255 	DEVMETHOD(device_suspend,	vtblk_suspend),
256 	DEVMETHOD(device_resume,	vtblk_resume),
257 	DEVMETHOD(device_shutdown,	vtblk_shutdown),
258 
259 	/* VirtIO methods. */
260 	DEVMETHOD(virtio_attach_completed, vtblk_attach_completed),
261 	DEVMETHOD(virtio_config_change,	vtblk_config_change),
262 
263 	DEVMETHOD_END
264 };
265 
266 static driver_t vtblk_driver = {
267 	"vtblk",
268 	vtblk_methods,
269 	sizeof(struct vtblk_softc)
270 };
271 
272 VIRTIO_DRIVER_MODULE(virtio_blk, vtblk_driver, vtblk_modevent, NULL);
273 MODULE_VERSION(virtio_blk, 1);
274 MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);
275 
276 VIRTIO_SIMPLE_PNPINFO(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
277 
278 static int
279 vtblk_modevent(module_t mod, int type, void *unused)
280 {
281 	int error;
282 
283 	error = 0;
284 
285 	switch (type) {
286 	case MOD_LOAD:
287 	case MOD_QUIESCE:
288 	case MOD_UNLOAD:
289 	case MOD_SHUTDOWN:
290 		break;
291 	default:
292 		error = EOPNOTSUPP;
293 		break;
294 	}
295 
296 	return (error);
297 }
298 
299 static int
300 vtblk_probe(device_t dev)
301 {
302 	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
303 }
304 
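/*
 * Negotiate features, read the device configuration, and allocate the
 * sglist, virtqueue, requests, and disk. The disk is only announced to
 * GEOM later, from vtblk_attach_completed().
 */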
305 static int
306 vtblk_attach(device_t dev)
307 {
308 	struct vtblk_softc *sc;
309 	struct virtio_blk_config blkcfg;
310 	int error;
311 
312 	sc = device_get_softc(dev);
313 	sc->vtblk_dev = dev;
314 	virtio_set_feature_desc(dev, vtblk_feature_desc);
315 
316 	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
317 	bioq_init(&sc->vtblk_bioq);
318 	TAILQ_INIT(&sc->vtblk_dump_queue);
319 	TAILQ_INIT(&sc->vtblk_req_free);
320 	TAILQ_INIT(&sc->vtblk_req_ready);
321 
322 	vtblk_setup_sysctl(sc);
323 
324 	error = vtblk_setup_features(sc);
325 	if (error) {
326 		device_printf(dev, "cannot setup features\n");
327 		goto fail;
328 	}
329 
330 	vtblk_read_config(sc, &blkcfg);
331 
332 	/*
333 	 * With the current sglist(9) implementation, it is not easy
334 	 * for us to support a maximum segment size as adjacent
335 	 * segments are coalesced. For now, just make sure the host's
336 	 * limit is at least the maximum supported transfer size.
337 	 */
338 	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
339 		if (blkcfg.size_max < maxphys) {
340 			error = ENOTSUP;
341 			device_printf(dev, "host requires unsupported "
342 			    "maximum segment size feature\n");
343 			goto fail;
344 		}
345 	}
346 
347 	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
348 	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
349 		error = EINVAL;
350 		device_printf(dev, "fewer than minimum number of segments "
351 		    "allowed: %d\n", sc->vtblk_max_nsegs);
352 		goto fail;
353 	}
354 
355 	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
356 	if (sc->vtblk_sglist == NULL) {
357 		error = ENOMEM;
358 		device_printf(dev, "cannot allocate sglist\n");
359 		goto fail;
360 	}
361 
362 	error = vtblk_alloc_virtqueue(sc);
363 	if (error) {
364 		device_printf(dev, "cannot allocate virtqueue\n");
365 		goto fail;
366 	}
367 
368 	error = vtblk_request_prealloc(sc);
369 	if (error) {
370 		device_printf(dev, "cannot preallocate requests\n");
371 		goto fail;
372 	}
373 
374 	vtblk_alloc_disk(sc, &blkcfg);
375 
376 	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
377 	if (error) {
378 		device_printf(dev, "cannot setup virtqueue interrupt\n");
379 		goto fail;
380 	}
381 
382 	virtqueue_enable_intr(sc->vtblk_vq);
383 
384 fail:
385 	if (error)
386 		vtblk_detach(dev);
387 
388 	return (error);
389 }
390 
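/* Also used by vtblk_attach() to unwind a partially completed attach. */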
391 static int
392 vtblk_detach(device_t dev)
393 {
394 	struct vtblk_softc *sc;
395 
396 	sc = device_get_softc(dev);
397 
398 	VTBLK_LOCK(sc);
399 	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
400 	if (device_is_attached(dev))
401 		vtblk_stop(sc);
402 	VTBLK_UNLOCK(sc);
403 
404 	vtblk_drain(sc);
405 
406 	if (sc->vtblk_disk != NULL) {
407 		disk_destroy(sc->vtblk_disk);
408 		sc->vtblk_disk = NULL;
409 	}
410 
411 	if (sc->vtblk_sglist != NULL) {
412 		sglist_free(sc->vtblk_sglist);
413 		sc->vtblk_sglist = NULL;
414 	}
415 
416 	VTBLK_LOCK_DESTROY(sc);
417 
418 	return (0);
419 }
420 
421 static int
422 vtblk_suspend(device_t dev)
423 {
424 	struct vtblk_softc *sc;
425 	int error;
426 
427 	sc = device_get_softc(dev);
428 
429 	VTBLK_LOCK(sc);
430 	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
431 	/* XXX BMV: virtio_stop(), etc needed here? */
432 	error = vtblk_quiesce(sc);
433 	if (error)
434 		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
435 	VTBLK_UNLOCK(sc);
436 
437 	return (error);
438 }
439 
440 static int
441 vtblk_resume(device_t dev)
442 {
443 	struct vtblk_softc *sc;
444 
445 	sc = device_get_softc(dev);
446 
447 	VTBLK_LOCK(sc);
448 	/* XXX BMV: virtio_reinit(), etc needed here? */
449 	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
450 	vtblk_startio(sc);
451 	VTBLK_UNLOCK(sc);
452 
453 	return (0);
454 }
455 
456 static int
457 vtblk_shutdown(device_t dev)
458 {
459 
460 	return (0);
461 }
462 
463 static int
464 vtblk_attach_completed(device_t dev)
465 {
466 	struct vtblk_softc *sc;
467 
468 	sc = device_get_softc(dev);
469 
470 	/*
471 	 * Create the disk only now: a VIRTIO_BLK_T_GET_ID request can
472 	 * only be processed after the device has acknowledged
473 	 * VIRTIO_CONFIG_STATUS_DRIVER_OK.
474 	 */
475 	vtblk_create_disk(sc);
476 	return (0);
477 }
478 
479 static int
480 vtblk_config_change(device_t dev)
481 {
482 	struct vtblk_softc *sc;
483 	struct virtio_blk_config blkcfg;
484 	uint64_t capacity;
485 
486 	sc = device_get_softc(dev);
487 
488 	vtblk_read_config(sc, &blkcfg);
489 
490 	/* Capacity is always in 512-byte units. */
491 	capacity = blkcfg.capacity * VTBLK_BSIZE;
492 
493 	if (sc->vtblk_disk->d_mediasize != capacity)
494 		vtblk_resize_disk(sc, capacity);
495 
496 	return (0);
497 }
498 
499 static int
500 vtblk_open(struct disk *dp)
501 {
502 	struct vtblk_softc *sc;
503 
504 	if ((sc = dp->d_drv1) == NULL)
505 		return (ENXIO);
506 
507 	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
508 }
509 
510 static int
511 vtblk_close(struct disk *dp)
512 {
513 	struct vtblk_softc *sc;
514 
515 	if ((sc = dp->d_drv1) == NULL)
516 		return (ENXIO);
517 
518 	return (0);
519 }
520 
521 static int
522 vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
523     struct thread *td)
524 {
525 	struct vtblk_softc *sc;
526 
527 	if ((sc = dp->d_drv1) == NULL)
528 		return (ENXIO);
529 
530 	return (ENOTTY);
531 }
532 
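/*
 * Kernel dump entry point. Writes are issued synchronously through
 * vtblk_poll_request(); a call with a NULL buffer and zero offset
 * flushes the device and completes any bios parked on the dump queue.
 */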
533 static int
534 vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
535     size_t length)
536 {
537 	struct disk *dp;
538 	struct vtblk_softc *sc;
539 	int error;
540 
541 	dp = arg;
542 	error = 0;
543 
544 	if ((sc = dp->d_drv1) == NULL)
545 		return (ENXIO);
546 
547 	VTBLK_LOCK(sc);
548 
549 	vtblk_dump_quiesce(sc);
550 
551 	if (length > 0)
552 		error = vtblk_dump_write(sc, virtual, offset, length);
553 	if (error || (virtual == NULL && offset == 0))
554 		vtblk_dump_complete(sc);
555 
556 	VTBLK_UNLOCK(sc);
557 
558 	return (error);
559 }
560 
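/*
 * GEOM strategy routine: queue the bio and kick the virtqueue.
 * Unsupported commands are failed immediately with EOPNOTSUPP.
 */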
561 static void
562 vtblk_strategy(struct bio *bp)
563 {
564 	struct vtblk_softc *sc;
565 
566 	if ((sc = bp->bio_disk->d_drv1) == NULL) {
567 		vtblk_bio_done(NULL, bp, EINVAL);
568 		return;
569 	}
570 
571 	if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
572 	    (bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
573 		vtblk_bio_done(sc, bp, EOPNOTSUPP);
574 		return;
575 	}
576 
577 	VTBLK_LOCK(sc);
578 
579 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
580 		VTBLK_UNLOCK(sc);
581 		vtblk_bio_done(sc, bp, ENXIO);
582 		return;
583 	}
584 
585 	bioq_insert_tail(&sc->vtblk_bioq, bp);
586 	vtblk_startio(sc);
587 
588 	VTBLK_UNLOCK(sc);
589 }
590 
591 static int
592 vtblk_negotiate_features(struct vtblk_softc *sc)
593 {
594 	device_t dev;
595 	uint64_t features;
596 
597 	dev = sc->vtblk_dev;
598 	features = virtio_bus_is_modern(dev) ? VTBLK_MODERN_FEATURES :
599 	    VTBLK_LEGACY_FEATURES;
600 
601 	sc->vtblk_features = virtio_negotiate_features(dev, features);
602 	return (virtio_finalize_features(dev));
603 }
604 
605 static int
606 vtblk_setup_features(struct vtblk_softc *sc)
607 {
608 	device_t dev;
609 	int error;
610 
611 	dev = sc->vtblk_dev;
612 
613 	error = vtblk_negotiate_features(sc);
614 	if (error)
615 		return (error);
616 
617 	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
618 		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
619 	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
620 		sc->vtblk_flags |= VTBLK_FLAG_WCE_CONFIG;
621 
622 	/* Legacy. */
623 	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
624 		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
625 
626 	return (0);
627 }
628 
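/*
 * Size the per-request sglist: the header and ack segments plus as many
 * data segments as the host allows, capped by what a maxphys-sized
 * buffer can require and, when used, by the indirect descriptor limit.
 */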
629 static int
630 vtblk_maximum_segments(struct vtblk_softc *sc,
631     struct virtio_blk_config *blkcfg)
632 {
633 	device_t dev;
634 	int nsegs;
635 
636 	dev = sc->vtblk_dev;
637 	nsegs = VTBLK_MIN_SEGMENTS;
638 
639 	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
640 		nsegs += MIN(blkcfg->seg_max, maxphys / PAGE_SIZE + 1);
641 		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
642 			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
643 	} else
644 		nsegs += 1;
645 
646 	return (nsegs);
647 }
648 
649 static int
650 vtblk_alloc_virtqueue(struct vtblk_softc *sc)
651 {
652 	device_t dev;
653 	struct vq_alloc_info vq_info;
654 
655 	dev = sc->vtblk_dev;
656 
657 	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
658 	    vtblk_vq_intr, sc, &sc->vtblk_vq,
659 	    "%s request", device_get_nameunit(dev));
660 
661 	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
662 }
663 
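/* Called from vtblk_config_change() when the host reports a new capacity. */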
664 static void
665 vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
666 {
667 	device_t dev;
668 	struct disk *dp;
669 	int error;
670 
671 	dev = sc->vtblk_dev;
672 	dp = sc->vtblk_disk;
673 
674 	dp->d_mediasize = new_capacity;
675 	if (bootverbose) {
676 		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
677 		    (uintmax_t) dp->d_mediasize >> 20,
678 		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
679 		    dp->d_sectorsize);
680 	}
681 
682 	error = disk_resize(dp, M_NOWAIT);
683 	if (error) {
684 		device_printf(dev,
685 		    "disk_resize(9) failed, error: %d\n", error);
686 	}
687 }
688 
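/*
 * Allocate the disk(9) structure and fill it in from the negotiated
 * features and device configuration; it is not announced to GEOM here.
 */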
689 static void
690 vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
691 {
692 	device_t dev;
693 	struct disk *dp;
694 
695 	dev = sc->vtblk_dev;
696 
697 	sc->vtblk_disk = dp = disk_alloc();
698 	dp->d_open = vtblk_open;
699 	dp->d_close = vtblk_close;
700 	dp->d_ioctl = vtblk_ioctl;
701 	dp->d_strategy = vtblk_strategy;
702 	dp->d_name = VTBLK_DISK_NAME;
703 	dp->d_unit = device_get_unit(dev);
704 	dp->d_drv1 = sc;
705 	dp->d_flags = DISKFLAG_UNMAPPED_BIO | DISKFLAG_DIRECT_COMPLETION;
706 	dp->d_hba_vendor = virtio_get_vendor(dev);
707 	dp->d_hba_device = virtio_get_device(dev);
708 	dp->d_hba_subvendor = virtio_get_subvendor(dev);
709 	dp->d_hba_subdevice = virtio_get_subdevice(dev);
710 
711 	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
712 		dp->d_flags |= DISKFLAG_WRITE_PROTECT;
713 	else {
714 		if (virtio_with_feature(dev, VIRTIO_BLK_F_FLUSH))
715 			dp->d_flags |= DISKFLAG_CANFLUSHCACHE;
716 		dp->d_dump = vtblk_dump;
717 	}
718 
719 	/* Capacity is always in 512-byte units. */
720 	dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;
721 
722 	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
723 		dp->d_sectorsize = blkcfg->blk_size;
724 	else
725 		dp->d_sectorsize = VTBLK_BSIZE;
726 
727 	/*
728 	 * The VirtIO maximum I/O size is given in terms of segments.
729 	 * However, FreeBSD limits I/O size by logical buffer size, not
730 	 * by physically contiguous pages. Therefore, we have to assume
731 	 * no pages are contiguous. This may impose an artificially low
732 	 * maximum I/O size. But in practice, since QEMU advertises 128
733 	 * segments, this gives us a maximum I/O size of 125 * PAGE_SIZE,
734 	 * which is typically greater than maxphys. Eventually we should
735 	 * just advertise maxphys and split buffers that are too big.
736 	 *
737 	 * Note we must subtract one additional segment in case of
738 	 * non-page-aligned buffers.
739 	 */
740 	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
741 	    PAGE_SIZE;
742 	if (dp->d_maxsize < PAGE_SIZE)
743 		dp->d_maxsize = PAGE_SIZE; /* XXX */
744 
745 	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
746 		dp->d_fwsectors = blkcfg->geometry.sectors;
747 		dp->d_fwheads = blkcfg->geometry.heads;
748 	}
749 
750 	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
751 	    blkcfg->topology.physical_block_exp > 0) {
752 		dp->d_stripesize = dp->d_sectorsize *
753 		    (1 << blkcfg->topology.physical_block_exp);
754 		dp->d_stripeoffset = (dp->d_stripesize -
755 		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
756 		    dp->d_stripesize;
757 	}
758 
759 	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
760 		dp->d_flags |= DISKFLAG_CANDELETE;
761 		dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
762 	}
763 
764 	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
765 		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
766 	else
767 		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
768 }
769 
770 static void
771 vtblk_create_disk(struct vtblk_softc *sc)
772 {
773 	struct disk *dp;
774 
775 	dp = sc->vtblk_disk;
776 
777 	vtblk_ident(sc);
778 
779 	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
780 	    (uintmax_t) dp->d_mediasize >> 20,
781 	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
782 	    dp->d_sectorsize);
783 
784 	disk_create(dp, DISK_VERSION);
785 }
786 
787 static int
788 vtblk_request_prealloc(struct vtblk_softc *sc)
789 {
790 	struct vtblk_request *req;
791 	int i, nreqs;
792 
793 	nreqs = virtqueue_size(sc->vtblk_vq);
794 
795 	/*
796 	 * Preallocate sufficient requests to keep the virtqueue full. Each
797 	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors, so reduce
798 	 * the number allocated when indirect descriptors are not available.
799 	 */
800 	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
801 		nreqs /= VTBLK_MIN_SEGMENTS;
802 
803 	for (i = 0; i < nreqs; i++) {
804 		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
805 		if (req == NULL)
806 			return (ENOMEM);
807 
808 		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
809 		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);
810 
811 		sc->vtblk_request_count++;
812 		vtblk_request_enqueue(sc, req);
813 	}
814 
815 	return (0);
816 }
817 
818 static void
819 vtblk_request_free(struct vtblk_softc *sc)
820 {
821 	struct vtblk_request *req;
822 
823 	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));
824 
825 	while ((req = vtblk_request_dequeue(sc)) != NULL) {
826 		sc->vtblk_request_count--;
827 		free(req, M_DEVBUF);
828 	}
829 
830 	KASSERT(sc->vtblk_request_count == 0,
831 	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
832 }
833 
834 static struct vtblk_request *
835 vtblk_request_dequeue(struct vtblk_softc *sc)
836 {
837 	struct vtblk_request *req;
838 
839 	req = TAILQ_FIRST(&sc->vtblk_req_free);
840 	if (req != NULL) {
841 		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
842 		bzero(req, sizeof(struct vtblk_request));
843 	}
844 
845 	return (req);
846 }
847 
848 static void
849 vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
850 {
851 
852 	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
853 }
854 
855 static struct vtblk_request *
856 vtblk_request_next_ready(struct vtblk_softc *sc)
857 {
858 	struct vtblk_request *req;
859 
860 	req = TAILQ_FIRST(&sc->vtblk_req_ready);
861 	if (req != NULL)
862 		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);
863 
864 	return (req);
865 }
866 
867 static void
868 vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
869 {
870 
871 	/* NOTE: Currently, there will be at most one request in the queue. */
872 	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
873 }
874 
875 static struct vtblk_request *
876 vtblk_request_next(struct vtblk_softc *sc)
877 {
878 	struct vtblk_request *req;
879 
880 	req = vtblk_request_next_ready(sc);
881 	if (req != NULL)
882 		return (req);
883 
884 	return (vtblk_request_bio(sc));
885 }
886 
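/*
 * Convert the bio at the head of the queue into a block request.
 * Returns NULL if no bio is pending or no free request is available.
 */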
887 static struct vtblk_request *
888 vtblk_request_bio(struct vtblk_softc *sc)
889 {
890 	struct bio_queue_head *bioq;
891 	struct vtblk_request *req;
892 	struct bio *bp;
893 
894 	bioq = &sc->vtblk_bioq;
895 
896 	if (bioq_first(bioq) == NULL)
897 		return (NULL);
898 
899 	req = vtblk_request_dequeue(sc);
900 	if (req == NULL)
901 		return (NULL);
902 
903 	bp = bioq_takefirst(bioq);
904 	req->vbr_bp = bp;
905 	req->vbr_ack = -1;
906 	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
907 
908 	switch (bp->bio_cmd) {
909 	case BIO_FLUSH:
910 		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
911 		req->vbr_hdr.sector = 0;
912 		break;
913 	case BIO_READ:
914 		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_IN);
915 		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
916 		break;
917 	case BIO_WRITE:
918 		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
919 		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
920 		break;
921 	case BIO_DELETE:
922 		req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_DISCARD);
923 		req->vbr_hdr.sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
924 		break;
925 	default:
926 		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
927 	}
928 
929 	if (bp->bio_flags & BIO_ORDERED)
930 		req->vbr_hdr.type |= vtblk_gtoh32(sc, VIRTIO_BLK_T_BARRIER);
931 
932 	return (req);
933 }
934 
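/*
 * Enqueue a request in the virtqueue. The descriptor chain consists of
 * the device-readable header, the data or discard payload (if any), and
 * the single device-writable ack byte; for reads the data segments are
 * also device-writable.
 */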
935 static int
936 vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
937 {
938 	struct virtqueue *vq;
939 	struct sglist *sg;
940 	struct bio *bp;
941 	int ordered, readable, writable, error;
942 
943 	vq = sc->vtblk_vq;
944 	sg = sc->vtblk_sglist;
945 	bp = req->vbr_bp;
946 	ordered = 0;
947 	writable = 0;
948 
949 	/*
950 	 * Some hosts (such as bhyve) do not implement the barrier feature,
951 	 * so we emulate it in the driver by allowing the barrier request
952 	 * to be the only one in flight.
953 	 */
954 	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
955 		if (sc->vtblk_req_ordered != NULL)
956 			return (EBUSY);
957 		if (bp->bio_flags & BIO_ORDERED) {
958 			if (!virtqueue_empty(vq))
959 				return (EBUSY);
960 			ordered = 1;
961 			req->vbr_hdr.type &= vtblk_gtoh32(sc,
962 			    ~VIRTIO_BLK_T_BARRIER);
963 		}
964 	}
965 
966 	sglist_reset(sg);
967 	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));
968 
969 	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
970 		error = sglist_append_bio(sg, bp);
971 		if (error || sg->sg_nseg == sg->sg_maxseg) {
972 			panic("%s: bio %p data buffer too big %d",
973 			    __func__, bp, error);
974 		}
975 
976 		/* BIO_READ means the host writes into our buffer. */
977 		if (bp->bio_cmd == BIO_READ)
978 			writable = sg->sg_nseg - 1;
979 	} else if (bp->bio_cmd == BIO_DELETE) {
980 		struct virtio_blk_discard_write_zeroes *discard;
981 
982 		discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
983 		if (discard == NULL)
984 			return (ENOMEM);
985 
986 		bp->bio_driver1 = discard;
987 		discard->sector = vtblk_gtoh64(sc, bp->bio_offset / VTBLK_BSIZE);
988 		discard->num_sectors = vtblk_gtoh32(sc, bp->bio_bcount / VTBLK_BSIZE);
989 		error = sglist_append(sg, discard, sizeof(*discard));
990 		if (error || sg->sg_nseg == sg->sg_maxseg) {
991 			panic("%s: bio %p data buffer too big %d",
992 			    __func__, bp, error);
993 		}
994 	}
995 
996 	writable++;
997 	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
998 	readable = sg->sg_nseg - writable;
999 
1000 	error = virtqueue_enqueue(vq, req, sg, readable, writable);
1001 	if (error == 0 && ordered)
1002 		sc->vtblk_req_ordered = req;
1003 
1004 	return (error);
1005 }
1006 
1007 static int
1008 vtblk_request_error(struct vtblk_request *req)
1009 {
1010 	int error;
1011 
1012 	switch (req->vbr_ack) {
1013 	case VIRTIO_BLK_S_OK:
1014 		error = 0;
1015 		break;
1016 	case VIRTIO_BLK_S_UNSUPP:
1017 		error = ENOTSUP;
1018 		break;
1019 	default:
1020 		error = EIO;
1021 		break;
1022 	}
1023 
1024 	return (error);
1025 }
1026 
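/*
 * Reap completed requests from the virtqueue, recording each bio's error
 * status and collecting the bios so biodone() can be called later,
 * without the softc lock held.
 */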
1027 static void
1028 vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
1029 {
1030 	struct vtblk_request *req;
1031 	struct bio *bp;
1032 
1033 	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
1034 		if (sc->vtblk_req_ordered != NULL) {
1035 			MPASS(sc->vtblk_req_ordered == req);
1036 			sc->vtblk_req_ordered = NULL;
1037 		}
1038 
1039 		bp = req->vbr_bp;
1040 		bp->bio_error = vtblk_request_error(req);
1041 		TAILQ_INSERT_TAIL(queue, bp, bio_queue);
1042 
1043 		vtblk_request_enqueue(sc, req);
1044 	}
1045 }
1046 
1047 static void
1048 vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
1049 {
1050 	struct bio *bp, *tmp;
1051 
1052 	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
1053 		if (bp->bio_error != 0)
1054 			disk_err(bp, "hard error", -1, 1);
1055 		vtblk_bio_done(sc, bp, bp->bio_error);
1056 	}
1057 }
1058 
1059 static void
1060 vtblk_drain_vq(struct vtblk_softc *sc)
1061 {
1062 	struct virtqueue *vq;
1063 	struct vtblk_request *req;
1064 	int last;
1065 
1066 	vq = sc->vtblk_vq;
1067 	last = 0;
1068 
1069 	while ((req = virtqueue_drain(vq, &last)) != NULL) {
1070 		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
1071 		vtblk_request_enqueue(sc, req);
1072 	}
1073 
1074 	sc->vtblk_req_ordered = NULL;
1075 	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
1076 }
1077 
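/*
 * Fail every outstanding and queued bio with ENXIO and release the
 * preallocated requests. Called from vtblk_detach().
 */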
1078 static void
1079 vtblk_drain(struct vtblk_softc *sc)
1080 {
1081 	struct bio_queue_head *bioq;
1082 	struct vtblk_request *req;
1083 	struct bio *bp;
1084 
1085 	bioq = &sc->vtblk_bioq;
1086 
1087 	if (sc->vtblk_vq != NULL) {
1088 		struct bio_queue queue;
1089 
1090 		TAILQ_INIT(&queue);
1091 		vtblk_queue_completed(sc, &queue);
1092 		vtblk_done_completed(sc, &queue);
1093 
1094 		vtblk_drain_vq(sc);
1095 	}
1096 
1097 	while ((req = vtblk_request_next_ready(sc)) != NULL) {
1098 		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
1099 		vtblk_request_enqueue(sc, req);
1100 	}
1101 
1102 	while (bioq_first(bioq) != NULL) {
1103 		bp = bioq_takefirst(bioq);
1104 		vtblk_bio_done(sc, bp, ENXIO);
1105 	}
1106 
1107 	vtblk_request_free(sc);
1108 }
1109 
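/*
 * Push queued bios into the virtqueue until it fills or the bio queue
 * empties, then notify the host once if anything was enqueued. Does
 * nothing while the driver is suspended.
 */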
1110 static void
1111 vtblk_startio(struct vtblk_softc *sc)
1112 {
1113 	struct virtqueue *vq;
1114 	struct vtblk_request *req;
1115 	int enq;
1116 
1117 	VTBLK_LOCK_ASSERT(sc);
1118 	vq = sc->vtblk_vq;
1119 	enq = 0;
1120 
1121 	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
1122 		return;
1123 
1124 	while (!virtqueue_full(vq)) {
1125 		req = vtblk_request_next(sc);
1126 		if (req == NULL)
1127 			break;
1128 
1129 		if (vtblk_request_execute(sc, req) != 0) {
1130 			vtblk_request_requeue_ready(sc, req);
1131 			break;
1132 		}
1133 
1134 		enq++;
1135 	}
1136 
1137 	if (enq > 0)
1138 		virtqueue_notify(vq);
1139 }
1140 
1141 static void
1142 vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
1143 {
1144 
1145 	/* Because of GEOM direct dispatch, we cannot hold any locks. */
1146 	if (sc != NULL)
1147 		VTBLK_LOCK_ASSERT_NOTOWNED(sc);
1148 
1149 	if (error) {
1150 		bp->bio_resid = bp->bio_bcount;
1151 		bp->bio_error = error;
1152 		bp->bio_flags |= BIO_ERROR;
1153 	} else {
1154 		kmsan_mark_bio(bp, KMSAN_STATE_INITED);
1155 	}
1156 
1157 	if (bp->bio_driver1 != NULL) {
1158 		free(bp->bio_driver1, M_DEVBUF);
1159 		bp->bio_driver1 = NULL;
1160 	}
1161 
1162 	biodone(bp);
1163 }
1164 
1165 #define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
1166 	if (virtio_with_feature(_dev, _feature)) {			\
1167 		virtio_read_device_config(_dev,				\
1168 		    offsetof(struct virtio_blk_config, _field),		\
1169 		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
1170 	}
1171 
1172 static void
1173 vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
1174 {
1175 	device_t dev;
1176 
1177 	dev = sc->vtblk_dev;
1178 
1179 	bzero(blkcfg, sizeof(struct virtio_blk_config));
1180 
1181 	/* The capacity is always available. */
1182 	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
1183 	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));
1184 
1185 	/* Read the configuration if the feature was negotiated. */
1186 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
1187 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
1188 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
1189 	    geometry.cylinders, blkcfg);
1190 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
1191 	    geometry.heads, blkcfg);
1192 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY,
1193 	    geometry.sectors, blkcfg);
1194 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
1195 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
1196 	    topology.physical_block_exp, blkcfg);
1197 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
1198 	    topology.alignment_offset, blkcfg);
1199 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
1200 	    topology.min_io_size, blkcfg);
1201 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY,
1202 	    topology.opt_io_size, blkcfg);
1203 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
1204 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
1205 	    blkcfg);
1206 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
1207 	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
1208 	    blkcfg);
1209 }
1210 
1211 #undef VTBLK_GET_CONFIG
1212 
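/*
 * Fetch the disk identification string with a polled
 * VIRTIO_BLK_T_GET_ID request; a failure here is only logged.
 */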
1213 static void
1214 vtblk_ident(struct vtblk_softc *sc)
1215 {
1216 	struct bio buf;
1217 	struct disk *dp;
1218 	struct vtblk_request *req;
1219 	int len, error;
1220 
1221 	dp = sc->vtblk_disk;
1222 	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);
1223 
1224 	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
1225 		return;
1226 
1227 	req = vtblk_request_dequeue(sc);
1228 	if (req == NULL)
1229 		return;
1230 
1231 	req->vbr_ack = -1;
1232 	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_GET_ID);
1233 	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
1234 	req->vbr_hdr.sector = 0;
1235 
1236 	req->vbr_bp = &buf;
1237 	g_reset_bio(&buf);
1238 
1239 	buf.bio_cmd = BIO_READ;
1240 	buf.bio_data = dp->d_ident;
1241 	buf.bio_bcount = len;
1242 
1243 	VTBLK_LOCK(sc);
1244 	error = vtblk_poll_request(sc, req);
1245 	VTBLK_UNLOCK(sc);
1246 
1247 	vtblk_request_enqueue(sc, req);
1248 
1249 	if (error) {
1250 		device_printf(sc->vtblk_dev,
1251 		    "error getting device identifier: %d\n", error);
1252 	}
1253 }
1254 
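/*
 * Execute a single request synchronously, busy-waiting for completion.
 * Used for the GET_ID request and for kernel dumps.
 */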
1255 static int
1256 vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
1257 {
1258 	struct virtqueue *vq;
1259 	int error;
1260 
1261 	vq = sc->vtblk_vq;
1262 
1263 	if (!virtqueue_empty(vq))
1264 		return (EBUSY);
1265 
1266 	error = vtblk_request_execute(sc, req);
1267 	if (error)
1268 		return (error);
1269 
1270 	virtqueue_notify(vq);
1271 	virtqueue_poll(vq, NULL);
1272 
1273 	error = vtblk_request_error(req);
1274 	if (error && bootverbose) {
1275 		device_printf(sc->vtblk_dev,
1276 		    "%s: IO error: %d\n", __func__, error);
1277 	}
1278 
1279 	return (error);
1280 }
1281 
1282 static int
1283 vtblk_quiesce(struct vtblk_softc *sc)
1284 {
1285 	int error;
1286 
1287 	VTBLK_LOCK_ASSERT(sc);
1288 	error = 0;
1289 
1290 	while (!virtqueue_empty(sc->vtblk_vq)) {
1291 		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
1292 		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
1293 			error = EBUSY;
1294 			break;
1295 		}
1296 	}
1297 
1298 	return (error);
1299 }
1300 
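/*
 * Virtqueue interrupt handler: reap completions, restart I/O, and
 * re-check the queue before returning so completions that raced with
 * re-enabling the interrupt are not missed.
 */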
1301 static void
1302 vtblk_vq_intr(void *xsc)
1303 {
1304 	struct vtblk_softc *sc;
1305 	struct virtqueue *vq;
1306 	struct bio_queue queue;
1307 
1308 	sc = xsc;
1309 	vq = sc->vtblk_vq;
1310 	TAILQ_INIT(&queue);
1311 
1312 	VTBLK_LOCK(sc);
1313 
1314 again:
1315 	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
1316 		goto out;
1317 
1318 	vtblk_queue_completed(sc, &queue);
1319 	vtblk_startio(sc);
1320 
1321 	if (virtqueue_enable_intr(vq) != 0) {
1322 		virtqueue_disable_intr(vq);
1323 		goto again;
1324 	}
1325 
1326 	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
1327 		wakeup(&sc->vtblk_vq);
1328 
1329 out:
1330 	VTBLK_UNLOCK(sc);
1331 	vtblk_done_completed(sc, &queue);
1332 }
1333 
1334 static void
1335 vtblk_stop(struct vtblk_softc *sc)
1336 {
1337 
1338 	virtqueue_disable_intr(sc->vtblk_vq);
1339 	virtio_stop(sc->vtblk_dev);
1340 }
1341 
1342 static void
1343 vtblk_dump_quiesce(struct vtblk_softc *sc)
1344 {
1345 
1346 	/*
1347 	 * Spin here until all the requests that were in flight at the
1348 	 * time of the dump have completed and been queued. The queued
1349 	 * requests will be biodone'd once the dump is finished.
1350 	 */
1351 	while (!virtqueue_empty(sc->vtblk_vq))
1352 		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
1353 }
1354 
1355 static int
1356 vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
1357     size_t length)
1358 {
1359 	struct bio buf;
1360 	struct vtblk_request *req;
1361 
1362 	req = &sc->vtblk_dump_request;
1363 	req->vbr_ack = -1;
1364 	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_OUT);
1365 	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
1366 	req->vbr_hdr.sector = vtblk_gtoh64(sc, offset / VTBLK_BSIZE);
1367 
1368 	req->vbr_bp = &buf;
1369 	g_reset_bio(&buf);
1370 
1371 	buf.bio_cmd = BIO_WRITE;
1372 	buf.bio_data = virtual;
1373 	buf.bio_bcount = length;
1374 
1375 	return (vtblk_poll_request(sc, req));
1376 }
1377 
1378 static int
1379 vtblk_dump_flush(struct vtblk_softc *sc)
1380 {
1381 	struct bio buf;
1382 	struct vtblk_request *req;
1383 
1384 	req = &sc->vtblk_dump_request;
1385 	req->vbr_ack = -1;
1386 	req->vbr_hdr.type = vtblk_gtoh32(sc, VIRTIO_BLK_T_FLUSH);
1387 	req->vbr_hdr.ioprio = vtblk_gtoh32(sc, 1);
1388 	req->vbr_hdr.sector = 0;
1389 
1390 	req->vbr_bp = &buf;
1391 	g_reset_bio(&buf);
1392 
1393 	buf.bio_cmd = BIO_FLUSH;
1394 
1395 	return (vtblk_poll_request(sc, req));
1396 }
1397 
1398 static void
1399 vtblk_dump_complete(struct vtblk_softc *sc)
1400 {
1401 
1402 	vtblk_dump_flush(sc);
1403 
1404 	VTBLK_UNLOCK(sc);
1405 	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
1406 	VTBLK_LOCK(sc);
1407 }
1408 
1409 static void
1410 vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
1411 {
1412 
1413 	/* Set either writeback (1) or writethrough (0) mode. */
1414 	virtio_write_dev_config_1(sc->vtblk_dev,
1415 	    offsetof(struct virtio_blk_config, wce), wc);
1416 }
1417 
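/*
 * Determine the initial cache mode: when the cache mode is configurable,
 * honor the writecache_mode tunable (falling back to the device's current
 * setting), otherwise infer writeback from the flush feature.
 */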
1418 static int
1419 vtblk_write_cache_enabled(struct vtblk_softc *sc,
1420     struct virtio_blk_config *blkcfg)
1421 {
1422 	int wc;
1423 
1424 	if (sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) {
1425 		wc = vtblk_tunable_int(sc, "writecache_mode",
1426 		    vtblk_writecache_mode);
1427 		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
1428 			vtblk_set_write_cache(sc, wc);
1429 		else
1430 			wc = blkcfg->wce;
1431 	} else
1432 		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_FLUSH);
1433 
1434 	return (wc);
1435 }
1436 
1437 static int
1438 vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
1439 {
1440 	struct vtblk_softc *sc;
1441 	int wc, error;
1442 
1443 	sc = oidp->oid_arg1;
1444 	wc = sc->vtblk_write_cache;
1445 
1446 	error = sysctl_handle_int(oidp, &wc, 0, req);
1447 	if (error || req->newptr == NULL)
1448 		return (error);
1449 	if ((sc->vtblk_flags & VTBLK_FLAG_WCE_CONFIG) == 0)
1450 		return (EPERM);
1451 	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
1452 		return (EINVAL);
1453 
1454 	VTBLK_LOCK(sc);
1455 	sc->vtblk_write_cache = wc;
1456 	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
1457 	VTBLK_UNLOCK(sc);
1458 
1459 	return (0);
1460 }
1461 
1462 static void
1463 vtblk_setup_sysctl(struct vtblk_softc *sc)
1464 {
1465 	device_t dev;
1466 	struct sysctl_ctx_list *ctx;
1467 	struct sysctl_oid *tree;
1468 	struct sysctl_oid_list *child;
1469 
1470 	dev = sc->vtblk_dev;
1471 	ctx = device_get_sysctl_ctx(dev);
1472 	tree = device_get_sysctl_tree(dev);
1473 	child = SYSCTL_CHILDREN(tree);
1474 
1475 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
1476 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
1477 	    vtblk_write_cache_sysctl, "I",
1478 	    "Write cache mode (writethrough (0) or writeback (1))");
1479 }
1480 
1481 static int
1482 vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
1483 {
1484 	char path[64];
1485 
1486 	snprintf(path, sizeof(path),
1487 	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
1488 	TUNABLE_INT_FETCH(path, &def);
1489 
1490 	return (def);
1491 }
1492