/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"

struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020
#define VTBLK_FLAG_DISCARD	0x0040

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},
	{ VIRTIO_BLK_F_DISCARD,		"Discard"	},
	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_BLK_F_DISCARD		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * VTBLK_BSIZE;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}

static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if RO. Unfortunately, there does not seem to
	 * be a better way to report our readonly'ness to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH ||
	    bp->bio_cmd == BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
	    (bp->bio_cmd != BIO_FLUSH) && (bp->bio_cmd != BIO_DELETE)) {
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	if ((bp->bio_cmd == BIO_DELETE) &&
	    !(sc->vtblk_flags & VTBLK_FLAG_DISCARD)) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD))
		sc->vtblk_flags |= VTBLK_FLAG_DISCARD;
}

static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}
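
/*
 * Added note on the segment accounting above (illustrative values,
 * assuming 4 KB pages and the historical 128 KB MAXPHYS): a transfer
 * spans at most MAXPHYS / PAGE_SIZE + 1 = 33 data segments (32 pages
 * plus one for a non page aligned buffer), so a host seg_max of 128
 * yields VTBLK_MIN_SEGMENTS + MIN(128, 33) = 35 total segments.
 */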

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * VTBLK_BSIZE;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = VTBLK_BSIZE;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of non
	 * page aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_DISCARD)) {
		dp->d_flags |= DISKFLAG_CANDELETE;
		dp->d_delmaxsize = blkcfg->max_discard_sectors * VTBLK_BSIZE;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
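	/*
	 * Added note (illustrative arithmetic): a 128-entry virtqueue
	 * without indirect descriptors yields 128 / VTBLK_MIN_SEGMENTS =
	 * 64 preallocated requests, since each request occupies at least
	 * two descriptors in the ring.
	 */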
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
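	/*
	 * Added note: the ready queue holds requests that were already
	 * built from a bio but could not be enqueued (e.g. the virtqueue
	 * was full); vtblk_request_next() always drains this queue before
	 * dequeuing new bios.
	 */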
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}

static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	case BIO_DELETE:
		req->vbr_hdr.type = VIRTIO_BLK_T_DISCARD;
		req->vbr_hdr.sector = bp->bio_offset / VTBLK_BSIZE;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}

static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
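		/*
		 * Added note: a request occupies the virtqueue as
		 * [header][data...][status]. The header is always device-
		 * readable and the status byte appended below is always
		 * device-writable, so only a read's data segments are
		 * counted here; the later writable++ covers the status.
		 */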
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	} else if (bp->bio_cmd == BIO_DELETE) {
		struct virtio_blk_discard_write_zeroes *discard;

		discard = malloc(sizeof(*discard), M_DEVBUF, M_NOWAIT | M_ZERO);
		if (discard == NULL)
			return (ENOMEM);
		discard->sector = bp->bio_offset / VTBLK_BSIZE;
		discard->num_sectors = bp->bio_bcount / VTBLK_BSIZE;
		bp->bio_driver1 = discard;
		error = sglist_append(sg, discard, sizeof(*discard));
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
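		/*
		 * Added note: dispatch requests until the virtqueue fills
		 * or vtblk_request_execute() fails; a failed request is
		 * parked on the ready queue and retried when the next
		 * completion interrupt calls vtblk_startio() again.
		 */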
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	if (bp->bio_driver1 != NULL) {
		free(bp->bio_driver1, M_DEVBUF);
		bp->bio_driver1 = NULL;
	}

	biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, wce, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_sectors,
	    blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, max_discard_seg, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_DISCARD, discard_sector_alignment,
	    blkcfg);
}

#undef VTBLK_GET_CONFIG

static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

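	/*
	 * Added note: virtqueue_poll() busy-waits for the host to return
	 * the descriptor, so this path is used only where sleeping or
	 * interrupts may be unavailable (device ident and kernel dumps).
	 */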
	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}

static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / VTBLK_BSIZE;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
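	/*
	 * Added note: this updates the "wce" byte in the device's config
	 * space; callers only reach here when VIRTIO_BLK_F_CONFIG_WCE
	 * was negotiated (see vtblk_write_cache_enabled() and the sysctl
	 * handler below).
	 */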
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, wce), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->wce;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}

static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    vtblk_write_cache_sysctl, "I",
	    "Write cache mode (writethrough (0) or writeback (1))");
}

static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}