/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO block devices. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <geom/geom.h>
#include <geom/geom_disk.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/block/virtio_blk.h>

#include "virtio_if.h"
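
/*
 * Each bio accepted by the driver is wrapped in a vtblk_request. A
 * request is enqueued as a single descriptor chain: the device-readable
 * vbr_hdr first, any bio data segments in the middle, and the
 * device-writable one-byte vbr_ack status last, which is why
 * VTBLK_MIN_SEGMENTS below is two.
 */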
struct vtblk_request {
	struct virtio_blk_outhdr	 vbr_hdr;
	struct bio			*vbr_bp;
	uint8_t				 vbr_ack;
	TAILQ_ENTRY(vtblk_request)	 vbr_link;
};

enum vtblk_cache_mode {
	VTBLK_CACHE_WRITETHROUGH,
	VTBLK_CACHE_WRITEBACK,
	VTBLK_CACHE_MAX
};

struct vtblk_softc {
	device_t		 vtblk_dev;
	struct mtx		 vtblk_mtx;
	uint64_t		 vtblk_features;
	uint32_t		 vtblk_flags;
#define VTBLK_FLAG_INDIRECT	0x0001
#define VTBLK_FLAG_READONLY	0x0002
#define VTBLK_FLAG_DETACH	0x0004
#define VTBLK_FLAG_SUSPEND	0x0008
#define VTBLK_FLAG_BARRIER	0x0010
#define VTBLK_FLAG_WC_CONFIG	0x0020

	struct virtqueue	*vtblk_vq;
	struct sglist		*vtblk_sglist;
	struct disk		*vtblk_disk;

	struct bio_queue_head	 vtblk_bioq;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_free;
	TAILQ_HEAD(, vtblk_request)
				 vtblk_req_ready;
	struct vtblk_request	*vtblk_req_ordered;

	int			 vtblk_max_nsegs;
	int			 vtblk_request_count;
	enum vtblk_cache_mode	 vtblk_write_cache;

	struct bio_queue	 vtblk_dump_queue;
	struct vtblk_request	 vtblk_dump_request;
};

static struct virtio_feature_desc vtblk_feature_desc[] = {
	{ VIRTIO_BLK_F_BARRIER,		"HostBarrier"	},
	{ VIRTIO_BLK_F_SIZE_MAX,	"MaxSegSize"	},
	{ VIRTIO_BLK_F_SEG_MAX,		"MaxNumSegs"	},
	{ VIRTIO_BLK_F_GEOMETRY,	"DiskGeometry"	},
	{ VIRTIO_BLK_F_RO,		"ReadOnly"	},
	{ VIRTIO_BLK_F_BLK_SIZE,	"BlockSize"	},
	{ VIRTIO_BLK_F_SCSI,		"SCSICmds"	},
	{ VIRTIO_BLK_F_WCE,		"WriteCache"	},
	{ VIRTIO_BLK_F_TOPOLOGY,	"Topology"	},
	{ VIRTIO_BLK_F_CONFIG_WCE,	"ConfigWCE"	},

	{ 0, NULL }
};

static int	vtblk_modevent(module_t, int, void *);

static int	vtblk_probe(device_t);
static int	vtblk_attach(device_t);
static int	vtblk_detach(device_t);
static int	vtblk_suspend(device_t);
static int	vtblk_resume(device_t);
static int	vtblk_shutdown(device_t);
static int	vtblk_config_change(device_t);

static int	vtblk_open(struct disk *);
static int	vtblk_close(struct disk *);
static int	vtblk_ioctl(struct disk *, u_long, void *, int,
		    struct thread *);
static int	vtblk_dump(void *, void *, vm_offset_t, off_t, size_t);
static void	vtblk_strategy(struct bio *);

static void	vtblk_negotiate_features(struct vtblk_softc *);
static void	vtblk_setup_features(struct vtblk_softc *);
static int	vtblk_maximum_segments(struct vtblk_softc *,
		    struct virtio_blk_config *);
static int	vtblk_alloc_virtqueue(struct vtblk_softc *);
static void	vtblk_resize_disk(struct vtblk_softc *, uint64_t);
static void	vtblk_alloc_disk(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_create_disk(struct vtblk_softc *);

static int	vtblk_request_prealloc(struct vtblk_softc *);
static void	vtblk_request_free(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_dequeue(struct vtblk_softc *);
static void	vtblk_request_enqueue(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next_ready(struct vtblk_softc *);
static void	vtblk_request_requeue_ready(struct vtblk_softc *,
		    struct vtblk_request *);
static struct vtblk_request *
		vtblk_request_next(struct vtblk_softc *);
static struct vtblk_request *
		vtblk_request_bio(struct vtblk_softc *);
static int	vtblk_request_execute(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_request_error(struct vtblk_request *);

static void	vtblk_queue_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_done_completed(struct vtblk_softc *,
		    struct bio_queue *);
static void	vtblk_drain_vq(struct vtblk_softc *);
static void	vtblk_drain(struct vtblk_softc *);

static void	vtblk_startio(struct vtblk_softc *);
static void	vtblk_bio_done(struct vtblk_softc *, struct bio *, int);

static void	vtblk_read_config(struct vtblk_softc *,
		    struct virtio_blk_config *);
static void	vtblk_ident(struct vtblk_softc *);
static int	vtblk_poll_request(struct vtblk_softc *,
		    struct vtblk_request *);
static int	vtblk_quiesce(struct vtblk_softc *);
static void	vtblk_vq_intr(void *);
static void	vtblk_stop(struct vtblk_softc *);

static void	vtblk_dump_quiesce(struct vtblk_softc *);
static int	vtblk_dump_write(struct vtblk_softc *, void *, off_t, size_t);
static int	vtblk_dump_flush(struct vtblk_softc *);
static void	vtblk_dump_complete(struct vtblk_softc *);

static void	vtblk_set_write_cache(struct vtblk_softc *, int);
static int	vtblk_write_cache_enabled(struct vtblk_softc *sc,
		    struct virtio_blk_config *);
static int	vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS);

static void	vtblk_setup_sysctl(struct vtblk_softc *);
static int	vtblk_tunable_int(struct vtblk_softc *, const char *, int);

/* Tunables. */
static int vtblk_no_ident = 0;
TUNABLE_INT("hw.vtblk.no_ident", &vtblk_no_ident);
static int vtblk_writecache_mode = -1;
TUNABLE_INT("hw.vtblk.writecache_mode", &vtblk_writecache_mode);
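
/*
 * Both tunables may be set from loader.conf(5); the values shown are
 * illustrative only:
 *
 *   hw.vtblk.no_ident="1"
 *   hw.vtblk.writecache_mode="0"
 */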

/* Features desired/implemented by this driver. */
#define VTBLK_FEATURES \
    (VIRTIO_BLK_F_BARRIER		| \
     VIRTIO_BLK_F_SIZE_MAX		| \
     VIRTIO_BLK_F_SEG_MAX		| \
     VIRTIO_BLK_F_GEOMETRY		| \
     VIRTIO_BLK_F_RO			| \
     VIRTIO_BLK_F_BLK_SIZE		| \
     VIRTIO_BLK_F_WCE			| \
     VIRTIO_BLK_F_TOPOLOGY		| \
     VIRTIO_BLK_F_CONFIG_WCE		| \
     VIRTIO_RING_F_INDIRECT_DESC)

#define VTBLK_MTX(_sc)		&(_sc)->vtblk_mtx
#define VTBLK_LOCK_INIT(_sc, _name) \
				mtx_init(VTBLK_MTX((_sc)), (_name), \
				    "VirtIO Block Lock", MTX_DEF)
#define VTBLK_LOCK(_sc)		mtx_lock(VTBLK_MTX((_sc)))
#define VTBLK_UNLOCK(_sc)	mtx_unlock(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_DESTROY(_sc)	mtx_destroy(VTBLK_MTX((_sc)))
#define VTBLK_LOCK_ASSERT(_sc)	mtx_assert(VTBLK_MTX((_sc)), MA_OWNED)
#define VTBLK_LOCK_ASSERT_NOTOWNED(_sc) \
				mtx_assert(VTBLK_MTX((_sc)), MA_NOTOWNED)

#define VTBLK_DISK_NAME		"vtbd"
#define VTBLK_QUIESCE_TIMEOUT	(30 * hz)

/*
 * Each block request uses at least two segments - one for the header
 * and one for the status.
 */
#define VTBLK_MIN_SEGMENTS	2

static device_method_t vtblk_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtblk_probe),
	DEVMETHOD(device_attach,	vtblk_attach),
	DEVMETHOD(device_detach,	vtblk_detach),
	DEVMETHOD(device_suspend,	vtblk_suspend),
	DEVMETHOD(device_resume,	vtblk_resume),
	DEVMETHOD(device_shutdown,	vtblk_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change,	vtblk_config_change),

	DEVMETHOD_END
};

static driver_t vtblk_driver = {
	"vtblk",
	vtblk_methods,
	sizeof(struct vtblk_softc)
};
static devclass_t vtblk_devclass;

DRIVER_MODULE(virtio_blk, virtio_mmio, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
DRIVER_MODULE(virtio_blk, virtio_pci, vtblk_driver, vtblk_devclass,
    vtblk_modevent, 0);
MODULE_VERSION(virtio_blk, 1);
MODULE_DEPEND(virtio_blk, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPTABLE(virtio_blk, VIRTIO_ID_BLOCK, "VirtIO Block Adapter");
VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_blk);
VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_blk);

static int
vtblk_modevent(module_t mod, int type, void *unused)
{
	int error;

	error = 0;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtblk_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_blk));
}

static int
vtblk_attach(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	int error;

	virtio_set_feature_desc(dev, vtblk_feature_desc);

	sc = device_get_softc(dev);
	sc->vtblk_dev = dev;
	VTBLK_LOCK_INIT(sc, device_get_nameunit(dev));
	bioq_init(&sc->vtblk_bioq);
	TAILQ_INIT(&sc->vtblk_dump_queue);
	TAILQ_INIT(&sc->vtblk_req_free);
	TAILQ_INIT(&sc->vtblk_req_ready);

	vtblk_setup_sysctl(sc);
	vtblk_setup_features(sc);

	vtblk_read_config(sc, &blkcfg);

	/*
	 * With the current sglist(9) implementation, it is not easy
	 * for us to support a maximum segment size as adjacent
	 * segments are coalesced. For now, just make sure it's larger
	 * than the maximum supported transfer size.
	 */
	if (virtio_with_feature(dev, VIRTIO_BLK_F_SIZE_MAX)) {
		if (blkcfg.size_max < MAXPHYS) {
			error = ENOTSUP;
			device_printf(dev, "host requires unsupported "
			    "maximum segment size feature\n");
			goto fail;
		}
	}

	sc->vtblk_max_nsegs = vtblk_maximum_segments(sc, &blkcfg);
	if (sc->vtblk_max_nsegs <= VTBLK_MIN_SEGMENTS) {
		error = EINVAL;
		device_printf(dev, "fewer than minimum number of segments "
		    "allowed: %d\n", sc->vtblk_max_nsegs);
		goto fail;
	}

	sc->vtblk_sglist = sglist_alloc(sc->vtblk_max_nsegs, M_NOWAIT);
	if (sc->vtblk_sglist == NULL) {
		error = ENOMEM;
		device_printf(dev, "cannot allocate sglist\n");
		goto fail;
	}

	error = vtblk_alloc_virtqueue(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	error = vtblk_request_prealloc(sc);
	if (error) {
		device_printf(dev, "cannot preallocate requests\n");
		goto fail;
	}

	vtblk_alloc_disk(sc, &blkcfg);

	error = virtio_setup_intr(dev, INTR_TYPE_BIO | INTR_ENTROPY);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupt\n");
		goto fail;
	}

	vtblk_create_disk(sc);

	virtqueue_enable_intr(sc->vtblk_vq);

fail:
	if (error)
		vtblk_detach(dev);

	return (error);
}

static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(sc);

	return (0);
}

static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc;
	int error;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	/* XXX BMV: virtio_stop(), etc needed here? */
	error = vtblk_quiesce(sc);
	if (error)
		sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	VTBLK_UNLOCK(sc);

	return (error);
}

static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	VTBLK_LOCK(sc);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
	vtblk_startio(sc);
	VTBLK_UNLOCK(sc);

	return (0);
}

static int
vtblk_shutdown(device_t dev)
{

	return (0);
}

static int
vtblk_config_change(device_t dev)
{
	struct vtblk_softc *sc;
	struct virtio_blk_config blkcfg;
	uint64_t capacity;

	sc = device_get_softc(dev);

	vtblk_read_config(sc, &blkcfg);

	/* Capacity is always in 512-byte units. */
	capacity = blkcfg.capacity * 512;

	if (sc->vtblk_disk->d_mediasize != capacity)
		vtblk_resize_disk(sc, capacity);

	return (0);
}

static int
vtblk_open(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (sc->vtblk_flags & VTBLK_FLAG_DETACH ? ENXIO : 0);
}

static int
vtblk_close(struct disk *dp)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (0);
}

static int
vtblk_ioctl(struct disk *dp, u_long cmd, void *addr, int flag,
    struct thread *td)
{
	struct vtblk_softc *sc;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	return (ENOTTY);
}
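
/*
 * Kernel dump entry point, installed as d_dump. Writes are issued
 * synchronously via vtblk_poll_request(); the condition below treats a
 * call with virtual == NULL and offset == 0 (or any error) as the end
 * of the dump, at which point the device is flushed and the bios parked
 * on the dump queue are completed.
 */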
static int
vtblk_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct disk *dp;
	struct vtblk_softc *sc;
	int error;

	dp = arg;
	error = 0;

	if ((sc = dp->d_drv1) == NULL)
		return (ENXIO);

	VTBLK_LOCK(sc);

	vtblk_dump_quiesce(sc);

	if (length > 0)
		error = vtblk_dump_write(sc, virtual, offset, length);
	if (error || (virtual == NULL && offset == 0))
		vtblk_dump_complete(sc);

	VTBLK_UNLOCK(sc);

	return (error);
}

static void
vtblk_strategy(struct bio *bp)
{
	struct vtblk_softc *sc;

	if ((sc = bp->bio_disk->d_drv1) == NULL) {
		vtblk_bio_done(NULL, bp, EINVAL);
		return;
	}

	/*
	 * Fail any write if the device is read-only. Unfortunately,
	 * there does not seem to be a better way to report our
	 * read-only status to GEOM above.
	 */
	if (sc->vtblk_flags & VTBLK_FLAG_READONLY &&
	    (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_FLUSH)) {
		vtblk_bio_done(sc, bp, EROFS);
		return;
	}

	if ((bp->bio_cmd != BIO_READ) && (bp->bio_cmd != BIO_WRITE) &&
	    (bp->bio_cmd != BIO_FLUSH)) {
		vtblk_bio_done(sc, bp, EOPNOTSUPP);
		return;
	}

	VTBLK_LOCK(sc);

	if (sc->vtblk_flags & VTBLK_FLAG_DETACH) {
		VTBLK_UNLOCK(sc);
		vtblk_bio_done(sc, bp, ENXIO);
		return;
	}

	bioq_insert_tail(&sc->vtblk_bioq, bp);
	vtblk_startio(sc);

	VTBLK_UNLOCK(sc);
}

static void
vtblk_negotiate_features(struct vtblk_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtblk_dev;
	features = VTBLK_FEATURES;

	sc->vtblk_features = virtio_negotiate_features(dev, features);
}

static void
vtblk_setup_features(struct vtblk_softc *sc)
{
	device_t dev;

	dev = sc->vtblk_dev;

	vtblk_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtblk_flags |= VTBLK_FLAG_INDIRECT;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
		sc->vtblk_flags |= VTBLK_FLAG_READONLY;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_BARRIER))
		sc->vtblk_flags |= VTBLK_FLAG_BARRIER;
	if (virtio_with_feature(dev, VIRTIO_BLK_F_CONFIG_WCE))
		sc->vtblk_flags |= VTBLK_FLAG_WC_CONFIG;
}
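
/*
 * Worked example, assuming 4 KB pages and the stock 128 KB MAXPHYS: a
 * transfer's data needs at most MAXPHYS / PAGE_SIZE + 1 = 33 segments
 * (the + 1 covering a non-page-aligned buffer), so a host advertising
 * seg_max >= 33 yields vtblk_max_nsegs = 2 + 33 = 35, header and ack
 * segments included.
 */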
static int
vtblk_maximum_segments(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	device_t dev;
	int nsegs;

	dev = sc->vtblk_dev;
	nsegs = VTBLK_MIN_SEGMENTS;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_SEG_MAX)) {
		nsegs += MIN(blkcfg->seg_max, MAXPHYS / PAGE_SIZE + 1);
		if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
			nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
	} else
		nsegs += 1;

	return (nsegs);
}

static int
vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info;

	dev = sc->vtblk_dev;

	VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
	    vtblk_vq_intr, sc, &sc->vtblk_vq,
	    "%s request", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, 0, 1, &vq_info));
}

static void
vtblk_resize_disk(struct vtblk_softc *sc, uint64_t new_capacity)
{
	device_t dev;
	struct disk *dp;
	int error;

	dev = sc->vtblk_dev;
	dp = sc->vtblk_disk;

	dp->d_mediasize = new_capacity;
	if (bootverbose) {
		device_printf(dev, "resized to %juMB (%ju %u byte sectors)\n",
		    (uintmax_t) dp->d_mediasize >> 20,
		    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
		    dp->d_sectorsize);
	}

	error = disk_resize(dp, M_NOWAIT);
	if (error) {
		device_printf(dev,
		    "disk_resize(9) failed, error: %d\n", error);
	}
}

static void
vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;
	struct disk *dp;

	dev = sc->vtblk_dev;

	sc->vtblk_disk = dp = disk_alloc();
	dp->d_open = vtblk_open;
	dp->d_close = vtblk_close;
	dp->d_ioctl = vtblk_ioctl;
	dp->d_strategy = vtblk_strategy;
	dp->d_name = VTBLK_DISK_NAME;
	dp->d_unit = device_get_unit(dev);
	dp->d_drv1 = sc;
	dp->d_flags = DISKFLAG_CANFLUSHCACHE | DISKFLAG_UNMAPPED_BIO |
	    DISKFLAG_DIRECT_COMPLETION;
	dp->d_hba_vendor = virtio_get_vendor(dev);
	dp->d_hba_device = virtio_get_device(dev);
	dp->d_hba_subvendor = virtio_get_subvendor(dev);
	dp->d_hba_subdevice = virtio_get_subdevice(dev);

	if ((sc->vtblk_flags & VTBLK_FLAG_READONLY) == 0)
		dp->d_dump = vtblk_dump;

	/* Capacity is always in 512-byte units. */
	dp->d_mediasize = blkcfg->capacity * 512;

	if (virtio_with_feature(dev, VIRTIO_BLK_F_BLK_SIZE))
		dp->d_sectorsize = blkcfg->blk_size;
	else
		dp->d_sectorsize = 512;

	/*
	 * The VirtIO maximum I/O size is given in terms of segments.
	 * However, FreeBSD limits I/O size by logical buffer size, not
	 * by physically contiguous pages. Therefore, we have to assume
	 * no pages are contiguous. This may impose an artificially low
	 * maximum I/O size. But in practice, since QEMU advertises 128
	 * segments, this gives us a maximum IO size of 125 * PAGE_SIZE,
	 * which is typically greater than MAXPHYS. Eventually we should
	 * just advertise MAXPHYS and split buffers that are too big.
	 *
	 * Note we must subtract one additional segment in case of
	 * non-page-aligned buffers.
	 */
	dp->d_maxsize = (sc->vtblk_max_nsegs - VTBLK_MIN_SEGMENTS - 1) *
	    PAGE_SIZE;
	if (dp->d_maxsize < PAGE_SIZE)
		dp->d_maxsize = PAGE_SIZE; /* XXX */

	if (virtio_with_feature(dev, VIRTIO_BLK_F_GEOMETRY)) {
		dp->d_fwsectors = blkcfg->geometry.sectors;
		dp->d_fwheads = blkcfg->geometry.heads;
	}

	if (virtio_with_feature(dev, VIRTIO_BLK_F_TOPOLOGY) &&
	    blkcfg->topology.physical_block_exp > 0) {
		dp->d_stripesize = dp->d_sectorsize *
		    (1 << blkcfg->topology.physical_block_exp);
		dp->d_stripeoffset = (dp->d_stripesize -
		    blkcfg->topology.alignment_offset * dp->d_sectorsize) %
		    dp->d_stripesize;
	}

	if (vtblk_write_cache_enabled(sc, blkcfg) != 0)
		sc->vtblk_write_cache = VTBLK_CACHE_WRITEBACK;
	else
		sc->vtblk_write_cache = VTBLK_CACHE_WRITETHROUGH;
}

static void
vtblk_create_disk(struct vtblk_softc *sc)
{
	struct disk *dp;

	dp = sc->vtblk_disk;

	vtblk_ident(sc);

	device_printf(sc->vtblk_dev, "%juMB (%ju %u byte sectors)\n",
	    (uintmax_t) dp->d_mediasize >> 20,
	    (uintmax_t) dp->d_mediasize / dp->d_sectorsize,
	    dp->d_sectorsize);

	disk_create(dp, DISK_VERSION);
}

static int
vtblk_request_prealloc(struct vtblk_softc *sc)
{
	struct vtblk_request *req;
	int i, nreqs;

	nreqs = virtqueue_size(sc->vtblk_vq);

	/*
	 * Preallocate sufficient requests to keep the virtqueue full. Each
	 * request consumes VTBLK_MIN_SEGMENTS or more descriptors so reduce
	 * the number allocated when indirect descriptors are not available.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_INDIRECT) == 0)
		nreqs /= VTBLK_MIN_SEGMENTS;

	for (i = 0; i < nreqs; i++) {
		req = malloc(sizeof(struct vtblk_request), M_DEVBUF, M_NOWAIT);
		if (req == NULL)
			return (ENOMEM);

		MPASS(sglist_count(&req->vbr_hdr, sizeof(req->vbr_hdr)) == 1);
		MPASS(sglist_count(&req->vbr_ack, sizeof(req->vbr_ack)) == 1);

		sc->vtblk_request_count++;
		vtblk_request_enqueue(sc, req);
	}

	return (0);
}

static void
vtblk_request_free(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	MPASS(TAILQ_EMPTY(&sc->vtblk_req_ready));

	while ((req = vtblk_request_dequeue(sc)) != NULL) {
		sc->vtblk_request_count--;
		free(req, M_DEVBUF);
	}

	KASSERT(sc->vtblk_request_count == 0,
	    ("%s: leaked %d requests", __func__, sc->vtblk_request_count));
}

static struct vtblk_request *
vtblk_request_dequeue(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_free);
	if (req != NULL) {
		TAILQ_REMOVE(&sc->vtblk_req_free, req, vbr_link);
		bzero(req, sizeof(struct vtblk_request));
	}

	return (req);
}

static void
vtblk_request_enqueue(struct vtblk_softc *sc, struct vtblk_request *req)
{

	TAILQ_INSERT_HEAD(&sc->vtblk_req_free, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next_ready(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = TAILQ_FIRST(&sc->vtblk_req_ready);
	if (req != NULL)
		TAILQ_REMOVE(&sc->vtblk_req_ready, req, vbr_link);

	return (req);
}

static void
vtblk_request_requeue_ready(struct vtblk_softc *sc, struct vtblk_request *req)
{

	/* NOTE: Currently, there will be at most one request in the queue. */
	TAILQ_INSERT_HEAD(&sc->vtblk_req_ready, req, vbr_link);
}

static struct vtblk_request *
vtblk_request_next(struct vtblk_softc *sc)
{
	struct vtblk_request *req;

	req = vtblk_request_next_ready(sc);
	if (req != NULL)
		return (req);

	return (vtblk_request_bio(sc));
}
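
/*
 * Note the header's sector field is always expressed in 512-byte
 * units, independent of the negotiated blk_size, which is why
 * bio_offset is divided by 512 below.
 */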
static struct vtblk_request *
vtblk_request_bio(struct vtblk_softc *sc)
{
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;

	if (bioq_first(bioq) == NULL)
		return (NULL);

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return (NULL);

	bp = bioq_takefirst(bioq);
	req->vbr_bp = bp;
	req->vbr_ack = -1;
	req->vbr_hdr.ioprio = 1;

	switch (bp->bio_cmd) {
	case BIO_FLUSH:
		req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
		break;
	case BIO_READ:
		req->vbr_hdr.type = VIRTIO_BLK_T_IN;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	case BIO_WRITE:
		req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
		req->vbr_hdr.sector = bp->bio_offset / 512;
		break;
	default:
		panic("%s: bio with unhandled cmd: %d", __func__, bp->bio_cmd);
	}

	if (bp->bio_flags & BIO_ORDERED)
		req->vbr_hdr.type |= VIRTIO_BLK_T_BARRIER;

	return (req);
}
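
/*
 * Descriptor direction bookkeeping below: the header is device-readable
 * and the ack byte device-writable, so a write of N data segments is
 * enqueued with readable = N + 1 and writable = 1, while a read is
 * enqueued with readable = 1 and writable = N + 1.
 */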
static int
vtblk_request_execute(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	struct sglist *sg;
	struct bio *bp;
	int ordered, readable, writable, error;

	vq = sc->vtblk_vq;
	sg = sc->vtblk_sglist;
	bp = req->vbr_bp;
	ordered = 0;
	writable = 0;

	/*
	 * Some hosts (such as bhyve) do not implement the barrier feature,
	 * so we emulate it in the driver by allowing the barrier request
	 * to be the only one in flight.
	 */
	if ((sc->vtblk_flags & VTBLK_FLAG_BARRIER) == 0) {
		if (sc->vtblk_req_ordered != NULL)
			return (EBUSY);
		if (bp->bio_flags & BIO_ORDERED) {
			if (!virtqueue_empty(vq))
				return (EBUSY);
			ordered = 1;
			req->vbr_hdr.type &= ~VIRTIO_BLK_T_BARRIER;
		}
	}

	sglist_reset(sg);
	sglist_append(sg, &req->vbr_hdr, sizeof(struct virtio_blk_outhdr));

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
		error = sglist_append_bio(sg, bp);
		if (error || sg->sg_nseg == sg->sg_maxseg) {
			panic("%s: bio %p data buffer too big %d",
			    __func__, bp, error);
		}

		/* BIO_READ means the host writes into our buffer. */
		if (bp->bio_cmd == BIO_READ)
			writable = sg->sg_nseg - 1;
	}

	writable++;
	sglist_append(sg, &req->vbr_ack, sizeof(uint8_t));
	readable = sg->sg_nseg - writable;

	error = virtqueue_enqueue(vq, req, sg, readable, writable);
	if (error == 0 && ordered)
		sc->vtblk_req_ordered = req;

	return (error);
}

static int
vtblk_request_error(struct vtblk_request *req)
{
	int error;

	switch (req->vbr_ack) {
	case VIRTIO_BLK_S_OK:
		error = 0;
		break;
	case VIRTIO_BLK_S_UNSUPP:
		error = ENOTSUP;
		break;
	default:
		error = EIO;
		break;
	}

	return (error);
}

static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct vtblk_request *req;
	struct bio *bp;

	while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
		if (sc->vtblk_req_ordered != NULL) {
			MPASS(sc->vtblk_req_ordered == req);
			sc->vtblk_req_ordered = NULL;
		}

		bp = req->vbr_bp;
		bp->bio_error = vtblk_request_error(req);
		TAILQ_INSERT_TAIL(queue, bp, bio_queue);

		vtblk_request_enqueue(sc, req);
	}
}

static void
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
	struct bio *bp, *tmp;

	TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
		if (bp->bio_error != 0)
			disk_err(bp, "hard error", -1, 1);
		vtblk_bio_done(sc, bp, bp->bio_error);
	}
}

static void
vtblk_drain_vq(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int last;

	vq = sc->vtblk_vq;
	last = 0;

	while ((req = virtqueue_drain(vq, &last)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	sc->vtblk_req_ordered = NULL;
	KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
}

static void
vtblk_drain(struct vtblk_softc *sc)
{
	struct bio_queue queue;
	struct bio_queue_head *bioq;
	struct vtblk_request *req;
	struct bio *bp;

	bioq = &sc->vtblk_bioq;
	TAILQ_INIT(&queue);

	if (sc->vtblk_vq != NULL) {
		vtblk_queue_completed(sc, &queue);
		vtblk_done_completed(sc, &queue);

		vtblk_drain_vq(sc);
	}

	while ((req = vtblk_request_next_ready(sc)) != NULL) {
		vtblk_bio_done(sc, req->vbr_bp, ENXIO);
		vtblk_request_enqueue(sc, req);
	}

	while (bioq_first(bioq) != NULL) {
		bp = bioq_takefirst(bioq);
		vtblk_bio_done(sc, bp, ENXIO);
	}

	vtblk_request_free(sc);
}

static void
vtblk_startio(struct vtblk_softc *sc)
{
	struct virtqueue *vq;
	struct vtblk_request *req;
	int enq;

	VTBLK_LOCK_ASSERT(sc);
	vq = sc->vtblk_vq;
	enq = 0;

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		return;

	while (!virtqueue_full(vq)) {
		req = vtblk_request_next(sc);
		if (req == NULL)
			break;

		if (vtblk_request_execute(sc, req) != 0) {
			vtblk_request_requeue_ready(sc, req);
			break;
		}

		enq++;
	}

	if (enq > 0)
		virtqueue_notify(vq);
}

static void
vtblk_bio_done(struct vtblk_softc *sc, struct bio *bp, int error)
{

	/* Because of GEOM direct dispatch, we cannot hold any locks. */
	if (sc != NULL)
		VTBLK_LOCK_ASSERT_NOTOWNED(sc);

	if (error) {
		bp->bio_resid = bp->bio_bcount;
		bp->bio_error = error;
		bp->bio_flags |= BIO_ERROR;
	}

	biodone(bp);
}

#define VTBLK_GET_CONFIG(_dev, _feature, _field, _cfg)			\
	if (virtio_with_feature(_dev, _feature)) {			\
		virtio_read_device_config(_dev,				\
		    offsetof(struct virtio_blk_config, _field),		\
		    &(_cfg)->_field, sizeof((_cfg)->_field));		\
	}

static void
vtblk_read_config(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
{
	device_t dev;

	dev = sc->vtblk_dev;

	bzero(blkcfg, sizeof(struct virtio_blk_config));

	/* The capacity is always available. */
	virtio_read_device_config(dev, offsetof(struct virtio_blk_config,
	    capacity), &blkcfg->capacity, sizeof(blkcfg->capacity));

	/* Read the configuration if the feature was negotiated. */
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SIZE_MAX, size_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_SEG_MAX, seg_max, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_GEOMETRY, geometry, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_BLK_SIZE, blk_size, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_TOPOLOGY, topology, blkcfg);
	VTBLK_GET_CONFIG(dev, VIRTIO_BLK_F_CONFIG_WCE, writeback, blkcfg);
}

#undef VTBLK_GET_CONFIG
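
/*
 * Fetch the device identify string with a VIRTIO_BLK_T_GET_ID request
 * and store it in d_ident. This runs from vtblk_create_disk() before
 * the virtqueue interrupt is enabled, so the request is polled
 * synchronously; a failure is harmless and only leaves d_ident unset.
 */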
static void
vtblk_ident(struct vtblk_softc *sc)
{
	struct bio buf;
	struct disk *dp;
	struct vtblk_request *req;
	int len, error;

	dp = sc->vtblk_disk;
	len = MIN(VIRTIO_BLK_ID_BYTES, DISK_IDENT_SIZE);

	if (vtblk_tunable_int(sc, "no_ident", vtblk_no_ident) != 0)
		return;

	req = vtblk_request_dequeue(sc);
	if (req == NULL)
		return;

	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_GET_ID;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_READ;
	buf.bio_data = dp->d_ident;
	buf.bio_bcount = len;

	VTBLK_LOCK(sc);
	error = vtblk_poll_request(sc, req);
	VTBLK_UNLOCK(sc);

	vtblk_request_enqueue(sc, req);

	if (error) {
		device_printf(sc->vtblk_dev,
		    "error getting device identifier: %d\n", error);
	}
}

static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
	struct virtqueue *vq;
	int error;

	vq = sc->vtblk_vq;

	if (!virtqueue_empty(vq))
		return (EBUSY);

	error = vtblk_request_execute(sc, req);
	if (error)
		return (error);

	virtqueue_notify(vq);
	virtqueue_poll(vq, NULL);

	error = vtblk_request_error(req);
	if (error && bootverbose) {
		device_printf(sc->vtblk_dev,
		    "%s: IO error: %d\n", __func__, error);
	}

	return (error);
}

static int
vtblk_quiesce(struct vtblk_softc *sc)
{
	int error;

	VTBLK_LOCK_ASSERT(sc);
	error = 0;

	while (!virtqueue_empty(sc->vtblk_vq)) {
		if (mtx_sleep(&sc->vtblk_vq, VTBLK_MTX(sc), PRIBIO, "vtblkq",
		    VTBLK_QUIESCE_TIMEOUT) == EWOULDBLOCK) {
			error = EBUSY;
			break;
		}
	}

	return (error);
}
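
/*
 * Interrupt handler for the request virtqueue. Completed bios are
 * collected under the lock but only biodone'd via vtblk_done_completed()
 * after it is dropped, as direct dispatch requires. If completions race
 * in while interrupts are being re-enabled, virtqueue_enable_intr()
 * reports it and we loop.
 */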
static void
vtblk_vq_intr(void *xsc)
{
	struct vtblk_softc *sc;
	struct virtqueue *vq;
	struct bio_queue queue;

	sc = xsc;
	vq = sc->vtblk_vq;
	TAILQ_INIT(&queue);

	VTBLK_LOCK(sc);

again:
	if (sc->vtblk_flags & VTBLK_FLAG_DETACH)
		goto out;

	vtblk_queue_completed(sc, &queue);
	vtblk_startio(sc);

	if (virtqueue_enable_intr(vq) != 0) {
		virtqueue_disable_intr(vq);
		goto again;
	}

	if (sc->vtblk_flags & VTBLK_FLAG_SUSPEND)
		wakeup(&sc->vtblk_vq);

out:
	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &queue);
}

static void
vtblk_stop(struct vtblk_softc *sc)
{

	virtqueue_disable_intr(sc->vtblk_vq);
	virtio_stop(sc->vtblk_dev);
}

static void
vtblk_dump_quiesce(struct vtblk_softc *sc)
{

	/*
	 * Spin here until all the requests in-flight at the time of the
	 * dump are completed and queued. The queued requests will be
	 * biodone'd once the dump is finished.
	 */
	while (!virtqueue_empty(sc->vtblk_vq))
		vtblk_queue_completed(sc, &sc->vtblk_dump_queue);
}

static int
vtblk_dump_write(struct vtblk_softc *sc, void *virtual, off_t offset,
    size_t length)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_OUT;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = offset / 512;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_WRITE;
	buf.bio_data = virtual;
	buf.bio_bcount = length;

	return (vtblk_poll_request(sc, req));
}

static int
vtblk_dump_flush(struct vtblk_softc *sc)
{
	struct bio buf;
	struct vtblk_request *req;

	req = &sc->vtblk_dump_request;
	req->vbr_ack = -1;
	req->vbr_hdr.type = VIRTIO_BLK_T_FLUSH;
	req->vbr_hdr.ioprio = 1;
	req->vbr_hdr.sector = 0;

	req->vbr_bp = &buf;
	g_reset_bio(&buf);

	buf.bio_cmd = BIO_FLUSH;

	return (vtblk_poll_request(sc, req));
}

static void
vtblk_dump_complete(struct vtblk_softc *sc)
{

	vtblk_dump_flush(sc);

	VTBLK_UNLOCK(sc);
	vtblk_done_completed(sc, &sc->vtblk_dump_queue);
	VTBLK_LOCK(sc);
}

static void
vtblk_set_write_cache(struct vtblk_softc *sc, int wc)
{

	/* Set either writeback (1) or writethrough (0) mode. */
	virtio_write_dev_config_1(sc->vtblk_dev,
	    offsetof(struct virtio_blk_config, writeback), wc);
}

static int
vtblk_write_cache_enabled(struct vtblk_softc *sc,
    struct virtio_blk_config *blkcfg)
{
	int wc;

	if (sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) {
		wc = vtblk_tunable_int(sc, "writecache_mode",
		    vtblk_writecache_mode);
		if (wc >= 0 && wc < VTBLK_CACHE_MAX)
			vtblk_set_write_cache(sc, wc);
		else
			wc = blkcfg->writeback;
	} else
		wc = virtio_with_feature(sc->vtblk_dev, VIRTIO_BLK_F_WCE);

	return (wc);
}
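
/*
 * The write cache mode is also changeable at runtime through the
 * per-device sysctl node, e.g. (unit 0 shown for illustration):
 *
 *   sysctl dev.vtblk.0.writecache_mode=0
 *
 * The change is only permitted when the host offered
 * VIRTIO_BLK_F_CONFIG_WCE; otherwise the handler returns EPERM.
 */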
static int
vtblk_write_cache_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct vtblk_softc *sc;
	int wc, error;

	sc = oidp->oid_arg1;
	wc = sc->vtblk_write_cache;

	error = sysctl_handle_int(oidp, &wc, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if ((sc->vtblk_flags & VTBLK_FLAG_WC_CONFIG) == 0)
		return (EPERM);
	if (wc < 0 || wc >= VTBLK_CACHE_MAX)
		return (EINVAL);

	VTBLK_LOCK(sc);
	sc->vtblk_write_cache = wc;
	vtblk_set_write_cache(sc, sc->vtblk_write_cache);
	VTBLK_UNLOCK(sc);

	return (0);
}

static void
vtblk_setup_sysctl(struct vtblk_softc *sc)
{
	device_t dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtblk_dev;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "writecache_mode",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    vtblk_write_cache_sysctl, "I",
	    "Write cache mode (writethrough (0) or writeback (1))");
}
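
/*
 * Fetch a per-device tunable, e.g. hw.vtblk.0.writecache_mode, falling
 * back to the supplied (typically driver-global) default when it is
 * not set.
 */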
static int
vtblk_tunable_int(struct vtblk_softc *sc, const char *knob, int def)
{
	char path[64];

	snprintf(path, sizeof(path),
	    "hw.vtblk.%d.%s", device_get_unit(sc->vtblk_dev), knob);
	TUNABLE_INT_FETCH(path, &def);

	return (def);
}