/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2013  Peter Grehan <grehan@freebsd.org>
 * All rights reserved.
 * Copyright 2020 Joyent, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#ifndef WITHOUT_CAPSICUM
#include <sys/capsicum.h>
#endif
#include <sys/queue.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/disk.h>

#include <assert.h>
#ifndef WITHOUT_CAPSICUM
#include <capsicum_helpers.h>
#endif
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <sysexits.h>
#include <unistd.h>

#include <machine/atomic.h>
#include <machine/vmm_snapshot.h>

#include "bhyverun.h"
#include "config.h"
#include "debug.h"
#include "mevent.h"
#include "pci_emul.h"
#include "block_if.h"

#define BLOCKIF_SIG	0xb109b109

#define BLOCKIF_NUMTHR	8
#define BLOCKIF_MAXREQ	(BLOCKIF_RING_MAX + BLOCKIF_NUMTHR)

enum blockop {
	BOP_READ,
	BOP_WRITE,
	BOP_FLUSH,
	BOP_DELETE
};

enum blockstat {
	BST_FREE,
	BST_BLOCK,
	BST_PEND,
	BST_BUSY,
	BST_DONE
};

struct blockif_elem {
	TAILQ_ENTRY(blockif_elem) be_link;
	struct blockif_req	*be_req;
	enum blockop		be_op;
	enum blockstat		be_status;
	pthread_t		be_tid;
	off_t			be_block;
};

struct blockif_ctxt {
	unsigned int		bc_magic;
	int			bc_fd;
	int			bc_ischr;
	int			bc_isgeom;
	int			bc_candelete;
	int			bc_rdonly;
	off_t			bc_size;
	int			bc_sectsz;
	int			bc_psectsz;
	int			bc_psectoff;
	int			bc_closing;
	int			bc_paused;
	pthread_t		bc_btid[BLOCKIF_NUMTHR];
	pthread_mutex_t		bc_mtx;
	pthread_cond_t		bc_cond;
	pthread_cond_t		bc_work_done_cond;
	blockif_resize_cb	*bc_resize_cb;
	void			*bc_resize_cb_arg;
	struct mevent		*bc_resize_event;

	/* Request elements and free/pending/busy queues */
	TAILQ_HEAD(, blockif_elem) bc_freeq;
	TAILQ_HEAD(, blockif_elem) bc_pendq;
	TAILQ_HEAD(, blockif_elem) bc_busyq;
	struct blockif_elem	bc_reqs[BLOCKIF_MAXREQ];
};

static pthread_once_t blockif_once = PTHREAD_ONCE_INIT;

struct blockif_sig_elem {
	pthread_mutex_t		bse_mtx;
	pthread_cond_t		bse_cond;
	int			bse_pending;
	struct blockif_sig_elem	*bse_next;
};

static struct blockif_sig_elem *blockif_bse_head;
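/*
 * Request lifecycle (summarized from the queue handling below): every
 * blockif_elem starts on bc_freeq as BST_FREE.  blockif_enqueue() moves
 * it to bc_pendq as BST_PEND, or as BST_BLOCK when another pending or
 * busy request ends at this request's starting offset.  A worker thread
 * dequeues a BST_PEND element, marks it BST_BUSY and moves it to
 * bc_busyq while the I/O runs.  blockif_complete() returns the element
 * to bc_freeq and re-marks as BST_PEND any blocked requests that were
 * waiting on the completed element's ending offset, preserving ordering
 * between adjacent requests.
 */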
static int
blockif_enqueue(struct blockif_ctxt *bc, struct blockif_req *breq,
		enum blockop op)
{
	struct blockif_elem *be, *tbe;
	off_t off;
	int i;

	be = TAILQ_FIRST(&bc->bc_freeq);
	assert(be != NULL);
	assert(be->be_status == BST_FREE);
	TAILQ_REMOVE(&bc->bc_freeq, be, be_link);
	be->be_req = breq;
	be->be_op = op;
	switch (op) {
	case BOP_READ:
	case BOP_WRITE:
	case BOP_DELETE:
		off = breq->br_offset;
		for (i = 0; i < breq->br_iovcnt; i++)
			off += breq->br_iov[i].iov_len;
		break;
	default:
		off = OFF_MAX;
	}
	be->be_block = off;
	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_block == breq->br_offset)
			break;
	}
	if (tbe == NULL) {
		TAILQ_FOREACH(tbe, &bc->bc_busyq, be_link) {
			if (tbe->be_block == breq->br_offset)
				break;
		}
	}
	if (tbe == NULL)
		be->be_status = BST_PEND;
	else
		be->be_status = BST_BLOCK;
	TAILQ_INSERT_TAIL(&bc->bc_pendq, be, be_link);
	return (be->be_status == BST_PEND);
}

static int
blockif_dequeue(struct blockif_ctxt *bc, pthread_t t, struct blockif_elem **bep)
{
	struct blockif_elem *be;

	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_status == BST_PEND)
			break;
		assert(be->be_status == BST_BLOCK);
	}
	if (be == NULL)
		return (0);
	TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	be->be_status = BST_BUSY;
	be->be_tid = t;
	TAILQ_INSERT_TAIL(&bc->bc_busyq, be, be_link);
	*bep = be;
	return (1);
}

static void
blockif_complete(struct blockif_ctxt *bc, struct blockif_elem *be)
{
	struct blockif_elem *tbe;

	if (be->be_status == BST_DONE || be->be_status == BST_BUSY)
		TAILQ_REMOVE(&bc->bc_busyq, be, be_link);
	else
		TAILQ_REMOVE(&bc->bc_pendq, be, be_link);
	TAILQ_FOREACH(tbe, &bc->bc_pendq, be_link) {
		if (tbe->be_req->br_offset == be->be_block)
			tbe->be_status = BST_PEND;
	}
	be->be_tid = 0;
	be->be_status = BST_FREE;
	be->be_req = NULL;
	TAILQ_INSERT_TAIL(&bc->bc_freeq, be, be_link);
}

static int
blockif_flush_bc(struct blockif_ctxt *bc)
{
	if (bc->bc_ischr) {
		if (ioctl(bc->bc_fd, DIOCGFLUSH))
			return (errno);
	} else if (fsync(bc->bc_fd))
		return (errno);

	return (0);
}
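/*
 * Process one request.  When the backing store is a GEOM device (see the
 * sector-size note in blockif_open() below), scatter/gather lists with
 * more than one entry are staged through "buf", a MAXPHYS-sized bounce
 * buffer allocated per worker thread, so that each pread()/pwrite() is
 * issued as a single contiguous transfer.
 */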
static void
blockif_proc(struct blockif_ctxt *bc, struct blockif_elem *be, uint8_t *buf)
{
	struct blockif_req *br;
	off_t arg[2];
	ssize_t clen, len, off, boff, voff;
	int i, err;
	struct spacectl_range range;

	br = be->be_req;
	if (br->br_iovcnt <= 1)
		buf = NULL;
	err = 0;
	switch (be->be_op) {
	case BOP_READ:
		if (buf == NULL) {
			if ((len = preadv(bc->bc_fd, br->br_iov, br->br_iovcnt,
			    br->br_offset)) < 0)
				err = errno;
			else
				br->br_resid -= len;
			break;
		}
		i = 0;
		off = voff = 0;
		while (br->br_resid > 0) {
			len = MIN(br->br_resid, MAXPHYS);
			if (pread(bc->bc_fd, buf, len, br->br_offset +
			    off) < 0) {
				err = errno;
				break;
			}
			boff = 0;
			do {
				clen = MIN(len - boff, br->br_iov[i].iov_len -
				    voff);
				memcpy((uint8_t *)br->br_iov[i].iov_base + voff,
				    buf + boff, clen);
				if (clen < br->br_iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);
			off += len;
			br->br_resid -= len;
		}
		break;
	case BOP_WRITE:
		if (bc->bc_rdonly) {
			err = EROFS;
			break;
		}
		if (buf == NULL) {
			if ((len = pwritev(bc->bc_fd, br->br_iov, br->br_iovcnt,
			    br->br_offset)) < 0)
				err = errno;
			else
				br->br_resid -= len;
			break;
		}
		i = 0;
		off = voff = 0;
		while (br->br_resid > 0) {
			len = MIN(br->br_resid, MAXPHYS);
			boff = 0;
			do {
				clen = MIN(len - boff, br->br_iov[i].iov_len -
				    voff);
				memcpy(buf + boff,
				    (uint8_t *)br->br_iov[i].iov_base + voff,
				    clen);
				if (clen < br->br_iov[i].iov_len - voff)
					voff += clen;
				else {
					i++;
					voff = 0;
				}
				boff += clen;
			} while (boff < len);
			if (pwrite(bc->bc_fd, buf, len, br->br_offset +
			    off) < 0) {
				err = errno;
				break;
			}
			off += len;
			br->br_resid -= len;
		}
		break;
	case BOP_FLUSH:
		err = blockif_flush_bc(bc);
		break;
	case BOP_DELETE:
		if (!bc->bc_candelete)
			err = EOPNOTSUPP;
		else if (bc->bc_rdonly)
			err = EROFS;
		else if (bc->bc_ischr) {
			arg[0] = br->br_offset;
			arg[1] = br->br_resid;
			if (ioctl(bc->bc_fd, DIOCGDELETE, arg))
				err = errno;
			else
				br->br_resid = 0;
		} else {
			range.r_offset = br->br_offset;
			range.r_len = br->br_resid;

			while (range.r_len > 0) {
				if (fspacectl(bc->bc_fd, SPACECTL_DEALLOC,
				    &range, 0, &range) != 0) {
					err = errno;
					break;
				}
			}
			if (err == 0)
				br->br_resid = 0;
		}
		break;
	default:
		err = EINVAL;
		break;
	}

	be->be_status = BST_DONE;

	(*br->br_callback)(br, err);
}

static inline bool
blockif_empty(const struct blockif_ctxt *bc)
{
	return (TAILQ_EMPTY(&bc->bc_pendq) && TAILQ_EMPTY(&bc->bc_busyq));
}

static void *
blockif_thr(void *arg)
{
	struct blockif_ctxt *bc;
	struct blockif_elem *be;
	pthread_t t;
	uint8_t *buf;

	bc = arg;
	if (bc->bc_isgeom)
		buf = malloc(MAXPHYS);
	else
		buf = NULL;
	t = pthread_self();

	pthread_mutex_lock(&bc->bc_mtx);
	for (;;) {
		while (blockif_dequeue(bc, t, &be)) {
			pthread_mutex_unlock(&bc->bc_mtx);
			blockif_proc(bc, be, buf);
			pthread_mutex_lock(&bc->bc_mtx);
			blockif_complete(bc, be);
		}

		/* If no work remains, notify the main thread */
		if (blockif_empty(bc))
			pthread_cond_broadcast(&bc->bc_work_done_cond);

		/* Check ctxt status here to see if exit requested */
		if (bc->bc_closing)
			break;

		pthread_cond_wait(&bc->bc_cond, &bc->bc_mtx);
	}
	pthread_mutex_unlock(&bc->bc_mtx);

	if (buf)
		free(buf);
	pthread_exit(NULL);
	return (NULL);
}

static void
blockif_sigcont_handler(int signal __unused, enum ev_type type __unused,
    void *arg __unused)
{
	struct blockif_sig_elem *bse;

	for (;;) {
		/*
		 * Process the entire list even if not intended for
		 * this thread.
		 */
		do {
			bse = blockif_bse_head;
			if (bse == NULL)
				return;
		} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
		    (uintptr_t)bse,
		    (uintptr_t)bse->bse_next));

		pthread_mutex_lock(&bse->bse_mtx);
		bse->bse_pending = 0;
		pthread_cond_signal(&bse->bse_cond);
		pthread_mutex_unlock(&bse->bse_mtx);
	}
}

static void
blockif_init(void)
{
	mevent_add(SIGCONT, EVF_SIGNAL, blockif_sigcont_handler, NULL);
	(void) signal(SIGCONT, SIG_IGN);
}
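/*
 * Parse a legacy "<path>[,<option>...]" string into config nodes.  For
 * example (illustrative; see the block-device-options list in bhyve(8)),
 * an invocation such as
 *
 *	bhyve ... -s 4,virtio-blk,/tank/vm/disk.img,nocache,sectorsize=4096
 *
 * would set "path", "nocache" and "sectorsize" values on the device's
 * nvlist, which blockif_open() below consumes.
 */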
int
blockif_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *path;

	if (opts == NULL)
		return (0);

	cp = strchr(opts, ',');
	if (cp == NULL) {
		set_config_value_node(nvl, "path", opts);
		return (0);
	}
	path = strndup(opts, cp - opts);
	set_config_value_node(nvl, "path", path);
	free(path);
	return (pci_parse_legacy_config(nvl, cp + 1));
}

struct blockif_ctxt *
blockif_open(nvlist_t *nvl, const char *ident)
{
	char tname[MAXCOMLEN + 1];
	char name[MAXPATHLEN];
	const char *path, *pssval, *ssval;
	char *cp;
	struct blockif_ctxt *bc;
	struct stat sbuf;
	struct diocgattr_arg arg;
	off_t size, psectsz, psectoff;
	int extra, fd, i, sectsz;
	int ro, candelete, geom, ssopt, pssopt;
	int nodelete;

#ifndef WITHOUT_CAPSICUM
	cap_rights_t rights;
	cap_ioctl_t cmds[] = { DIOCGFLUSH, DIOCGDELETE, DIOCGMEDIASIZE };
#endif

	pthread_once(&blockif_once, blockif_init);

	fd = -1;
	extra = 0;
	ssopt = 0;
	ro = 0;
	nodelete = 0;

	if (get_config_bool_node_default(nvl, "nocache", false))
		extra |= O_DIRECT;
	if (get_config_bool_node_default(nvl, "nodelete", false))
		nodelete = 1;
	if (get_config_bool_node_default(nvl, "sync", false) ||
	    get_config_bool_node_default(nvl, "direct", false))
		extra |= O_SYNC;
	if (get_config_bool_node_default(nvl, "ro", false))
		ro = 1;
	ssval = get_config_value_node(nvl, "sectorsize");
	if (ssval != NULL) {
		ssopt = strtol(ssval, &cp, 10);
		if (cp == ssval) {
			EPRINTLN("Invalid sector size \"%s\"", ssval);
			goto err;
		}
		if (*cp == '\0') {
			pssopt = ssopt;
		} else if (*cp == '/') {
			pssval = cp + 1;
			pssopt = strtol(pssval, &cp, 10);
			if (cp == pssval || *cp != '\0') {
				EPRINTLN("Invalid sector size \"%s\"", ssval);
				goto err;
			}
		} else {
			EPRINTLN("Invalid sector size \"%s\"", ssval);
			goto err;
		}
	}

	path = get_config_value_node(nvl, "path");
	if (path == NULL) {
		EPRINTLN("Missing \"path\" for block device.");
		goto err;
	}

	fd = open(path, (ro ? O_RDONLY : O_RDWR) | extra);
	if (fd < 0 && !ro) {
		/* If a read/write open failed, retry read-only */
		fd = open(path, O_RDONLY | extra);
		ro = 1;
	}

	if (fd < 0) {
		warn("Could not open backing file: %s", path);
		goto err;
	}

	if (fstat(fd, &sbuf) < 0) {
		warn("Could not stat backing file %s", path);
		goto err;
	}
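	/*
	 * Under Capsicum the descriptor is limited to the rights the
	 * emulation actually needs (CAP_FSYNC and CAP_WRITE are dropped
	 * again for read-only backends), and the permitted ioctl set is
	 * narrowed further to "cmds" once device probing below is done.
	 */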
#ifndef WITHOUT_CAPSICUM
	cap_rights_init(&rights, CAP_FSYNC, CAP_IOCTL, CAP_READ, CAP_SEEK,
	    CAP_WRITE, CAP_FSTAT, CAP_EVENT, CAP_FPATHCONF);
	if (ro)
		cap_rights_clear(&rights, CAP_FSYNC, CAP_WRITE);

	if (caph_rights_limit(fd, &rights) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	/*
	 * Deal with raw devices
	 */
	size = sbuf.st_size;
	sectsz = DEV_BSIZE;
	psectsz = psectoff = 0;
	candelete = geom = 0;
	if (S_ISCHR(sbuf.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &size) < 0 ||
		    ioctl(fd, DIOCGSECTORSIZE, &sectsz)) {
			perror("Could not fetch dev blk/sector size");
			goto err;
		}
		assert(size != 0);
		assert(sectsz != 0);
		if (ioctl(fd, DIOCGSTRIPESIZE, &psectsz) == 0 && psectsz > 0)
			ioctl(fd, DIOCGSTRIPEOFFSET, &psectoff);
		strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
		arg.len = sizeof(arg.value.i);
		if (nodelete == 0 && ioctl(fd, DIOCGATTR, &arg) == 0)
			candelete = arg.value.i;
		if (ioctl(fd, DIOCGPROVIDERNAME, name) == 0)
			geom = 1;
	} else {
		psectsz = sbuf.st_blksize;
		/* Avoid fallback implementation */
		candelete = fpathconf(fd, _PC_DEALLOC_PRESENT) == 1;
	}

#ifndef WITHOUT_CAPSICUM
	if (caph_ioctls_limit(fd, cmds, nitems(cmds)) == -1)
		errx(EX_OSERR, "Unable to apply rights for sandbox");
#endif

	if (ssopt != 0) {
		if (!powerof2(ssopt) || !powerof2(pssopt) || ssopt < 512 ||
		    ssopt > pssopt) {
			EPRINTLN("Invalid sector size %d/%d",
			    ssopt, pssopt);
			goto err;
		}

		/*
		 * Some backend drivers (e.g. cd0, ada0) require that the I/O
		 * size be a multiple of the device's sector size.
		 *
		 * Validate that the emulated sector size complies with this
		 * requirement.
		 */
		if (S_ISCHR(sbuf.st_mode)) {
			if (ssopt < sectsz || (ssopt % sectsz) != 0) {
				EPRINTLN("Sector size %d incompatible "
				    "with underlying device sector size %d",
				    ssopt, sectsz);
				goto err;
			}
		}

		sectsz = ssopt;
		psectsz = pssopt;
		psectoff = 0;
	}
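	/*
	 * At this point sectsz/psectsz describe the geometry reported to
	 * the guest.  A "sectorsize=512/4096" configuration, for instance,
	 * emulates a 512e drive: 512-byte logical sectors on top of a
	 * 4096-byte physical sector size.
	 */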
	bc = calloc(1, sizeof(struct blockif_ctxt));
	if (bc == NULL) {
		perror("calloc");
		goto err;
	}

	bc->bc_magic = BLOCKIF_SIG;
	bc->bc_fd = fd;
	bc->bc_ischr = S_ISCHR(sbuf.st_mode);
	bc->bc_isgeom = geom;
	bc->bc_candelete = candelete;
	bc->bc_rdonly = ro;
	bc->bc_size = size;
	bc->bc_sectsz = sectsz;
	bc->bc_psectsz = psectsz;
	bc->bc_psectoff = psectoff;
	pthread_mutex_init(&bc->bc_mtx, NULL);
	pthread_cond_init(&bc->bc_cond, NULL);
	bc->bc_paused = 0;
	pthread_cond_init(&bc->bc_work_done_cond, NULL);
	TAILQ_INIT(&bc->bc_freeq);
	TAILQ_INIT(&bc->bc_pendq);
	TAILQ_INIT(&bc->bc_busyq);
	for (i = 0; i < BLOCKIF_MAXREQ; i++) {
		bc->bc_reqs[i].be_status = BST_FREE;
		TAILQ_INSERT_HEAD(&bc->bc_freeq, &bc->bc_reqs[i], be_link);
	}

	for (i = 0; i < BLOCKIF_NUMTHR; i++) {
		pthread_create(&bc->bc_btid[i], NULL, blockif_thr, bc);
		snprintf(tname, sizeof(tname), "blk-%s-%d", ident, i);
		pthread_set_name_np(bc->bc_btid[i], tname);
	}

	return (bc);
err:
	if (fd >= 0)
		close(fd);
	return (NULL);
}

static void
blockif_resized(int fd, enum ev_type type __unused, void *arg)
{
	struct blockif_ctxt *bc;
	struct stat sb;
	off_t mediasize;

	if (fstat(fd, &sb) != 0)
		return;

	if (S_ISCHR(sb.st_mode)) {
		if (ioctl(fd, DIOCGMEDIASIZE, &mediasize) < 0) {
			EPRINTLN("blockif_resized: get mediasize failed: %s",
			    strerror(errno));
			return;
		}
	} else
		mediasize = sb.st_size;

	bc = arg;
	pthread_mutex_lock(&bc->bc_mtx);
	if (mediasize != bc->bc_size) {
		bc->bc_size = mediasize;
		bc->bc_resize_cb(bc, bc->bc_resize_cb_arg, bc->bc_size);
	}
	pthread_mutex_unlock(&bc->bc_mtx);
}

int
blockif_register_resize_callback(struct blockif_ctxt *bc, blockif_resize_cb *cb,
    void *cb_arg)
{
	struct stat sb;
	int err;

	if (cb == NULL)
		return (EINVAL);

	err = 0;

	pthread_mutex_lock(&bc->bc_mtx);
	if (bc->bc_resize_cb != NULL) {
		err = EBUSY;
		goto out;
	}

	assert(bc->bc_closing == 0);

	if (fstat(bc->bc_fd, &sb) != 0) {
		err = errno;
		goto out;
	}

	bc->bc_resize_event = mevent_add_flags(bc->bc_fd, EVF_VNODE,
	    EVFF_ATTRIB, blockif_resized, bc);
	if (bc->bc_resize_event == NULL) {
		err = ENXIO;
		goto out;
	}

	bc->bc_resize_cb = cb;
	bc->bc_resize_cb_arg = cb_arg;
out:
	pthread_mutex_unlock(&bc->bc_mtx);

	return (err);
}

static int
blockif_request(struct blockif_ctxt *bc, struct blockif_req *breq,
		enum blockop op)
{
	int err;

	err = 0;

	pthread_mutex_lock(&bc->bc_mtx);
	assert(!bc->bc_paused);
	if (!TAILQ_EMPTY(&bc->bc_freeq)) {
		/*
		 * Enqueue and inform the block i/o thread
		 * that there is work available
		 */
		if (blockif_enqueue(bc, breq, op))
			pthread_cond_signal(&bc->bc_cond);
	} else {
		/*
		 * Callers are not allowed to enqueue more than
		 * the specified blockif queue limit. Return an
		 * error to indicate that the queue length has been
		 * exceeded.
		 */
		err = E2BIG;
	}
	pthread_mutex_unlock(&bc->bc_mtx);

	return (err);
}
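/*
 * Request submission sketch (illustrative; not taken from a caller in
 * this file): a device model fills in a struct blockif_req and submits
 * it, and a worker thread later invokes br_callback with the result.
 *
 *	static void my_done(struct blockif_req *br, int err); // hypothetical
 *
 *	br->br_iov[0].iov_base = guest_buf;	// hypothetical buffer
 *	br->br_iov[0].iov_len = 4096;
 *	br->br_iovcnt = 1;
 *	br->br_offset = lba * blockif_sectsz(bc);
 *	br->br_resid = 4096;
 *	br->br_callback = my_done;
 *	if (blockif_read(bc, br) == E2BIG)
 *		...	// queue full, retry later
 */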
int
blockif_read(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_READ));
}

int
blockif_write(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_WRITE));
}

int
blockif_flush(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_FLUSH));
}

int
blockif_delete(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (blockif_request(bc, breq, BOP_DELETE));
}

int
blockif_cancel(struct blockif_ctxt *bc, struct blockif_req *breq)
{
	struct blockif_elem *be;

	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	/* XXX: not waiting while paused */

	/*
	 * Check pending requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_pendq, be_link) {
		if (be->be_req == breq)
			break;
	}
	if (be != NULL) {
		/*
		 * Found it.
		 */
		blockif_complete(bc, be);
		pthread_mutex_unlock(&bc->bc_mtx);

		return (0);
	}

	/*
	 * Check in-flight requests.
	 */
	TAILQ_FOREACH(be, &bc->bc_busyq, be_link) {
		if (be->be_req == breq)
			break;
	}
	if (be == NULL) {
		/*
		 * Didn't find it.
		 */
		pthread_mutex_unlock(&bc->bc_mtx);
		return (EINVAL);
	}

	/*
	 * Interrupt the processing thread to force it to return
	 * prematurely via its normal callback path.
	 */
	while (be->be_status == BST_BUSY) {
		struct blockif_sig_elem bse, *old_head;

		pthread_mutex_init(&bse.bse_mtx, NULL);
		pthread_cond_init(&bse.bse_cond, NULL);

		bse.bse_pending = 1;

		do {
			old_head = blockif_bse_head;
			bse.bse_next = old_head;
		} while (!atomic_cmpset_ptr((uintptr_t *)&blockif_bse_head,
		    (uintptr_t)old_head,
		    (uintptr_t)&bse));

		pthread_kill(be->be_tid, SIGCONT);

		pthread_mutex_lock(&bse.bse_mtx);
		while (bse.bse_pending)
			pthread_cond_wait(&bse.bse_cond, &bse.bse_mtx);
		pthread_mutex_unlock(&bse.bse_mtx);
	}

	pthread_mutex_unlock(&bc->bc_mtx);

	/*
	 * The processing thread has been interrupted.  Since it's not
	 * clear if the callback has been invoked yet, return EBUSY.
	 */
	return (EBUSY);
}

int
blockif_close(struct blockif_ctxt *bc)
{
	void *jval;
	int i;

	assert(bc->bc_magic == BLOCKIF_SIG);

	/*
	 * Stop the block i/o threads
	 */
	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_closing = 1;
	if (bc->bc_resize_event != NULL)
		mevent_disable(bc->bc_resize_event);
	pthread_mutex_unlock(&bc->bc_mtx);
	pthread_cond_broadcast(&bc->bc_cond);
	for (i = 0; i < BLOCKIF_NUMTHR; i++)
		pthread_join(bc->bc_btid[i], &jval);

	/* XXX Cancel queued i/o's ??? */

	/*
	 * Release resources
	 */
	bc->bc_magic = 0;
	close(bc->bc_fd);
	free(bc);

	return (0);
}

/*
 * Return virtual C/H/S values for a given block. Use the algorithm
 * outlined in the VHD specification to calculate values.
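 *
 * Worked example (derived from the code below, not from the spec): a
 * 1 GiB image with 512-byte sectors has 2097152 sectors, below the
 * 65536*16*63 threshold, so the search starts at 17 sectors per track.
 * That yields 121 heads, which exceeds 16, forcing secpt = 31; the
 * resulting 67650 head-cylinders still exceed 16*1024, forcing
 * secpt = 63.  The final geometry is C/H/S = 2080/16/63.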
 */
void
blockif_chs(struct blockif_ctxt *bc, uint16_t *c, uint8_t *h, uint8_t *s)
{
	off_t sectors;		/* total sectors of the block dev */
	off_t hcyl;		/* cylinders times heads */
	uint16_t secpt;		/* sectors per track */
	uint8_t heads;

	assert(bc->bc_magic == BLOCKIF_SIG);

	sectors = bc->bc_size / bc->bc_sectsz;

	/* Clamp the size to the largest possible with CHS */
	if (sectors > 65535UL*16*255)
		sectors = 65535UL*16*255;

	if (sectors >= 65536UL*16*63) {
		secpt = 255;
		heads = 16;
		hcyl = sectors / secpt;
	} else {
		secpt = 17;
		hcyl = sectors / secpt;
		heads = (hcyl + 1023) / 1024;

		if (heads < 4)
			heads = 4;

		if (hcyl >= (heads * 1024) || heads > 16) {
			secpt = 31;
			heads = 16;
			hcyl = sectors / secpt;
		}
		if (hcyl >= (heads * 1024)) {
			secpt = 63;
			heads = 16;
			hcyl = sectors / secpt;
		}
	}

	*c = hcyl / heads;
	*h = heads;
	*s = secpt;
}

/*
 * Accessors
 */
off_t
blockif_size(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_size);
}

int
blockif_sectsz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_sectsz);
}

void
blockif_psectsz(struct blockif_ctxt *bc, int *size, int *off)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	*size = bc->bc_psectsz;
	*off = bc->bc_psectoff;
}

int
blockif_queuesz(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (BLOCKIF_MAXREQ - 1);
}

int
blockif_is_ro(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_rdonly);
}

int
blockif_candelete(struct blockif_ctxt *bc)
{
	assert(bc->bc_magic == BLOCKIF_SIG);
	return (bc->bc_candelete);
}

#ifdef BHYVE_SNAPSHOT
void
blockif_pause(struct blockif_ctxt *bc)
{
	assert(bc != NULL);
	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_paused = 1;

	/* The interface is paused. Wait for workers to finish their work */
	while (!blockif_empty(bc))
		pthread_cond_wait(&bc->bc_work_done_cond, &bc->bc_mtx);
	pthread_mutex_unlock(&bc->bc_mtx);

	if (blockif_flush_bc(bc))
		fprintf(stderr, "%s: [WARN] failed to flush backing file.\r\n",
		    __func__);
}

void
blockif_resume(struct blockif_ctxt *bc)
{
	assert(bc != NULL);
	assert(bc->bc_magic == BLOCKIF_SIG);

	pthread_mutex_lock(&bc->bc_mtx);
	bc->bc_paused = 0;
	pthread_mutex_unlock(&bc->bc_mtx);
}
#endif /* BHYVE_SNAPSHOT */