/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>

#include "zfs_namecheck.h"

static void *zvol_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE	"dumpsize"

/*
 * This lock protects the zvol_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static kmutex_t zvol_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
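 * One of these exists for each configured volume; it is looked up by
 * minor number through the zvol_state soft-state array.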
104 */ 105 typedef struct zvol_state { 106 char zv_name[MAXPATHLEN]; /* pool/dd name */ 107 uint64_t zv_volsize; /* amount of space we advertise */ 108 uint64_t zv_volblocksize; /* volume block size */ 109 minor_t zv_minor; /* minor number */ 110 uint8_t zv_min_bs; /* minimum addressable block shift */ 111 uint8_t zv_flags; /* readonly, dumpified, etc. */ 112 objset_t *zv_objset; /* objset handle */ 113 uint32_t zv_open_count[OTYPCNT]; /* open counts */ 114 uint32_t zv_total_opens; /* total open count */ 115 zilog_t *zv_zilog; /* ZIL handle */ 116 list_t zv_extents; /* List of extents for dump */ 117 znode_t zv_znode; /* for range locking */ 118 } zvol_state_t; 119 120 /* 121 * zvol specific flags 122 */ 123 #define ZVOL_RDONLY 0x1 124 #define ZVOL_DUMPIFIED 0x2 125 #define ZVOL_EXCL 0x4 126 #define ZVOL_WCE 0x8 127 128 /* 129 * zvol maximum transfer in one DMU tx. 130 */ 131 int zvol_maxphys = DMU_MAX_ACCESS/2; 132 133 static int zvol_remove_zv(zvol_state_t *); 134 extern int zfs_set_prop_nvlist(const char *, nvlist_t *); 135 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio); 136 static int zvol_dumpify(zvol_state_t *zv); 137 static int zvol_dump_fini(zvol_state_t *zv); 138 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize); 139 140 static void 141 zvol_size_changed(uint64_t volsize, major_t maj, minor_t min) 142 { 143 dev_t dev = makedevice(maj, min); 144 145 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 146 "Size", volsize) == DDI_SUCCESS); 147 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 148 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS); 149 150 /* Notify specfs to invalidate the cached size */ 151 spec_size_invalidate(dev, VBLK); 152 spec_size_invalidate(dev, VCHR); 153 } 154 155 int 156 zvol_check_volsize(uint64_t volsize, uint64_t blocksize) 157 { 158 if (volsize == 0) 159 return (EINVAL); 160 161 if (volsize % blocksize != 0) 162 return (EINVAL); 163 164 #ifdef _ILP32 165 if (volsize - 1 > SPEC_MAXOFFSET_T) 166 return (EOVERFLOW); 167 #endif 168 return (0); 169 } 170 171 int 172 zvol_check_volblocksize(uint64_t volblocksize) 173 { 174 if (volblocksize < SPA_MINBLOCKSIZE || 175 volblocksize > SPA_MAXBLOCKSIZE || 176 !ISP2(volblocksize)) 177 return (EDOM); 178 179 return (0); 180 } 181 182 int 183 zvol_get_stats(objset_t *os, nvlist_t *nv) 184 { 185 int error; 186 dmu_object_info_t doi; 187 uint64_t val; 188 189 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val); 190 if (error) 191 return (error); 192 193 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val); 194 195 error = dmu_object_info(os, ZVOL_OBJ, &doi); 196 197 if (error == 0) { 198 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE, 199 doi.doi_data_block_size); 200 } 201 202 return (error); 203 } 204 205 /* 206 * Find a free minor number. 
207 */ 208 static minor_t 209 zvol_minor_alloc(void) 210 { 211 minor_t minor; 212 213 ASSERT(MUTEX_HELD(&zvol_state_lock)); 214 215 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) 216 if (ddi_get_soft_state(zvol_state, minor) == NULL) 217 return (minor); 218 219 return (0); 220 } 221 222 static zvol_state_t * 223 zvol_minor_lookup(const char *name) 224 { 225 minor_t minor; 226 zvol_state_t *zv; 227 228 ASSERT(MUTEX_HELD(&zvol_state_lock)); 229 230 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) { 231 zv = ddi_get_soft_state(zvol_state, minor); 232 if (zv == NULL) 233 continue; 234 if (strcmp(zv->zv_name, name) == 0) 235 break; 236 } 237 238 return (zv); 239 } 240 241 /* extent mapping arg */ 242 struct maparg { 243 zvol_state_t *ma_zv; 244 uint64_t ma_blks; 245 }; 246 247 /*ARGSUSED*/ 248 static int 249 zvol_map_block(spa_t *spa, blkptr_t *bp, const zbookmark_t *zb, 250 const dnode_phys_t *dnp, void *arg) 251 { 252 struct maparg *ma = arg; 253 zvol_extent_t *ze; 254 int bs = ma->ma_zv->zv_volblocksize; 255 256 if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0) 257 return (0); 258 259 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid); 260 ma->ma_blks++; 261 262 /* Abort immediately if we have encountered gang blocks */ 263 if (BP_IS_GANG(bp)) 264 return (EFRAGS); 265 266 /* 267 * See if the block is at the end of the previous extent. 268 */ 269 ze = list_tail(&ma->ma_zv->zv_extents); 270 if (ze && 271 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) && 272 DVA_GET_OFFSET(BP_IDENTITY(bp)) == 273 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) { 274 ze->ze_nblks++; 275 return (0); 276 } 277 278 dprintf_bp(bp, "%s", "next blkptr:"); 279 280 /* start a new extent */ 281 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP); 282 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */ 283 ze->ze_nblks = 1; 284 list_insert_tail(&ma->ma_zv->zv_extents, ze); 285 return (0); 286 } 287 288 static void 289 zvol_free_extents(zvol_state_t *zv) 290 { 291 zvol_extent_t *ze; 292 293 while (ze = list_head(&zv->zv_extents)) { 294 list_remove(&zv->zv_extents, ze); 295 kmem_free(ze, sizeof (zvol_extent_t)); 296 } 297 } 298 299 static int 300 zvol_get_lbas(zvol_state_t *zv) 301 { 302 struct maparg ma; 303 int err; 304 305 ma.ma_zv = zv; 306 ma.ma_blks = 0; 307 zvol_free_extents(zv); 308 309 err = traverse_dataset(dmu_objset_ds(zv->zv_objset), 0, 310 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma); 311 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) { 312 zvol_free_extents(zv); 313 return (err ? err : EIO); 314 } 315 316 return (0); 317 } 318 319 /* ARGSUSED */ 320 void 321 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 322 { 323 zfs_creat_t *zct = arg; 324 nvlist_t *nvprops = zct->zct_props; 325 int error; 326 uint64_t volblocksize, volsize; 327 328 VERIFY(nvlist_lookup_uint64(nvprops, 329 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0); 330 if (nvlist_lookup_uint64(nvprops, 331 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) 332 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); 333 334 /* 335 * These properties must be removed from the list so the generic 336 * property setting step won't apply to them. 
337 */ 338 VERIFY(nvlist_remove_all(nvprops, 339 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0); 340 (void) nvlist_remove_all(nvprops, 341 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE)); 342 343 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize, 344 DMU_OT_NONE, 0, tx); 345 ASSERT(error == 0); 346 347 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP, 348 DMU_OT_NONE, 0, tx); 349 ASSERT(error == 0); 350 351 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx); 352 ASSERT(error == 0); 353 } 354 355 /* 356 * Replay a TX_WRITE ZIL transaction that didn't get committed 357 * after a system failure 358 */ 359 static int 360 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap) 361 { 362 objset_t *os = zv->zv_objset; 363 char *data = (char *)(lr + 1); /* data follows lr_write_t */ 364 uint64_t off = lr->lr_offset; 365 uint64_t len = lr->lr_length; 366 dmu_tx_t *tx; 367 int error; 368 369 if (byteswap) 370 byteswap_uint64_array(lr, sizeof (*lr)); 371 372 tx = dmu_tx_create(os); 373 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len); 374 error = dmu_tx_assign(tx, TXG_WAIT); 375 if (error) { 376 dmu_tx_abort(tx); 377 } else { 378 dmu_write(os, ZVOL_OBJ, off, len, data, tx); 379 dmu_tx_commit(tx); 380 } 381 382 return (error); 383 } 384 385 /* ARGSUSED */ 386 static int 387 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap) 388 { 389 return (ENOTSUP); 390 } 391 392 /* 393 * Callback vectors for replaying records. 394 * Only TX_WRITE is needed for zvol. 395 */ 396 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = { 397 zvol_replay_err, /* 0 no such transaction type */ 398 zvol_replay_err, /* TX_CREATE */ 399 zvol_replay_err, /* TX_MKDIR */ 400 zvol_replay_err, /* TX_MKXATTR */ 401 zvol_replay_err, /* TX_SYMLINK */ 402 zvol_replay_err, /* TX_REMOVE */ 403 zvol_replay_err, /* TX_RMDIR */ 404 zvol_replay_err, /* TX_LINK */ 405 zvol_replay_err, /* TX_RENAME */ 406 zvol_replay_write, /* TX_WRITE */ 407 zvol_replay_err, /* TX_TRUNCATE */ 408 zvol_replay_err, /* TX_SETATTR */ 409 zvol_replay_err, /* TX_ACL */ 410 }; 411 412 int 413 zvol_name2minor(const char *name, minor_t *minor) 414 { 415 zvol_state_t *zv; 416 417 mutex_enter(&zvol_state_lock); 418 zv = zvol_minor_lookup(name); 419 if (minor && zv) 420 *minor = zv->zv_minor; 421 mutex_exit(&zvol_state_lock); 422 return (zv ? 0 : -1); 423 } 424 425 /* 426 * Create a minor node (plus a whole lot more) for the specified volume. 
427 */ 428 int 429 zvol_create_minor(const char *name) 430 { 431 zvol_state_t *zv; 432 objset_t *os; 433 dmu_object_info_t doi; 434 minor_t minor = 0; 435 char chrbuf[30], blkbuf[30]; 436 int error; 437 438 mutex_enter(&zvol_state_lock); 439 440 if ((zv = zvol_minor_lookup(name)) != NULL) { 441 mutex_exit(&zvol_state_lock); 442 return (EEXIST); 443 } 444 445 /* lie and say we're read-only */ 446 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os); 447 448 if (error) { 449 mutex_exit(&zvol_state_lock); 450 return (error); 451 } 452 453 if ((minor = zvol_minor_alloc()) == 0) { 454 dmu_objset_disown(os, zvol_tag); 455 mutex_exit(&zvol_state_lock); 456 return (ENXIO); 457 } 458 459 if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) { 460 dmu_objset_disown(os, zvol_tag); 461 mutex_exit(&zvol_state_lock); 462 return (EAGAIN); 463 } 464 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME, 465 (char *)name); 466 467 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor); 468 469 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR, 470 minor, DDI_PSEUDO, 0) == DDI_FAILURE) { 471 ddi_soft_state_free(zvol_state, minor); 472 dmu_objset_disown(os, zvol_tag); 473 mutex_exit(&zvol_state_lock); 474 return (EAGAIN); 475 } 476 477 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor); 478 479 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK, 480 minor, DDI_PSEUDO, 0) == DDI_FAILURE) { 481 ddi_remove_minor_node(zfs_dip, chrbuf); 482 ddi_soft_state_free(zvol_state, minor); 483 dmu_objset_disown(os, zvol_tag); 484 mutex_exit(&zvol_state_lock); 485 return (EAGAIN); 486 } 487 488 zv = ddi_get_soft_state(zvol_state, minor); 489 490 (void) strlcpy(zv->zv_name, name, MAXPATHLEN); 491 zv->zv_min_bs = DEV_BSHIFT; 492 zv->zv_minor = minor; 493 zv->zv_objset = os; 494 if (dmu_objset_is_snapshot(os)) 495 zv->zv_flags |= ZVOL_RDONLY; 496 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL); 497 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare, 498 sizeof (rl_t), offsetof(rl_t, r_node)); 499 list_create(&zv->zv_extents, sizeof (zvol_extent_t), 500 offsetof(zvol_extent_t, ze_node)); 501 /* get and cache the blocksize */ 502 error = dmu_object_info(os, ZVOL_OBJ, &doi); 503 ASSERT(error == 0); 504 zv->zv_volblocksize = doi.doi_data_block_size; 505 506 zil_replay(os, zv, zvol_replay_vector); 507 dmu_objset_disown(os, zvol_tag); 508 zv->zv_objset = NULL; 509 510 zvol_minors++; 511 512 mutex_exit(&zvol_state_lock); 513 514 return (0); 515 } 516 517 /* 518 * Remove minor node for the specified volume. 
519 */ 520 static int 521 zvol_remove_zv(zvol_state_t *zv) 522 { 523 char nmbuf[20]; 524 525 ASSERT(MUTEX_HELD(&zvol_state_lock)); 526 if (zv->zv_total_opens != 0) 527 return (EBUSY); 528 529 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", zv->zv_minor); 530 ddi_remove_minor_node(zfs_dip, nmbuf); 531 532 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", zv->zv_minor); 533 ddi_remove_minor_node(zfs_dip, nmbuf); 534 535 avl_destroy(&zv->zv_znode.z_range_avl); 536 mutex_destroy(&zv->zv_znode.z_range_lock); 537 538 ddi_soft_state_free(zvol_state, zv->zv_minor); 539 540 zvol_minors--; 541 return (0); 542 } 543 544 int 545 zvol_remove_minor(const char *name) 546 { 547 zvol_state_t *zv; 548 int rc; 549 550 mutex_enter(&zvol_state_lock); 551 if ((zv = zvol_minor_lookup(name)) == NULL) { 552 mutex_exit(&zvol_state_lock); 553 return (ENXIO); 554 } 555 rc = zvol_remove_zv(zv); 556 mutex_exit(&zvol_state_lock); 557 return (rc); 558 } 559 560 int 561 zvol_first_open(zvol_state_t *zv) 562 { 563 objset_t *os; 564 uint64_t volsize; 565 int error; 566 uint64_t readonly; 567 568 /* lie and say we're read-only */ 569 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE, 570 zvol_tag, &os); 571 if (error) 572 return (error); 573 574 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize); 575 if (error) { 576 ASSERT(error == 0); 577 dmu_objset_disown(os, zvol_tag); 578 return (error); 579 } 580 zv->zv_objset = os; 581 zv->zv_volsize = volsize; 582 zv->zv_zilog = zil_open(os, zvol_get_data); 583 zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip), 584 zv->zv_minor); 585 586 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly, 587 NULL) == 0); 588 if (readonly || dmu_objset_is_snapshot(os)) 589 zv->zv_flags |= ZVOL_RDONLY; 590 else 591 zv->zv_flags &= ~ZVOL_RDONLY; 592 return (error); 593 } 594 595 void 596 zvol_last_close(zvol_state_t *zv) 597 { 598 zil_close(zv->zv_zilog); 599 zv->zv_zilog = NULL; 600 dmu_objset_disown(zv->zv_objset, zvol_tag); 601 zv->zv_objset = NULL; 602 } 603 604 int 605 zvol_prealloc(zvol_state_t *zv) 606 { 607 objset_t *os = zv->zv_objset; 608 dmu_tx_t *tx; 609 uint64_t refd, avail, usedobjs, availobjs; 610 uint64_t resid = zv->zv_volsize; 611 uint64_t off = 0; 612 613 /* Check the space usage before attempting to allocate the space */ 614 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs); 615 if (avail < zv->zv_volsize) 616 return (ENOSPC); 617 618 /* Free old extents if they exist */ 619 zvol_free_extents(zv); 620 621 while (resid != 0) { 622 int error; 623 uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE); 624 625 tx = dmu_tx_create(os); 626 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes); 627 error = dmu_tx_assign(tx, TXG_WAIT); 628 if (error) { 629 dmu_tx_abort(tx); 630 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off); 631 return (error); 632 } 633 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx); 634 dmu_tx_commit(tx); 635 off += bytes; 636 resid -= bytes; 637 } 638 txg_wait_synced(dmu_objset_pool(os), 0); 639 640 return (0); 641 } 642 643 int 644 zvol_update_volsize(objset_t *os, uint64_t volsize) 645 { 646 dmu_tx_t *tx; 647 int error; 648 649 ASSERT(MUTEX_HELD(&zvol_state_lock)); 650 651 tx = dmu_tx_create(os); 652 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 653 error = dmu_tx_assign(tx, TXG_WAIT); 654 if (error) { 655 dmu_tx_abort(tx); 656 return (error); 657 } 658 659 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, 660 &volsize, tx); 661 dmu_tx_commit(tx); 662 663 if (error == 0) 664 error = dmu_free_long_range(os, 665 ZVOL_OBJ, volsize, 
		    DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zvol_state_lock);
	for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) {

		zv = ddi_get_soft_state(zvol_state, minor);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zvol_state_lock);
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&zvol_state_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(volsize, maj, zv->zv_minor);
		}
	}

	/*
	 * Generate a LUN expansion event.
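	 * The ESC_DEV_DLE sysevent carries the device's physical path so
	 * that interested consumers can re-read the new capacity.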
747 */ 748 if (zv && error == 0) { 749 sysevent_id_t eid; 750 nvlist_t *attr; 751 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 752 753 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV, 754 zv->zv_minor); 755 756 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 757 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 758 759 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 760 ESC_DEV_DLE, attr, &eid, DDI_SLEEP); 761 762 nvlist_free(attr); 763 kmem_free(physpath, MAXPATHLEN); 764 } 765 766 out: 767 dmu_objset_rele(os, FTAG); 768 769 mutex_exit(&zvol_state_lock); 770 771 return (error); 772 } 773 774 /*ARGSUSED*/ 775 int 776 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr) 777 { 778 minor_t minor = getminor(*devp); 779 zvol_state_t *zv; 780 int err = 0; 781 782 if (minor == 0) /* This is the control device */ 783 return (0); 784 785 mutex_enter(&zvol_state_lock); 786 787 zv = ddi_get_soft_state(zvol_state, minor); 788 if (zv == NULL) { 789 mutex_exit(&zvol_state_lock); 790 return (ENXIO); 791 } 792 793 if (zv->zv_total_opens == 0) 794 err = zvol_first_open(zv); 795 if (err) { 796 mutex_exit(&zvol_state_lock); 797 return (err); 798 } 799 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) { 800 err = EROFS; 801 goto out; 802 } 803 if (zv->zv_flags & ZVOL_EXCL) { 804 err = EBUSY; 805 goto out; 806 } 807 if (flag & FEXCL) { 808 if (zv->zv_total_opens != 0) { 809 err = EBUSY; 810 goto out; 811 } 812 zv->zv_flags |= ZVOL_EXCL; 813 } 814 815 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) { 816 zv->zv_open_count[otyp]++; 817 zv->zv_total_opens++; 818 } 819 mutex_exit(&zvol_state_lock); 820 821 return (err); 822 out: 823 if (zv->zv_total_opens == 0) 824 zvol_last_close(zv); 825 mutex_exit(&zvol_state_lock); 826 return (err); 827 } 828 829 /*ARGSUSED*/ 830 int 831 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr) 832 { 833 minor_t minor = getminor(dev); 834 zvol_state_t *zv; 835 int error = 0; 836 837 if (minor == 0) /* This is the control device */ 838 return (0); 839 840 mutex_enter(&zvol_state_lock); 841 842 zv = ddi_get_soft_state(zvol_state, minor); 843 if (zv == NULL) { 844 mutex_exit(&zvol_state_lock); 845 return (ENXIO); 846 } 847 848 if (zv->zv_flags & ZVOL_EXCL) { 849 ASSERT(zv->zv_total_opens == 1); 850 zv->zv_flags &= ~ZVOL_EXCL; 851 } 852 853 /* 854 * If the open count is zero, this is a spurious close. 855 * That indicates a bug in the kernel / DDI framework. 856 */ 857 ASSERT(zv->zv_open_count[otyp] != 0); 858 ASSERT(zv->zv_total_opens != 0); 859 860 /* 861 * You may get multiple opens, but only one close. 862 */ 863 zv->zv_open_count[otyp]--; 864 zv->zv_total_opens--; 865 866 if (zv->zv_total_opens == 0) 867 zvol_last_close(zv); 868 869 mutex_exit(&zvol_state_lock); 870 return (error); 871 } 872 873 static void 874 zvol_get_done(dmu_buf_t *db, void *vzgd) 875 { 876 zgd_t *zgd = (zgd_t *)vzgd; 877 rl_t *rl = zgd->zgd_rl; 878 879 dmu_buf_rele(db, vzgd); 880 zfs_range_unlock(rl); 881 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp); 882 kmem_free(zgd, sizeof (zgd_t)); 883 } 884 885 /* 886 * Get data to generate a TX_WRITE intent log record. 
887 */ 888 static int 889 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio) 890 { 891 zvol_state_t *zv = arg; 892 objset_t *os = zv->zv_objset; 893 dmu_buf_t *db; 894 rl_t *rl; 895 zgd_t *zgd; 896 uint64_t boff; /* block starting offset */ 897 int dlen = lr->lr_length; /* length of user data */ 898 int error; 899 900 ASSERT(zio); 901 ASSERT(dlen != 0); 902 903 /* 904 * Write records come in two flavors: immediate and indirect. 905 * For small writes it's cheaper to store the data with the 906 * log record (immediate); for large writes it's cheaper to 907 * sync the data and get a pointer to it (indirect) so that 908 * we don't have to write the data twice. 909 */ 910 if (buf != NULL) /* immediate write */ 911 return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf, 912 DMU_READ_NO_PREFETCH)); 913 914 zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP); 915 zgd->zgd_zilog = zv->zv_zilog; 916 zgd->zgd_bp = &lr->lr_blkptr; 917 918 /* 919 * Lock the range of the block to ensure that when the data is 920 * written out and its checksum is being calculated that no other 921 * thread can change the block. 922 */ 923 boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t); 924 rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize, 925 RL_READER); 926 zgd->zgd_rl = rl; 927 928 VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db)); 929 error = dmu_sync(zio, db, &lr->lr_blkptr, 930 lr->lr_common.lrc_txg, zvol_get_done, zgd); 931 if (error == 0) 932 zil_add_block(zv->zv_zilog, &lr->lr_blkptr); 933 /* 934 * If we get EINPROGRESS, then we need to wait for a 935 * write IO initiated by dmu_sync() to complete before 936 * we can release this dbuf. We will finish everything 937 * up in the zvol_get_done() callback. 938 */ 939 if (error == EINPROGRESS) 940 return (0); 941 dmu_buf_rele(db, zgd); 942 zfs_range_unlock(rl); 943 kmem_free(zgd, sizeof (zgd_t)); 944 return (error); 945 } 946 947 /* 948 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions. 949 * 950 * We store data in the log buffers if it's small enough. 951 * Otherwise we will later flush the data out via dmu_sync(). 952 */ 953 ssize_t zvol_immediate_write_sz = 32768; 954 955 static void 956 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid, 957 boolean_t sync) 958 { 959 uint32_t blocksize = zv->zv_volblocksize; 960 zilog_t *zilog = zv->zv_zilog; 961 boolean_t slogging; 962 ssize_t immediate_write_sz; 963 964 if (zil_disable) 965 return; 966 967 if (zilog->zl_replay) { 968 dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); 969 zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = 970 zilog->zl_replaying_seq; 971 return; 972 } 973 974 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT) 975 ? 0 : zvol_immediate_write_sz; 976 977 slogging = spa_has_slogs(zilog->zl_spa) && 978 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY); 979 980 while (resid) { 981 itx_t *itx; 982 lr_write_t *lr; 983 ssize_t len; 984 itx_wr_state_t write_state; 985 986 /* 987 * Unlike zfs_log_write() we can be called with 988 * upto DMU_MAX_ACCESS/2 (5MB) writes. 
989 */ 990 if (blocksize > immediate_write_sz && !slogging && 991 resid >= blocksize && off % blocksize == 0) { 992 write_state = WR_INDIRECT; /* uses dmu_sync */ 993 len = blocksize; 994 } else if (sync) { 995 write_state = WR_COPIED; 996 len = MIN(ZIL_MAX_LOG_DATA, resid); 997 } else { 998 write_state = WR_NEED_COPY; 999 len = MIN(ZIL_MAX_LOG_DATA, resid); 1000 } 1001 1002 itx = zil_itx_create(TX_WRITE, sizeof (*lr) + 1003 (write_state == WR_COPIED ? len : 0)); 1004 lr = (lr_write_t *)&itx->itx_lr; 1005 if (write_state == WR_COPIED && dmu_read(zv->zv_objset, 1006 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) { 1007 kmem_free(itx, offsetof(itx_t, itx_lr) + 1008 itx->itx_lr.lrc_reclen); 1009 itx = zil_itx_create(TX_WRITE, sizeof (*lr)); 1010 lr = (lr_write_t *)&itx->itx_lr; 1011 write_state = WR_NEED_COPY; 1012 } 1013 1014 itx->itx_wr_state = write_state; 1015 if (write_state == WR_NEED_COPY) 1016 itx->itx_sod += len; 1017 lr->lr_foid = ZVOL_OBJ; 1018 lr->lr_offset = off; 1019 lr->lr_length = len; 1020 lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t); 1021 BP_ZERO(&lr->lr_blkptr); 1022 1023 itx->itx_private = zv; 1024 itx->itx_sync = sync; 1025 1026 (void) zil_itx_assign(zilog, itx, tx); 1027 1028 off += len; 1029 resid -= len; 1030 } 1031 } 1032 1033 static int 1034 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size, 1035 boolean_t doread, boolean_t isdump) 1036 { 1037 vdev_disk_t *dvd; 1038 int c; 1039 int numerrors = 0; 1040 1041 for (c = 0; c < vd->vdev_children; c++) { 1042 ASSERT(vd->vdev_ops == &vdev_mirror_ops || 1043 vd->vdev_ops == &vdev_replacing_ops || 1044 vd->vdev_ops == &vdev_spare_ops); 1045 int err = zvol_dumpio_vdev(vd->vdev_child[c], 1046 addr, offset, size, doread, isdump); 1047 if (err != 0) { 1048 numerrors++; 1049 } else if (doread) { 1050 break; 1051 } 1052 } 1053 1054 if (!vd->vdev_ops->vdev_op_leaf) 1055 return (numerrors < vd->vdev_children ? 0 : EIO); 1056 1057 if (doread && !vdev_readable(vd)) 1058 return (EIO); 1059 else if (!doread && !vdev_writeable(vd)) 1060 return (EIO); 1061 1062 dvd = vd->vdev_tsd; 1063 ASSERT3P(dvd, !=, NULL); 1064 offset += VDEV_LABEL_START_SIZE; 1065 1066 if (ddi_in_panic() || isdump) { 1067 ASSERT(!doread); 1068 if (doread) 1069 return (EIO); 1070 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset), 1071 lbtodb(size))); 1072 } else { 1073 return (vdev_disk_physio(dvd->vd_lh, addr, size, offset, 1074 doread ? B_READ : B_WRITE)); 1075 } 1076 } 1077 1078 static int 1079 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size, 1080 boolean_t doread, boolean_t isdump) 1081 { 1082 vdev_t *vd; 1083 int error; 1084 zvol_extent_t *ze; 1085 spa_t *spa = dmu_objset_spa(zv->zv_objset); 1086 1087 /* Must be sector aligned, and not stradle a block boundary. 
	 */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);
	spa_config_exit(spa, SCL_STATE, FTAG);
	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dump;
	boolean_t sync;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	/* Don't dereference zv until after the NULL check above. */
	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	sync = !(bp->b_flags & B_ASYNC) && !doread && !is_dump &&
	    !(zv->zv_flags & ZVOL_WCE) && !zil_disable;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	if (minor == 0)			/* This is the control device */
		return (ENXIO);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) && !zil_disable;

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);
	return (0);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
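 * DKIOCGGEOM and DKIOCGVTOC deliberately return ENOTSUP below, since the
 * volume presents an EFI label.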
1428 */ 1429 /*ARGSUSED*/ 1430 int 1431 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp) 1432 { 1433 zvol_state_t *zv; 1434 struct dk_cinfo dki; 1435 struct dk_minfo dkm; 1436 struct dk_callback *dkc; 1437 int error = 0; 1438 rl_t *rl; 1439 1440 mutex_enter(&zvol_state_lock); 1441 1442 zv = ddi_get_soft_state(zvol_state, getminor(dev)); 1443 1444 if (zv == NULL) { 1445 mutex_exit(&zvol_state_lock); 1446 return (ENXIO); 1447 } 1448 ASSERT(zv->zv_total_opens > 0); 1449 1450 switch (cmd) { 1451 1452 case DKIOCINFO: 1453 bzero(&dki, sizeof (dki)); 1454 (void) strcpy(dki.dki_cname, "zvol"); 1455 (void) strcpy(dki.dki_dname, "zvol"); 1456 dki.dki_ctype = DKC_UNKNOWN; 1457 dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs); 1458 mutex_exit(&zvol_state_lock); 1459 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag)) 1460 error = EFAULT; 1461 return (error); 1462 1463 case DKIOCGMEDIAINFO: 1464 bzero(&dkm, sizeof (dkm)); 1465 dkm.dki_lbsize = 1U << zv->zv_min_bs; 1466 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs; 1467 dkm.dki_media_type = DK_UNKNOWN; 1468 mutex_exit(&zvol_state_lock); 1469 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag)) 1470 error = EFAULT; 1471 return (error); 1472 1473 case DKIOCGETEFI: 1474 { 1475 uint64_t vs = zv->zv_volsize; 1476 uint8_t bs = zv->zv_min_bs; 1477 1478 mutex_exit(&zvol_state_lock); 1479 error = zvol_getefi((void *)arg, flag, vs, bs); 1480 return (error); 1481 } 1482 1483 case DKIOCFLUSHWRITECACHE: 1484 dkc = (struct dk_callback *)arg; 1485 mutex_exit(&zvol_state_lock); 1486 zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ); 1487 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) { 1488 (*dkc->dkc_callback)(dkc->dkc_cookie, error); 1489 error = 0; 1490 } 1491 return (error); 1492 1493 case DKIOCGETWCE: 1494 { 1495 int wce = (zv->zv_flags & ZVOL_WCE) ? 
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = EFAULT;
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = EFAULT;
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zvol_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zvol_state_lock);
			zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zvol_state_lock);
	ddi_soft_state_fini(&zvol_state);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that it can be restored if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv,
		    NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);

		error = zfs_set_prop_nvlist(zv->zv_name, nv);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (EROFS);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs;

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
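	 * Hence the (void) casts on the lookups and property updates below.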
1731 */ 1732 1733 tx = dmu_tx_create(os); 1734 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); 1735 error = dmu_tx_assign(tx, TXG_WAIT); 1736 if (error) { 1737 dmu_tx_abort(tx); 1738 return (error); 1739 } 1740 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx); 1741 dmu_tx_commit(tx); 1742 1743 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 1744 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum); 1745 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 1746 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress); 1747 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 1748 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv); 1749 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, 1750 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs); 1751 1752 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0); 1753 (void) nvlist_add_uint64(nv, 1754 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum); 1755 (void) nvlist_add_uint64(nv, 1756 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress); 1757 (void) nvlist_add_uint64(nv, 1758 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv); 1759 (void) zfs_set_prop_nvlist(zv->zv_name, nv); 1760 nvlist_free(nv); 1761 1762 zvol_free_extents(zv); 1763 zv->zv_flags &= ~ZVOL_DUMPIFIED; 1764 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END); 1765 /* wait for dmu_free_long_range to actually free the blocks */ 1766 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0); 1767 tx = dmu_tx_create(os); 1768 dmu_tx_hold_bonus(tx, ZVOL_OBJ); 1769 error = dmu_tx_assign(tx, TXG_WAIT); 1770 if (error) { 1771 dmu_tx_abort(tx); 1772 return (error); 1773 } 1774 (void) dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx); 1775 dmu_tx_commit(tx); 1776 1777 return (0); 1778 } 1779