/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(uint64_t volsize, major_t maj, minor_t min)
{
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (EFRAGS);

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (EEXIST);
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (EAGAIN);
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (EBUSY);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip),
	    zv->zv_minor);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(volsize, maj, zv->zv_minor);
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&zfsdev_state_lock);

	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = EROFS;
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = EBUSY;
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = EBUSY;
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (EIO);
	else if (!doread && !vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (EIO);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dump;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = EINVAL;
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = ENXIO;
		else if (zs->zss_type != ZSST_ZVOL)
			error = EINVAL;
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dump;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (ENXIO);

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (ENXIO);
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		{
			uint64_t vs = zv->zv_volsize;
			uint8_t bs = zv->zv_min_bs;

			mutex_exit(&zfsdev_state_lock);
			error = zvol_getefi((void *)arg, flag, vs, bs);
			return (error);
		}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
		{
			int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
			if (ddi_copyout(&wce, (void *)arg, sizeof (int),
			    flag))
				error = EFAULT;
			break;
		}
	case DKIOCSETWCE:
		{
			int wce;
			if (ddi_copyin((void *)arg, &wce, sizeof (int),
			    flag)) {
				error = EFAULT;
				break;
			}
			if (wce) {
				zv->zv_flags |= ZVOL_WCE;
				mutex_exit(&zfsdev_state_lock);
			} else {
				zv->zv_flags &= ~ZVOL_WCE;
				mutex_exit(&zfsdev_state_lock);
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			}
			return (0);
		}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = EFAULT;
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvol size.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's property if we are initializing
	 * the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (EROFS);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}