/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
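
/*
 * Illustrative sketch (not part of the driver): a userland consumer can
 * treat the character node like any raw disk device.  The pool and volume
 * names below are hypothetical.
 *
 *	int fd = open("/dev/zvol/rdsk/tank/vol0", O_RDWR);
 *	char buf[512];
 *	(void) pread(fd, buf, sizeof (buf), 0);	 read the first sector
 *	(void) close(fd);
 */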

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
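
/*
 * Worked example for the checks above (hypothetical values, for
 * illustration only): a 1 GiB volume with an 8 KiB volblocksize passes
 * both checks, since 8192 is a power of two within
 * [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE] and 1 GiB is a whole number of
 * 8 KiB blocks.  A request such as volsize = 1000000 with
 * blocksize = 8192 fails zvol_check_volsize() with EINVAL because the
 * size is not a multiple of the block size.
 */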

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
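
/*
 * Illustrative arithmetic (not used by the code): zvol_prealloc() above
 * preallocates the volume one SPA_MAXBLOCKSIZE chunk per transaction, so
 * for a hypothetical 1 GiB volume with a 128 KiB SPA_MAXBLOCKSIZE it would
 * assign and commit 1 GiB / 128 KiB = 8192 transactions before waiting for
 * the final txg to sync.
 */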

static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible to the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}
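
/*
 * Worked example for the write_state selection above (hypothetical values,
 * for illustration only): with an 8K volblocksize, no separate log device,
 * logbias=latency and the default zvol_immediate_write_sz of 32K, the block
 * size never exceeds immediate_write_sz, so a synchronous 8K write is logged
 * WR_COPIED (data copied into the itx).  With a 128K volblocksize, a full,
 * block-aligned 128K write on a pool with no slog is instead logged as a
 * WR_INDIRECT record of one block and committed later via dmu_sync().
 * Asynchronous writes that don't qualify for WR_INDIRECT fall through to
 * WR_NEED_COPY, deferring the copy until the ZIL actually needs the data.
 */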

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dump;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dump;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dump) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}
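
/*
 * Illustrative note on the "sync" decision in zvol_strategy() above (a
 * restatement, not additional logic): a write is committed to the ZIL
 * before biodone() when it is not B_ASYNC and the write cache is disabled
 * (ZVOL_WCE clear), or whenever the dataset's sync property is "always";
 * reads and dumpified I/O never take the zil_commit() path.  For example,
 * a hypothetical non-B_ASYNC write with ZVOL_WCE set and sync=standard
 * skips the commit here and relies on DKIOCFLUSHWRITECACHE to flush.
 */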

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}
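
/*
 * Minimal usage sketch for an external (in-kernel) consumer such as a SCSI
 * target provider; the variable names are hypothetical and this is not part
 * of the driver:
 *
 *	uint64_t blksize, max_xfer;
 *	void *minor_hdl, *os_hdl, *zil_hdl, *rl_hdl, *bonus_hdl;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &max_xfer, &minor_hdl,
 *	    &os_hdl, &zil_hdl, &rl_hdl, &bonus_hdl) == 0) {
 *		uint64_t size = zvol_get_volume_size(minor_hdl);
 *		...
 *	}
 *
 * The handles remain valid only while the caller keeps the volume open.
 */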

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's property if we are initializing
	 * the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}