/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

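/*
 * For dumpified zvols, zvol_get_lbas() walks the dataset and records one
 * zvol_extent_t per physically contiguous run of blocks, so the dump code
 * can turn a volume offset into a (vdev, offset) pair without the DMU.
 * For example, three volblocksize blocks allocated back-to-back in one
 * DVA are described by a single extent with ze_nblks == 3.
 */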
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

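/*
 * Look up the in-core state for a volume by dataset name.  Minor 0 is
 * never handed out for a volume (zfsdev_minor_alloc() treats 0 as a
 * failure), so the scan starts at 1.  The search is linear, which is
 * fine here: it runs only on administrative paths (create/remove/
 * resize), never per-I/O.
 */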
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

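/*
 * ZIL replay.  If the system failed with committed intent-log records
 * that never made it into a transaction group, they are replayed through
 * zvol_replay_vector below when the minor node is created.
 */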
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

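/*
 * Update the volume's stored size in its ZAP object and free any blocks
 * now lying past the end of the device.  The in-core zv_volsize is
 * updated separately by zvol_size_changed().
 */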
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() so that the dump subsystem
	 * sees the device's updated size(9P).
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

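	/*
	 * Each chunk of the write is logged in one of three ways:
	 * WR_INDIRECT (issue the write via dmu_sync() and log only the
	 * resulting block pointer), WR_COPIED (copy the data into the
	 * itx right here), or WR_NEED_COPY (defer the copy until the
	 * itx is committed to the log).
	 */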
	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

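/*
 * Perform raw I/O for a dumpified zvol.  The volume offset is translated
 * to a physical (vdev, offset) pair by walking the extent list built by
 * zvol_get_lbas(), bypassing the DMU entirely so that this path remains
 * usable while the system is panicking.
 */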
static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (ze != NULL && offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dumpified;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

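	/*
	 * Carve the request into chunks of at most zvol_maxphys bytes
	 * (and, on the dump path, chunks that stay within one
	 * volblocksize block), issuing each chunk as a separate DMU or
	 * raw-device operation.
	 */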
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
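	/*
	 * Transfer in chunks of at most DMU_MAX_ACCESS/2 bytes so that
	 * each dmu_read_uio() call stays within the DMU's per-access
	 * limit, clipping the final chunk at the end of the volume.
	 */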
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
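	/*
	 * The emulated label is deliberately minimal: LBA 1 carries the
	 * GPT header and LBA 2 a single partition entry spanning LBA 34
	 * through the end of the volume, with all sizes expressed in
	 * zv_min_bs-sized sectors.
	 */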
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

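	/*
	 * DKIOCGETWCE/DKIOCSETWCE report and toggle the volume's
	 * write-cache-enable flag.  While WCE is set, writes are
	 * acknowledged before reaching stable storage and are made
	 * durable only by DKIOCFLUSHWRITECACHE, much like a disk's
	 * volatile write cache.
	 */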
	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	enum zio_checksum checksum;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active.  We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 * Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
	 * the raidz vdev itself has 3 children.
	 */
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
	 * function.  Otherwise, use the old default -- OFF.
	 */
	checksum = spa_feature_is_active(spa,
	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
	    ZIO_CHECKSUM_OFF;

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's property if we are initializing
	 * the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

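	/*
	 * At this point the on-disk layout is pinned down (space
	 * preallocated, checksum/compression fixed, blocksize forced to
	 * SPA_MAXBLOCKSIZE by zvol_dump_init()), so the physical extent
	 * map recorded below stays valid for the dump subsystem.
	 */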
	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}