/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

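/*
 * Name of the ZAP attribute in which zvol_dumpify() records the volume
 * size that the dump area was last initialized for.
 */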
#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

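/*
 * Find the in-core state for the named volume by scanning the allocated
 * minors.  Returns NULL if no minor node exists for that name.  The caller
 * must hold zfsdev_state_lock.
 */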
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

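/*
 * ZIL replay callbacks, invoked from zil_replay() when a writable volume
 * with an outstanding intent log is brought online in zvol_create_minor().
 */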
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

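/*
 * Update the "size" attribute in the zvol's ZAP object and free any blocks
 * beyond the new end of the volume.  The caller must hold
 * zfsdev_state_lock.
 */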
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

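/*
 * Apply a new volume size to the in-core state and DDI properties, resize
 * the dump area if the zvol has been dumpified, and generate a LUN
 * expansion sysevent on success.
 */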
static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible by the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

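/*
 * Drop one open reference; the last close tears down the ZIL, bonus buffer,
 * and objset holds via zvol_last_close().
 */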
/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

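	/*
	 * Queue one TX_WRITE itx per chunk, choosing an indirect, copied, or
	 * deferred-copy record based on the write size and log device setup.
	 */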
	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

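/*
 * Translate an offset in a dumpified zvol into a physical vdev offset using
 * the cached extent list and issue the I/O directly to the vdev, bypassing
 * the DMU.  Used for dump I/O, including while the system is panicking.
 */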
static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dumpified;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

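	/*
	 * Issue the I/O in chunks of at most zvol_maxphys bytes; for a
	 * dumpified zvol each chunk is also clipped to a volblocksize
	 * boundary.
	 */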
	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	enum zio_checksum checksum;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active.  We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 * Here spa_root_vdev->vdev_children == 1 (the raidz vdev), while the
	 * raidz vdev itself has 3 children.
	 */
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
	 * function.  Otherwise, use the old default -- OFF.
	 */
	checksum = spa_feature_is_active(spa,
	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
	    ZIO_CHECKSUM_OFF;

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * volume size.  Otherwise, we save off the zvol's original
	 * properties so that we can restore them if the zvol is ever
	 * undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}