/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}

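/*
 * Update the on-disk volume size (the "size" entry in the zvol's ZAP
 * object) and free any blocks beyond the new end of the volume.
 * The caller must hold zfsdev_state_lock.
 */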
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() to ensure that the devices'
	 * size(9P) is not visible by the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}

static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}

int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dumpified;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}

int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller really wants synchronous writes, and
			 * can't wait for them, don't return until the write
			 * is done.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	enum zio_checksum checksum;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active. We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
	 *   the raidz vdev itself has 3 children.
	 */
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
	 * function.  Otherwise, use the old default -- OFF.
	 */
	checksum = spa_feature_is_active(spa,
	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
	    ZIO_CHECKSUM_OFF;

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize. Otherwise, we save off the original state of the
	 * zvol so that we can restore them if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are initializing
	 * the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}