/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Portions Copyright 2010 Robert Milkowski
 *
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 */
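/*
 * Typical usage, as a sketch (assuming a pool named "tank"; the device
 * paths follow the pattern above):
 *
 *	# zfs create -V 1g tank/myvol
 *	# newfs /dev/zvol/rdsk/tank/myvol
 *
 * The volume is immediately usable as a block or character device.
 */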
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <sys/pathname.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/crc32.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/mkdev.h>
#include <sys/zil.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zvol.h>
#include <sys/dumphdr.h>
#include <sys/zil_impl.h>
#include <sys/dbuf.h>
#include <sys/zfs_events.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include "zfs_namecheck.h"

void *zfsdev_state;
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * This lock protects the zfsdev_state structure from being modified
 * while it's being used, e.g. an open that comes in before a create
 * finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
kmutex_t zfsdev_state_lock;
static uint32_t zvol_minors;

typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	minor_t		zv_minor;	/* minor number */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_open_count[OTYPCNT];	/* open counts */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);

	zv->zv_volsize = volsize;
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
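/*
 * Worked example of the two checks above (values illustrative): an 8K
 * volblocksize is accepted (a power of two within [SPA_MINBLOCKSIZE,
 * SPA_MAXBLOCKSIZE]) while 6K fails the ISP2() test, and a 1G volsize is
 * accepted with an 8K blocksize because 2^30 is a multiple of 2^13.
 */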
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	minor_t minor;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			return (zv);
	}

	return (NULL);
}

/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (BP_IS_HOLE(bp) ||
	    zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY(!BP_IS_EMBEDDED(bp));

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (SET_ERROR(EFRAGS));

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg	ma;
	int		err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}
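/*
 * Illustrative numbers for the traversal above: a fully preallocated 1G
 * zvol with an 8K volblocksize visits 2^30 / 2^13 = 131072 level-0 blocks;
 * runs of physically contiguous DVAs collapse into single zvol_extent_t
 * entries, so a well-laid-out dump device needs only a handful of extents.
 */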
/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
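/*
 * Example of the dmu_sync() rounding above (numbers illustrative): an
 * indirect log record (reclen == sizeof (lr_write_t), data recovered via
 * the block pointer) always covers a whole block, so a 512-byte write into
 * an 8K block is replayed as the full 8K block with the offset rounded
 * down to the block boundary.
 */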
/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_truncate,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&zfsdev_state_lock);
	return (zv ? 0 : -1);
}

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	minor_t minor = 0;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zfsdev_state_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EEXIST));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

	if (error) {
		mutex_exit(&zfsdev_state_lock);
		return (error);
	}

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(EAGAIN));
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&zfsdev_state_lock);

	return (0);
}
/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
	char nmbuf[20];
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	if (zv->zv_total_opens != 0)
		return (SET_ERROR(EBUSY));

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	kmem_free(zv, sizeof (zvol_state_t));

	ddi_soft_state_free(zfsdev_state, minor);

	zvol_minors--;
	return (0);
}

int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&zfsdev_state_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&zfsdev_state_lock);
	return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t readonly;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	zv->zv_objset = os;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		ASSERT(error == 0);
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}

	zvol_size_changed(zv, volsize);
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (SET_ERROR(ENOSPC));

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
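/*
 * Sizing sketch for the loop above (illustrative, with SPA_MAXBLOCKSIZE
 * at 128K): preallocating a 1G volume issues 2^30 / 2^17 = 8192 separate
 * transactions, one per chunk, so the txg_wait_synced() at the end is
 * what guarantees the whole allocation is on disk before dumpify proceeds.
 */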
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}

void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv;
	char *namebuf;
	minor_t minor;

	namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
	(void) strncpy(namebuf, name, strlen(name));
	(void) strcat(namebuf, "/");
	mutex_enter(&zfsdev_state_lock);
	for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {

		zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
		if (zv == NULL)
			continue;
		if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
			(void) zvol_remove_zv(zv);
	}
	kmem_free(namebuf, strlen(name) + 2);

	mutex_exit(&zfsdev_state_lock);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	uint64_t old_volsize = 0ULL;
	int error = 0;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));

	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.  We must set the new volsize prior
	 * to calling dumpvp_resize() so that the device's updated
	 * size(9P) is visible to the dump subsystem.
	 */
	old_volsize = zv->zv_volsize;
	zvol_size_changed(zv, volsize);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		if ((error = zvol_dumpify(zv)) != 0 ||
		    (error = dumpvp_resize()) != 0) {
			int dumpify_error;

			(void) zvol_update_volsize(zv->zv_objset, old_volsize);
			zvol_size_changed(zv, old_volsize);
			dumpify_error = zvol_dumpify(zv);
			error = dumpify_error ? dumpify_error : error;
		}
	}

	/*
	 * Generate a LUN expansion event.
	 */
	if (error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
	return (error);
}

int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zfsdev_state_lock);
	zv = zvol_minor_lookup(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zfsdev_state_lock);
			return (error);
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
		goto out;

	error = zvol_update_volsize(os, volsize);

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);
out:
	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&zfsdev_state_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = SET_ERROR(EROFS);
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = SET_ERROR(EBUSY);
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = SET_ERROR(EBUSY);
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}
	mutex_exit(&zfsdev_state_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&zfsdev_state_lock);
	return (err);
}

/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&zfsdev_state_lock);
	return (error);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}
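/*
 * Concretely (values illustrative): an asynchronous 4K write logged as
 * WR_NEED_COPY is fetched here with a plain dmu_read() into the caller's
 * log buffer (the immediate path, buf != NULL), while a full-block write
 * logged WR_INDIRECT arrives with buf == NULL and is pushed out via
 * dmu_sync(), so only its block pointer lands in the log record.
 */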
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		rw_enter(&rz_zev_rwlock, RW_READER);
		if (rz_zev_callbacks && rz_zev_callbacks->rz_zev_zvol_write)
			rz_zev_callbacks->rz_zev_zvol_write(zv->zv_name,
			    zv->zv_objset, tx, off, len);
		rw_exit(&rz_zev_rwlock);

		off += len;
		resid -= len;
	}
}
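/*
 * Decision sketch for the loop above (illustrative): with the default 32K
 * zvol_immediate_write_sz, logbias=latency and no separate log device, a
 * 128K volblocksize makes aligned full-block chunks go WR_INDIRECT; with
 * an 8K volblocksize (8K <= 32K) the same data is instead logged WR_COPIED
 * when sync, or WR_NEED_COPY when async.
 */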
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	if (vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops) {
		for (c = 0; c < vd->vdev_children; c++) {
			int err = zvol_dumpio_vdev(vd->vdev_child[c],
			    addr, offset, origoffset, size, doread, isdump);
			if (err != 0) {
				numerrors++;
			} else if (doread) {
				break;
			}
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (SET_ERROR(EIO));
	else if (!doread && !vdev_writeable(vd))
		return (SET_ERROR(EIO));

	if (vd->vdev_ops == &vdev_raidz_ops) {
		return (vdev_raidz_physio(vd,
		    addr, size, offset, origoffset, doread, isdump));
	}

	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		if (doread)
			return (SET_ERROR(EIO));
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		dvd = vd->vdev_tsd;
		ASSERT3P(dvd, !=, NULL);
		return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
		    offset, doread ? B_READ : B_WRITE));
	}
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (SET_ERROR(EINVAL));
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (ze == NULL)
		return (SET_ERROR(EINVAL));

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
	    size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
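/*
 * Alignment examples for the checks above (with an 8K volblocksize,
 * illustrative): offset 512 / size 1024 is fine, offset 100 fails the
 * sector-alignment test, and offset 4096 / size 8192 fails P2BOUNDARY()
 * because the range would straddle two different 8K blocks.
 */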
int
zvol_strategy(buf_t *bp)
{
	zfs_soft_state_t *zs = NULL;
	zvol_state_t *zv;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = bp->b_flags & B_READ;
	boolean_t is_dumpified;
	boolean_t sync;

	if (getminor(bp->b_edev) == 0) {
		error = SET_ERROR(EINVAL);
	} else {
		zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
		if (zs == NULL)
			error = SET_ERROR(ENXIO);
		else if (zs->zss_type != ZSST_ZVOL)
			error = SET_ERROR(EINVAL);
	}

	if (error) {
		bioerror(bp, error);
		biodone(bp);
		return (0);
	}

	zv = zs->zss_data;

	if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		bioerror(bp, EIO);
		biodone(bp);
		return (0);
	}

	is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
	sync = ((!(bp->b_flags & B_ASYNC) &&
	    !(zv->zv_flags & ZVOL_WCE)) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
	    !doread && !is_dumpified;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
		if (is_dumpified) {
			size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
			error = zvol_dumpio(zv, addr, off, size,
			    doread, B_FALSE);
		} else if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	biodone(bp);

	return (0);
}
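/*
 * The sync decision above, as a sketch: a write goes through zil_commit()
 * when the buffer is not B_ASYNC and the write cache is disabled, or
 * whenever sync=always is set on the dataset; reads and dumpified I/O
 * never commit the ZIL.
 */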
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}

int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
		return (SET_ERROR(EINVAL));

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (SET_ERROR(EIO));

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	int length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (SET_ERROR(EFAULT));
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (SET_ERROR(EINVAL));

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (SET_ERROR(EFAULT));
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (SET_ERROR(EFAULT));
	return (0);
}
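/*
 * Shape of the emulated label, as a sketch: LBA 1 holds the GPT header and
 * LBA 2 a single EFI_RESERVED partition entry spanning LBA 34 through
 * (vs >> bs) - 1; e.g. a 1G volume with 512-byte sectors reports an entry
 * ending at LBA 2097151.
 */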
/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (SET_ERROR(ENXIO));

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);

	rw_enter(&rz_zev_rwlock, RW_READER);
	if (rz_zev_callbacks && rz_zev_callbacks->rz_zev_zvol_truncate)
		rz_zev_callbacks->rz_zev_zvol_truncate(zv->zv_name,
		    zv->zv_objset, tx, off, len);
	rw_exit(&rz_zev_rwlock);
}
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&zfsdev_state_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&zfsdev_state_lock);
		return (SET_ERROR(ENXIO));
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
	{
		struct dk_cinfo dki;

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFO:
	{
		struct dk_minfo dkm;

		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGMEDIAINFOEXT:
	{
		struct dk_minfo_ext dkmext;

		bzero(&dkmext, sizeof (dkmext));
		dkmext.dki_lbsize = 1U << zv->zv_min_bs;
		dkmext.dki_pbsize = zv->zv_volblocksize;
		dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkmext.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zfsdev_state_lock);
		if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
			error = SET_ERROR(EFAULT);
		return (error);
	}

	case DKIOCGETEFI:
	{
		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&zfsdev_state_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);
		return (error);
	}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&zfsdev_state_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
	{
		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),
		    flag))
			error = SET_ERROR(EFAULT);
		break;
	}
	case DKIOCSETWCE:
	{
		int wce;
		if (ddi_copyin((void *)arg, &wce, sizeof (int),
		    flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}
		if (wce) {
			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
		} else {
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&zfsdev_state_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
		}
		return (0);
	}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = SET_ERROR(ENOTSUP);
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCFREE:
	{
		dkioc_free_t df;
		dmu_tx_t *tx;

		if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
			error = SET_ERROR(EFAULT);
			break;
		}

		/*
		 * Apply Postel's Law to length-checking.  If they overshoot,
		 * just blank out until the end, if there's a need to blank
		 * out anything.
		 */
		if (df.df_start >= zv->zv_volsize)
			break;	/* No need to do anything... */
		if (df.df_start + df.df_length > zv->zv_volsize)
			df.df_length = DMU_OBJECT_END;

		rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
		    RL_WRITER);
		tx = dmu_tx_create(zv->zv_objset);
		dmu_tx_mark_netfree(tx);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error != 0) {
			dmu_tx_abort(tx);
		} else {
			zvol_log_truncate(zv, tx, df.df_start,
			    df.df_length, B_TRUE);
			dmu_tx_commit(tx);
			error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
			    df.df_start, df.df_length);
		}

		zfs_range_unlock(rl);

		if (error == 0) {
			/*
			 * If the write-cache is disabled or 'sync' property
			 * is set to 'always' then treat this as a synchronous
			 * operation (i.e. commit to zil).
			 */
			if (!(zv->zv_flags & ZVOL_WCE) ||
			    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);

			/*
			 * If the caller asked for the free to be stable
			 * on disk before returning (DF_WAIT_SYNC), don't
			 * return until the txg carrying it has synced.
			 */
			if (df.df_flags & DF_WAIT_SYNC) {
				txg_wait_synced(
				    dmu_objset_pool(zv->zv_objset), 0);
			}
		}
		break;
	}

	default:
		error = SET_ERROR(ENOTTY);
		break;

	}
	mutex_exit(&zfsdev_state_lock);
	return (error);
}
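/*
 * DKIOCFREE clamping example (illustrative): on a 1G zvol, a request with
 * df_start = 512M and df_length = 1G overshoots the device, so df_length
 * is rewritten to DMU_OBJECT_END and the free simply runs to the end of
 * the volume; a df_start beyond the volume is silently ignored.
 */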
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

void
zvol_fini(void)
{
	mutex_destroy(&zfsdev_state_lock);
	ddi_soft_state_fini(&zfsdev_state);
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
		return (1);
	return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error;
	objset_t *os = zv->zv_objset;
	spa_t *spa = dmu_objset_spa(os);
	vdev_t *vd = spa->spa_root_vdev;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(spa);
	enum zio_checksum checksum;

	ASSERT(MUTEX_HELD(&zfsdev_state_lock));
	ASSERT(vd->vdev_ops == &vdev_root_ops);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	/*
	 * If the pool on which the dump device is being initialized has more
	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
	 * enabled.  If so, bump that feature's counter to indicate that the
	 * feature is active.  We also check the vdev type to handle the
	 * following case:
	 *   # zpool create test raidz disk1 disk2 disk3
	 * Now we have spa_root_vdev->vdev_children == 1 (the raidz vdev),
	 * but the raidz vdev itself has 3 children.
	 */
	if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
		if (!spa_feature_is_enabled(spa,
		    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
			return (SET_ERROR(ENOTSUP));
		(void) dsl_sync_task(spa_name(spa),
		    zfs_mvdev_dump_feature_check,
		    zfs_mvdev_dump_activate_feature_sync, NULL,
		    2, ZFS_SPACE_CHECK_RESERVED);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
	 * function.  Otherwise, use the old default -- OFF.
	 */
	checksum = spa_feature_is_active(spa,
	    SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
	    ZIO_CHECKSUM_OFF;

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvol size.  Otherwise, we save off the original state of
	 * the zvol so that we can restore it if the zvol is ever
	 * undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's properties if we are
	 * initializing the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    checksum) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}
static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (SET_ERROR(EROFS));

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0);

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
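/*
 * Re-initialization logic above, spelled out: a missing ZVOL_DUMPSIZE
 * entry means the zvol has never been dumpified (full init, resize ==
 * B_FALSE); a nonzero entry that no longer matches zv_volsize means the
 * volume was resized since the last dumpify, so only the refreservation
 * is refreshed (resize == B_TRUE).
 */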
static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}