1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * ZFS volume emulation driver. 30 * 31 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes. 32 * Volumes are accessed through the symbolic links named: 33 * 34 * /dev/zvol/dsk/<pool_name>/<dataset_name> 35 * /dev/zvol/rdsk/<pool_name>/<dataset_name> 36 * 37 * These links are created by the ZFS-specific devfsadm link generator. 38 * Volumes are persistent through reboot. No user command needs to be 39 * run before opening and using a device. 
40 */ 41 42 #include <sys/types.h> 43 #include <sys/param.h> 44 #include <sys/errno.h> 45 #include <sys/uio.h> 46 #include <sys/buf.h> 47 #include <sys/modctl.h> 48 #include <sys/open.h> 49 #include <sys/kmem.h> 50 #include <sys/conf.h> 51 #include <sys/cmn_err.h> 52 #include <sys/stat.h> 53 #include <sys/zap.h> 54 #include <sys/spa.h> 55 #include <sys/zio.h> 56 #include <sys/dsl_prop.h> 57 #include <sys/dkio.h> 58 #include <sys/efi_partition.h> 59 #include <sys/byteorder.h> 60 #include <sys/pathname.h> 61 #include <sys/ddi.h> 62 #include <sys/sunddi.h> 63 #include <sys/crc32.h> 64 #include <sys/dirent.h> 65 #include <sys/policy.h> 66 #include <sys/fs/zfs.h> 67 #include <sys/zfs_ioctl.h> 68 #include <sys/mkdev.h> 69 #include <sys/zil.h> 70 #include <sys/refcount.h> 71 #include <sys/zfs_znode.h> 72 #include <sys/zfs_rlock.h> 73 74 #include "zfs_namecheck.h" 75 76 #define ZVOL_OBJ 1ULL 77 #define ZVOL_ZAP_OBJ 2ULL 78 79 static void *zvol_state; 80 81 /* 82 * This lock protects the zvol_state structure from being modified 83 * while it's being used, e.g. an open that comes in before a create 84 * finishes. It also protects temporary opens of the dataset so that, 85 * e.g., an open doesn't get a spurious EBUSY. 86 */ 87 static kmutex_t zvol_state_lock; 88 static uint32_t zvol_minors; 89 90 /* 91 * The in-core state of each volume. 
92 */ 93 typedef struct zvol_state { 94 char zv_name[MAXPATHLEN]; /* pool/dd name */ 95 uint64_t zv_volsize; /* amount of space we advertise */ 96 uint64_t zv_volblocksize; /* volume block size */ 97 minor_t zv_minor; /* minor number */ 98 uint8_t zv_min_bs; /* minimum addressable block shift */ 99 uint8_t zv_readonly; /* hard readonly; like write-protect */ 100 objset_t *zv_objset; /* objset handle */ 101 uint32_t zv_mode; /* DS_MODE_* flags at open time */ 102 uint32_t zv_open_count[OTYPCNT]; /* open counts */ 103 uint32_t zv_total_opens; /* total open count */ 104 zilog_t *zv_zilog; /* ZIL handle */ 105 uint64_t zv_txg_assign; /* txg to assign during ZIL replay */ 106 znode_t zv_znode; /* for range locking */ 107 } zvol_state_t; 108 109 /* 110 * zvol maximum transfer in one DMU tx. 111 */ 112 int zvol_maxphys = DMU_MAX_ACCESS/2; 113 114 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio); 115 116 static void 117 zvol_size_changed(zvol_state_t *zv, major_t maj) 118 { 119 dev_t dev = makedevice(maj, zv->zv_minor); 120 121 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 122 "Size", zv->zv_volsize) == DDI_SUCCESS); 123 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 124 "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS); 125 } 126 127 int 128 zvol_check_volsize(uint64_t volsize, uint64_t blocksize) 129 { 130 if (volsize == 0) 131 return (EINVAL); 132 133 if (volsize % blocksize != 0) 134 return (EINVAL); 135 136 #ifdef _ILP32 137 if (volsize - 1 > SPEC_MAXOFFSET_T) 138 return (EOVERFLOW); 139 #endif 140 return (0); 141 } 142 143 int 144 zvol_check_volblocksize(uint64_t volblocksize) 145 { 146 if (volblocksize < SPA_MINBLOCKSIZE || 147 volblocksize > SPA_MAXBLOCKSIZE || 148 !ISP2(volblocksize)) 149 return (EDOM); 150 151 return (0); 152 } 153 154 static void 155 zvol_readonly_changed_cb(void *arg, uint64_t newval) 156 { 157 zvol_state_t *zv = arg; 158 159 zv->zv_readonly = (uint8_t)newval; 160 } 161 162 int 163 zvol_get_stats(objset_t *os, nvlist_t *nv) 
164 { 165 int error; 166 dmu_object_info_t doi; 167 uint64_t val; 168 169 170 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val); 171 if (error) 172 return (error); 173 174 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val); 175 176 error = dmu_object_info(os, ZVOL_OBJ, &doi); 177 178 if (error == 0) { 179 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE, 180 doi.doi_data_block_size); 181 } 182 183 return (error); 184 } 185 186 /* 187 * Find a free minor number. 188 */ 189 static minor_t 190 zvol_minor_alloc(void) 191 { 192 minor_t minor; 193 194 ASSERT(MUTEX_HELD(&zvol_state_lock)); 195 196 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) 197 if (ddi_get_soft_state(zvol_state, minor) == NULL) 198 return (minor); 199 200 return (0); 201 } 202 203 static zvol_state_t * 204 zvol_minor_lookup(const char *name) 205 { 206 minor_t minor; 207 zvol_state_t *zv; 208 209 ASSERT(MUTEX_HELD(&zvol_state_lock)); 210 211 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) { 212 zv = ddi_get_soft_state(zvol_state, minor); 213 if (zv == NULL) 214 continue; 215 if (strcmp(zv->zv_name, name) == 0) 216 break; 217 } 218 219 return (zv); 220 } 221 222 /* ARGSUSED */ 223 void 224 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 225 { 226 zfs_creat_t *zct = arg; 227 nvlist_t *nvprops = zct->zct_props; 228 int error; 229 uint64_t volblocksize, volsize; 230 231 VERIFY(nvlist_lookup_uint64(nvprops, 232 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0); 233 if (nvlist_lookup_uint64(nvprops, 234 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) 235 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); 236 237 /* 238 * These properites must be removed from the list so the generic 239 * property setting step won't apply to them. 
240 */ 241 VERIFY(nvlist_remove_all(nvprops, 242 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0); 243 (void) nvlist_remove_all(nvprops, 244 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE)); 245 246 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize, 247 DMU_OT_NONE, 0, tx); 248 ASSERT(error == 0); 249 250 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP, 251 DMU_OT_NONE, 0, tx); 252 ASSERT(error == 0); 253 254 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx); 255 ASSERT(error == 0); 256 } 257 258 /* 259 * Replay a TX_WRITE ZIL transaction that didn't get committed 260 * after a system failure 261 */ 262 static int 263 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap) 264 { 265 objset_t *os = zv->zv_objset; 266 char *data = (char *)(lr + 1); /* data follows lr_write_t */ 267 uint64_t off = lr->lr_offset; 268 uint64_t len = lr->lr_length; 269 dmu_tx_t *tx; 270 int error; 271 272 if (byteswap) 273 byteswap_uint64_array(lr, sizeof (*lr)); 274 275 tx = dmu_tx_create(os); 276 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len); 277 error = dmu_tx_assign(tx, zv->zv_txg_assign); 278 if (error) { 279 dmu_tx_abort(tx); 280 } else { 281 dmu_write(os, ZVOL_OBJ, off, len, data, tx); 282 dmu_tx_commit(tx); 283 } 284 285 return (error); 286 } 287 288 /* ARGSUSED */ 289 static int 290 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap) 291 { 292 return (ENOTSUP); 293 } 294 295 /* 296 * Callback vectors for replaying records. 297 * Only TX_WRITE is needed for zvol. 
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
};

/*
 * Create a minor node for the specified volume.
 *
 * Opens the objset, picks a minor number (reusing the one encoded in
 * any pre-existing /dev/zvol symlink when possible), creates the char
 * ("<minor>c,raw") and block ("<minor>c") minor nodes, initializes the
 * in-core state (including the ZIL and the range-lock AVL tree), and
 * replays any outstanding intent-log records.
 *
 * Returns 0, or EEXIST if the volume already has a minor, ENXIO if no
 * minor number is free, EAGAIN on DDI failures, or the error from
 * opening/looking up the objset.
 */
int
zvol_create_minor(const char *name, major_t maj)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	uint64_t volsize;
	minor_t minor = 0;
	struct pathname linkpath;
	int ds_mode = DS_MODE_PRIMARY;
	vnode_t *vp = NULL;
	char *devpath;
	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + 1 + strlen(name) + 1;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) != NULL) {
		mutex_exit(&zvol_state_lock);
		return (EEXIST);
	}

	/* Snapshots ('@' in the name) are only ever opened read-only. */
	if (strchr(name, '@') != 0)
		ds_mode |= DS_MODE_READONLY;

	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);

	if (error) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);

	if (error) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	/*
	 * If there's an existing /dev/zvol symlink, try to use the
	 * same minor number we used last time.
	 */
	devpath = kmem_alloc(devpathlen, KM_SLEEP);

	(void) sprintf(devpath, "%s/%s", ZVOL_FULL_DEV_DIR, name);

	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);

	kmem_free(devpath, devpathlen);

	if (error == 0 && vp->v_type != VLNK)
		error = EINVAL;

	if (error == 0) {
		pn_alloc(&linkpath);
		error = pn_getsymlink(vp, &linkpath, kcred);
		if (error == 0) {
			/*
			 * The link target embeds the pseudo-device name
			 * followed by the decimal minor; parse it out.
			 */
			char *ms = strstr(linkpath.pn_path, ZVOL_PSEUDO_DEV);
			if (ms != NULL) {
				ms += strlen(ZVOL_PSEUDO_DEV);
				minor = stoi(&ms);
			}
		}
		pn_free(&linkpath);
	}

	if (vp != NULL)
		VN_RELE(vp);

	/*
	 * If we found a minor but it's already in use, we must pick a new one.
	 */
	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
		minor = 0;

	if (minor == 0)
		minor = zvol_minor_alloc();

	if (minor == 0) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	/* Raw (character) node: "<minor>c,raw". */
	(void) sprintf(chrbuf, "%uc,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	/* Block node: "<minor>c". */
	(void) sprintf(blkbuf, "%uc", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	zv = ddi_get_soft_state(zvol_state, minor);

	(void) strcpy(zv->zv_name, name);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;
	zv->zv_mode = ds_mode;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));

	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector);

	zvol_size_changed(zv, maj);

	/* XXX this should handle the possible i/o error */
	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zvol_minors++;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	char namebuf[30];

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens != 0) {
		mutex_exit(&zvol_state_lock);
		return (EBUSY);
	}

	/* Names must match those built in zvol_create_minor(). */
	(void) sprintf(namebuf, "%uc,raw", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	(void) sprintf(namebuf, "%uc", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_objset_close(zv->zv_objset);
	zv->zv_objset = NULL;
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	ddi_soft_state_free(zvol_state, zv->zv_minor);

	zvol_minors--;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Change the advertised size of the volume: validate the new size,
 * record it in the ZAP, free any blocks now past end-of-volume, and
 * refresh the devinfo Size/Nblocks properties.
 */
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;
	dmu_object_info_t doi;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_free(tx, ZVOL_OBJ, volsize, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	if (error == 0) {
		/* Punch out everything beyond the new end of volume. */
		error = dmu_free_range(zv->zv_objset, ZVOL_OBJ, volsize,
		    DMU_OBJECT_END, tx);
	}

	dmu_tx_commit(tx);

	if (error == 0) {
		zv->zv_volsize = volsize;
		zvol_size_changed(zv, maj);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Change the volume block size.  Only possible while the volume is
 * still empty; ENOTSUP from the DMU is mapped to EBUSY to tell the
 * caller the volume already has data.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = EBUSY;
		dmu_tx_commit(tx);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Device open entry point.  Minor 0 is the control device and always
 * opens successfully; otherwise bump the per-otyp and total open
 * counts (OTYP_LYR opens are counted on every open).
 */
/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(*devp);
	zvol_state_t *zv;

	if (minor == 0)			/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	ASSERT(zv->zv_objset != NULL);

	if ((flag & FWRITE) &&
	    (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY))) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Device close entry point; decrements the open counts bumped in
 * zvol_open().
 */
/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;

	if (minor == 0)		/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/*
	 * The next statement is a workaround for the following DDI bug:
	 * 6343604 specfs race: multiple "last-close" of the same device
	 */
	if (zv->zv_total_opens == 0) {
		mutex_exit(&zvol_state_lock);
		return (0);
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * dmu_sync() completion callback: release the dbuf and range lock
 * taken in zvol_get_data(), note the vdev the block landed on so the
 * ZIL can flush its write cache, and free the zgd.
 */
static void
zvol_get_done(dmu_buf_t *db, void *vzgd)
{
	zgd_t *zgd = (zgd_t *)vzgd;
	rl_t *rl = zgd->zgd_rl;

	dmu_buf_rele(db, vzgd);
	zfs_range_unlock(rl);
	zil_add_vdev(zgd->zgd_zilog, DVA_GET_VDEV(BP_IDENTITY(zgd->zgd_bp)));
	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	dmu_buf_t *db;
	rl_t *rl;
	zgd_t *zgd;
	uint64_t boff;			/* block starting offset */
	int dlen = lr->lr_length;	/* length of user data */
	int error;

	ASSERT(zio);
	ASSERT(dlen != 0);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL)	/* immediate write */
		return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf));

	zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_bp = &lr->lr_blkptr;

	/*
	 * Lock the range of the block to ensure that when the data is
	 * written out and it's checksum is being calculated that no other
	 * thread can change the block.
	 */
	boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t);
	rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize,
	    RL_READER);
	zgd->zgd_rl = rl;

	VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db));
	error = dmu_sync(zio, db, &lr->lr_blkptr,
	    lr->lr_common.lrc_txg, zvol_get_done, zgd);
	if (error == 0)
		zil_add_vdev(zv->zv_zilog,
		    DVA_GET_VDEV(BP_IDENTITY(&lr->lr_blkptr)));
	/*
	 * If we get EINPROGRESS, then we need to wait for a
	 * write IO initiated by dmu_sync() to complete before
	 * we can release this dbuf.  We will finish everything
	 * up in the zvol_get_done() callback.
	 */
	if (error == EINPROGRESS)
		return (0);
	dmu_buf_rele(db, zgd);
	zfs_range_unlock(rl);
	kmem_free(zgd, sizeof (zgd_t));
	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len)
{
	uint32_t blocksize = zv->zv_volblocksize;
	lr_write_t *lr;

	/* Emit one itx per volume block touched by [off, off+len). */
	while (len) {
		ssize_t nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (*lr));

		itx->itx_wr_state =
		    len > zvol_immediate_write_sz ? WR_INDIRECT : WR_NEED_COPY;
		itx->itx_private = zv;
		lr = (lr_write_t *)&itx->itx_lr;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = nbytes;
		lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t);
		BP_ZERO(&lr->lr_blkptr);

		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
		len -= nbytes;
		off += nbytes;
	}
}

/*
 * Block-device strategy entry point: service the buf in chunks of at
 * most zvol_maxphys, under a range lock, and commit the ZIL for
 * synchronous (non-B_ASYNC) writes.  Errors are reported through
 * bioerror()/biodone(); the return value is always 0.
 */
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t size, resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t reading;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if ((zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) &&
	    !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	reading = bp->b_flags & B_READ;
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    reading ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {

		size = MIN(resid, zvol_maxphys); /* zvol_maxphys per tx */

		if (size > volsize - off)	/* don't write past the end */
			size = volsize - off;

		if (reading) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size);
				dmu_tx_commit(tx);
			}
		}
		if (error)
			break;
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	/* If nothing at all transferred, report the (first) error. */
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (!(bp->b_flags & B_ASYNC) && !reading && !zil_disable)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);

	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
893 */ 894 void 895 zvol_minphys(struct buf *bp) 896 { 897 if (bp->b_bcount > zvol_maxphys) 898 bp->b_bcount = zvol_maxphys; 899 } 900 901 /*ARGSUSED*/ 902 int 903 zvol_read(dev_t dev, uio_t *uio, cred_t *cr) 904 { 905 minor_t minor = getminor(dev); 906 zvol_state_t *zv; 907 rl_t *rl; 908 int error = 0; 909 910 if (minor == 0) /* This is the control device */ 911 return (ENXIO); 912 913 zv = ddi_get_soft_state(zvol_state, minor); 914 if (zv == NULL) 915 return (ENXIO); 916 917 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 918 RL_READER); 919 while (uio->uio_resid > 0) { 920 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 921 922 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes); 923 if (error) 924 break; 925 } 926 zfs_range_unlock(rl); 927 return (error); 928 } 929 930 /*ARGSUSED*/ 931 int 932 zvol_write(dev_t dev, uio_t *uio, cred_t *cr) 933 { 934 minor_t minor = getminor(dev); 935 zvol_state_t *zv; 936 rl_t *rl; 937 int error = 0; 938 939 if (minor == 0) /* This is the control device */ 940 return (ENXIO); 941 942 zv = ddi_get_soft_state(zvol_state, minor); 943 if (zv == NULL) 944 return (ENXIO); 945 946 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 947 RL_WRITER); 948 while (uio->uio_resid > 0) { 949 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 950 uint64_t off = uio->uio_loffset; 951 952 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 953 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes); 954 error = dmu_tx_assign(tx, TXG_WAIT); 955 if (error) { 956 dmu_tx_abort(tx); 957 break; 958 } 959 error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx); 960 if (error == 0) 961 zvol_log_write(zv, tx, off, bytes); 962 dmu_tx_commit(tx); 963 964 if (error) 965 break; 966 } 967 zfs_range_unlock(rl); 968 return (error); 969 } 970 971 /* 972 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I). 
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	dk_efi_t efi;
	struct dk_callback *dkc;
	struct uuid uuid = EFI_RESERVED;
	uint32_t crc;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));

	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/*
	 * NOTE: each branch that copies data out to userland drops
	 * zvol_state_lock *before* ddi_copyout(), since the copy may
	 * fault/sleep; those branches return directly rather than
	 * falling through to the mutex_exit() at the bottom.
	 */
	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
			mutex_exit(&zvol_state_lock);
			return (EFAULT);
		}
		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;

		/*
		 * Some clients may attempt to request a PMBR for the
		 * zvol.  Currently this interface will return ENOTTY to
		 * such requests.  These requests could be supported by
		 * adding a check for lba == 0 and consing up an appropriate
		 * PMBR.
		 */
		if (efi.dki_lba == 1) {
			/* LBA 1: synthesize a GPT header on the fly. */
			efi_gpt_t gpt;
			efi_gpe_t gpe;

			bzero(&gpt, sizeof (gpt));
			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpt)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
			gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
			gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
			gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
			gpt.efi_gpt_LastUsableLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
			gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
			gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
			gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
			gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;

			/* CRC the entry array first; the header CRC covers it. */
			CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
			gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);

			CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
			gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag))
				error = EFAULT;
		} else if (efi.dki_lba == 2) {
			/* LBA 2: the single reserved partition entry. */
			efi_gpe_t gpe;

			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpe)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = LE_64(34ULL);
			gpe.efi_gpe_EndingLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpe, efi.dki_data, sizeof (gpe), flag))
				error = EFAULT;
		} else {
			mutex_exit(&zvol_state_lock);
			error = EINVAL;
		}
		return (error);

	case DKIOCFLUSHWRITECACHE:
		/*
		 * Flush = commit the ZIL.  The callback is honored only
		 * for in-kernel (FKIOCTL) callers, since arg is used as
		 * a kernel pointer here.
		 */
		dkc = (struct dk_callback *)arg;
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/* commands using these (like prtvtoc) expect ENOTSUP */
		error = ENOTSUP;
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}

/*
 * Report whether any zvol minors exist (module-unload gate).
 */
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

/*
 * Module initialization: set up the soft-state table and global lock.
 */
void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Module teardown: destroy the global lock and soft-state table.
 */
void
zvol_fini(void)
{
	mutex_destroy(&zvol_state_lock);
	ddi_soft_state_fini(&zvol_state);
}