1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * ZFS volume emulation driver. 30 * 31 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes. 32 * Volumes are accessed through the symbolic links named: 33 * 34 * /dev/zvol/dsk/<pool_name>/<dataset_name> 35 * /dev/zvol/rdsk/<pool_name>/<dataset_name> 36 * 37 * These links are created by the ZFS-specific devfsadm link generator. 38 * Volumes are persistent through reboot. No user command needs to be 39 * run before opening and using a device. 
40 */ 41 42 #include <sys/types.h> 43 #include <sys/param.h> 44 #include <sys/errno.h> 45 #include <sys/uio.h> 46 #include <sys/buf.h> 47 #include <sys/modctl.h> 48 #include <sys/open.h> 49 #include <sys/kmem.h> 50 #include <sys/conf.h> 51 #include <sys/cmn_err.h> 52 #include <sys/stat.h> 53 #include <sys/zap.h> 54 #include <sys/spa.h> 55 #include <sys/zio.h> 56 #include <sys/dsl_prop.h> 57 #include <sys/dkio.h> 58 #include <sys/efi_partition.h> 59 #include <sys/byteorder.h> 60 #include <sys/pathname.h> 61 #include <sys/ddi.h> 62 #include <sys/sunddi.h> 63 #include <sys/crc32.h> 64 #include <sys/dirent.h> 65 #include <sys/policy.h> 66 #include <sys/fs/zfs.h> 67 #include <sys/zfs_ioctl.h> 68 #include <sys/mkdev.h> 69 #include <sys/zil.h> 70 #include <sys/refcount.h> 71 #include <sys/zfs_znode.h> 72 #include <sys/zfs_rlock.h> 73 74 #include "zfs_namecheck.h" 75 76 #define ZVOL_OBJ 1ULL 77 #define ZVOL_ZAP_OBJ 2ULL 78 79 static void *zvol_state; 80 81 /* 82 * This lock protects the zvol_state structure from being modified 83 * while it's being used, e.g. an open that comes in before a create 84 * finishes. It also protects temporary opens of the dataset so that, 85 * e.g., an open doesn't get a spurious EBUSY. 86 */ 87 static kmutex_t zvol_state_lock; 88 static uint32_t zvol_minors; 89 90 /* 91 * The in-core state of each volume. 
92 */ 93 typedef struct zvol_state { 94 char zv_name[MAXPATHLEN]; /* pool/dd name */ 95 uint64_t zv_volsize; /* amount of space we advertise */ 96 uint64_t zv_volblocksize; /* volume block size */ 97 minor_t zv_minor; /* minor number */ 98 uint8_t zv_min_bs; /* minimum addressable block shift */ 99 uint8_t zv_readonly; /* hard readonly; like write-protect */ 100 objset_t *zv_objset; /* objset handle */ 101 uint32_t zv_mode; /* DS_MODE_* flags at open time */ 102 uint32_t zv_open_count[OTYPCNT]; /* open counts */ 103 uint32_t zv_total_opens; /* total open count */ 104 zilog_t *zv_zilog; /* ZIL handle */ 105 uint64_t zv_txg_assign; /* txg to assign during ZIL replay */ 106 znode_t zv_znode; /* for range locking */ 107 } zvol_state_t; 108 109 /* 110 * zvol maximum transfer in one DMU tx. 111 */ 112 int zvol_maxphys = DMU_MAX_ACCESS/2; 113 114 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio); 115 116 static void 117 zvol_size_changed(zvol_state_t *zv, major_t maj) 118 { 119 dev_t dev = makedevice(maj, zv->zv_minor); 120 121 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 122 "Size", zv->zv_volsize) == DDI_SUCCESS); 123 VERIFY(ddi_prop_update_int64(dev, zfs_dip, 124 "Nblocks", lbtodb(zv->zv_volsize)) == DDI_SUCCESS); 125 } 126 127 int 128 zvol_check_volsize(uint64_t volsize, uint64_t blocksize) 129 { 130 if (volsize == 0) 131 return (EINVAL); 132 133 if (volsize % blocksize != 0) 134 return (EINVAL); 135 136 #ifdef _ILP32 137 if (volsize - 1 > SPEC_MAXOFFSET_T) 138 return (EOVERFLOW); 139 #endif 140 return (0); 141 } 142 143 int 144 zvol_check_volblocksize(uint64_t volblocksize) 145 { 146 if (volblocksize < SPA_MINBLOCKSIZE || 147 volblocksize > SPA_MAXBLOCKSIZE || 148 !ISP2(volblocksize)) 149 return (EDOM); 150 151 return (0); 152 } 153 154 static void 155 zvol_readonly_changed_cb(void *arg, uint64_t newval) 156 { 157 zvol_state_t *zv = arg; 158 159 zv->zv_readonly = (uint8_t)newval; 160 } 161 162 int 163 zvol_get_stats(objset_t *os, nvlist_t *nv) 
164 { 165 int error; 166 dmu_object_info_t doi; 167 uint64_t val; 168 169 170 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val); 171 if (error) 172 return (error); 173 174 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val); 175 176 error = dmu_object_info(os, ZVOL_OBJ, &doi); 177 178 if (error == 0) { 179 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE, 180 doi.doi_data_block_size); 181 } 182 183 return (error); 184 } 185 186 /* 187 * Find a free minor number. 188 */ 189 static minor_t 190 zvol_minor_alloc(void) 191 { 192 minor_t minor; 193 194 ASSERT(MUTEX_HELD(&zvol_state_lock)); 195 196 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) 197 if (ddi_get_soft_state(zvol_state, minor) == NULL) 198 return (minor); 199 200 return (0); 201 } 202 203 static zvol_state_t * 204 zvol_minor_lookup(const char *name) 205 { 206 minor_t minor; 207 zvol_state_t *zv; 208 209 ASSERT(MUTEX_HELD(&zvol_state_lock)); 210 211 for (minor = 1; minor <= ZVOL_MAX_MINOR; minor++) { 212 zv = ddi_get_soft_state(zvol_state, minor); 213 if (zv == NULL) 214 continue; 215 if (strcmp(zv->zv_name, name) == 0) 216 break; 217 } 218 219 return (zv); 220 } 221 222 /* ARGSUSED */ 223 void 224 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx) 225 { 226 zfs_creat_t *zct = arg; 227 nvlist_t *nvprops = zct->zct_props; 228 int error; 229 uint64_t volblocksize, volsize; 230 231 VERIFY(nvlist_lookup_uint64(nvprops, 232 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0); 233 if (nvlist_lookup_uint64(nvprops, 234 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0) 235 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE); 236 237 /* 238 * These properites must be removed from the list so the generic 239 * property setting step won't apply to them. 
240 */ 241 VERIFY(nvlist_remove_all(nvprops, 242 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0); 243 (void) nvlist_remove_all(nvprops, 244 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE)); 245 246 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize, 247 DMU_OT_NONE, 0, tx); 248 ASSERT(error == 0); 249 250 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP, 251 DMU_OT_NONE, 0, tx); 252 ASSERT(error == 0); 253 254 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx); 255 ASSERT(error == 0); 256 } 257 258 /* 259 * Replay a TX_WRITE ZIL transaction that didn't get committed 260 * after a system failure 261 */ 262 static int 263 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap) 264 { 265 objset_t *os = zv->zv_objset; 266 char *data = (char *)(lr + 1); /* data follows lr_write_t */ 267 uint64_t off = lr->lr_offset; 268 uint64_t len = lr->lr_length; 269 dmu_tx_t *tx; 270 int error; 271 272 if (byteswap) 273 byteswap_uint64_array(lr, sizeof (*lr)); 274 275 tx = dmu_tx_create(os); 276 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len); 277 error = dmu_tx_assign(tx, zv->zv_txg_assign); 278 if (error) { 279 dmu_tx_abort(tx); 280 } else { 281 dmu_write(os, ZVOL_OBJ, off, len, data, tx); 282 dmu_tx_commit(tx); 283 } 284 285 return (error); 286 } 287 288 /* ARGSUSED */ 289 static int 290 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap) 291 { 292 return (ENOTSUP); 293 } 294 295 /* 296 * Callback vectors for replaying records. 297 * Only TX_WRITE is needed for zvol. 
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
};

/*
 * Create a minor node for the specified volume: open the objset,
 * allocate per-volume state, create the char and block device nodes,
 * open the ZIL, and replay any uncommitted log records.
 * Returns 0, or EEXIST/ENXIO/EAGAIN or an errno from the DMU/ZAP.
 */
int
zvol_create_minor(const char *name, major_t maj)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	uint64_t volsize;
	minor_t minor = 0;
	struct pathname linkpath;
	int ds_mode = DS_MODE_PRIMARY;
	vnode_t *vp = NULL;
	char *devpath;
	size_t devpathlen = strlen(ZVOL_FULL_DEV_DIR) + 1 + strlen(name) + 1;
	char chrbuf[30], blkbuf[30];
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) != NULL) {
		mutex_exit(&zvol_state_lock);
		return (EEXIST);
	}

	/* a snapshot ('@' in the name) can only ever be read-only */
	if (strchr(name, '@') != 0)
		ds_mode |= DS_MODE_READONLY;

	error = dmu_objset_open(name, DMU_OST_ZVOL, ds_mode, &os);

	if (error) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);

	if (error) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	/*
	 * If there's an existing /dev/zvol symlink, try to use the
	 * same minor number we used last time.
	 */
	devpath = kmem_alloc(devpathlen, KM_SLEEP);

	(void) sprintf(devpath, "%s/%s", ZVOL_FULL_DEV_DIR, name);

	error = lookupname(devpath, UIO_SYSSPACE, NO_FOLLOW, NULL, &vp);

	kmem_free(devpath, devpathlen);

	if (error == 0 && vp->v_type != VLNK)
		error = EINVAL;

	if (error == 0) {
		pn_alloc(&linkpath);
		error = pn_getsymlink(vp, &linkpath, kcred);
		if (error == 0) {
			/*
			 * Parse the old minor number out of the link
			 * target, which ends with the pseudo-device
			 * name followed by the minor.
			 */
			char *ms = strstr(linkpath.pn_path, ZVOL_PSEUDO_DEV);
			if (ms != NULL) {
				ms += strlen(ZVOL_PSEUDO_DEV);
				minor = stoi(&ms);
			}
		}
		pn_free(&linkpath);
	}

	if (vp != NULL)
		VN_RELE(vp);

	/*
	 * If we found a minor but it's already in use, we must pick a new one.
	 */
	if (minor != 0 && ddi_get_soft_state(zvol_state, minor) != NULL)
		minor = 0;

	if (minor == 0)
		minor = zvol_minor_alloc();

	if (minor == 0) {
		/* all minors are in use */
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zvol_state, minor) != DDI_SUCCESS) {
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	/* character ("raw") device node */
	(void) sprintf(chrbuf, "%uc,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	/* block device node */
	(void) sprintf(blkbuf, "%uc", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		/* unwind the char node created above */
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zvol_state, minor);
		dmu_objset_close(os);
		mutex_exit(&zvol_state_lock);
		return (EAGAIN);
	}

	zv = ddi_get_soft_state(zvol_state, minor);

	(void) strcpy(zv->zv_name, name);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_minor = minor;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;
	zv->zv_mode = ds_mode;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));


	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	/* apply any TX_WRITE records left over from a crash */
	zil_replay(os, zv, &zv->zv_txg_assign, zvol_replay_vector);

	zvol_size_changed(zv, maj);

	/* XXX this should handle the possible i/o error */
	VERIFY(dsl_prop_register(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zvol_minors++;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Remove minor node for the specified volume.
 * Fails with ENXIO if no such volume, EBUSY if it is still open.
 */
int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	char namebuf[30];

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens != 0) {
		mutex_exit(&zvol_state_lock);
		return (EBUSY);
	}

	/* tear down in the reverse order of zvol_create_minor() */
	(void) sprintf(namebuf, "%uc,raw", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	(void) sprintf(namebuf, "%uc", zv->zv_minor);
	ddi_remove_minor_node(zfs_dip, namebuf);

	VERIFY(dsl_prop_unregister(dmu_objset_ds(zv->zv_objset),
	    "readonly", zvol_readonly_changed_cb, zv) == 0);

	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_objset_close(zv->zv_objset);
	zv->zv_objset = NULL;
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	ddi_soft_state_free(zvol_state, zv->zv_minor);

	zvol_minors--;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Change the advertised size of the named volume: validate the new
 * size, update the persistent "size" ZAP entry, free any data beyond
 * the new end, and (on success) update the cached size and device
 * properties.  Returns 0 or an errno.
 */
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;
	dmu_object_info_t doi;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if ((error = dmu_object_info(zv->zv_objset, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0) {
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_free(tx, ZVOL_OBJ, volsize, DMU_OBJECT_END);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		mutex_exit(&zvol_state_lock);
		return (error);
	}

	error = zap_update(zv->zv_objset, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	if (error == 0) {
		/* free everything past the new end of the volume */
		error = dmu_free_range(zv->zv_objset, ZVOL_OBJ, volsize,
		    DMU_OBJECT_END, tx);
	}

	dmu_tx_commit(tx);

	if (error == 0) {
		zv->zv_volsize = volsize;
		zvol_size_changed(zv, maj);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Change the block size of the named volume's data object.
 * ENOTSUP from the DMU (data already written) is reported as EBUSY.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	if (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = EBUSY;
		dmu_tx_commit(tx);
	}

	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Device open entry point.  Minor 0 is the control device and always
 * opens successfully.  Write opens of a read-only volume fail EROFS.
 */
/*ARGSUSED*/
int
zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(*devp);
	zvol_state_t *zv;

	if (minor == 0)			/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	ASSERT(zv->zv_objset != NULL);

	if ((flag & FWRITE) &&
	    (zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY))) {
		mutex_exit(&zvol_state_lock);
		return (EROFS);
	}

	/*
	 * Non-layered otyps are counted once (specfs gives only one
	 * last-close); OTYP_LYR opens are counted on every open.
	 */
	if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
		zv->zv_open_count[otyp]++;
		zv->zv_total_opens++;
	}

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * Device close entry point; decrements the open counts taken in
 * zvol_open().  Minor 0 is the control device.
 */
/*ARGSUSED*/
int
zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;

	if (minor == 0)		/* This is the control device */
		return (0);

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, minor);
	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	/*
	 * The next statement is a workaround for the following DDI bug:
	 * 6343604 specfs race: multiple "last-close" of the same device
	 */
	if (zv->zv_total_opens == 0) {
		mutex_exit(&zvol_state_lock);
		return (0);
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_open_count[otyp] != 0);
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_open_count[otyp]--;
	zv->zv_total_opens--;

	mutex_exit(&zvol_state_lock);

	return (0);
}

/*
 * dmu_sync() completion callback: release the dbuf and range lock held
 * by zvol_get_data(), record the synced block in the ZIL, and free the
 * zgd bookkeeping structure.
 */
static void
zvol_get_done(dmu_buf_t *db, void *vzgd)
{
	zgd_t *zgd = (zgd_t *)vzgd;
	rl_t *rl = zgd->zgd_rl;

	dmu_buf_rele(db, vzgd);
	zfs_range_unlock(rl);
	zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	dmu_buf_t *db;
	rl_t *rl;
	zgd_t *zgd;
	uint64_t boff; 			/* block starting offset */
	int dlen = lr->lr_length;	/* length of user data */
	int error;

	ASSERT(zio);
	ASSERT(dlen != 0);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) /* immediate write */
		return (dmu_read(os, ZVOL_OBJ, lr->lr_offset, dlen, buf));

	zgd = (zgd_t *)kmem_alloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_bp = &lr->lr_blkptr;

	/*
	 * Lock the range of the block to ensure that when the data is
	 * written out and its checksum is being calculated that no other
	 * thread can change the block.
	 */
	boff = P2ALIGN_TYPED(lr->lr_offset, zv->zv_volblocksize, uint64_t);
	rl = zfs_range_lock(&zv->zv_znode, boff, zv->zv_volblocksize,
	    RL_READER);
	zgd->zgd_rl = rl;

	VERIFY(0 == dmu_buf_hold(os, ZVOL_OBJ, lr->lr_offset, zgd, &db));
	error = dmu_sync(zio, db, &lr->lr_blkptr,
	    lr->lr_common.lrc_txg, zvol_get_done, zgd);
	if (error == 0)
		zil_add_block(zv->zv_zilog, &lr->lr_blkptr);
	/*
	 * If we get EINPROGRESS, then we need to wait for a
	 * write IO initiated by dmu_sync() to complete before
	 * we can release this dbuf.  We will finish everything
	 * up in the zvol_get_done() callback.
	 */
	if (error == EINPROGRESS)
		return (0);
	dmu_buf_rele(db, zgd);
	zfs_range_unlock(rl);
	kmem_free(zgd, sizeof (zgd_t));
	return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

/*
 * Log a TX_WRITE itx (one per volume block touched) for the write of
 * 'len' bytes at volume offset 'off', within transaction 'tx'.
 */
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t len)
{
	uint32_t blocksize = zv->zv_volblocksize;
	lr_write_t *lr;

	while (len) {
		/* never cross a volume block boundary in one record */
		ssize_t nbytes = MIN(len, blocksize - P2PHASE(off, blocksize));
		itx_t *itx = zil_itx_create(TX_WRITE, sizeof (*lr));

		itx->itx_wr_state =
		    len > zvol_immediate_write_sz ? WR_INDIRECT : WR_NEED_COPY;
		itx->itx_private = zv;
		lr = (lr_write_t *)&itx->itx_lr;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = nbytes;
		lr->lr_blkoff = off - P2ALIGN_TYPED(off, blocksize, uint64_t);
		BP_ZERO(&lr->lr_blkptr);

		(void) zil_itx_assign(zv->zv_zilog, itx, tx);
		len -= nbytes;
		off += nbytes;
	}
}

/*
 * Block-device strategy entry point: perform the buffered I/O described
 * by 'bp' against the volume's DMU object, clamped to the volume size,
 * under a range lock.  Always returns 0; errors go back via bioerror().
 */
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t size, resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t reading;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		/* no I/O on the control device */
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if ((zv->zv_readonly || (zv->zv_mode & DS_MODE_READONLY)) &&
	    !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	reading = bp->b_flags & B_READ;
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    reading ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {

		size = MIN(resid, zvol_maxphys); /* zvol_maxphys per tx */

		if (size > volsize - off)	/* don't write past the end */
			size = volsize - off;

		if (reading) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size);
				dmu_tx_commit(tx);
			}
		}
		if (error)
			break;
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	/* only report an error if we didn't transfer anything */
	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	if (!(bp->b_flags & B_ASYNC) && !reading && !zil_disable)
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);

	biodone(bp);

	return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
892 */ 893 void 894 zvol_minphys(struct buf *bp) 895 { 896 if (bp->b_bcount > zvol_maxphys) 897 bp->b_bcount = zvol_maxphys; 898 } 899 900 /*ARGSUSED*/ 901 int 902 zvol_read(dev_t dev, uio_t *uio, cred_t *cr) 903 { 904 minor_t minor = getminor(dev); 905 zvol_state_t *zv; 906 rl_t *rl; 907 int error = 0; 908 909 if (minor == 0) /* This is the control device */ 910 return (ENXIO); 911 912 zv = ddi_get_soft_state(zvol_state, minor); 913 if (zv == NULL) 914 return (ENXIO); 915 916 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 917 RL_READER); 918 while (uio->uio_resid > 0) { 919 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 920 921 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes); 922 if (error) 923 break; 924 } 925 zfs_range_unlock(rl); 926 return (error); 927 } 928 929 /*ARGSUSED*/ 930 int 931 zvol_write(dev_t dev, uio_t *uio, cred_t *cr) 932 { 933 minor_t minor = getminor(dev); 934 zvol_state_t *zv; 935 rl_t *rl; 936 int error = 0; 937 938 if (minor == 0) /* This is the control device */ 939 return (ENXIO); 940 941 zv = ddi_get_soft_state(zvol_state, minor); 942 if (zv == NULL) 943 return (ENXIO); 944 945 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid, 946 RL_WRITER); 947 while (uio->uio_resid > 0) { 948 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1); 949 uint64_t off = uio->uio_loffset; 950 951 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); 952 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes); 953 error = dmu_tx_assign(tx, TXG_WAIT); 954 if (error) { 955 dmu_tx_abort(tx); 956 break; 957 } 958 error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes, tx); 959 if (error == 0) 960 zvol_log_write(zv, tx, off, bytes); 961 dmu_tx_commit(tx); 962 963 if (error) 964 break; 965 } 966 zfs_range_unlock(rl); 967 return (error); 968 } 969 970 /* 971 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I). 
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	dk_efi_t efi;
	struct dk_callback *dkc;
	struct uuid uuid = EFI_RESERVED;
	uint32_t crc;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = ddi_get_soft_state(zvol_state, getminor(dev));

	if (zv == NULL) {
		mutex_exit(&zvol_state_lock);
		return (ENXIO);
	}

	switch (cmd) {

	case DKIOCINFO:
		/* controller info: fake a single "zvol" controller */
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		/* media info: capacity in zv_min_bs-sized sectors */
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&zvol_state_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		/*
		 * Synthesize an EFI label on the fly: LBA 1 is the GPT
		 * header plus a single reserved partition entry, LBA 2
		 * is the partition entry array.
		 */
		if (ddi_copyin((void *)arg, &efi, sizeof (dk_efi_t), flag)) {
			mutex_exit(&zvol_state_lock);
			return (EFAULT);
		}
		efi.dki_data = (void *)(uintptr_t)efi.dki_data_64;

		/*
		 * Some clients may attempt to request a PMBR for the
		 * zvol.  Currently this interface will return ENOTTY to
		 * such requests.  These requests could be supported by
		 * adding a check for lba == 0 and consing up an appropriate
		 * PMBR.
		 */
		if (efi.dki_lba == 1) {
			efi_gpt_t gpt;
			efi_gpe_t gpe;

			bzero(&gpt, sizeof (gpt));
			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpt)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
			gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
			gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
			gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
			gpt.efi_gpt_LastUsableLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);
			gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
			gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
			gpt.efi_gpt_SizeOfPartitionEntry = LE_32(sizeof (gpe));

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = gpt.efi_gpt_FirstUsableLBA;
			gpe.efi_gpe_EndingLBA = gpt.efi_gpt_LastUsableLBA;

			/*
			 * The entry-array CRC must be computed before the
			 * header CRC, since the header covers it.
			 */
			CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
			gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);

			CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
			gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpt, efi.dki_data, sizeof (gpt), flag))
				error = EFAULT;
		} else if (efi.dki_lba == 2) {
			efi_gpe_t gpe;

			bzero(&gpe, sizeof (gpe));

			if (efi.dki_length < sizeof (gpe)) {
				mutex_exit(&zvol_state_lock);
				return (EINVAL);
			}

			UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
			gpe.efi_gpe_StartingLBA = LE_64(34ULL);
			gpe.efi_gpe_EndingLBA =
			    LE_64((zv->zv_volsize >> zv->zv_min_bs) - 1);

			mutex_exit(&zvol_state_lock);
			if (ddi_copyout(&gpe, efi.dki_data, sizeof (gpe), flag))
				error = EFAULT;
		} else {
			mutex_exit(&zvol_state_lock);
			error = EINVAL;
		}
		return (error);

	case DKIOCFLUSHWRITECACHE:
		/*
		 * Flush = commit the ZIL.  FKIOCTL means the caller is
		 * in-kernel, so 'arg' is a kernel pointer and the
		 * completion callback (if any) may be invoked directly.
		 */
		dkc = (struct dk_callback *)arg;
		zil_commit(zv->zv_zilog, UINT64_MAX, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		break;

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/* commands using these (like prtvtoc) expect ENOTSUP */
		error = ENOTSUP;
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&zvol_state_lock);
	return (error);
}

/*
 * Returns non-zero while any zvol minor exists; used to veto module
 * unload.
 */
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}

/*
 * One-time module initialization: soft-state table and global lock.
 */
void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zvol_state, sizeof (zvol_state_t), 1) == 0);
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
}

/*
 * Module teardown; mirrors zvol_init().
 */
void
zvol_fini(void)
{
	mutex_destroy(&zvol_state_lock);
	ddi_soft_state_fini(&zvol_state);
}