/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 */

#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>

unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_request_sync = 0;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
unsigned int zvol_threads = 32;

struct zvol_state_os {
	struct gendisk		*zvo_disk;	/* generic disk */
	struct request_queue	*zvo_queue;	/* request queue */
	dev_t			zvo_dev;	/* device id */
};

taskq_t *zvol_taskq;
static struct ida zvol_ida;

typedef struct zv_request {
	zvol_state_t	*zv;
	struct bio	*bio;
	taskq_ent_t	ent;
} zv_request_t;

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
static boolean_t
zvol_is_zvol_impl(const char *path)
{
	dev_t dev = 0;

	if (vdev_lookup_bdev(path, &dev) != 0)
		return (B_FALSE);

	if (MAJOR(dev) == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}

static void
zvol_write(void *arg)
{
	zv_request_t *zvr = arg;
	struct bio *bio = zvr->bio;
	int error = 0;
	uio_t uio;

	uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	/* A bio marked as FLUSH needs to flush before the write. */
	if (bio_is_flush(bio))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/* Some requests are just for flush and nothing else. */
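	/*
	 * For example, a flush-only bio (e.g. REQ_PREFLUSH with no payload
	 * on current kernels) is fully handled by the zil_commit() above;
	 * its uio_resid is 0, so it is completed here without entering the
	 * write loop below.
	 */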
	if (uio.uio_resid == 0) {
		rw_exit(&zv->zv_suspend_lock);
		BIO_END_IO(bio, 0);
		kmem_free(zvr, sizeof (zv_request_t));
		return;
	}

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	boolean_t sync =
	    bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_WRITER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio.uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0) {
			zvol_log_write(zv, tx, off, bytes, sync);
		}
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);

	int64_t nwritten = start_resid - uio.uio_resid;
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	task_io_account_write(nwritten);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
	kmem_free(zvr, sizeof (zv_request_t));
}

static void
zvol_discard(void *arg)
{
	zv_request_t *zvr = arg;
	struct bio *bio = zvr->bio;
	zvol_state_t *zv = zvr->zv;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	boolean_t sync;
	int error = 0;
	dmu_tx_t *tx;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	if (end > zv->zv_volsize) {
		error = SET_ERROR(EIO);
		goto unlock;
	}

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
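	/*
	 * For example (hypothetical request, assuming an 8 KiB volblocksize):
	 * a discard covering bytes [4096, 20480) is rounded below to
	 * [8192, 16384), so only whole 8 KiB blocks are freed and the
	 * partial blocks at either end are left untouched.
	 */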
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		goto unlock;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}
	zfs_rangelock_exit(lr);

	if (error == 0 && sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

unlock:
	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
	kmem_free(zvr, sizeof (zv_request_t));
}

static void
zvol_read(void *arg)
{
	zv_request_t *zvr = arg;
	struct bio *bio = zvr->bio;
	int error = 0;
	uio_t uio;

	uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, READ, bio);

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_READER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio.uio_loffset)
			bytes = volsize - uio.uio_loffset;

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);

	int64_t nread = start_resid - uio.uio_resid;
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	task_io_account_read(nread);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, READ, bio, start_time);

	BIO_END_IO(bio, -error);
	kmem_free(zvr, sizeof (zv_request_t));
}

#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	struct request_queue *q = bio->bi_disk->queue;
#endif
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int rw = bio_data_dir(bio);
	zv_request_t *zvr;

	if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_zso->zvo_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)size);

		BIO_END_IO(bio, -SET_ERROR(EIO));
		goto out;
	}

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			BIO_END_IO(bio, -SET_ERROR(EROFS));
			goto out;
		}

		/*
		 * Prevents the zvol from being suspended, or the ZIL being
		 * concurrently opened. Will be released after the i/o
		 * completes.
		 */
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/*
		 * Open a ZIL if this is the first time we have written to this
		 * zvol. We protect zv->zv_zilog with zv_suspend_lock rather
		 * than zv_state_lock so that we don't need to acquire an
		 * additional lock in this path.
		 */
		if (zv->zv_zilog == NULL) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
			if (zv->zv_zilog == NULL) {
				zv->zv_zilog = zil_open(zv->zv_objset,
				    zvol_get_data);
				zv->zv_flags |= ZVOL_WRITTEN_TO;
			}
			rw_downgrade(&zv->zv_suspend_lock);
		}

		zvr = kmem_alloc(sizeof (zv_request_t), KM_SLEEP);
		zvr->zv = zv;
		zvr->bio = bio;
		taskq_init_ent(&zvr->ent);

		/*
		 * We don't want this thread to be blocked waiting for i/o to
		 * complete, so we instead wait from a taskq callback. The
		 * i/o may be a ZIL write (via zil_commit()), or a read of an
		 * indirect block, or a read of a data block (if this is a
		 * partial-block write). We will indicate that the i/o is
		 * complete by calling BIO_END_IO() from the taskq callback.
		 *
		 * This design allows the calling thread to continue and
		 * initiate more concurrent operations by calling
		 * zvol_request() again. There are typically only a small
		 * number of threads available to call zvol_request() (e.g.
		 * one per iSCSI target), so keeping the latency of
		 * zvol_request() low is important for performance.
		 *
		 * The zvol_request_sync module parameter allows this
		 * behavior to be altered, for performance evaluation
		 * purposes. If the callback blocks, setting
		 * zvol_request_sync=1 will result in much worse performance.
		 *
		 * We can have up to zvol_threads concurrent i/o's being
		 * processed for all zvols on the system. This is typically
		 * a vast improvement over the zvol_request_sync=1 behavior
		 * of one i/o at a time per zvol. However, an even better
		 * design would be for zvol_request() to initiate the zio
		 * directly, and then be notified by the zio_done callback,
		 * which would call BIO_END_IO(). Unfortunately, the DMU/ZIL
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			if (zvol_request_sync) {
				zvol_discard(zvr);
			} else {
				taskq_dispatch_ent(zvol_taskq,
				    zvol_discard, zvr, 0, &zvr->ent);
			}
		} else {
			if (zvol_request_sync) {
				zvol_write(zvr);
			} else {
				taskq_dispatch_ent(zvol_taskq,
				    zvol_write, zvr, 0, &zvr->ent);
			}
		}
	} else {
		/*
		 * The SCST driver, and possibly others, may issue READ I/Os
		 * with a length of zero bytes. These empty I/Os contain no
		 * data and require no additional handling.
		 */
		if (size == 0) {
			BIO_END_IO(bio, 0);
			goto out;
		}

		zvr = kmem_alloc(sizeof (zv_request_t), KM_SLEEP);
		zvr->zv = zv;
		zvr->bio = bio;
		taskq_init_ent(&zvr->ent);

		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/* See comment in WRITE case above. */
		if (zvol_request_sync) {
			zvol_read(zvr);
		} else {
			taskq_dispatch_ent(zvol_taskq,
			    zvol_read, zvr, 0, &zvr->ent);
		}
	}

out:
	spl_fstrans_unmark(cookie);
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
	defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
	return (BLK_QC_T_NONE);
#endif
}

static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, RW_READER);
	/*
	 * Obtain a copy of private_data under the zvol_state_lock to make
	 * sure that either the result of zvol free code path setting
	 * bdev->bd_disk->private_data to NULL is observed, or zvol_free()
	 * is not called on this zv because of the positive zv_open_count.
	 */
	zv = bdev->bd_disk->private_data;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(-ENXIO));
	}

	mutex_enter(&zv->zv_state_lock);
	/*
	 * make sure zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 0) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

	mutex_exit(&zv->zv_state_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);

	zfs_check_media_change(bdev);

	return (0);

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	mutex_exit(&zv->zv_state_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
	if (error == -EINTR) {
		error = -ERESTARTSYS;
		schedule();
	}
	return (SET_ERROR(error));
}

static void
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, RW_READER);
	zv = disk->private_data;

	mutex_enter(&zv->zv_state_lock);
	ASSERT3U(zv->zv_open_count, >, 0);
	/*
	 * make sure zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 1) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 1) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	zv->zv_open_count--;
	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
}

static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	ASSERT3U(zv->zv_open_count, >, 0);

	switch (cmd) {
	case BLKFLSBUF:
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		if (!(zv->zv_flags & ZVOL_RDONLY))
			txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

		rw_exit(&zv->zv_suspend_lock);
		break;

	case BLKZNAME:
		mutex_enter(&zv->zv_state_lock);
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		mutex_exit(&zv->zv_state_lock);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif

static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
	unsigned int mask = 0;

	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
		zv->zv_changed = 0;
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (mask);
}

static int
zvol_revalidate_disk(struct gendisk *disk)
{
	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		set_capacity(zv->zv_zso->zvo_disk,
		    zv->zv_volsize >> SECTOR_BITS);
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (0);
}

static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct gendisk *disk = zv->zv_zso->zvo_disk;

#if defined(HAVE_REVALIDATE_DISK_SIZE)
	revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
	revalidate_disk(disk);
#else
	zvol_revalidate_disk(disk);
#endif
	return (0);
}

static void
zvol_clear_private(zvol_state_t *zv)
{
	/*
	 * Cleared while holding zvol_state_lock as a writer
	 * which will prevent zvol_open() from opening it.
	 */
	zv->zv_zso->zvo_disk->private_data = NULL;
}

/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors;

	ASSERT3U(zv->zv_open_count, >, 0);

	sectors = get_capacity(zv->zv_zso->zvo_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}

static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.check_events		= zvol_check_events,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	.submit_bio		= zvol_submit_bio,
#endif
};

/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	struct zvol_state_os *zso;
	uint64_t volmode;

	if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
		return (NULL);

	if (volmode == ZFS_VOLMODE_DEFAULT)
		volmode = zvol_volmode;

	if (volmode == ZFS_VOLMODE_NONE)
		return (NULL);

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
	zv->zv_zso = zso;
	zv->zv_volmode = volmode;

	list_link_init(&zv->zv_next);
	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);

#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
#else
	zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
#endif
	if (zso->zvo_queue == NULL)
		goto out_kmem;

	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);

	/* Limit read-ahead to a single page to prevent over-prefetching. */
	blk_queue_set_read_ahead(zso->zvo_queue, 1);

	/* Disable write merging in favor of the ZIO pipeline. */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);

	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL)
		goto out_queue;

	zso->zvo_queue->queuedata = zv;
	zso->zvo_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);

	zso->zvo_disk->major = zvol_major;
	zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;

	if (volmode == ZFS_VOLMODE_DEV) {
		/*
		 * ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
		 * gendisk->minors = 1 as noted in include/linux/genhd.h.
		 * Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
		 * and suppress partition scanning (GENHD_FL_NO_PART_SCAN) by
		 * setting gendisk->flags accordingly.
		 */
		zso->zvo_disk->minors = 1;
#if defined(GENHD_FL_EXT_DEVT)
		zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
#endif
#if defined(GENHD_FL_NO_PART_SCAN)
		zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
#endif
	}
	zso->zvo_disk->first_minor = (dev & MINORMASK);
	zso->zvo_disk->fops = &zvol_ops;
	zso->zvo_disk->private_data = zv;
	zso->zvo_disk->queue = zso->zvo_queue;
	snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zso->zvo_queue);
out_kmem:
	kmem_free(zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
	return (NULL);
}

/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 * At this time, the structure is not opened by anyone, is taken off
 * the zvol_state_list, and has its private data set to NULL.
 * The zvol_state_lock is dropped.
 *
 * This function may take many milliseconds to complete (e.g. we've seen
 * it take over 256ms), due to the calls to "blk_cleanup_queue" and
 * "del_gendisk".  Thus, consumers need to be careful to account for this
 * latency when calling this function.
 */
static void
zvol_free(zvol_state_t *zv)
{

	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
	ASSERT0(zv->zv_open_count);
	ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);

	rw_destroy(&zv->zv_suspend_lock);
	zfs_rangelock_fini(&zv->zv_rangelock);

	del_gendisk(zv->zv_zso->zvo_disk);
	blk_cleanup_queue(zv->zv_zso->zvo_queue);
	put_disk(zv->zv_zso->zvo_disk);

	ida_simple_remove(&zvol_ida,
	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);

	mutex_destroy(&zv->zv_state_lock);
	dataset_kstats_destroy(&zv->zv_kstat);

	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
}

void
zvol_wait_close(zvol_state_t *zv)
{
}

/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
static int
zvol_os_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;
	int idx;
	uint64_t hash = zvol_name_hash(name);

	if (zvol_inhibit_dev)
		return (0);

	idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
	if (idx < 0)
		return (SET_ERROR(-idx));
	minor = idx << ZVOL_MINOR_BITS;

	zv = zvol_find_by_name_hash(name, hash, RW_NONE);
	if (zv) {
		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
		mutex_exit(&zv->zv_state_lock);
		ida_simple_remove(&zvol_ida, idx);
		return (SET_ERROR(EEXIST));
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}
	zv->zv_hash = hash;

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
	    (DMU_MAX_ACCESS / 4) >> 9);
	blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#ifdef QUEUE_FLAG_NONROT
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
	/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
	dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);

	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));

	/*
	 * Keep in mind that once add_disk() is called, the zvol is
	 * announced to the world, and zvol_open()/zvol_release() can
	 * be called at any time. Incidentally, add_disk() itself calls
	 * zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
	 * directly as well.
	 */
	if (error == 0) {
		rw_enter(&zvol_state_lock, RW_WRITER);
		zvol_insert(zv);
		rw_exit(&zvol_state_lock);
		add_disk(zv->zv_zso->zvo_disk);
	} else {
		ida_simple_remove(&zvol_ida, idx);
	}

	return (error);
}

static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_zso->zvo_disk);

	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/* move to new hashtable entry */
	zv->zv_hash = zvol_name_hash(zv->zv_name);
	hlist_del(&zv->zv_hlink);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
	 * the name change and fixes the symlinks. This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes. This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
	set_disk_ro(zv->zv_zso->zvo_disk, readonly);
}

static void
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
{

	set_disk_ro(zv->zv_zso->zvo_disk, flags);
}

static void
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
{

	set_capacity(zv->zv_zso->zvo_disk, capacity);
}

const static zvol_platform_ops_t zvol_linux_ops = {
	.zv_free = zvol_free,
	.zv_rename_minor = zvol_rename_minor,
	.zv_create_minor = zvol_os_create_minor,
	.zv_update_volsize = zvol_update_volsize,
	.zv_clear_private = zvol_clear_private,
	.zv_is_zvol = zvol_is_zvol_impl,
	.zv_set_disk_ro = zvol_set_disk_ro_impl,
	.zv_set_capacity = zvol_set_capacity_impl,
};

int
zvol_init(void)
{
	int error;
	int threads = MIN(MAX(zvol_threads, 1), 1024);

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		return (error);
	}
	zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
	    threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	if (zvol_taskq == NULL) {
		unregister_blkdev(zvol_major, ZVOL_DRIVER);
		return (-ENOMEM);
	}
	zvol_init_impl();
	ida_init(&zvol_ida);
	zvol_register_ops(&zvol_linux_ops);
	return (0);
}

void
zvol_fini(void)
{
	zvol_fini_impl();
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	ida_destroy(&zvol_ida);
}

/* BEGIN CSTYLED */
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");

module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");

module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");

module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
/* END CSTYLED */
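
/*
 * Example usage (hypothetical values): the parameters above can be set at
 * module load time, e.g. "modprobe zfs zvol_threads=64 zvol_request_sync=0".
 * Those registered with mode 0644 (zvol_inhibit_dev, zvol_request_sync,
 * zvol_prefetch_bytes, zvol_volmode) may also be changed at runtime through
 * /sys/module/zfs/parameters/, while the 0444 parameters (zvol_major,
 * zvol_threads, zvol_max_discard_blocks) are read-only once the module is
 * loaded.
 */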