/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif

typedef struct vdev_disk {
	struct block_device *vd_bdev;
	krwlock_t vd_lock;
} vdev_disk_t;

/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing.  The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static unsigned zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bios */
	struct bio	*dr_bio[0];	/* Attached bios */
} dio_request_t;

static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	return (mode);
}

/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}

#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
	return (bdev->bd_contains);
}
#endif

/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity, or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'.  The efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate.  Once the partition
 * size has been increased the additional capacity will be visible using
 * bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger
 * than, or at the very least equal to, the usable capacity to prevent
 * overestimating the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev != bdev_whole(bdev)) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions.  Over-reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout keeps less than PARTITION_END_ALIGNMENT bytes after
		 * the "reserved" EFI partition: in such cases return the
		 * device usable capacity.
		 */
		available = i_size_read(bdev_whole(bdev)->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
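
/*
 * Illustrative sizing sketch for bdev_max_capacity() above.  The exact
 * values of NEW_START_BLOCK and PARTITION_END_ALIGNMENT come from the EFI
 * partition headers; 256 and 2048 sectors are assumed here purely for the
 * sake of the example:
 *
 *	(EFI_MIN_RESV_SIZE + NEW_START_BLOCK + PARTITION_END_ALIGNMENT)
 *	    << SECTOR_BITS
 *	= (16384 + 256 + 2048) * 512 bytes ~= 9.1 MiB
 *
 * so a wholedisk vdev reports an expansion ceiling roughly 9 MiB below the
 * raw capacity of the underlying device.
 */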

static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance
	 * while handling IRQs coming from a misbehaving disk device; use
	 * printk() which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open.  When expanding a
	 * partition, force re-scanning the partition table if userland did
	 * not take care of this already.  We need to do this while closed
	 * in order to get an accurate updated block device size.  Then,
	 * since udev may need to recreate the device links, increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
				bdevname(bdev_whole(bdev), disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already.  We can detect this by
				 * comparing our current physical size
				 * with that of the device.  If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online.  We
				 * fall back to rescanning the partition
				 * table from the kernel below.  However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			blkdev_put(bdev, mode | FMODE_EXCL);
		}

		if (reread_part) {
			bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				blkdev_put(bdev, mode | FMODE_EXCL);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events.  This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point.  Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up.  In
	 * practice delays have been observed to be on the order of 100ms.
	 *
	 * When ERESTARTSYS is returned it indicates the block device is
	 * a zvol which could not be opened due to the deadlock detection
	 * logic in zvol_open().  Extend the timeout and retry the open;
	 * subsequent attempts are expected to eventually succeed.
	 */
	hrtime_t start = gethrtime();
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
		bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
		    zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
		} else if (unlikely(PTR_ERR(bdev) == -ERESTARTSYS)) {
			timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
			continue;
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdev = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdev = bdev;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}

	struct request_queue *q = bdev_get_queue(vd->vd_bdev);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(vd->vd_bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = !!blk_queue_discard(q);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = !!blk_queue_discard_secure(q);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(q);

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	return (0);
}
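
/*
 * Illustrative example for the ashift calculation above (the sector sizes
 * are assumed for the sake of the example): a disk advertising 512-byte
 * logical and 4096-byte physical sectors yields *logical_ashift = 9 and
 * *physical_ashift = 12, since highbit64(512) - 1 == 9 and
 * highbit64(4096) - 1 == 12.  The MAX() against SPA_MINBLOCKSIZE (512)
 * only matters for devices reporting a sector size smaller than 512 bytes.
 */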

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		blkdev_put(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (int i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_delay_interrupt() is called only once with the
	 * correct zio.
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);
}

static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	(void) submit_bio(bio);
#else
	(void) submit_bio(bio_data_dir(bio), bio);
#endif
}

/*
 * preempt_schedule_notrace is GPL-only, which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
	defined(CONFIG_PREEMPTION) && \
	defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif

#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget(), which is inlined by
 * blkg_tryget(), to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only.  Define our
 * own version when needed which uses rcu_read_lock_sched().
 *
 * The Linux 5.17 kernel split linux/blk-cgroup.h into a private and a public
 * part, moving blkg_tryget into the private one.  Define our own version.
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY) || !defined(HAVE_BLKG_TRYGET)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#else
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol, thus inadvertently converting
 * the entire macro to GPL-only.  Provide a minimal version which always
 * assigns the request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}

#define	bio_associate_blkg vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_set_dev	vdev_bio_set_dev
#endif
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */

static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}

#ifdef HAVE_BIO_ALLOC_4ARG
#define	bio_alloc(gfp_mask, nr_iovecs) bio_alloc(NULL, nr_iovecs, 0, gfp_mask)
#endif

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size;
	int bio_count = 16;
	int error = 0;
	struct blk_plug plug;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    (u_longlong_t)io_offset,
		    (u_longlong_t)io_size,
		    (u_longlong_t)i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * Since bios can have up to BIO_MAX_PAGES=256 iovecs, each of which
	 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
	 * can cover at least 128KB and at most 1MB.  When the required number
	 * of iovecs exceeds this, we are forced to split the IO into multiple
	 * bios and wait for them all to complete.  This is likely if the
	 * recordsize property is increased beyond 1MB.  The default
	 * bio_count=16 should typically accommodate the maximum-size zio of
	 * 16MB.
	 */
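	/*
	 * Worked example (illustrative): with 256 iovecs of 4K each, a
	 * single bio maps up to 256 * 4K = 1MB, so the initial bio_count
	 * of 16 covers a 16MB zio without taking the retry path below.
	 */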

	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (int i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bios for the given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * If additional bios are required, we have to retry, but
		 * this should be rare - see the comment above.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
#ifdef HAVE_BIO_MAX_SEGS
		dr->dr_bio[i] = bio_alloc(GFP_NOIO, bio_max_segs(
		    abd_nr_pages_off(zio->io_abd, bio_size, abd_offset)));
#else
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
#endif
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bios associated with this dio */
	for (int i = 0; i < dr->dr_bio_count; i++) {
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);
	}

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	(void) vdev_disk_dio_put(dr);

	return (error);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = bio_alloc(GFP_NOIO, 0);
	/* bio_alloc() with __GFP_WAIT never returns NULL */
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}

static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	unsigned long trim_flags = 0;
	int rw, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state.  Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
		break;

	case ZIO_TYPE_READ:
		rw = READ;
		break;

	case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
		zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
		    trim_flags);

		rw_exit(&vd->vd_lock);
		zio_interrupt(zio);
		return;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, 0);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed, this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (zfs_check_media_change(vd->vd_bdev)) {
			invalidate_bdev(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};

/*
 * The zfs_vdev_scheduler module option has been deprecated.  Setting this
 * value no longer has any effect.  It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file.  The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

static const char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");

int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}

int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
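
/*
 * Usage note (illustrative, assuming the standard OpenZFS module parameter
 * layout): the min/max auto ashift setters above back module parameters,
 * so on a running system they can be inspected or tuned through sysfs,
 * for example:
 *
 *	cat /sys/module/zfs/parameters/zfs_vdev_min_auto_ashift
 *	echo 12 > /sys/module/zfs/parameters/zfs_vdev_min_auto_ashift
 *
 * Values outside the [ASHIFT_MIN, zfs_vdev_max_auto_ashift] range are
 * rejected by param_set_min_auto_ashift() with EINVAL.
 */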