/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>

/*
 * Vdev mirror kstats
 */
static kstat_t *mirror_ksp = NULL;

typedef struct mirror_stats {
	kstat_named_t vdev_mirror_stat_rotating_linear;
	kstat_named_t vdev_mirror_stat_rotating_offset;
	kstat_named_t vdev_mirror_stat_rotating_seek;
	kstat_named_t vdev_mirror_stat_non_rotating_linear;
	kstat_named_t vdev_mirror_stat_non_rotating_seek;

	kstat_named_t vdev_mirror_stat_preferred_found;
	kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;

static mirror_stats_t mirror_stats = {
	/* New I/O directly follows the last I/O */
	{ "rotating_linear",	KSTAT_DATA_UINT64 },
	/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
	{ "rotating_offset",	KSTAT_DATA_UINT64 },
	/* New I/O requires a random seek */
	{ "rotating_seek",	KSTAT_DATA_UINT64 },
	/* New I/O directly follows the last I/O (nonrot) */
	{ "non_rotating_linear", KSTAT_DATA_UINT64 },
	/* New I/O requires a random seek (nonrot) */
	{ "non_rotating_seek",	KSTAT_DATA_UINT64 },
	/* Preferred child vdev found */
	{ "preferred_found",	KSTAT_DATA_UINT64 },
	/* Preferred child vdev not found or equal load */
	{ "preferred_not_found", KSTAT_DATA_UINT64 },
};

#define	MIRROR_STAT(stat)	(mirror_stats.stat.value.ui64)
#define	MIRROR_INCR(stat, val)	atomic_add_64(&MIRROR_STAT(stat), val)
#define	MIRROR_BUMP(stat)	MIRROR_INCR(stat, 1)

void
vdev_mirror_stat_init(void)
{
	mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
	    "misc", KSTAT_TYPE_NAMED,
	    sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (mirror_ksp != NULL) {
		mirror_ksp->ks_data = &mirror_stats;
		kstat_install(mirror_ksp);
	}
}

void
vdev_mirror_stat_fini(void)
{
	if (mirror_ksp != NULL) {
		kstat_delete(mirror_ksp);
		mirror_ksp = NULL;
	}
}
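
/*
 * These counters are exported as a virtual kstat.  On Linux builds they
 * typically appear as /proc/spl/kstat/zfs/vdev_mirror_stats, and on
 * FreeBSD under the kstat.zfs.misc.vdev_mirror_stats sysctl tree
 * (illustrative paths; the exact location depends on the platform's
 * kstat support).
 */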

/*
 * Virtual device vector for mirroring.
 */
typedef struct mirror_child {
	vdev_t		*mc_vd;
	abd_t		*mc_abd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
	uint8_t		mc_rebuilding;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_rebuilding;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;

static const int vdev_mirror_shift = 21;

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better
 * results, as it will direct more reads to the non-rotating vdevs,
 * which are likely to deliver higher performance.
 */

/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;

/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;

static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof (int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	.vsd_free = vdev_mirror_map_free,
};
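
/*
 * Calculate a load score for the given child vdev: its pending I/O
 * queue length plus a media-aware seek penalty.  Lower scores are
 * better; vdev_mirror_child_select() prefers the children with the
 * lowest load.
 */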

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t last_offset;
	int64_t offset_diff;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering, i.e.
	 * vdev_resilver_txg != 0, because in testing overall performance
	 * was slightly worse when resilvering vdevs were penalized that
	 * way than when they were not.
	 */

	/* Fix zio_offset for leaf vdevs */
	if (vd->vdev_ops->vdev_op_leaf)
		zio_offset += VDEV_LABEL_START_SIZE;

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	last_offset = vdev_queue_last_offset(vd);

	if (vd->vdev_nonrot) {
		/* Non-rotating media. */
		if (last_offset == zio_offset) {
			MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
			return (load + zfs_vdev_mirror_non_rotating_inc);
		}

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/Os can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
		return (load + zfs_vdev_mirror_non_rotating_seek_inc);
	}

	/* Rotating media I/Os which directly follow the last I/O. */
	if (last_offset == zio_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
		return (load + zfs_vdev_mirror_rotating_inc);
	}

	/*
	 * Apply half the seek increment to I/Os within seek offset
	 * of the last I/O issued to this vdev as they should incur less
	 * of a seek increment.
	 */
	offset_diff = (int64_t)(last_offset - zio_offset);
	if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
		MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
		return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
	}

	/* Apply the full seek increment to all other I/Os. */
	MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
	return (load + zfs_vdev_mirror_rotating_seek_inc);
}
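
/*
 * Worked example with the default tunables above: a rotating child with
 * four queued I/Os whose new offset lands 512KB from the last issued
 * offset scores 4 + (5 / 2) = 6 (integer division), while an idle
 * rotating child facing a full seek scores 0 + 5 = 5 and would be
 * preferred despite the seek.
 */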

static boolean_t
vdev_mirror_rebuilding(vdev_t *vd)
{
	if (vd->vdev_ops->vdev_op_leaf && vd->vdev_rebuild_txg)
		return (B_TRUE);

	for (int i = 0; i < vd->vdev_children; i++) {
		if (vdev_mirror_rebuilding(vd->vdev_child[i])) {
			return (B_TRUE);
		}
	}

	return (B_FALSE);
}

/*
 * Avoid inlining the function to keep vdev_mirror_io_start(), which
 * is this function's only caller, as small as possible on the stack.
 */
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;
		dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
		dva_t dva_copy[SPA_DVAS_PER_BP];

		/*
		 * The sequential scrub code sorts and issues all DVAs
		 * of a bp separately.  Each of these I/Os includes all
		 * original DVA copies so that repairs can be performed
		 * in the event of an error, but we only actually want
		 * to check the first DVA since the others will be
		 * checked by their respective sorted I/Os.  Only if we
		 * hit an error will we try all DVAs upon retrying.
		 *
		 * Note: This check is safe even if the user switches
		 * from a legacy scrub to a sequential one in the middle
		 * of processing, since scn_is_sorted isn't updated until
		 * all outstanding I/Os from the previous scrub pass
		 * complete.
		 */
		if ((zio->io_flags & ZIO_FLAG_SCRUB) &&
		    !(zio->io_flags & ZIO_FLAG_IO_RETRY) &&
		    dsl_scan_scrubbing(spa->spa_dsl_pool) &&
		    scn->scn_is_sorted) {
			c = 1;
		} else {
			c = BP_GET_NDVAS(zio->io_bp);
		}

		/*
		 * If the pool cannot be written to, then infer that some
		 * DVAs might be invalid or point to vdevs that do not exist.
		 * We skip them.
		 */
		if (!spa_writeable(spa)) {
			ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);
			int j = 0;
			for (int i = 0; i < c; i++) {
				if (zfs_dva_valid(spa, &dva[i], zio->io_bp))
					dva_copy[j++] = dva[i];
			}
			if (j == 0) {
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
			if (j < c) {
				dva = dva_copy;
				c = j;
			}
		}

		mm = vdev_mirror_map_alloc(c, B_FALSE, B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];

			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
			if (mc->mc_vd == NULL) {
				kmem_free(mm, vdev_mirror_map_size(
				    mm->mm_children));
				zio->io_vsd = NULL;
				zio->io_error = ENXIO;
				return (NULL);
			}
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is "replacing-1"
		 *    or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool.  That behavior is intentional.
		 * It helps implement the policy that a spare should be
		 * automatically removed from the pool after the user replaces
		 * the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized.  But we shouldn't be resilvering during a spa
		 * load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;

			if (vdev_mirror_rebuilding(mc->mc_vd))
				mm->mm_rebuilding = mc->mc_rebuilding = B_TRUE;
		}
	}

	return (mm);
}
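
/*
 * Open all children and derive the mirror's geometry: the usable size is
 * that of the smallest child (e.g. mirroring a 1T and a 2T device yields
 * a 1T mirror) and the required alignment is the largest child ashift.
 */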

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		if (vdev_children_are_offline(vd))
			vd->vdev_stat.vs_aux = VDEV_AUX_CHILDREN_OFFLINE;
		else
			vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}
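
/*
 * With vdev_mirror_shift of 21, the offset-based selection below changes
 * the chosen child once every 2MB of logical offset, spreading reads
 * across equally loaded children in coarse slices.
 */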

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = random_in_range(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSDs, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}

static boolean_t
vdev_mirror_child_readable(mirror_child_t *mc)
{
	vdev_t *vd = mc->mc_vd;

	if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
		return (vdev_draid_readable(vd, mc->mc_offset));
	else
		return (vdev_readable(vd));
}

static boolean_t
vdev_mirror_child_missing(mirror_child_t *mc, uint64_t txg, uint64_t size)
{
	vdev_t *vd = mc->mc_vd;

	if (vd->vdev_top != NULL && vd->vdev_top->vdev_ops == &vdev_draid_ops)
		return (vdev_draid_missing(vd, mc->mc_offset, txg, size));
	else
		return (vdev_dtl_contains(vd, DTL_MISSING, txg, size));
}

/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.  If we can't, try the read on
 * any vdev we haven't already tried.
 *
 * Distributed spares are an exception to the above load rule.  They are
 * always preferred in order to detect gaps in the distributed spare which
 * are created when another disk in the dRAID fails.  In order to restore
 * redundancy those gaps must be read to trigger the required repair I/O.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (mc->mc_vd == NULL ||
		    !vdev_mirror_child_readable(mc)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_mirror_child_missing(mc, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		if (mc->mc_vd->vdev_ops == &vdev_draid_spare_ops) {
			mm->mm_preferred[0] = c;
			mm->mm_preferred_cnt = 1;
			break;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_found);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
		return (vdev_mirror_preferred_child_randomize(zio));
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried)
			return (c);
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}
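
/*
 * For example, in a three-way mirror where two healthy children tie at
 * the lowest load, both land in mm_preferred[] and
 * vdev_mirror_preferred_child_randomize() chooses between them; a single
 * lowest-load child is returned directly and counted as preferred_found.
 */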

static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);
	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;

	if (mm == NULL) {
		ASSERT(!spa_trust_config(zio->io_spa));
		ASSERT(zio->io_type == ZIO_TYPE_READ);
		zio_execute(zio);
		return;
	}

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering) {
			/*
			 * For scrubbing reads we need to issue reads to all
			 * children.  One child can reuse the parent buffer,
			 * but for the others we have to allocate separate
			 * ones to verify checksums if io_bp is non-NULL,
			 * or to compare them in vdev_mirror_io_done()
			 * otherwise.
			 */
			boolean_t first = B_TRUE;
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];

				/* Don't issue ZIOs to offline children */
				if (!vdev_mirror_child_readable(mc)) {
					mc->mc_error = SET_ERROR(ENXIO);
					mc->mc_tried = 1;
					mc->mc_skipped = 1;
					continue;
				}

				mc->mc_abd = first ? zio->io_abd :
				    abd_alloc_sametype(zio->io_abd,
				    zio->io_size);
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset, mc->mc_abd,
				    zio->io_size, zio->io_type,
				    zio->io_priority, 0,
				    vdev_mirror_child_done, mc));
				first = B_FALSE;
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE);

		/*
		 * Writes go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		c++;

		/*
		 * When sequentially resilvering only issue write repair
		 * I/Os to the vdev which is being rebuilt since performance
		 * is limited by the slowest child.  This is an issue for
		 * faster replacement devices such as distributed spares.
		 */
		if ((zio->io_priority == ZIO_PRIORITY_REBUILD) &&
		    (zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
		    !(zio->io_flags & ZIO_FLAG_SCRUB) &&
		    mm->mm_rebuilding && !mc->mc_rebuilding) {
			continue;
		}

		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
	}

	zio_execute(zio);
}

static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
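
/*
 * Errors from children that were actually tried (error[0]) take
 * precedence over speculative errors (error[1]) such as the ESTALE
 * recorded for children skipped on DTL grounds; e.g. a hard ENXIO from
 * a tried child is reported rather than a skipped child's ESTALE.
 */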

static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;
	int last_good_copy = -1;

	if (mm == NULL)
		return;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			last_good_copy = c;
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	if (zio->io_flags & ZIO_FLAG_SCRUB && !mm->mm_resilvering) {
		abd_t *best_abd = NULL;
		if (last_good_copy >= 0)
			best_abd = mm->mm_child[last_good_copy].mc_abd;

		/*
		 * If we're scrubbing but don't have a BP available (because
		 * this vdev is under a raidz or draid vdev) then the best we
		 * can do is compare all of the copies read.  If they're not
		 * identical then return a checksum error and the most likely
		 * correct data.  The raidz code will issue a repair I/O if
		 * possible.
		 */
		if (zio->io_bp == NULL) {
			ASSERT(zio->io_vd->vdev_ops == &vdev_replacing_ops ||
			    zio->io_vd->vdev_ops == &vdev_spare_ops);

			abd_t *pref_abd = NULL;
			for (c = 0; c < last_good_copy; c++) {
				mc = &mm->mm_child[c];
				if (mc->mc_error || !mc->mc_tried)
					continue;

				if (abd_cmp(mc->mc_abd, best_abd) != 0)
					zio->io_error = SET_ERROR(ECKSUM);

				/*
				 * The distributed spare is always preferred
				 * by vdev_mirror_child_select() so it's
				 * considered to be the best candidate.
				 */
				if (pref_abd == NULL &&
				    mc->mc_vd->vdev_ops ==
				    &vdev_draid_spare_ops)
					pref_abd = mc->mc_abd;

				/*
				 * In the absence of a preferred copy, use
				 * the parent pointer to avoid a memory copy.
				 */
				if (mc->mc_abd == zio->io_abd)
					best_abd = mc->mc_abd;
			}
			if (pref_abd)
				best_abd = pref_abd;
		} else {
			/*
			 * If we have a BP available, then checksums are
			 * already verified and we just need a buffer
			 * with valid data, preferring the parent's to
			 * avoid a memory copy.
			 */
			for (c = 0; c < last_good_copy; c++) {
				mc = &mm->mm_child[c];
				if (mc->mc_error || !mc->mc_tried)
					continue;
				if (mc->mc_abd == zio->io_abd) {
					best_abd = mc->mc_abd;
					break;
				}
			}
		}

		if (best_abd && best_abd != zio->io_abd)
			abd_copy(zio->io_abd, best_abd, zio->io_size);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			if (mc->mc_abd != zio->io_abd)
				abd_free(mc->mc_abd);
			mc->mc_abd = NULL;
		}
	}

	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				vdev_ops_t *ops = mc->mc_vd->vdev_ops;

				if (mc->mc_tried)
					continue;
				/*
				 * We didn't try this child.  We need to
				 * repair it if:
				 * 1. it's a scrub (in which case we have
				 *    tried everything that was healthy)
				 *	- or -
				 * 2. it's an indirect or distributed spare
				 *    vdev (in which case it could point to any
				 *    other vdev, which might have a bad DTL)
				 *	- or -
				 * 3. the DTL indicates that this data is
				 *    missing from this vdev
				 */
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    ops != &vdev_indirect_ops &&
				    ops != &vdev_draid_spare_ops &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_abd, zio->io_size, ZIO_TYPE_WRITE,
			    zio->io_priority == ZIO_PRIORITY_REBUILD ?
			    ZIO_PRIORITY_REBUILD : ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children) {
		if (vdev_children_are_offline(vd)) {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_OFFLINE,
			    VDEV_AUX_CHILDREN_OFFLINE);
		} else {
			vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
			    VDEV_AUX_NO_REPLICAS);
		}
	} else if (degraded + faulted != 0) {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	} else {
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
	}
}
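
/*
 * For example, a two-way mirror with one faulted and one healthy child
 * is reported as DEGRADED above, while a mirror whose children are all
 * administratively offline is reported as OFFLINE rather than CANT_OPEN.
 */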

/*
 * Return the maximum asize for a rebuild zio in the provided range.
 */
static uint64_t
vdev_mirror_rebuild_asize(vdev_t *vd, uint64_t start, uint64_t asize,
    uint64_t max_segment)
{
	(void) start;

	uint64_t psize = MIN(P2ROUNDUP(max_segment, 1 << vd->vdev_ashift),
	    SPA_MAXBLOCKSIZE);

	return (MIN(asize, vdev_psize_to_asize(vd, psize)));
}

vdev_ops_t vdev_mirror_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_MIRROR,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_REPLACING,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_mirror_open,
	.vdev_op_close = vdev_mirror_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_mirror_io_start,
	.vdev_op_io_done = vdev_mirror_io_done,
	.vdev_op_state_change = vdev_mirror_state_change,
	.vdev_op_need_resilver = vdev_default_need_resilver,
	.vdev_op_hold = NULL,
	.vdev_op_rele = NULL,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = vdev_mirror_rebuild_asize,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_SPARE,	/* name of this vdev type */
	.vdev_op_leaf = B_FALSE			/* not a leaf vdev */
};
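
/*
 * vdev_replacing_ops and vdev_spare_ops above reuse the mirror
 * implementation wholesale; only vdev_op_type differs, since replacing
 * and spare vdevs behave as temporary mirrors over the old and new
 * devices.
 */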

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_inc, INT, ZMOD_RW,
	"Rotating media load increment for non-seeking I/Os");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_inc, INT,
	ZMOD_RW, "Rotating media load increment for seeking I/Os");

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, rotating_seek_offset, INT,
	ZMOD_RW,
	"Offset in bytes from the last I/O which triggers "
	"a reduced rotating media seek increment");
/* END CSTYLED */

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_inc, INT,
	ZMOD_RW, "Non-rotating media load increment for non-seeking I/Os");

ZFS_MODULE_PARAM(zfs_vdev_mirror, zfs_vdev_mirror_, non_rotating_seek_inc, INT,
	ZMOD_RW, "Non-rotating media load increment for seeking I/Os");
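
/*
 * Tuning example: on Linux builds these parameters are typically exposed
 * under /sys/module/zfs/parameters/ (see zfs(4) for the authoritative
 * interface), so a mixed mirror of SSDs and HDDs can be steered toward
 * the SSDs with, e.g.:
 *
 *	echo 0 > /sys/module/zfs/parameters/zfs_vdev_mirror_non_rotating_seek_inc
 *
 * as suggested by the load configuration comment near the top of this
 * file.
 */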