/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *
 * Copyright (c) 2018, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2024 by Delphix. All rights reserved.
 */

#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>

/*
 * This file contains the sequential reconstruction implementation for
 * resilvering. This form of resilvering is internally referred to as device
 * rebuild to avoid conflating it with the traditional healing reconstruction
 * performed by the dsl scan code.
 *
 * When replacing a device, or scrubbing the pool, ZFS has historically used
 * a process called resilvering which is a form of healing reconstruction.
 * This approach has the advantage that as blocks are read from disk their
 * checksums can be immediately verified and the data repaired. Unfortunately,
 * it also results in a random IO pattern to the disk even when extra care
 * is taken to sequentialize the IO as much as possible. This substantially
 * increases the time required to resilver the pool and restore redundancy.
 *
 * For mirrored devices it's possible to implement an alternate sequential
 * reconstruction strategy when resilvering. Sequential reconstruction
 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
 * order without verifying the checksum. After this phase completes a second
 * scrub phase is started to verify all of the checksums. This two phase
 * process will take longer than the healing reconstruction described above.
 * However, it has the advantage that after the first phase of reconstruction
 * completes redundancy has been restored. At this point the pool can incur
 * another device failure without risking data loss.
 *
 * There are a few noteworthy limitations and other advantages of resilvering
 * using sequential reconstruction vs healing reconstruction.
 *
 * Limitations:
 *
 *   - Sequential reconstruction is not possible on RAIDZ due to its
 *     variable stripe width. Note dRAID uses a fixed stripe width which
 *     avoids this issue, but comes at the expense of some usable capacity.
 *
 *   - Block checksums are not verified during sequential reconstruction.
 *     Similar to traditional RAID the parity/mirror data is reconstructed
 *     but cannot be immediately double checked. For this reason when the
 *     last active resilver completes the pool is automatically scrubbed
 *     by default.
 *
 *   - Deferred resilvers using sequential reconstruction are not currently
 *     supported. When adding another vdev to an active top-level resilver
 *     it must be restarted.
 *
 * Advantages:
 *
 *   - Sequential reconstruction is performed in LBA order which may be faster
 *     than healing reconstruction particularly when using HDDs (or
 *     especially with SMR devices). Only allocated capacity is resilvered.
 *
 *   - Sequential reconstruction is not constrained by ZFS block boundaries.
 *     This allows it to issue larger IOs to disk which span multiple blocks
 *     allowing all of these logical blocks to be repaired with a single IO.
 *
 *   - Unlike a healing resilver or scrub which are pool wide operations,
 *     sequential reconstruction is handled by the top-level vdevs. This
 *     allows for it to be started or canceled on a top-level vdev without
 *     impacting any other top-level vdevs in the pool.
 *
 *   - Data only referenced by a pool checkpoint will be repaired because
 *     that space is reflected in the space maps. This differs for a
 *     healing resilver or scrub which will not repair that data.
 */
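/*
 * For reference, from user space a sequential reconstruction is requested
 * with the -s flag to "zpool attach" or "zpool replace", for example
 * (placeholder names):
 *
 *	zpool replace -s <pool> <old-device> <new-device>
 *
 * When the last active rebuild completes, the verifying scrub described
 * above is started automatically unless zfs_rebuild_scrub_enabled (below)
 * has been cleared.
 */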

/*
 * Size of rebuild reads; defaults to 1MiB per data disk and is capped at
 * SPA_MAXBLOCKSIZE.
 */
static uint64_t zfs_rebuild_max_segment = 1024 * 1024;

/*
 * Maximum number of bytes issued in parallel per leaf vdev by a
 * sequential resilver. We attempt to strike a balance here between keeping
 * the vdev queues full of I/Os at all times and not overflowing the queues
 * to cause long latency, which would cause long txg sync times.
 *
 * A large default value can be safely used here because the default target
 * segment size is also large (zfs_rebuild_max_segment=1M). This helps keep
 * the queue depth short.
 *
 * 64MB was observed to deliver the best performance and set as the default.
 * Testing was performed with a 106-drive dRAID HDD pool (draid2:11d:106c)
 * and a rebuild rate of 1.2GB/s was measured to the distributed spare.
 * Smaller values were unable to fully saturate the available pool I/O.
 */
static uint64_t zfs_rebuild_vdev_limit = 64 << 20;

/*
 * Automatically start a pool scrub when the last active sequential resilver
 * completes in order to verify the checksums of all blocks which have been
 * resilvered. This option is enabled by default and is strongly recommended.
 */
static int zfs_rebuild_scrub_enabled = 1;
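
/*
 * All three of the tunables above are exposed as module parameters (see the
 * ZFS_MODULE_PARAM() declarations at the end of this file) and may be
 * adjusted at runtime. As an illustrative example on Linux, the rebuild read
 * segment size could be raised to 4MiB with:
 *
 *	echo 4194304 > /sys/module/zfs/parameters/zfs_rebuild_max_segment
 */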

/*
 * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
 */
static __attribute__((noreturn)) void vdev_rebuild_thread(void *arg);
static void vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx);

/*
 * Clear the per-vdev rebuild bytes value for a vdev tree.
 */
static void
clear_rebuild_bytes(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		clear_rebuild_bytes(vd->vdev_child[i]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_rebuild_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Determines whether a vdev_rebuild_thread() should be stopped.
 */
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
	return (!vdev_writeable(vd) || vd->vdev_removing ||
	    vd->vdev_rebuild_exit_wanted ||
	    vd->vdev_rebuild_cancel_wanted ||
	    vd->vdev_rebuild_reset_wanted);
}

/*
 * Determine if the rebuild should be canceled. This may happen when all
 * vdevs with MISSING DTLs are detached.
 */
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * The sync task for updating the on-disk state of a rebuild. This is
 * scheduled by vdev_rebuild_range().
 */
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t txg = dmu_tx_get_txg(tx);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
		vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
		vr->vr_scan_offset[txg & TXG_MASK] = 0;
	}

	vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
	    NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	mutex_exit(&vd->vdev_rebuild_lock);
}
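
/*
 * A note on the bookkeeping above: vr_scan_offset[] is indexed by
 * (txg & TXG_MASK). In vdev_rebuild_range() the first rebuild I/O issued in
 * an open txg schedules this sync task for that txg, and each issued range
 * then advances the per-txg offset to its end. When the txg syncs,
 * vdev_rebuild_update_sync() copies that offset into the persistent
 * vrp_last_offset. If the top-level vdev becomes unwritable,
 * vdev_rebuild_cb() rolls the per-txg offset back to the offset of the
 * failed I/O so the recorded progress never passes an unrepaired region.
 */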

/*
 * Initialize the on-disk state for a new rebuild, start the rebuild thread.
 */
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_rebuilding);

	spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	mutex_enter(&vd->vdev_rebuild_lock);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_start_time = gethrestime_sec();
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/*
	 * Rebuilds are currently only used when replacing a device, in which
	 * case there must be DTL_MISSING entries. In the future, we could
	 * allow rebuilds to be used in a way similar to a scrub. This would
	 * be useful because it would allow us to rebuild the space used by
	 * pool checkpoints.
	 */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu started",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, const char *name)
{
	nvlist_t *aux = fnvlist_alloc();

	fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
	spa_event_notify(spa, vd, aux, name);
	nvlist_free(aux);
}

/*
 * Called to request that a new rebuild be started. The feature will remain
 * active for the duration of the rebuild, then revert to the enabled state.
 */
static void
vdev_rebuild_initiate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd->vdev_top == vd);
	ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
	ASSERT(!vd->vdev_rebuilding);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	vd->vdev_rebuilding = B_TRUE;

	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, tx);
	dmu_tx_commit(tx);

	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}
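
/*
 * Taken together, the sync tasks in this file implement a small state
 * machine for vrp_rebuild_state: vdev_rebuild_initiate_sync() moves it to
 * VDEV_REBUILD_ACTIVE, vdev_rebuild_complete_sync() and
 * vdev_rebuild_cancel_sync() move it to VDEV_REBUILD_COMPLETE and
 * VDEV_REBUILD_CANCELED respectively, and vdev_rebuild_clear_sync() zeroes
 * the record (reported as VDEV_REBUILD_NONE). The reference on the
 * SPA_FEATURE_DEVICE_REBUILD feature is held only while the state is ACTIVE.
 */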

/*
 * Update the on-disk state to completed when a rebuild finishes.
 */
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	/*
	 * Handle a second device failure if it occurs after all rebuild I/O
	 * has completed but before this sync task has been executed.
	 */
	if (vd->vdev_rebuild_reset_wanted) {
		mutex_exit(&vd->vdev_rebuild_lock);
		vdev_rebuild_reset_sync(arg, tx);
		return;
	}

	vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu complete",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	/* Handles detaching of spares */
	spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * While we're in syncing context take the opportunity to
	 * set up the scrub when there are no more active rebuilds.
	 */
	setup_sync_arg_t setup_sync_arg = {
		.func = POOL_SCAN_SCRUB,
		.txgstart = 0,
		.txgend = 0,
	};
	if (dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0 &&
	    zfs_rebuild_scrub_enabled) {
		dsl_scan_setup_sync(&setup_sync_arg, tx);
	}

	cv_broadcast(&vd->vdev_rebuild_cv);

	/* Clear recent error events (i.e. duplicate events tracking) */
	zfs_ereport_clear(spa, NULL);
}

/*
 * Update the on-disk state to canceled when a rebuild finishes.
 */
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu canceled",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	vd->vdev_rebuild_cancel_wanted = B_FALSE;
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Resets the progress of a running rebuild. This will occur when a new
 * vdev is added to the rebuild.
 */
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	vrp->vrp_last_offset = 0;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_bytes_scanned = 0;
	vrp->vrp_bytes_issued = 0;
	vrp->vrp_bytes_rebuilt = 0;
	vrp->vrp_bytes_est = 0;
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/* See vdev_rebuild_initiate_sync comment */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu reset",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	vd->vdev_rebuild_reset_wanted = B_FALSE;
	ASSERT(vd->vdev_rebuilding);

	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Clear the last rebuild status.
 */
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	objset_t *mos = spa_meta_objset(spa);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
	    vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return;
	}

	clear_rebuild_bytes(vd);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);

	if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
		VERIFY0(zap_update(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
		    REBUILD_PHYS_ENTRIES, vrp, tx));
	}

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * The zio_done_func_t callback for each rebuild I/O issued. It's responsible
 * for updating the rebuild stats and limiting the number of in-flight I/Os.
 */
static void
vdev_rebuild_cb(zio_t *zio)
{
	vdev_rebuild_t *vr = zio->io_private;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vdev_t *vd = vr->vr_top_vdev;

	mutex_enter(&vr->vr_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the top-level vdev was unavailable.
		 * Attempt to roll back to the last completed offset, in order
		 * to resume from the correct location if the rebuild is later
		 * resumed. (This works because spa_sync waits on spa_txg_zio
		 * before it runs sync tasks.)
		 */
		uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else if (zio->io_error) {
		vrp->vrp_errors++;
	}

	abd_free(zio->io_abd);

	ASSERT3U(vr->vr_bytes_inflight, >, 0);
	vr->vr_bytes_inflight -= zio->io_size;
	cv_broadcast(&vr->vr_io_cv);
	mutex_exit(&vr->vr_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Initialize a block pointer that can be used to read the given segment
 * for sequential rebuild.
 */
static void
vdev_rebuild_blkptr_init(blkptr_t *bp, vdev_t *vd, uint64_t start,
    uint64_t asize)
{
	ASSERT(vd->vdev_ops == &vdev_draid_ops ||
	    vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

	uint64_t psize = vd->vdev_ops == &vdev_draid_ops ?
	    vdev_draid_asize_to_psize(vd, asize) : asize;

	BP_ZERO(bp);

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], start);
	DVA_SET_GANG(&bp->blk_dva[0], 0);
	DVA_SET_ASIZE(&bp->blk_dva[0], asize);

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(bp, psize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}
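
/*
 * The block pointer constructed above carries a single DVA naming only the
 * top-level vdev id, offset, and allocated size, with compression and
 * checksumming disabled. vdev_rebuild_range() below hands it to zio_read()
 * with ZIO_FLAG_RAW | ZIO_FLAG_RESILVER at ZIO_PRIORITY_REBUILD, which lets
 * the mirror/dRAID vdev code repair children with missing data without
 * attempting any checksum verification.
 */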

/*
 * Issues a rebuild I/O and takes care of rate limiting the number of queued
 * rebuild I/Os. The provided start and size must be properly aligned for the
 * top-level vdev type being rebuilt.
 */
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
	uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;
	blkptr_t blk;

	ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
	ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);

	vr->vr_pass_bytes_scanned += size;
	vr->vr_rebuild_phys.vrp_bytes_scanned += size;

	/*
	 * Rebuild the data in this range by constructing a special block
	 * pointer. It has no relation to any existing blocks in the pool.
	 * However, by disabling checksum verification and issuing a scrub IO
	 * we can reconstruct and repair any children with missing data.
	 */
	vdev_rebuild_blkptr_init(&blk, vd, start, size);
	uint64_t psize = BP_GET_PSIZE(&blk);

	if (!vdev_dtl_need_resilver(vd, &blk.blk_dva[0], psize, TXG_UNKNOWN)) {
		vr->vr_pass_bytes_skipped += size;
		return (0);
	}

	mutex_enter(&vr->vr_io_lock);

	/* Limit in flight rebuild I/Os */
	while (vr->vr_bytes_inflight >= vr->vr_bytes_inflight_max)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	vr->vr_bytes_inflight += psize;
	mutex_exit(&vr->vr_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	/* This is the first I/O for this txg. */
	if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
		vr->vr_scan_offset[txg & TXG_MASK] = start;
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_rebuild_update_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	}

	/* When exiting write out our progress. */
	if (vdev_rebuild_should_stop(vd)) {
		mutex_enter(&vr->vr_io_lock);
		vr->vr_bytes_inflight -= psize;
		mutex_exit(&vr->vr_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_rebuild_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_rebuild_lock);
	dmu_tx_commit(tx);

	vr->vr_scan_offset[txg & TXG_MASK] = start + size;
	vr->vr_pass_bytes_issued += size;
	vr->vr_rebuild_phys.vrp_bytes_issued += size;

	zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, &blk,
	    abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
	    ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_RESILVER, NULL));

	return (0);
}
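
/*
 * A note on the chunking performed below: vdev_op_rebuild_asize() is the
 * per-vdev-type hook which bounds each chunk. For a mirror it clamps each
 * chunk to roughly zfs_rebuild_max_segment (aligned to the vdev's ashift and
 * capped at SPA_MAXBLOCKSIZE), while dRAID scales the segment by the number
 * of data columns and keeps chunks stripe-aligned, hence the "1MiB per data
 * disk" description of zfs_rebuild_max_segment above.
 */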

/*
 * Issues rebuild I/Os for all ranges in the provided vr->vr_scan_tree range
 * tree.
 */
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
	vdev_t *vd = vr->vr_top_vdev;
	zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
	zfs_btree_index_t idx;
	int error;

	for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
	    rs = zfs_btree_next(t, &idx, &idx)) {
		uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
		uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;

		/*
		 * zfs_scan_suspend_progress can be set to disable rebuild
		 * progress for testing. See comment in dsl_scan_sync().
		 */
		while (zfs_scan_suspend_progress &&
		    !vdev_rebuild_should_stop(vd)) {
			delay(hz);
		}

		while (size > 0) {
			uint64_t chunk_size;

			/*
			 * Split range into legally-sized logical chunks
			 * given the constraints of the top-level vdev
			 * being rebuilt (dRAID or mirror).
			 */
			ASSERT3P(vd->vdev_ops, !=, NULL);
			chunk_size = vd->vdev_ops->vdev_op_rebuild_asize(vd,
			    start, size, zfs_rebuild_max_segment);

			error = vdev_rebuild_range(vr, start, chunk_size);
			if (error != 0)
				return (error);

			size -= chunk_size;
			start += chunk_size;
		}
	}

	return (0);
}

/*
 * Calculates the estimated capacity which remains to be scanned. Since
 * we traverse the pool in metaslab order only allocated capacity beyond
 * the vrp_last_offset needs to be considered. All lower offsets must have
 * already been rebuilt and are thus already included in vrp_bytes_scanned.
 */
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t bytes_est = vrp->vrp_bytes_scanned;

	if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
		return;

	for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];

		mutex_enter(&msp->ms_lock);
		bytes_est += metaslab_allocated_space(msp);
		mutex_exit(&msp->ms_lock);
	}

	vrp->vrp_bytes_est = bytes_est;
}

/*
 * Load from disk the top-level vdev's rebuild information.
 */
int
vdev_rebuild_load(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	spa_t *spa = vd->vdev_spa;
	int err = 0;

	mutex_enter(&vd->vdev_rebuild_lock);
	vd->vdev_rebuilding = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
		mutex_exit(&vd->vdev_rebuild_lock);
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(vd->vdev_top == vd);

	err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp);

	/*
	 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
	 * not prevent a pool from being imported. Clear the rebuild
	 * status allowing a new resilver/rebuild to be started.
	 */
	if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	} else if (err) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return (err);
	}

	vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
	vr->vr_top_vdev = vd;

	mutex_exit(&vd->vdev_rebuild_lock);

	return (0);
}
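
/*
 * A worked example of the in-flight limit computed in vdev_rebuild_thread()
 * below (the numbers are illustrative only): with arc_c_max = 16GiB, a
 * single top-level draid2:11d:106c vdev (106 children), and the default
 * zfs_rebuild_vdev_limit of 64MiB:
 *
 *	limit = (16GiB / 2) / 1 top-level vdev = 8GiB
 *	vr_bytes_inflight_max = MIN(8GiB, MAX(1MiB, 64MiB * 106)) = 6.625GiB
 *
 * A pool with more top-level vdevs or a smaller ARC would instead be capped
 * by the arc_c_max / 2 term.
 */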

/*
 * Each scan thread is responsible for rebuilding a top-level vdev. The
 * rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
 */
static __attribute__((noreturn)) void
vdev_rebuild_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int error = 0;

	/*
	 * If there's a scrub in progress, request that it be stopped. This
	 * is not required for a correct rebuild, but we do want rebuilds to
	 * emulate the resilver behavior as much as possible.
	 */
	dsl_pool_t *dsl = spa_get_dsl(spa);
	if (dsl_scan_scrubbing(dsl))
		dsl_scan_cancel(dsl);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT3P(vd->vdev_top, ==, vd);
	ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
	ASSERT(vd->vdev_rebuilding);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
	ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);

	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vr->vr_top_vdev = vd;
	vr->vr_scan_msp = NULL;
	vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);

	vr->vr_pass_start_time = gethrtime();
	vr->vr_pass_bytes_scanned = 0;
	vr->vr_pass_bytes_issued = 0;
	vr->vr_pass_bytes_skipped = 0;

	uint64_t update_est_time = gethrtime();
	vdev_rebuild_update_bytes_est(vd, 0);

	clear_rebuild_bytes(vr->vr_top_vdev);

	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * Systematically walk the metaslabs and issue rebuild I/Os for
	 * all ranges in the allocated space map.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];
		vr->vr_scan_msp = msp;

		/*
		 * Calculate the max number of in-flight bytes for top-level
		 * vdev scanning operations (minimum 1MB, maximum 1/2 of
		 * arc_c_max shared by all top-level vdevs). Limits for the
		 * issuing phase are done per top-level vdev and are handled
		 * separately.
		 */
		uint64_t limit = (arc_c_max / 2) / MAX(rvd->vdev_children, 1);
		vr->vr_bytes_inflight_max = MIN(limit, MAX(1ULL << 20,
		    zfs_rebuild_vdev_limit * vd->vdev_children));

		/*
		 * Removal of vdevs from the vdev tree may eliminate the need
		 * for the rebuild, in which case it should be canceled. The
		 * vdev_rebuild_cancel_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (vdev_rebuild_should_cancel(vd)) {
			vd->vdev_rebuild_cancel_wanted = B_TRUE;
			error = EINTR;
			break;
		}

		ASSERT0(range_tree_space(vr->vr_scan_tree));

		/* Disable any new allocations to this metaslab */
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * If there are outstanding allocations wait for them to be
		 * synced. This is needed to ensure all allocated ranges are
		 * on disk and therefore will be rebuilt.
		 */
		for (int j = 0; j < TXG_SIZE; j++) {
			if (range_tree_space(msp->ms_allocating[j])) {
				mutex_exit(&msp->ms_lock);
				mutex_exit(&msp->ms_sync_lock);
				txg_wait_synced(dsl, 0);
				mutex_enter(&msp->ms_sync_lock);
				mutex_enter(&msp->ms_lock);
				break;
			}
		}

		/*
		 * When a metaslab has been allocated from, read its allocated
		 * ranges from the space map object into the vr_scan_tree.
		 * Then add inflight / unflushed ranges and remove inflight /
		 * unflushed frees. This is the minimum range to be rebuilt.
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    vr->vr_scan_tree, SM_ALLOC));

			for (int i = 0; i < TXG_SIZE; i++) {
				ASSERT0(range_tree_space(
				    msp->ms_allocating[i]));
			}

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, vr->vr_scan_tree);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, vr->vr_scan_tree);

			/*
			 * Remove ranges which have already been rebuilt based
			 * on the last offset. This can happen when restarting
			 * a scan after exporting and re-importing the pool.
			 */
			range_tree_clear(vr->vr_scan_tree, 0,
			    vrp->vrp_last_offset);
		}

		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		/*
		 * To provide an accurate estimate re-calculate the estimated
		 * size every 5 minutes to account for recent allocations and
		 * frees made to space maps which have not yet been rebuilt.
		 */
		if (gethrtime() > update_est_time + SEC2NSEC(300)) {
			update_est_time = gethrtime();
			vdev_rebuild_update_bytes_est(vd, i);
		}

		/*
		 * Walk the allocated space map and issue the rebuild I/O.
		 */
		error = vdev_rebuild_ranges(vr);
		range_tree_vacate(vr->vr_scan_tree, NULL, NULL);

		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		metaslab_enable(msp, B_FALSE, B_FALSE);

		if (error != 0)
			break;
	}

	range_tree_destroy(vr->vr_scan_tree);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/* Wait for any remaining rebuild I/O to complete */
	mutex_enter(&vr->vr_io_lock);
	while (vr->vr_bytes_inflight > 0)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	mutex_exit(&vr->vr_io_lock);

	mutex_destroy(&vr->vr_io_lock);
	cv_destroy(&vr->vr_io_cv);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (error == 0) {
		/*
		 * After a successful rebuild clear the DTLs of all ranges
		 * which were missing when the rebuild was started. These
		 * ranges must have been rebuilt as a consequence of rebuilding
		 * all allocated space. Note that unlike a scrub or resilver
		 * the rebuild operation will reconstruct data only referenced
		 * by a pool checkpoint. See the dsl_scan_done() comments.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_cancel_wanted) {
		/*
		 * The rebuild operation was canceled. This will occur when
		 * a device participating in the rebuild is detached.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_reset_wanted) {
		/*
		 * Reset the running rebuild without canceling and restarting
		 * it. This will occur when a new device is attached and must
		 * participate in the rebuild.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else {
		/*
		 * The rebuild operation should be suspended. This may occur
		 * when detaching a child vdev or when exporting the pool. The
		 * rebuild is left in the active state so it will be resumed.
		 */
		ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		vd->vdev_rebuilding = B_FALSE;
	}

	dmu_tx_commit(tx);

	vd->vdev_rebuild_thread = NULL;
	mutex_exit(&vd->vdev_rebuild_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	cv_broadcast(&vd->vdev_rebuild_cv);

	thread_exit();
}

/*
 * Returns B_TRUE if any top-level vdev is rebuilding.
 */
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t ret = B_FALSE;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++) {
			ret = vdev_rebuild_active(vd->vdev_child[i]);
			if (ret)
				return (ret);
		}
	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		mutex_exit(&vd->vdev_rebuild_lock);
	}

	return (ret);
}

/*
 * Start a rebuild operation. The rebuild may be restarted when the
 * top-level vdev is currently actively rebuilding.
 */
void
vdev_rebuild(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_top == vd);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(!vd->vdev_removing);
	ASSERT(spa_feature_is_enabled(vd->vdev_spa,
	    SPA_FEATURE_DEVICE_REBUILD));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (vd->vdev_rebuilding) {
		ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);

		/*
		 * Signal a running rebuild operation that it should restart
		 * from the beginning because a new device was attached. The
		 * vdev_rebuild_reset_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (!vd->vdev_rebuild_reset_wanted)
			vd->vdev_rebuild_reset_wanted = B_TRUE;
	} else {
		vdev_rebuild_initiate(vd);
	}
	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_restart_impl(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
		    vdev_writeable(vd) && !vd->vdev_rebuilding) {
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_DEVICE_REBUILD));
			vd->vdev_rebuilding = B_TRUE;
			vd->vdev_rebuild_thread = thread_create(NULL, 0,
			    vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
			    maxclsyspri);
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}
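
/*
 * Note that vdev_rebuild_restart_impl() creates no new on-disk state; it
 * only re-spawns a vdev_rebuild_thread() for top-level vdevs whose
 * persistent state is still VDEV_REBUILD_ACTIVE. This is how a rebuild that
 * was suspended by an export (see vdev_rebuild_stop_all() below) resumes on
 * the next import.
 */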

/*
 * Conditionally restart all of the vdev_rebuild_thread's for a pool. The
 * feature flag must be active and the rebuild in the active state. This
 * cannot be used to start a new rebuild.
 */
void
vdev_rebuild_restart(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_load_thread == curthread);

	vdev_rebuild_restart_impl(spa->spa_root_vdev);
}

/*
 * Stop and wait for all of the vdev_rebuild_thread's associated with the
 * vdev tree provided to be terminated (canceled or stopped).
 */
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_stop_wait(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		ASSERT(vd == vd->vdev_top);

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vd->vdev_rebuild_thread != NULL) {
			vd->vdev_rebuild_exit_wanted = B_TRUE;
			while (vd->vdev_rebuilding) {
				cv_wait(&vd->vdev_rebuild_cv,
				    &vd->vdev_rebuild_lock);
			}
			vd->vdev_rebuild_exit_wanted = B_FALSE;
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Stop all rebuild operations but leave them in the active state so they
 * will be resumed when importing the pool.
 */
void
vdev_rebuild_stop_all(spa_t *spa)
{
	vdev_rebuild_stop_wait(spa->spa_root_vdev);
}

/*
 * Rebuild statistics reported per top-level vdev.
 */
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
	spa_t *spa = tvd->vdev_spa;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
		return (SET_ERROR(ENOTSUP));

	if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
		return (SET_ERROR(EINVAL));

	int error = zap_contains(spa_meta_objset(spa),
	    tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);

	if (error == ENOENT) {
		memset(vrs, 0, sizeof (vdev_rebuild_stat_t));
		vrs->vrs_state = VDEV_REBUILD_NONE;
		error = 0;
	} else if (error == 0) {
		vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&tvd->vdev_rebuild_lock);
		vrs->vrs_state = vrp->vrp_rebuild_state;
		vrs->vrs_start_time = vrp->vrp_start_time;
		vrs->vrs_end_time = vrp->vrp_end_time;
		vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
		vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
		vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
		vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
		vrs->vrs_bytes_est = vrp->vrp_bytes_est;
		vrs->vrs_errors = vrp->vrp_errors;
		vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
		    vr->vr_pass_start_time);
		vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
		vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
		vrs->vrs_pass_bytes_skipped = vr->vr_pass_bytes_skipped;
		mutex_exit(&tvd->vdev_rebuild_lock);
	}

	return (error);
}

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, U64, ZMOD_RW,
	"Max segment size in bytes of rebuild reads");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_vdev_limit, U64, ZMOD_RW,
	"Max bytes in flight per leaf vdev for sequential resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_scrub_enabled, INT, ZMOD_RW,
	"Automatically scrub after sequential resilver completes");