/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *
 * Copyright (c) 2018, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2024 by Delphix. All rights reserved.
 */

#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>

/*
 * This file contains the sequential reconstruction implementation for
 * resilvering. This form of resilvering is internally referred to as device
 * rebuild to avoid conflating it with the traditional healing reconstruction
 * performed by the dsl scan code.
 *
 * When replacing a device, or scrubbing the pool, ZFS has historically used
 * a process called resilvering which is a form of healing reconstruction.
 * This approach has the advantage that as blocks are read from disk their
 * checksums can be immediately verified and the data repaired. Unfortunately,
 * it also results in a random IO pattern to the disk even when extra care
 * is taken to sequentialize the IO as much as possible. This substantially
 * increases the time required to resilver the pool and restore redundancy.
 *
 * For mirrored devices it's possible to implement an alternate sequential
 * reconstruction strategy when resilvering. Sequential reconstruction
 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
 * order without verifying the checksum. After this phase completes a second
 * scrub phase is started to verify all of the checksums. This two-phase
 * process will take longer than the healing reconstruction described above.
 * However, it has the advantage that after the first reconstruction phase
 * completes, redundancy has been restored. At this point the pool can incur
 * another device failure without risking data loss.
 *
 * There are a few noteworthy limitations and other advantages of resilvering
 * using sequential reconstruction vs healing reconstruction.
 *
 * Limitations:
 *
 *   - Sequential reconstruction is not possible on RAIDZ due to its
 *     variable stripe width. Note dRAID uses a fixed stripe width which
 *     avoids this issue, but comes at the expense of some usable capacity.
 *
 *   - Block checksums are not verified during sequential reconstruction.
 *     Similar to traditional RAID the parity/mirror data is reconstructed
 *     but cannot be immediately double checked.
 *     For this reason, when the last active resilver completes the pool is
 *     automatically scrubbed by default.
 *
 *   - Deferred resilvers using sequential reconstruction are not currently
 *     supported. When adding another vdev to an active top-level resilver
 *     it must be restarted.
 *
 * Advantages:
 *
 *   - Sequential reconstruction is performed in LBA order which may be faster
 *     than healing reconstruction particularly when using HDDs (or
 *     especially with SMR devices). Only allocated capacity is resilvered.
 *
 *   - Sequential reconstruction is not constrained by ZFS block boundaries.
 *     This allows it to issue larger IOs to disk which span multiple blocks,
 *     allowing all of these logical blocks to be repaired with a single IO.
 *
 *   - Unlike a healing resilver or scrub which are pool wide operations,
 *     sequential reconstruction is handled by the top-level vdevs. This
 *     allows for it to be started or canceled on a top-level vdev without
 *     impacting any other top-level vdevs in the pool.
 *
 *   - Data only referenced by a pool checkpoint will be repaired because
 *     that space is reflected in the space maps. This differs from a
 *     healing resilver or scrub which will not repair that data.
 */


/*
 * Size of rebuild reads; defaults to 1MiB per data disk and is capped at
 * SPA_MAXBLOCKSIZE.
 */
static uint64_t zfs_rebuild_max_segment = 1024 * 1024;

/*
 * Maximum number of concurrently executed bytes per leaf vdev caused by a
 * sequential resilver. We attempt to strike a balance here between keeping
 * the vdev queues full of I/Os at all times and not overflowing the queues
 * to cause long latency, which would cause long txg sync times.
 *
 * A large default value can be safely used here because the default target
 * segment size is also large (zfs_rebuild_max_segment=1M). This helps keep
 * the queue depth short.
 *
 * 64MB was observed to deliver the best performance and was set as the
 * default. Testing was performed with a 106-drive dRAID HDD pool
 * (draid2:11d:106c) and a rebuild rate of 1.2GB/s was measured to the
 * distributed spare. Smaller values were unable to fully saturate the
 * available pool I/O.
 */
static uint64_t zfs_rebuild_vdev_limit = 64 << 20;

/*
 * Automatically start a pool scrub when the last active sequential resilver
 * completes in order to verify the checksums of all blocks which have been
 * resilvered. This option is enabled by default and is strongly recommended.
 */
static int zfs_rebuild_scrub_enabled = 1;
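
/*
 * Illustrative sizing note (example figures only, not defaults or measured
 * values): vdev_rebuild_thread() caps in-flight rebuild I/O per top-level
 * vdev at
 *
 *	MIN((arc_c_max / 2) / <number of top-level vdevs>,
 *	    MAX(1MiB, zfs_rebuild_vdev_limit * <children in this vdev>))
 *
 * For example, with arc_c_max = 16GiB and 4 top-level vdevs the first term
 * is 2GiB; a 10-child top-level vdev using the default 64MiB
 * zfs_rebuild_vdev_limit contributes 640MiB for the second term, so at most
 * 640MiB of rebuild I/O would be kept in flight for that vdev.
 */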

/*
 * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
 */
static __attribute__((noreturn)) void vdev_rebuild_thread(void *arg);
static void vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx);

/*
 * Clear the per-vdev rebuild bytes value for a vdev tree.
 */
static void
clear_rebuild_bytes(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		clear_rebuild_bytes(vd->vdev_child[i]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_rebuild_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Determines whether a vdev_rebuild_thread() should be stopped.
 */
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
	return (!vdev_writeable(vd) || vd->vdev_removing ||
	    vd->vdev_rebuild_exit_wanted ||
	    vd->vdev_rebuild_cancel_wanted ||
	    vd->vdev_rebuild_reset_wanted);
}

/*
 * Determine if the rebuild should be canceled. This may happen when all
 * vdevs with MISSING DTLs are detached.
 */
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * The sync task for updating the on-disk state of a rebuild. This is
 * scheduled by vdev_rebuild_range().
 */
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t txg = dmu_tx_get_txg(tx);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
		vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
		vr->vr_scan_offset[txg & TXG_MASK] = 0;
	}

	vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
	    NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Initialize the on-disk state for a new rebuild, start the rebuild thread.
 */
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_rebuilding);

	spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	mutex_enter(&vd->vdev_rebuild_lock);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_start_time = gethrestime_sec();
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/*
	 * Rebuilds are currently only used when replacing a device, in which
	 * case there must be DTL_MISSING entries. In the future, we could
	 * allow rebuilds to be used in a way similar to a scrub. This would
	 * be useful because it would allow us to rebuild the space used by
	 * pool checkpoints.
	 */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu started",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, const char *name)
{
	nvlist_t *aux = fnvlist_alloc();

	fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
	spa_event_notify(spa, vd, aux, name);
	nvlist_free(aux);
}

/*
 * Called to request that a new rebuild be started. The feature will remain
 * active for the duration of the rebuild, then revert to the enabled state.
 */
static void
vdev_rebuild_initiate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd->vdev_top == vd);
	ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
	ASSERT(!vd->vdev_rebuilding);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	vd->vdev_rebuilding = B_TRUE;

	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, tx);
	dmu_tx_commit(tx);

	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}

/*
 * Update the on-disk state to completed when a rebuild finishes.
 */
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	/*
	 * Handle a second device failure if it occurs after all rebuild I/O
	 * has completed but before this sync task has been executed.
	 */
	if (vd->vdev_rebuild_reset_wanted) {
		mutex_exit(&vd->vdev_rebuild_lock);
		vdev_rebuild_reset_sync(arg, tx);
		return;
	}

	vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu complete",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	/* Handles detaching of spares */
	spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * While we're in syncing context, take the opportunity to
	 * set up the scrub when there are no more active rebuilds.
	 */
	setup_sync_arg_t setup_sync_arg = {
		.func = POOL_SCAN_SCRUB,
		.txgstart = 0,
		.txgend = 0,
	};
	if (dsl_scan_setup_check(&setup_sync_arg.func, tx) == 0 &&
	    zfs_rebuild_scrub_enabled) {
		dsl_scan_setup_sync(&setup_sync_arg, tx);
	}

	cv_broadcast(&vd->vdev_rebuild_cv);

	/* Clear recent error events (i.e. duplicate events tracking) */
	zfs_ereport_clear(spa, NULL);
}

/*
 * Update the on-disk state to canceled when a rebuild finishes.
 */
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu canceled",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	vd->vdev_rebuild_cancel_wanted = B_FALSE;
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Resets the progress of a running rebuild. This will occur when a new
 * vdev is added to rebuild.
 */
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	vrp->vrp_last_offset = 0;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_bytes_scanned = 0;
	vrp->vrp_bytes_issued = 0;
	vrp->vrp_bytes_rebuilt = 0;
	vrp->vrp_bytes_est = 0;
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/* See vdev_rebuild_initiate_sync comment */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu reset",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	vd->vdev_rebuild_reset_wanted = B_FALSE;
	ASSERT(vd->vdev_rebuilding);

	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Clear the last rebuild status.
 */
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	objset_t *mos = spa_meta_objset(spa);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
	    vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return;
	}

	clear_rebuild_bytes(vd);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);

	if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
		VERIFY0(zap_update(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
		    REBUILD_PHYS_ENTRIES, vrp, tx));
	}

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * The zio_done_func_t callback for each rebuild I/O issued. It's responsible
 * for updating the rebuild stats and limiting the number of in flight I/Os.
 */
static void
vdev_rebuild_cb(zio_t *zio)
{
	vdev_rebuild_t *vr = zio->io_private;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vdev_t *vd = vr->vr_top_vdev;

	mutex_enter(&vr->vr_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the top-level vdev was unavailable.
		 * Attempt to roll back to the last completed offset, in order
		 * to resume from the correct location if the pool is resumed.
		 * (This works because spa_sync waits on spa_txg_zio before
		 * it runs sync tasks.)
		 */
		uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else if (zio->io_error) {
		vrp->vrp_errors++;
	}

	abd_free(zio->io_abd);

	ASSERT3U(vr->vr_bytes_inflight, >, 0);
	vr->vr_bytes_inflight -= zio->io_size;
	cv_broadcast(&vr->vr_io_cv);
	mutex_exit(&vr->vr_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Initialize a block pointer that can be used to read the given segment
 * for sequential rebuild.
 */
static void
vdev_rebuild_blkptr_init(blkptr_t *bp, vdev_t *vd, uint64_t start,
    uint64_t asize)
{
	ASSERT(vd->vdev_ops == &vdev_draid_ops ||
	    vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

	uint64_t psize = vd->vdev_ops == &vdev_draid_ops ?
	    vdev_draid_asize_to_psize(vd, asize) : asize;

	BP_ZERO(bp);

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], start);
	DVA_SET_GANG(&bp->blk_dva[0], 0);
	DVA_SET_ASIZE(&bp->blk_dva[0], asize);

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(bp, psize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}

/*
 * Issues a rebuild I/O and takes care of rate limiting the number of queued
 * rebuild I/Os. The provided start and size must be properly aligned for the
 * top-level vdev type being rebuilt.
 */
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
	uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;
	blkptr_t blk;

	ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
	ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);

	vr->vr_pass_bytes_scanned += size;
	vr->vr_rebuild_phys.vrp_bytes_scanned += size;

	/*
	 * Rebuild the data in this range by constructing a special block
	 * pointer. It has no relation to any existing blocks in the pool.
	 * However, by disabling checksum verification and issuing a scrub IO
	 * we can reconstruct and repair any children with missing data.
	 */
	vdev_rebuild_blkptr_init(&blk, vd, start, size);
	uint64_t psize = BP_GET_PSIZE(&blk);

	if (!vdev_dtl_need_resilver(vd, &blk.blk_dva[0], psize, TXG_UNKNOWN)) {
		vr->vr_pass_bytes_skipped += size;
		return (0);
	}

	mutex_enter(&vr->vr_io_lock);

	/* Limit in flight rebuild I/Os */
	while (vr->vr_bytes_inflight >= vr->vr_bytes_inflight_max)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	vr->vr_bytes_inflight += psize;
	mutex_exit(&vr->vr_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	/* This is the first I/O for this txg. */
	if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
		vr->vr_scan_offset[txg & TXG_MASK] = start;
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_rebuild_update_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	}

	/* When exiting, write out our progress. */
	if (vdev_rebuild_should_stop(vd)) {
		mutex_enter(&vr->vr_io_lock);
		vr->vr_bytes_inflight -= psize;
		mutex_exit(&vr->vr_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_rebuild_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_rebuild_lock);
	dmu_tx_commit(tx);

	vr->vr_scan_offset[txg & TXG_MASK] = start + size;
	vr->vr_pass_bytes_issued += size;
	vr->vr_rebuild_phys.vrp_bytes_issued += size;

	zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, &blk,
	    abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
	    ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_RESILVER, NULL));

	return (0);
}

/*
 * Issues rebuild I/Os for all ranges in the provided vr->vr_scan_tree
 * range tree.
 */
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
	vdev_t *vd = vr->vr_top_vdev;
	zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
	zfs_btree_index_t idx;
	int error;

	for (zfs_range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
	    rs = zfs_btree_next(t, &idx, &idx)) {
		uint64_t start = zfs_rs_get_start(rs, vr->vr_scan_tree);
		uint64_t size = zfs_rs_get_end(rs, vr->vr_scan_tree) - start;

		/*
		 * zfs_scan_suspend_progress can be set to disable rebuild
		 * progress for testing. See comment in dsl_scan_sync().
		 */
		while (zfs_scan_suspend_progress &&
		    !vdev_rebuild_should_stop(vd)) {
			delay(hz);
		}

		while (size > 0) {
			uint64_t chunk_size;

			/*
			 * Split range into legally-sized logical chunks
			 * given the constraints of the top-level vdev
			 * being rebuilt (dRAID or mirror).
			 */
			ASSERT3P(vd->vdev_ops, !=, NULL);
			chunk_size = vd->vdev_ops->vdev_op_rebuild_asize(vd,
			    start, size, zfs_rebuild_max_segment);

			error = vdev_rebuild_range(vr, start, chunk_size);
			if (error != 0)
				return (error);

			size -= chunk_size;
			start += chunk_size;
		}
	}

	return (0);
}

/*
 * Calculates the estimated capacity which remains to be scanned. Since
 * we traverse the pool in metaslab order, only allocated capacity beyond
 * the vrp_last_offset need be considered. All lower offsets must have
 * already been rebuilt and are thus already included in vrp_bytes_scanned.
 */
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t bytes_est = vrp->vrp_bytes_scanned;

	if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
		return;

	for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];

		mutex_enter(&msp->ms_lock);
		bytes_est += metaslab_allocated_space(msp);
		mutex_exit(&msp->ms_lock);
	}

	vrp->vrp_bytes_est = bytes_est;
}

/*
 * Load from disk the top-level vdev's rebuild information.
 */
int
vdev_rebuild_load(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	spa_t *spa = vd->vdev_spa;
	int err = 0;

	mutex_enter(&vd->vdev_rebuild_lock);
	vd->vdev_rebuilding = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
		mutex_exit(&vd->vdev_rebuild_lock);
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(vd->vdev_top == vd);

	err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp);

	/*
	 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
	 * not prevent a pool from being imported. Clear the rebuild
	 * status allowing a new resilver/rebuild to be started.
	 */
	if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	} else if (err) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return (err);
	}

	vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
	vr->vr_top_vdev = vd;

	mutex_exit(&vd->vdev_rebuild_lock);

	return (0);
}

/*
 * Each scan thread is responsible for rebuilding a top-level vdev. The
 * rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
 */
static __attribute__((noreturn)) void
vdev_rebuild_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int error = 0;

	/*
	 * If there's a scrub in process, request that it be stopped. This
	 * is not required for a correct rebuild, but we do want rebuilds to
	 * emulate the resilver behavior as much as possible.
	 */
	dsl_pool_t *dsl = spa_get_dsl(spa);
	if (dsl_scan_scrubbing(dsl))
		dsl_scan_cancel(dsl);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT3P(vd->vdev_top, ==, vd);
	ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
	ASSERT(vd->vdev_rebuilding);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
	ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);

	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vr->vr_top_vdev = vd;
	vr->vr_scan_msp = NULL;
	vr->vr_scan_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64, NULL,
	    0, 0);
	mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);

	vr->vr_pass_start_time = gethrtime();
	vr->vr_pass_bytes_scanned = 0;
	vr->vr_pass_bytes_issued = 0;
	vr->vr_pass_bytes_skipped = 0;

	uint64_t update_est_time = gethrtime();
	vdev_rebuild_update_bytes_est(vd, 0);

	clear_rebuild_bytes(vr->vr_top_vdev);

	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * Systematically walk the metaslabs and issue rebuild I/Os for
	 * all ranges in the allocated space map.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];
		vr->vr_scan_msp = msp;

		/*
		 * Calculate the max number of in-flight bytes for top-level
		 * vdev scanning operations (minimum 1MB, maximum 1/2 of
		 * arc_c_max shared by all top-level vdevs). Limits for the
		 * issuing phase are done per top-level vdev and are handled
		 * separately.
		 */
		uint64_t limit = (arc_c_max / 2) / MAX(rvd->vdev_children, 1);
		vr->vr_bytes_inflight_max = MIN(limit, MAX(1ULL << 20,
		    zfs_rebuild_vdev_limit * vd->vdev_children));

		/*
		 * Removal of vdevs from the vdev tree may eliminate the need
		 * for the rebuild, in which case it should be canceled. The
		 * vdev_rebuild_cancel_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (vdev_rebuild_should_cancel(vd)) {
			vd->vdev_rebuild_cancel_wanted = B_TRUE;
			error = EINTR;
			break;
		}

		ASSERT0(zfs_range_tree_space(vr->vr_scan_tree));

		/* Disable any new allocations to this metaslab */
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * If there are outstanding allocations, wait for them to be
		 * synced. This is needed to ensure all allocated ranges are
		 * on disk and therefore will be rebuilt.
		 */
		for (int j = 0; j < TXG_SIZE; j++) {
			if (zfs_range_tree_space(msp->ms_allocating[j])) {
				mutex_exit(&msp->ms_lock);
				mutex_exit(&msp->ms_sync_lock);
				txg_wait_synced(dsl, 0);
				mutex_enter(&msp->ms_sync_lock);
				mutex_enter(&msp->ms_lock);
				break;
			}
		}

		/*
		 * When a metaslab has been allocated from, read its allocated
		 * ranges from the space map object into the vr_scan_tree.
		 * Then add inflight / unflushed ranges and remove inflight /
		 * unflushed frees. This is the minimum range to be rebuilt.
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    vr->vr_scan_tree, SM_ALLOC));

			for (int i = 0; i < TXG_SIZE; i++) {
				ASSERT0(zfs_range_tree_space(
				    msp->ms_allocating[i]));
			}

			zfs_range_tree_walk(msp->ms_unflushed_allocs,
			    zfs_range_tree_add, vr->vr_scan_tree);
			zfs_range_tree_walk(msp->ms_unflushed_frees,
			    zfs_range_tree_remove, vr->vr_scan_tree);

			/*
			 * Remove ranges which have already been rebuilt based
			 * on the last offset. This can happen when restarting
			 * a scan after exporting and re-importing the pool.
			 */
			zfs_range_tree_clear(vr->vr_scan_tree, 0,
			    vrp->vrp_last_offset);
		}

		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		/*
		 * To provide an accurate estimate, re-calculate the estimated
		 * size every 5 minutes to account for recent allocations and
		 * frees made to space maps which have not yet been rebuilt.
		 */
		if (gethrtime() > update_est_time + SEC2NSEC(300)) {
			update_est_time = gethrtime();
			vdev_rebuild_update_bytes_est(vd, i);
		}

		/*
		 * Walk the allocated space map and issue the rebuild I/O.
		 */
		error = vdev_rebuild_ranges(vr);
		zfs_range_tree_vacate(vr->vr_scan_tree, NULL, NULL);

		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		metaslab_enable(msp, B_FALSE, B_FALSE);

		if (error != 0)
			break;
	}

	zfs_range_tree_destroy(vr->vr_scan_tree);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/* Wait for any remaining rebuild I/O to complete */
	mutex_enter(&vr->vr_io_lock);
	while (vr->vr_bytes_inflight > 0)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	mutex_exit(&vr->vr_io_lock);

	mutex_destroy(&vr->vr_io_lock);
	cv_destroy(&vr->vr_io_cv);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (error == 0) {
		/*
		 * After a successful rebuild clear the DTLs of all ranges
		 * which were missing when the rebuild was started. These
		 * ranges must have been rebuilt as a consequence of rebuilding
		 * all allocated space. Note that unlike a scrub or resilver
		 * the rebuild operation will reconstruct data only referenced
		 * by a pool checkpoint. See the dsl_scan_done() comments.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_cancel_wanted) {
		/*
		 * The rebuild operation was canceled. This will occur when
		 * a device participating in the rebuild is detached.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_reset_wanted) {
		/*
		 * Reset the running rebuild without canceling and restarting
		 * it. This will occur when a new device is attached and must
		 * participate in the rebuild.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else {
		/*
		 * The rebuild operation should be suspended. This may occur
		 * when detaching a child vdev or when exporting the pool. The
		 * rebuild is left in the active state so it will be resumed.
		 */
		ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		vd->vdev_rebuilding = B_FALSE;
	}

	dmu_tx_commit(tx);

	vd->vdev_rebuild_thread = NULL;
	mutex_exit(&vd->vdev_rebuild_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	cv_broadcast(&vd->vdev_rebuild_cv);

	thread_exit();
}

/*
 * Returns B_TRUE if any top-level vdevs are rebuilding.
 */
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t ret = B_FALSE;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++) {
			ret = vdev_rebuild_active(vd->vdev_child[i]);
			if (ret)
				return (ret);
		}
	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		mutex_exit(&vd->vdev_rebuild_lock);
	}

	return (ret);
}

/*
 * Start a rebuild operation. The rebuild may be restarted when the
 * top-level vdev is currently actively rebuilding.
 */
void
vdev_rebuild(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_top == vd);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(!vd->vdev_removing);
	ASSERT(spa_feature_is_enabled(vd->vdev_spa,
	    SPA_FEATURE_DEVICE_REBUILD));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (vd->vdev_rebuilding) {
		ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);

		/*
		 * Signal a running rebuild operation that it should restart
		 * from the beginning because a new device was attached. The
		 * vdev_rebuild_reset_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (!vd->vdev_rebuild_reset_wanted)
			vd->vdev_rebuild_reset_wanted = B_TRUE;
	} else {
		vdev_rebuild_initiate(vd);
	}
	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_restart_impl(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
		    vdev_writeable(vd) && !vd->vdev_rebuilding) {
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_DEVICE_REBUILD));
			vd->vdev_rebuilding = B_TRUE;
			vd->vdev_rebuild_thread = thread_create(NULL, 0,
			    vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
			    maxclsyspri);
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Conditionally restart all of the vdev_rebuild_thread's for a pool. The
 * feature flag must be active and the rebuild in the active state. This
 * cannot be used to start a new rebuild.
 */
void
vdev_rebuild_restart(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_load_thread == curthread);

	vdev_rebuild_restart_impl(spa->spa_root_vdev);
}

/*
 * Stop and wait for all of the vdev_rebuild_thread's associated with the
 * vdev tree provided to be terminated (canceled or stopped).
 */
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_stop_wait(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		ASSERT(vd == vd->vdev_top);

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vd->vdev_rebuild_thread != NULL) {
			vd->vdev_rebuild_exit_wanted = B_TRUE;
			while (vd->vdev_rebuilding) {
				cv_wait(&vd->vdev_rebuild_cv,
				    &vd->vdev_rebuild_lock);
			}
			vd->vdev_rebuild_exit_wanted = B_FALSE;
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Stop all rebuild operations but leave them in the active state so they
 * will be resumed when importing the pool.
 */
void
vdev_rebuild_stop_all(spa_t *spa)
{
	vdev_rebuild_stop_wait(spa->spa_root_vdev);
}

/*
 * Rebuild statistics reported per top-level vdev.
 */
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
	spa_t *spa = tvd->vdev_spa;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
		return (SET_ERROR(ENOTSUP));

	if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
		return (SET_ERROR(EINVAL));

	int error = zap_contains(spa_meta_objset(spa),
	    tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);

	if (error == ENOENT) {
		memset(vrs, 0, sizeof (vdev_rebuild_stat_t));
		vrs->vrs_state = VDEV_REBUILD_NONE;
		error = 0;
	} else if (error == 0) {
		vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&tvd->vdev_rebuild_lock);
		vrs->vrs_state = vrp->vrp_rebuild_state;
		vrs->vrs_start_time = vrp->vrp_start_time;
		vrs->vrs_end_time = vrp->vrp_end_time;
		vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
		vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
		vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
		vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
		vrs->vrs_bytes_est = vrp->vrp_bytes_est;
		vrs->vrs_errors = vrp->vrp_errors;
		vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
		    vr->vr_pass_start_time);
		vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
		vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
		vrs->vrs_pass_bytes_skipped = vr->vr_pass_bytes_skipped;
		mutex_exit(&tvd->vdev_rebuild_lock);
	}

	return (error);
}

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, U64, ZMOD_RW,
	"Max segment size in bytes of rebuild reads");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_vdev_limit, U64, ZMOD_RW,
	"Max bytes in flight per leaf vdev for sequential resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_scrub_enabled, INT, ZMOD_RW,
	"Automatically scrub after sequential resilver completes");
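
/*
 * Illustrative usage note (example values; the paths assume a Linux module
 * build where these tunables appear under /sys/module/zfs/parameters):
 *
 *	# Allow larger rebuild reads and more in-flight bytes per leaf vdev.
 *	echo 4194304 > /sys/module/zfs/parameters/zfs_rebuild_max_segment
 *	echo 134217728 > /sys/module/zfs/parameters/zfs_rebuild_vdev_limit
 *
 *	# Skip the automatic post-rebuild scrub (not recommended).
 *	echo 0 > /sys/module/zfs/parameters/zfs_rebuild_scrub_enabled
 *
 * A sequential rebuild itself is typically requested with the -s option of
 * zpool attach or zpool replace.
 */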