/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *
 * Copyright (c) 2018, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2024 by Delphix. All rights reserved.
 */

#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/zap.h>

/*
 * This file contains the sequential reconstruction implementation for
 * resilvering. This form of resilvering is internally referred to as device
 * rebuild to avoid conflating it with the traditional healing reconstruction
 * performed by the dsl scan code.
 *
 * When replacing a device, or scrubbing the pool, ZFS has historically used
 * a process called resilvering which is a form of healing reconstruction.
 * This approach has the advantage that as blocks are read from disk their
 * checksums can be immediately verified and the data repaired. Unfortunately,
 * it also results in a random IO pattern to the disk even when extra care
 * is taken to sequentialize the IO as much as possible. This substantially
 * increases the time required to resilver the pool and restore redundancy.
 *
 * For mirrored devices it's possible to implement an alternate sequential
 * reconstruction strategy when resilvering. Sequential reconstruction
 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
 * order without verifying the checksum. After this phase completes a second
 * scrub phase is started to verify all of the checksums. This two-phase
 * process will take longer than the healing reconstruction described above.
 * However, it has the advantage that after the first reconstruction phase
 * completes redundancy has been restored. At this point the pool can incur
 * another device failure without risking data loss.
 *
 * There are a few noteworthy limitations and other advantages of resilvering
 * using sequential reconstruction vs healing reconstruction.
 *
 * Limitations:
 *
 *   - Sequential reconstruction is not possible on RAIDZ due to its
 *     variable stripe width. Note dRAID uses a fixed stripe width which
 *     avoids this issue, but comes at the expense of some usable capacity.
 *
 *   - Block checksums are not verified during sequential reconstruction.
 *     Similar to traditional RAID the parity/mirror data is reconstructed
 *     but cannot be immediately double checked. For this reason when the
 *     last active resilver completes the pool is automatically scrubbed
 *     by default.
 *
 *   - Deferred resilvers using sequential reconstruction are not currently
 *     supported. When adding another vdev to an active top-level resilver
 *     it must be restarted.
 *
 * Advantages:
 *
 *   - Sequential reconstruction is performed in LBA order which may be faster
 *     than healing reconstruction particularly when using HDDs (or
 *     especially with SMR devices). Only allocated capacity is resilvered.
 *
 *   - Sequential reconstruction is not constrained by ZFS block boundaries.
 *     This allows it to issue larger IOs to disk which span multiple blocks
 *     allowing all of these logical blocks to be repaired with a single IO.
 *
 *   - Unlike a healing resilver or scrub which are pool wide operations,
 *     sequential reconstruction is handled by the top-level vdevs. This
 *     allows for it to be started or canceled on a top-level vdev without
 *     impacting any other top-level vdevs in the pool.
 *
 *   - Data only referenced by a pool checkpoint will be repaired because
 *     that space is reflected in the space maps. This differs from a
 *     healing resilver or scrub which will not repair that data.
 */
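
/*
 * Usage note (illustrative; see zpool(8) and spa_vdev_attach() for the
 * authoritative interfaces): from userspace a sequential rebuild, rather
 * than a healing resilver, is requested with the -s flag, e.g.
 *
 *	zpool replace -s <pool> <old-device> <new-device>
 *	zpool attach -s <pool> <existing-device> <new-device>
 *
 * Both paths eventually call vdev_rebuild() below on the affected
 * top-level vdev.
 */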

/*
 * Size of rebuild reads; defaults to 1MiB per data disk and is capped at
 * SPA_MAXBLOCKSIZE.
 */
static uint64_t zfs_rebuild_max_segment = 1024 * 1024;

/*
 * Maximum number of bytes allowed in flight per leaf vdev on behalf of a
 * sequential resilver. We attempt to strike a balance here between keeping
 * the vdev queues full of I/Os at all times and not overflowing the queues,
 * which would cause high latency and long txg sync times.
 *
 * A large default value can be safely used here because the default target
 * segment size is also large (zfs_rebuild_max_segment=1M). This helps keep
 * the queue depth short.
 *
 * 64MB was observed to deliver the best performance and was set as the
 * default. Testing was performed with a 106-drive dRAID HDD pool
 * (draid2:11d:106c) and a rebuild rate of 1.2GB/s was measured to the
 * distributed spare. Smaller values were unable to fully saturate the
 * available pool I/O.
 */
static uint64_t zfs_rebuild_vdev_limit = 64 << 20;

/*
 * Automatically start a pool scrub when the last active sequential resilver
 * completes in order to verify the checksums of all blocks which have been
 * resilvered. This option is enabled by default and is strongly recommended.
 */
static int zfs_rebuild_scrub_enabled = 1;

/*
 * For vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
 */
static __attribute__((noreturn)) void vdev_rebuild_thread(void *arg);
static void vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx);

/*
 * Clear the per-vdev rebuild bytes value for a vdev tree.
 */
static void
clear_rebuild_bytes(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		clear_rebuild_bytes(vd->vdev_child[i]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_rebuild_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Determines whether a vdev_rebuild_thread() should be stopped.
 */
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
	return (!vdev_writeable(vd) || vd->vdev_removing ||
	    vd->vdev_rebuild_exit_wanted ||
	    vd->vdev_rebuild_cancel_wanted ||
	    vd->vdev_rebuild_reset_wanted);
}

/*
 * Determine if the rebuild should be canceled. This may happen when all
 * vdevs with MISSING DTLs are detached.
 */
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * The sync task for updating the on-disk state of a rebuild. This is
 * scheduled by vdev_rebuild_range().
 */
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t txg = dmu_tx_get_txg(tx);

	mutex_enter(&vd->vdev_rebuild_lock);

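	/*
	 * vr_scan_offset[] tracks, per open txg, the offset reached by the
	 * rebuild I/Os issued in that txg. By the time this sync task runs
	 * every one of those I/Os has completed (spa_sync() waits on
	 * spa_txg_zio before running sync tasks), so the offset can be
	 * persisted as the durable resume point in vrp_last_offset.
	 */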
	if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
		vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
		vr->vr_scan_offset[txg & TXG_MASK] = 0;
	}

	vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
	    NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Initialize the on-disk state for a new rebuild and start the rebuild
 * thread.
 */
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_rebuilding);

	spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	mutex_enter(&vd->vdev_rebuild_lock);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_start_time = gethrestime_sec();
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/*
	 * Rebuilds are currently only used when replacing a device, in which
	 * case there must be DTL_MISSING entries. In the future, we could
	 * allow rebuilds to be used in a way similar to a scrub. This would
	 * be useful because it would allow us to rebuild the space used by
	 * pool checkpoints.
	 */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu started",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, const char *name)
{
	nvlist_t *aux = fnvlist_alloc();

	fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
	spa_event_notify(spa, vd, aux, name);
	nvlist_free(aux);
}

/*
 * Called to request that a new rebuild be started. The feature will remain
 * active for the duration of the rebuild, then revert to the enabled state.
 */
static void
vdev_rebuild_initiate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd->vdev_top == vd);
	ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
	ASSERT(!vd->vdev_rebuilding);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	vd->vdev_rebuilding = B_TRUE;

	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, tx);
	dmu_tx_commit(tx);

	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}

/*
 * Update the on-disk state to completed when a rebuild finishes.
 */
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	/*
	 * Handle a second device failure if it occurs after all rebuild I/O
	 * has completed but before this sync task has been executed.
	 */
	if (vd->vdev_rebuild_reset_wanted) {
		mutex_exit(&vd->vdev_rebuild_lock);
		vdev_rebuild_reset_sync(arg, tx);
		return;
	}

	vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu complete",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	/* Handles detaching of spares */
	spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * While we're in syncing context take the opportunity to
	 * set up the scrub when there are no more active rebuilds.
	 */
	pool_scan_func_t func = POOL_SCAN_SCRUB;
	if (dsl_scan_setup_check(&func, tx) == 0 &&
	    zfs_rebuild_scrub_enabled) {
		dsl_scan_setup_sync(&func, tx);
	}

	cv_broadcast(&vd->vdev_rebuild_cv);

	/* Clear recent error events (i.e. duplicate events tracking) */
	zfs_ereport_clear(spa, NULL);
}

/*
 * Update the on-disk state to canceled when a rebuild finishes.
 */
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu canceled",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	vd->vdev_rebuild_cancel_wanted = B_FALSE;
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Resets the progress of a running rebuild. This will occur when a new
 * vdev is attached which must participate in the rebuild.
 */
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	vrp->vrp_last_offset = 0;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_bytes_scanned = 0;
	vrp->vrp_bytes_issued = 0;
	vrp->vrp_bytes_rebuilt = 0;
	vrp->vrp_bytes_est = 0;
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/* See vdev_rebuild_initiate_sync comment */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu reset",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	vd->vdev_rebuild_reset_wanted = B_FALSE;
	ASSERT(vd->vdev_rebuilding);

	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Clear the last rebuild status.
 */
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	objset_t *mos = spa_meta_objset(spa);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
	    vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return;
	}

	clear_rebuild_bytes(vd);
	memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);

	if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
		VERIFY0(zap_update(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
		    REBUILD_PHYS_ENTRIES, vrp, tx));
	}

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * The zio_done_func_t callback for each rebuild I/O issued. It's responsible
 * for updating the rebuild stats and limiting the number of in flight I/Os.
 */
static void
vdev_rebuild_cb(zio_t *zio)
{
	vdev_rebuild_t *vr = zio->io_private;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vdev_t *vd = vr->vr_top_vdev;

	mutex_enter(&vr->vr_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the top-level vdev was unavailable.
		 * Attempt to roll back to the last completed offset, in order
		 * to resume from the correct location if the pool is resumed.
		 * (This works because spa_sync waits on spa_txg_zio before
		 * it runs sync tasks.)
		 */
		uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else if (zio->io_error) {
		vrp->vrp_errors++;
	}

	abd_free(zio->io_abd);

	ASSERT3U(vr->vr_bytes_inflight, >, 0);
	vr->vr_bytes_inflight -= zio->io_size;
	cv_broadcast(&vr->vr_io_cv);
	mutex_exit(&vr->vr_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Initialize a block pointer that can be used to read the given segment
 * for sequential rebuild.
 */
static void
vdev_rebuild_blkptr_init(blkptr_t *bp, vdev_t *vd, uint64_t start,
    uint64_t asize)
{
	ASSERT(vd->vdev_ops == &vdev_draid_ops ||
	    vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

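	/*
	 * For dRAID the allocated size includes parity and any padding, so
	 * it must be converted back to the logical data size (psize) which
	 * the read will return. For mirrors the two sizes are the same.
	 */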
	uint64_t psize = vd->vdev_ops == &vdev_draid_ops ?
	    vdev_draid_asize_to_psize(vd, asize) : asize;

	BP_ZERO(bp);

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], start);
	DVA_SET_GANG(&bp->blk_dva[0], 0);
	DVA_SET_ASIZE(&bp->blk_dva[0], asize);

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(bp, psize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}

/*
 * Issues a rebuild I/O and takes care of rate limiting the number of queued
 * rebuild I/Os. The provided start and size must be properly aligned for the
 * top-level vdev type being rebuilt.
 */
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
	uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;
	blkptr_t blk;

	ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
	ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);

	vr->vr_pass_bytes_scanned += size;
	vr->vr_rebuild_phys.vrp_bytes_scanned += size;

	/*
	 * Rebuild the data in this range by constructing a special block
	 * pointer. It has no relation to any existing blocks in the pool.
	 * However, by disabling checksum verification and issuing a scrub IO
	 * we can reconstruct and repair any children with missing data.
	 */
	vdev_rebuild_blkptr_init(&blk, vd, start, size);
	uint64_t psize = BP_GET_PSIZE(&blk);

	if (!vdev_dtl_need_resilver(vd, &blk.blk_dva[0], psize, TXG_UNKNOWN)) {
		vr->vr_pass_bytes_skipped += size;
		return (0);
	}

	mutex_enter(&vr->vr_io_lock);

	/* Limit in flight rebuild I/Os */
	while (vr->vr_bytes_inflight >= vr->vr_bytes_inflight_max)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	vr->vr_bytes_inflight += psize;
	mutex_exit(&vr->vr_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	/* This is the first I/O for this txg. */
	if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
		vr->vr_scan_offset[txg & TXG_MASK] = start;
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_rebuild_update_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	}

	/* When exiting, write out our progress. */
	if (vdev_rebuild_should_stop(vd)) {
		mutex_enter(&vr->vr_io_lock);
		vr->vr_bytes_inflight -= psize;
		mutex_exit(&vr->vr_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_rebuild_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_rebuild_lock);
	dmu_tx_commit(tx);

	vr->vr_scan_offset[txg & TXG_MASK] = start + size;
	vr->vr_pass_bytes_issued += size;
	vr->vr_rebuild_phys.vrp_bytes_issued += size;

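	/*
	 * Issue the read as a child of spa_txg_zio so spa_sync() will wait
	 * for it to complete before running the vdev_rebuild_update_sync()
	 * task scheduled above. Because checksum verification is disabled
	 * and the I/O is marked ZIO_FLAG_RESILVER, any children with
	 * missing (DTL) data are repaired from the reconstructed result.
	 */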
	zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, &blk,
	    abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
	    ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_RESILVER, NULL));

	return (0);
}

/*
 * Issues rebuild I/Os for all ranges in the provided vr->vr_tree range tree.
 */
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
	vdev_t *vd = vr->vr_top_vdev;
	zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
	zfs_btree_index_t idx;
	int error;

	for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
	    rs = zfs_btree_next(t, &idx, &idx)) {
		uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
		uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;

		/*
		 * zfs_scan_suspend_progress can be set to disable rebuild
		 * progress for testing. See comment in dsl_scan_sync().
		 */
		while (zfs_scan_suspend_progress &&
		    !vdev_rebuild_should_stop(vd)) {
			delay(hz);
		}

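		/*
		 * Note: the per-vdev-ops rebuild_asize implementation is
		 * expected to clamp each chunk to zfs_rebuild_max_segment
		 * while honoring the vdev's allocation constraints (e.g.
		 * whole stripes for dRAID); see those implementations for
		 * the authoritative behavior.
		 */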
		while (size > 0) {
			uint64_t chunk_size;

			/*
			 * Split range into legally-sized logical chunks
			 * given the constraints of the top-level vdev
			 * being rebuilt (dRAID or mirror).
			 */
			ASSERT3P(vd->vdev_ops, !=, NULL);
			chunk_size = vd->vdev_ops->vdev_op_rebuild_asize(vd,
			    start, size, zfs_rebuild_max_segment);

			error = vdev_rebuild_range(vr, start, chunk_size);
			if (error != 0)
				return (error);

			size -= chunk_size;
			start += chunk_size;
		}
	}

	return (0);
}

/*
 * Calculates the estimated capacity which remains to be scanned. Since
 * we traverse the pool in metaslab order, only allocated capacity beyond
 * the vrp_last_offset needs to be considered. All lower offsets must have
 * already been rebuilt and are thus already included in vrp_bytes_scanned.
 */
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t bytes_est = vrp->vrp_bytes_scanned;

	if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
		return;

	for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];

		mutex_enter(&msp->ms_lock);
		bytes_est += metaslab_allocated_space(msp);
		mutex_exit(&msp->ms_lock);
	}

	vrp->vrp_bytes_est = bytes_est;
}

/*
 * Load from disk the top-level vdev's rebuild information.
 */
int
vdev_rebuild_load(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	spa_t *spa = vd->vdev_spa;
	int err = 0;

	mutex_enter(&vd->vdev_rebuild_lock);
	vd->vdev_rebuilding = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
		mutex_exit(&vd->vdev_rebuild_lock);
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(vd->vdev_top == vd);

	err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp);

	/*
	 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
	 * not prevent a pool from being imported. Clear the rebuild
	 * status allowing a new resilver/rebuild to be started.
	 */
	if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
		memset(vrp, 0, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	} else if (err) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return (err);
	}

	vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
	vr->vr_top_vdev = vd;

	mutex_exit(&vd->vdev_rebuild_lock);

	return (0);
}

/*
 * Each scan thread is responsible for rebuilding a top-level vdev. The
 * rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
 */
static __attribute__((noreturn)) void
vdev_rebuild_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	int error = 0;

	/*
	 * If there's a scrub in process, request that it be stopped. This
	 * is not required for a correct rebuild, but we do want rebuilds to
	 * emulate the resilver behavior as much as possible.
	 */
	dsl_pool_t *dsl = spa_get_dsl(spa);
	if (dsl_scan_scrubbing(dsl))
		dsl_scan_cancel(dsl);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT3P(vd->vdev_top, ==, vd);
	ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
	ASSERT(vd->vdev_rebuilding);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
	ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);

	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vr->vr_top_vdev = vd;
	vr->vr_scan_msp = NULL;
	vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	mutex_init(&vr->vr_io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vr->vr_io_cv, NULL, CV_DEFAULT, NULL);

	vr->vr_pass_start_time = gethrtime();
	vr->vr_pass_bytes_scanned = 0;
	vr->vr_pass_bytes_issued = 0;
	vr->vr_pass_bytes_skipped = 0;

	uint64_t update_est_time = gethrtime();
	vdev_rebuild_update_bytes_est(vd, 0);

	clear_rebuild_bytes(vr->vr_top_vdev);

	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * Systematically walk the metaslabs and issue rebuild I/Os for
	 * all ranges in the allocated space map.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];
		vr->vr_scan_msp = msp;

		/*
		 * Calculate the max number of in-flight bytes for top-level
		 * vdev scanning operations (minimum 1MB, maximum 1/2 of
		 * arc_c_max shared by all top-level vdevs). Limits for the
		 * issuing phase are done per top-level vdev and are handled
		 * separately.
		 */
		uint64_t limit = (arc_c_max / 2) / MAX(rvd->vdev_children, 1);
		vr->vr_bytes_inflight_max = MIN(limit, MAX(1ULL << 20,
		    zfs_rebuild_vdev_limit * vd->vdev_children));
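
		/*
		 * For example, with the default zfs_rebuild_vdev_limit of
		 * 64MB and a 10-child top-level vdev this targets 640MB of
		 * in-flight rebuild I/O, subject to the arc_c_max/2 cap
		 * shared equally by all top-level vdevs.
		 */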

		/*
		 * Removal of vdevs from the vdev tree may eliminate the need
		 * for the rebuild, in which case it should be canceled. The
		 * vdev_rebuild_cancel_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (vdev_rebuild_should_cancel(vd)) {
			vd->vdev_rebuild_cancel_wanted = B_TRUE;
			error = EINTR;
			break;
		}

		ASSERT0(range_tree_space(vr->vr_scan_tree));

		/* Disable any new allocations to this metaslab */
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * If there are outstanding allocations wait for them to be
		 * synced. This is needed to ensure all allocated ranges are
		 * on disk and therefore will be rebuilt.
		 */
		for (int j = 0; j < TXG_SIZE; j++) {
			if (range_tree_space(msp->ms_allocating[j])) {
				mutex_exit(&msp->ms_lock);
				mutex_exit(&msp->ms_sync_lock);
				txg_wait_synced(dsl, 0);
				mutex_enter(&msp->ms_sync_lock);
				mutex_enter(&msp->ms_lock);
				break;
			}
		}

		/*
		 * If the metaslab has ever been allocated from, read its
		 * allocated ranges from the space map object into the
		 * vr_scan_tree. Then add inflight / unflushed ranges and
		 * remove inflight / unflushed frees. This is the minimum
		 * range to be rebuilt.
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    vr->vr_scan_tree, SM_ALLOC));

			for (int i = 0; i < TXG_SIZE; i++) {
				ASSERT0(range_tree_space(
				    msp->ms_allocating[i]));
			}

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, vr->vr_scan_tree);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, vr->vr_scan_tree);

			/*
			 * Remove ranges which have already been rebuilt based
			 * on the last offset. This can happen when restarting
			 * a scan after exporting and re-importing the pool.
			 */
			range_tree_clear(vr->vr_scan_tree, 0,
			    vrp->vrp_last_offset);
		}

		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		/*
		 * To provide an accurate estimate re-calculate the estimated
		 * size every 5 minutes to account for recent allocations and
		 * frees made to space maps which have not yet been rebuilt.
		 */
		if (gethrtime() > update_est_time + SEC2NSEC(300)) {
			update_est_time = gethrtime();
			vdev_rebuild_update_bytes_est(vd, i);
		}

		/*
		 * Walk the allocated space map and issue the rebuild I/O.
		 */
		error = vdev_rebuild_ranges(vr);
		range_tree_vacate(vr->vr_scan_tree, NULL, NULL);

		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		metaslab_enable(msp, B_FALSE, B_FALSE);

		if (error != 0)
			break;
	}

	range_tree_destroy(vr->vr_scan_tree);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/* Wait for any remaining rebuild I/O to complete */
	mutex_enter(&vr->vr_io_lock);
	while (vr->vr_bytes_inflight > 0)
		cv_wait(&vr->vr_io_cv, &vr->vr_io_lock);

	mutex_exit(&vr->vr_io_lock);

	mutex_destroy(&vr->vr_io_lock);
	cv_destroy(&vr->vr_io_cv);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (error == 0) {
		/*
		 * After a successful rebuild clear the DTLs of all ranges
		 * which were missing when the rebuild was started. These
		 * ranges must have been rebuilt as a consequence of rebuilding
		 * all allocated space. Note that unlike a scrub or resilver
		 * the rebuild operation will reconstruct data only referenced
		 * by a pool checkpoint. See the dsl_scan_done() comments.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_cancel_wanted) {
		/*
		 * The rebuild operation was canceled. This will occur when
		 * a device participating in the rebuild is detached.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_reset_wanted) {
		/*
		 * Reset the running rebuild without canceling and restarting
		 * it. This will occur when a new device is attached and must
		 * participate in the rebuild.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else {
		/*
		 * The rebuild operation should be suspended. This may occur
		 * when detaching a child vdev or when exporting the pool. The
		 * rebuild is left in the active state so it will be resumed.
		 */
		ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		vd->vdev_rebuilding = B_FALSE;
	}

	dmu_tx_commit(tx);

	vd->vdev_rebuild_thread = NULL;
	mutex_exit(&vd->vdev_rebuild_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	cv_broadcast(&vd->vdev_rebuild_cv);

	thread_exit();
}

/*
 * Returns B_TRUE if any top-level vdev is rebuilding.
 */
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t ret = B_FALSE;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++) {
			ret = vdev_rebuild_active(vd->vdev_child[i]);
			if (ret)
				return (ret);
		}
	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		mutex_exit(&vd->vdev_rebuild_lock);
	}

	return (ret);
}

/*
 * Start a rebuild operation. If the top-level vdev is already actively
 * rebuilding, the running rebuild is restarted from the beginning.
 */
void
vdev_rebuild(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_top == vd);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(!vd->vdev_removing);
	ASSERT(spa_feature_is_enabled(vd->vdev_spa,
	    SPA_FEATURE_DEVICE_REBUILD));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (vd->vdev_rebuilding) {
		ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);

		/*
		 * Signal a running rebuild operation that it should restart
		 * from the beginning because a new device was attached. The
		 * vdev_rebuild_reset_wanted flag is set until the sync task
		 * completes. This may be after the rebuild thread exits.
		 */
		if (!vd->vdev_rebuild_reset_wanted)
			vd->vdev_rebuild_reset_wanted = B_TRUE;
	} else {
		vdev_rebuild_initiate(vd);
	}
	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_restart_impl(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
		    vdev_writeable(vd) && !vd->vdev_rebuilding) {
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_DEVICE_REBUILD));
			vd->vdev_rebuilding = B_TRUE;
			vd->vdev_rebuild_thread = thread_create(NULL, 0,
			    vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
			    maxclsyspri);
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Conditionally restart all of the vdev_rebuild_thread's for a pool. The
 * feature flag must be active and the rebuild in the active state. This
 * cannot be used to start a new rebuild.
 */
void
vdev_rebuild_restart(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_load_thread == curthread);

	vdev_rebuild_restart_impl(spa->spa_root_vdev);
}

/*
 * Stop and wait for all of the vdev_rebuild_thread's associated with the
 * provided vdev tree to be terminated (canceled or stopped).
 */
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_stop_wait(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		ASSERT(vd == vd->vdev_top);

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vd->vdev_rebuild_thread != NULL) {
			vd->vdev_rebuild_exit_wanted = B_TRUE;
			while (vd->vdev_rebuilding) {
				cv_wait(&vd->vdev_rebuild_cv,
				    &vd->vdev_rebuild_lock);
			}
			vd->vdev_rebuild_exit_wanted = B_FALSE;
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Stop all rebuild operations but leave them in the active state so they
 * will be resumed when importing the pool.
 */
void
vdev_rebuild_stop_all(spa_t *spa)
{
	vdev_rebuild_stop_wait(spa->spa_root_vdev);
}

/*
 * Rebuild statistics reported per top-level vdev.
 */
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
	spa_t *spa = tvd->vdev_spa;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
		return (SET_ERROR(ENOTSUP));

	if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
		return (SET_ERROR(EINVAL));

	int error = zap_contains(spa_meta_objset(spa),
	    tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);

	if (error == ENOENT) {
		memset(vrs, 0, sizeof (vdev_rebuild_stat_t));
		vrs->vrs_state = VDEV_REBUILD_NONE;
		error = 0;
	} else if (error == 0) {
		vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&tvd->vdev_rebuild_lock);
		vrs->vrs_state = vrp->vrp_rebuild_state;
		vrs->vrs_start_time = vrp->vrp_start_time;
		vrs->vrs_end_time = vrp->vrp_end_time;
		vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
		vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
		vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
		vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
		vrs->vrs_bytes_est = vrp->vrp_bytes_est;
		vrs->vrs_errors = vrp->vrp_errors;
		vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
		    vr->vr_pass_start_time);
		vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
		vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
		vrs->vrs_pass_bytes_skipped = vr->vr_pass_bytes_skipped;
		mutex_exit(&tvd->vdev_rebuild_lock);
	}

	return (error);
}

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, U64, ZMOD_RW,
	"Max segment size in bytes of rebuild reads");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_vdev_limit, U64, ZMOD_RW,
	"Max bytes in flight per leaf vdev for sequential resilvers");

ZFS_MODULE_PARAM(zfs, zfs_, rebuild_scrub_enabled, INT, ZMOD_RW,
	"Automatically scrub after sequential resilver completes");
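
/*
 * Tuning note (illustrative; paths may vary by platform): on Linux these
 * parameters are exposed under /sys/module/zfs/parameters/, e.g.
 *
 *	echo 0 > /sys/module/zfs/parameters/zfs_rebuild_scrub_enabled
 *
 * would skip the automatic verification scrub after the last sequential
 * resilver completes, which is not recommended for production pools.
 */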