/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 *
 * Copyright (c) 2018, Intel Corporation.
 * Copyright (c) 2020 by Lawrence Livermore National Security, LLC.
 */

#include <sys/vdev_impl.h>
#include <sys/dsl_scan.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_rebuild.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zap.h>

/*
 * This file contains the sequential reconstruction implementation for
 * resilvering.  This form of resilvering is internally referred to as device
 * rebuild to avoid conflating it with the traditional healing reconstruction
 * performed by the dsl scan code.
 *
 * When replacing a device, or scrubbing the pool, ZFS has historically used
 * a process called resilvering which is a form of healing reconstruction.
 * This approach has the advantage that as blocks are read from disk their
 * checksums can be immediately verified and the data repaired.  Unfortunately,
 * it also results in a random IO pattern to the disk even when extra care
 * is taken to sequentialize the IO as much as possible.  This substantially
 * increases the time required to resilver the pool and restore redundancy.
 *
 * For mirrored devices it's possible to implement an alternate sequential
 * reconstruction strategy when resilvering.  Sequential reconstruction
 * behaves like a traditional RAID rebuild and reconstructs a device in LBA
 * order without verifying the checksum.  After this phase completes a second
 * scrub phase is started to verify all of the checksums.  This two phase
 * process will take longer than the healing reconstruction described above.
 * However, it has the advantage that after the first phase completes
 * redundancy has been restored.  At this point the pool can incur another
 * device failure without risking data loss.
 *
 * There are a few noteworthy limitations and other advantages of resilvering
 * using sequential reconstruction vs healing reconstruction.
 *
 * Limitations:
 *
 *   - Only supported for mirror vdev types.  Due to the variable stripe
 *     width used by raidz, sequential reconstruction is not possible.
 *
 *   - Block checksums are not verified during sequential reconstruction.
 *     Similar to traditional RAID the parity/mirror data is reconstructed
 *     but cannot be immediately double checked.  For this reason when the
 *     last active resilver completes the pool is automatically scrubbed.
 *
 *   - Deferred resilvers using sequential reconstruction are not currently
 *     supported.  When adding another vdev to an active top-level resilver
 *     it must be restarted.
 *
 * Advantages:
 *
 *   - Sequential reconstruction is performed in LBA order which may be faster
 *     than healing reconstruction, particularly when using HDDs (or
 *     especially with SMR devices).  Only allocated capacity is resilvered.
 *
 *   - Sequential reconstruction is not constrained by ZFS block boundaries.
 *     This allows it to issue larger IOs to disk which span multiple blocks,
 *     allowing all of these logical blocks to be repaired with a single IO.
 *
 *   - Unlike a healing resilver or scrub which are pool wide operations,
 *     sequential reconstruction is handled by the top-level mirror vdevs.
 *     This allows for it to be started or canceled on a top-level vdev
 *     without impacting any other top-level vdevs in the pool.
 *
 *   - Data only referenced by a pool checkpoint will be repaired because
 *     that space is reflected in the space maps.  This differs from a
 *     healing resilver or scrub which will not repair that data.
 */


/*
 * Maximum number of queued rebuild I/Os per top-level vdev.  The number of
 * concurrent rebuild I/Os issued to the device is controlled by the
 * zfs_vdev_rebuild_min_active and zfs_vdev_rebuild_max_active module
 * options.
 */
unsigned int zfs_rebuild_queue_limit = 20;

/*
 * Size of rebuild reads; defaults to 1MiB and is capped at SPA_MAXBLOCKSIZE.
 */
unsigned long zfs_rebuild_max_segment = 1024 * 1024;

/*
 * Forward declaration of vdev_rebuild_thread(), needed by
 * vdev_rebuild_initiate_sync() and vdev_rebuild_reset_sync().
 */
static void vdev_rebuild_thread(void *arg);

/*
 * Clear the per-vdev rebuild bytes value for a vdev tree.
 */
static void
clear_rebuild_bytes(vdev_t *vd)
{
	vdev_stat_t *vs = &vd->vdev_stat;

	for (uint64_t i = 0; i < vd->vdev_children; i++)
		clear_rebuild_bytes(vd->vdev_child[i]);

	mutex_enter(&vd->vdev_stat_lock);
	vs->vs_rebuild_processed = 0;
	mutex_exit(&vd->vdev_stat_lock);
}

/*
 * Determines whether a vdev_rebuild_thread() should be stopped.
 */
static boolean_t
vdev_rebuild_should_stop(vdev_t *vd)
{
	return (!vdev_writeable(vd) || vd->vdev_removing ||
	    vd->vdev_rebuild_exit_wanted ||
	    vd->vdev_rebuild_cancel_wanted ||
	    vd->vdev_rebuild_reset_wanted);
}

/*
 * Determine if the rebuild should be canceled.  This may happen when all
 * vdevs with MISSING DTLs are detached.
 */
static boolean_t
vdev_rebuild_should_cancel(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	if (!vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * The sync task for updating the on-disk state of a rebuild.  This is
 * scheduled by vdev_rebuild_range().
 */
static void
vdev_rebuild_update_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t txg = dmu_tx_get_txg(tx);

	mutex_enter(&vd->vdev_rebuild_lock);

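	/*
	 * A non-zero per-txg slot records the furthest offset issued while
	 * this txg was open.  Persist it as the on-disk resume point and
	 * clear the slot so it may be reused by a future txg.
	 */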
	if (vr->vr_scan_offset[txg & TXG_MASK] > 0) {
		vrp->vrp_last_offset = vr->vr_scan_offset[txg & TXG_MASK];
		vr->vr_scan_offset[txg & TXG_MASK] = 0;
	}

	vrp->vrp_scan_time_ms = vr->vr_prev_scan_time_ms +
	    NSEC2MSEC(gethrtime() - vr->vr_pass_start_time);

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Initialize the on-disk state for a new rebuild and start the rebuild
 * thread.
 */
static void
vdev_rebuild_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_rebuilding);

	spa_feature_incr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	mutex_enter(&vd->vdev_rebuild_lock);
	bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	vrp->vrp_rebuild_state = VDEV_REBUILD_ACTIVE;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_start_time = gethrestime_sec();
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/*
	 * Rebuilds are currently only used when replacing a device, in which
	 * case there must be DTL_MISSING entries.  In the future, we could
	 * allow rebuilds to be used in a way similar to a scrub.  This would
	 * be useful because it would allow us to rebuild the space used by
	 * pool checkpoints.
	 */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu started",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);
	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_log_notify(spa_t *spa, vdev_t *vd, char *name)
{
	nvlist_t *aux = fnvlist_alloc();

	fnvlist_add_string(aux, ZFS_EV_RESILVER_TYPE, "sequential");
	spa_event_notify(spa, vd, aux, name);
	nvlist_free(aux);
}

/*
 * Called to request that a new rebuild be started.  The feature will remain
 * active for the duration of the rebuild, then revert to the enabled state.
 */
static void
vdev_rebuild_initiate(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(vd->vdev_top == vd);
	ASSERT(MUTEX_HELD(&vd->vdev_rebuild_lock));
	ASSERT(!vd->vdev_rebuilding);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	vd->vdev_rebuilding = B_TRUE;

	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_rebuild_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, tx);
	dmu_tx_commit(tx);

	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_START);
}

/*
 * Update the on-disk state to completed when a rebuild finishes.
 */
static void
vdev_rebuild_complete_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_COMPLETE;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

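	/*
	 * Re-assess the DTLs to reflect that the missing ranges have been
	 * rebuilt, then release the feature reference taken when the
	 * rebuild was initiated.
	 */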
	vdev_dtl_reassess(vd, tx->tx_txg, vrp->vrp_max_txg, B_TRUE, B_TRUE);
	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu complete",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	/* Handles detaching of spares */
	spa_async_request(spa, SPA_ASYNC_REBUILD_DONE);
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Update the on-disk state to canceled when a rebuild finishes.
 */
static void
vdev_rebuild_cancel_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);
	vrp->vrp_rebuild_state = VDEV_REBUILD_CANCELED;
	vrp->vrp_end_time = gethrestime_sec();

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_feature_decr(vd->vdev_spa, SPA_FEATURE_DEVICE_REBUILD, tx);

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu canceled",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);
	vdev_rebuild_log_notify(spa, vd, ESC_ZFS_RESILVER_FINISH);

	vd->vdev_rebuild_cancel_wanted = B_FALSE;
	vd->vdev_rebuilding = B_FALSE;
	mutex_exit(&vd->vdev_rebuild_lock);

	spa_notify_waiters(spa);
	cv_broadcast(&vd->vdev_rebuild_cv);
}

/*
 * Resets the progress of a running rebuild.  This will occur when a new
 * vdev is added to the rebuild.
 */
static void
vdev_rebuild_reset_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
	ASSERT3P(vd->vdev_rebuild_thread, ==, NULL);

	vrp->vrp_last_offset = 0;
	vrp->vrp_min_txg = 0;
	vrp->vrp_max_txg = dmu_tx_get_txg(tx);
	vrp->vrp_bytes_scanned = 0;
	vrp->vrp_bytes_issued = 0;
	vrp->vrp_bytes_rebuilt = 0;
	vrp->vrp_bytes_est = 0;
	vrp->vrp_scan_time_ms = 0;
	vr->vr_prev_scan_time_ms = 0;

	/* See vdev_rebuild_initiate_sync comment */
	VERIFY(vdev_resilver_needed(vd, &vrp->vrp_min_txg, &vrp->vrp_max_txg));

	VERIFY0(zap_update(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp, tx));

	spa_history_log_internal(spa, "rebuild", tx,
	    "vdev_id=%llu vdev_guid=%llu reset",
	    (u_longlong_t)vd->vdev_id, (u_longlong_t)vd->vdev_guid);

	vd->vdev_rebuild_reset_wanted = B_FALSE;
	ASSERT(vd->vdev_rebuilding);

	vd->vdev_rebuild_thread = thread_create(NULL, 0,
	    vdev_rebuild_thread, vd, 0, &p0, TS_RUN, maxclsyspri);

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * Clear the last rebuild status.
 */
void
vdev_rebuild_clear_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	objset_t *mos = spa_meta_objset(spa);

	mutex_enter(&vd->vdev_rebuild_lock);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD) ||
	    vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return;
	}

	clear_rebuild_bytes(vd);
	bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);

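	/*
	 * Only rewrite an existing ZAP entry with the zeroed state;
	 * clearing never creates the entry.
	 */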
	if (vd->vdev_top_zap != 0 && zap_contains(mos, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS) == 0) {
		VERIFY0(zap_update(mos, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
		    REBUILD_PHYS_ENTRIES, vrp, tx));
	}

	mutex_exit(&vd->vdev_rebuild_lock);
}

/*
 * The zio_done_func_t callback for each rebuild I/O issued.  It's responsible
 * for updating the rebuild stats and limiting the number of in flight I/Os.
 */
static void
vdev_rebuild_cb(zio_t *zio)
{
	vdev_rebuild_t *vr = zio->io_private;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vdev_t *vd = vr->vr_top_vdev;

	mutex_enter(&vd->vdev_rebuild_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the top-level vdev was unavailable.
		 * Attempt to roll back to the last completed offset, in order
		 * to resume from the correct location if the pool is resumed.
		 * (This works because spa_sync waits on spa_txg_zio before
		 * it runs sync tasks.)
		 */
		uint64_t *off = &vr->vr_scan_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else if (zio->io_error) {
		vrp->vrp_errors++;
	}

	abd_free(zio->io_abd);

	ASSERT3U(vd->vdev_rebuild_inflight, >, 0);
	vd->vdev_rebuild_inflight--;
	cv_broadcast(&vd->vdev_rebuild_io_cv);
	mutex_exit(&vd->vdev_rebuild_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/*
 * Rebuild the data in this range by constructing a special dummy block
 * pointer for the given range.  It has no relation to any existing blocks
 * in the pool.  But by disabling checksum verification and issuing a scrub
 * I/O, mirrored vdevs will replicate the block using any available mirror
 * leaf vdevs.
 */
static void
vdev_rebuild_rebuild_block(vdev_rebuild_t *vr, uint64_t start, uint64_t asize,
    uint64_t txg)
{
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;
	uint64_t psize = asize;
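	/*
	 * Mirror-type vdevs apply no parity transformation, so the physical
	 * size of the range equals its allocated size and the whole range
	 * can be described by a single block pointer.
	 */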

	ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

	blkptr_t blk, *bp = &blk;
	BP_ZERO(bp);

	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
	DVA_SET_OFFSET(&bp->blk_dva[0], start);
	DVA_SET_GANG(&bp->blk_dva[0], 0);
	DVA_SET_ASIZE(&bp->blk_dva[0], asize);

	BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(bp, psize);
	BP_SET_PSIZE(bp, psize);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(bp, DMU_OT_NONE);
	BP_SET_LEVEL(bp, 0);
	BP_SET_DEDUP(bp, 0);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	/*
	 * We increment the issued bytes by the asize rather than the psize
	 * so the scanned and issued bytes may be directly compared.  This
	 * is consistent with the scrub/resilver issued reporting.
	 */
	vr->vr_pass_bytes_issued += asize;
	vr->vr_rebuild_phys.vrp_bytes_issued += asize;

	zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa, bp,
	    abd_alloc(psize, B_FALSE), psize, vdev_rebuild_cb, vr,
	    ZIO_PRIORITY_REBUILD, ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_RESILVER, NULL));
}

/*
 * Issues a rebuild I/O and takes care of rate limiting the number of queued
 * rebuild I/Os.  The provided start and size must be properly aligned for the
 * top-level vdev type being rebuilt.
 */
static int
vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
{
	uint64_t ms_id __maybe_unused = vr->vr_scan_msp->ms_id;
	vdev_t *vd = vr->vr_top_vdev;
	spa_t *spa = vd->vdev_spa;

	ASSERT3U(ms_id, ==, start >> vd->vdev_ms_shift);
	ASSERT3U(ms_id, ==, (start + size - 1) >> vd->vdev_ms_shift);

	vr->vr_pass_bytes_scanned += size;
	vr->vr_rebuild_phys.vrp_bytes_scanned += size;

	mutex_enter(&vd->vdev_rebuild_io_lock);

	/* Limit in flight rebuild I/Os */
	while (vd->vdev_rebuild_inflight >= zfs_rebuild_queue_limit)
		cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);

	vd->vdev_rebuild_inflight++;
	mutex_exit(&vd->vdev_rebuild_io_lock);

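	/*
	 * Bind this I/O to an open txg.  spa_sync() waits on the txg's
	 * spa_txg_zio before running sync tasks, so the resume offset
	 * written out by vdev_rebuild_update_sync() can never get ahead
	 * of the rebuild I/O issued in that txg.
	 */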
	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	/* This is the first I/O for this txg. */
	if (vr->vr_scan_offset[txg & TXG_MASK] == 0) {
		vr->vr_scan_offset[txg & TXG_MASK] = start;
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_rebuild_update_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	}

	/* When exiting, write out our progress. */
	if (vdev_rebuild_should_stop(vd)) {
		mutex_enter(&vd->vdev_rebuild_io_lock);
		vd->vdev_rebuild_inflight--;
		mutex_exit(&vd->vdev_rebuild_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_rebuild_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_rebuild_lock);

	vr->vr_scan_offset[txg & TXG_MASK] = start + size;
	vdev_rebuild_rebuild_block(vr, start, size, txg);

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Split range into legally-sized logical chunks given the constraints of the
 * top-level mirror vdev type.
 */
static uint64_t
vdev_rebuild_chunk_size(vdev_t *vd, uint64_t start, uint64_t size)
{
	uint64_t chunk_size, max_asize, max_segment;

	ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
	    vd->vdev_ops == &vdev_replacing_ops ||
	    vd->vdev_ops == &vdev_spare_ops);

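	/*
	 * Round the tunable read size up to a whole multiple of the device
	 * sector size (1 << ashift), then cap the segment at the largest
	 * supported block size.
	 */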
	max_segment = MIN(P2ROUNDUP(zfs_rebuild_max_segment,
	    1 << vd->vdev_ashift), SPA_MAXBLOCKSIZE);
	max_asize = vdev_psize_to_asize(vd, max_segment);
	chunk_size = MIN(size, max_asize);

	return (chunk_size);
}

/*
 * Issues rebuild I/Os for all ranges in the provided vr->vr_scan_tree range
 * tree.
 */
static int
vdev_rebuild_ranges(vdev_rebuild_t *vr)
{
	vdev_t *vd = vr->vr_top_vdev;
	zfs_btree_t *t = &vr->vr_scan_tree->rt_root;
	zfs_btree_index_t idx;
	int error;

	for (range_seg_t *rs = zfs_btree_first(t, &idx); rs != NULL;
	    rs = zfs_btree_next(t, &idx, &idx)) {
		uint64_t start = rs_get_start(rs, vr->vr_scan_tree);
		uint64_t size = rs_get_end(rs, vr->vr_scan_tree) - start;

		/*
		 * zfs_scan_suspend_progress can be set to disable rebuild
		 * progress for testing.  See comment in dsl_scan_sync().
		 */
		while (zfs_scan_suspend_progress &&
		    !vdev_rebuild_should_stop(vd)) {
			delay(hz);
		}

		while (size > 0) {
			uint64_t chunk_size;

			chunk_size = vdev_rebuild_chunk_size(vd, start, size);

			error = vdev_rebuild_range(vr, start, chunk_size);
			if (error != 0)
				return (error);

			size -= chunk_size;
			start += chunk_size;
		}
	}

	return (0);
}

/*
 * Calculates the estimated capacity which remains to be scanned.  Since
 * we traverse the pool in metaslab order, only allocated capacity beyond
 * the vrp_last_offset need be considered.  All lower offsets must have
 * already been rebuilt and are thus already included in vrp_bytes_scanned.
 */
static void
vdev_rebuild_update_bytes_est(vdev_t *vd, uint64_t ms_id)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	uint64_t bytes_est = vrp->vrp_bytes_scanned;

	if (vrp->vrp_last_offset < vd->vdev_ms[ms_id]->ms_start)
		return;

	for (uint64_t i = ms_id; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];

		mutex_enter(&msp->ms_lock);
		bytes_est += metaslab_allocated_space(msp);
		mutex_exit(&msp->ms_lock);
	}

	vrp->vrp_bytes_est = bytes_est;
}

/*
 * Load from disk the top-level vdev's rebuild information.
 */
int
vdev_rebuild_load(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	spa_t *spa = vd->vdev_spa;
	int err = 0;

	mutex_enter(&vd->vdev_rebuild_lock);
	vd->vdev_rebuilding = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD)) {
		bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
		mutex_exit(&vd->vdev_rebuild_lock);
		return (SET_ERROR(ENOTSUP));
	}

	ASSERT(vd->vdev_top == vd);

	err = zap_lookup(spa->spa_meta_objset, vd->vdev_top_zap,
	    VDEV_TOP_ZAP_VDEV_REBUILD_PHYS, sizeof (uint64_t),
	    REBUILD_PHYS_ENTRIES, vrp);

	/*
	 * A missing or damaged VDEV_TOP_ZAP_VDEV_REBUILD_PHYS should
	 * not prevent a pool from being imported.  Clear the rebuild
	 * status allowing a new resilver/rebuild to be started.
	 */
	if (err == ENOENT || err == EOVERFLOW || err == ECKSUM) {
		bzero(vrp, sizeof (uint64_t) * REBUILD_PHYS_ENTRIES);
	} else if (err) {
		mutex_exit(&vd->vdev_rebuild_lock);
		return (err);
	}

	vr->vr_prev_scan_time_ms = vrp->vrp_scan_time_ms;
	vr->vr_top_vdev = vd;

	mutex_exit(&vd->vdev_rebuild_lock);

	return (0);
}

/*
 * Each scan thread is responsible for rebuilding a top-level vdev.  The
 * rebuild progress is tracked on-disk in VDEV_TOP_ZAP_VDEV_REBUILD_PHYS.
 */
static void
vdev_rebuild_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	/*
	 * If there's a scrub in progress, request that it be stopped.  This
	 * is not required for a correct rebuild, but we do want rebuilds to
	 * emulate the resilver behavior as much as possible.
	 */
	dsl_pool_t *dsl = spa_get_dsl(spa);
	if (dsl_scan_scrubbing(dsl))
		dsl_scan_cancel(dsl);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	mutex_enter(&vd->vdev_rebuild_lock);

	ASSERT3P(vd->vdev_top, ==, vd);
	ASSERT3P(vd->vdev_rebuild_thread, !=, NULL);
	ASSERT(vd->vdev_rebuilding);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REBUILD));
	ASSERT3B(vd->vdev_rebuild_cancel_wanted, ==, B_FALSE);
	ASSERT3B(vd->vdev_rebuild_reset_wanted, ==, B_FALSE);

	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;
	vr->vr_top_vdev = vd;
	vr->vr_scan_msp = NULL;
	vr->vr_scan_tree = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
	vr->vr_pass_start_time = gethrtime();
	vr->vr_pass_bytes_scanned = 0;
	vr->vr_pass_bytes_issued = 0;

	uint64_t update_est_time = gethrtime();
	vdev_rebuild_update_bytes_est(vd, 0);

	clear_rebuild_bytes(vr->vr_top_vdev);

	mutex_exit(&vd->vdev_rebuild_lock);

	/*
	 * Systematically walk the metaslabs and issue rebuild I/Os for
	 * all ranges in the allocated space map.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_ms[i];
		vr->vr_scan_msp = msp;

		/*
		 * Removal of vdevs from the vdev tree may eliminate the need
		 * for the rebuild, in which case it should be canceled.  The
		 * vdev_rebuild_cancel_wanted flag is set until the sync task
		 * completes.  This may be after the rebuild thread exits.
		 */
		if (vdev_rebuild_should_cancel(vd)) {
			vd->vdev_rebuild_cancel_wanted = B_TRUE;
			error = EINTR;
			break;
		}

		ASSERT0(range_tree_space(vr->vr_scan_tree));

		/*
		 * Disable any new allocations to this metaslab and wait
		 * for any writes inflight to complete.  This is needed to
		 * ensure all allocated ranges are rebuilt.
		 */
		metaslab_disable(msp);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
		txg_wait_synced(dsl, 0);

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * When a metaslab has been allocated from, read its allocated
		 * ranges from the space map object into the vr_scan_tree.
		 * Then add inflight / unflushed ranges and remove inflight /
		 * unflushed frees.  This is the minimum range to be rebuilt.
		 */
		if (msp->ms_sm != NULL) {
			VERIFY0(space_map_load(msp->ms_sm,
			    vr->vr_scan_tree, SM_ALLOC));

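			/*
			 * With the metaslab disabled and a txg synced,
			 * no per-txg allocations can remain pending
			 * against it.
			 */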
			for (int i = 0; i < TXG_SIZE; i++) {
				ASSERT0(range_tree_space(
				    msp->ms_allocating[i]));
			}

			range_tree_walk(msp->ms_unflushed_allocs,
			    range_tree_add, vr->vr_scan_tree);
			range_tree_walk(msp->ms_unflushed_frees,
			    range_tree_remove, vr->vr_scan_tree);

			/*
			 * Remove ranges which have already been rebuilt based
			 * on the last offset.  This can happen when restarting
			 * a scan after exporting and re-importing the pool.
			 */
			range_tree_clear(vr->vr_scan_tree, 0,
			    vrp->vrp_last_offset);
		}

		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		/*
		 * To provide an accurate estimate, re-calculate the estimated
		 * size every 5 minutes to account for recent allocations and
		 * frees made to space maps which have not yet been rebuilt.
		 */
		if (gethrtime() > update_est_time + SEC2NSEC(300)) {
			update_est_time = gethrtime();
			vdev_rebuild_update_bytes_est(vd, i);
		}

		/*
		 * Walk the allocated space map and issue the rebuild I/O.
		 */
		error = vdev_rebuild_ranges(vr);
		range_tree_vacate(vr->vr_scan_tree, NULL, NULL);

		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		metaslab_enable(msp, B_FALSE, B_FALSE);

		if (error != 0)
			break;
	}

	range_tree_destroy(vr->vr_scan_tree);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/* Wait for any remaining rebuild I/O to complete */
	mutex_enter(&vd->vdev_rebuild_io_lock);
	while (vd->vdev_rebuild_inflight > 0)
		cv_wait(&vd->vdev_rebuild_io_cv, &vd->vdev_rebuild_io_lock);

	mutex_exit(&vd->vdev_rebuild_io_lock);

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	dsl_pool_t *dp = spa_get_dsl(spa);
	dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (error == 0) {
		/*
		 * After a successful rebuild clear the DTLs of all ranges
		 * which were missing when the rebuild was started.  These
		 * ranges must have been rebuilt as a consequence of rebuilding
		 * all allocated space.  Note that unlike a scrub or resilver
		 * the rebuild operation will reconstruct data only referenced
		 * by a pool checkpoint.  See the dsl_scan_done() comments.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_complete_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_cancel_wanted) {
		/*
		 * The rebuild operation was canceled.  This will occur when
		 * a device participating in the rebuild is detached.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_cancel_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else if (vd->vdev_rebuild_reset_wanted) {
		/*
		 * Reset the running rebuild without canceling and restarting
		 * it.  This will occur when a new device is attached and must
		 * participate in the rebuild.
		 */
		dsl_sync_task_nowait(dp, vdev_rebuild_reset_sync,
		    (void *)(uintptr_t)vd->vdev_id, tx);
	} else {
		/*
		 * The rebuild operation should be suspended.  This may occur
		 * when detaching a child vdev or when exporting the pool.  The
		 * rebuild is left in the active state so it will be resumed.
		 */
		ASSERT(vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		vd->vdev_rebuilding = B_FALSE;
	}

	dmu_tx_commit(tx);

	vd->vdev_rebuild_thread = NULL;
	mutex_exit(&vd->vdev_rebuild_lock);
	spa_config_exit(spa, SCL_CONFIG, FTAG);

	cv_broadcast(&vd->vdev_rebuild_cv);

	thread_exit();
}

/*
 * Returns B_TRUE if any top-level vdev is rebuilding.
 */
boolean_t
vdev_rebuild_active(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;
	boolean_t ret = B_FALSE;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++) {
			ret = vdev_rebuild_active(vd->vdev_child[i]);
			if (ret)
				return (ret);
		}
	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		ret = (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE);
		mutex_exit(&vd->vdev_rebuild_lock);
	}

	return (ret);
}

/*
 * Start a rebuild operation.  A rebuild which is already active on the
 * top-level vdev is restarted from the beginning.
 */
void
vdev_rebuild(vdev_t *vd)
{
	vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
	vdev_rebuild_phys_t *vrp __maybe_unused = &vr->vr_rebuild_phys;

	ASSERT(vd->vdev_top == vd);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(!vd->vdev_removing);
	ASSERT(spa_feature_is_enabled(vd->vdev_spa,
	    SPA_FEATURE_DEVICE_REBUILD));

	mutex_enter(&vd->vdev_rebuild_lock);
	if (vd->vdev_rebuilding) {
		ASSERT3U(vrp->vrp_rebuild_state, ==, VDEV_REBUILD_ACTIVE);

		/*
		 * Signal a running rebuild operation that it should restart
		 * from the beginning because a new device was attached.  The
		 * vdev_rebuild_reset_wanted flag is set until the sync task
		 * completes.  This may be after the rebuild thread exits.
		 */
		if (!vd->vdev_rebuild_reset_wanted)
			vd->vdev_rebuild_reset_wanted = B_TRUE;
	} else {
		vdev_rebuild_initiate(vd);
	}
	mutex_exit(&vd->vdev_rebuild_lock);
}

static void
vdev_rebuild_restart_impl(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_restart_impl(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		vdev_rebuild_t *vr = &vd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vrp->vrp_rebuild_state == VDEV_REBUILD_ACTIVE &&
		    vdev_writeable(vd) && !vd->vdev_rebuilding) {
			ASSERT(spa_feature_is_active(spa,
			    SPA_FEATURE_DEVICE_REBUILD));
			vd->vdev_rebuilding = B_TRUE;
			vd->vdev_rebuild_thread = thread_create(NULL, 0,
			    vdev_rebuild_thread, vd, 0, &p0, TS_RUN,
			    maxclsyspri);
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Conditionally restart all of the vdev_rebuild_thread() instances for a
 * pool.  The feature flag must be active and the rebuild in the active
 * state.  This cannot be used to start a new rebuild.
 */
void
vdev_rebuild_restart(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	vdev_rebuild_restart_impl(spa->spa_root_vdev);
}

/*
 * Stop and wait for all of the vdev_rebuild_thread() instances associated
 * with the provided vdev tree to be terminated (canceled or stopped).
 */
void
vdev_rebuild_stop_wait(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (vd == spa->spa_root_vdev) {
		for (uint64_t i = 0; i < vd->vdev_children; i++)
			vdev_rebuild_stop_wait(vd->vdev_child[i]);

	} else if (vd->vdev_top_zap != 0) {
		ASSERT(vd == vd->vdev_top);

		mutex_enter(&vd->vdev_rebuild_lock);
		if (vd->vdev_rebuild_thread != NULL) {
			vd->vdev_rebuild_exit_wanted = B_TRUE;
			while (vd->vdev_rebuilding) {
				cv_wait(&vd->vdev_rebuild_cv,
				    &vd->vdev_rebuild_lock);
			}
			vd->vdev_rebuild_exit_wanted = B_FALSE;
		}
		mutex_exit(&vd->vdev_rebuild_lock);
	}
}

/*
 * Stop all rebuild operations but leave them in the active state so they
 * will be resumed when importing the pool.
 */
void
vdev_rebuild_stop_all(spa_t *spa)
{
	vdev_rebuild_stop_wait(spa->spa_root_vdev);
}

/*
 * Rebuild statistics reported per top-level vdev.
 */
int
vdev_rebuild_get_stats(vdev_t *tvd, vdev_rebuild_stat_t *vrs)
{
	spa_t *spa = tvd->vdev_spa;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
		return (SET_ERROR(ENOTSUP));

	if (tvd != tvd->vdev_top || tvd->vdev_top_zap == 0)
		return (SET_ERROR(EINVAL));

	int error = zap_contains(spa_meta_objset(spa),
	    tvd->vdev_top_zap, VDEV_TOP_ZAP_VDEV_REBUILD_PHYS);

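	/*
	 * A missing ZAP entry indicates no rebuild has ever been recorded
	 * for this top-level vdev, so report VDEV_REBUILD_NONE.
	 */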
	if (error == ENOENT) {
		bzero(vrs, sizeof (vdev_rebuild_stat_t));
		vrs->vrs_state = VDEV_REBUILD_NONE;
		error = 0;
	} else if (error == 0) {
		vdev_rebuild_t *vr = &tvd->vdev_rebuild_config;
		vdev_rebuild_phys_t *vrp = &vr->vr_rebuild_phys;

		mutex_enter(&tvd->vdev_rebuild_lock);
		vrs->vrs_state = vrp->vrp_rebuild_state;
		vrs->vrs_start_time = vrp->vrp_start_time;
		vrs->vrs_end_time = vrp->vrp_end_time;
		vrs->vrs_scan_time_ms = vrp->vrp_scan_time_ms;
		vrs->vrs_bytes_scanned = vrp->vrp_bytes_scanned;
		vrs->vrs_bytes_issued = vrp->vrp_bytes_issued;
		vrs->vrs_bytes_rebuilt = vrp->vrp_bytes_rebuilt;
		vrs->vrs_bytes_est = vrp->vrp_bytes_est;
		vrs->vrs_errors = vrp->vrp_errors;
		vrs->vrs_pass_time_ms = NSEC2MSEC(gethrtime() -
		    vr->vr_pass_start_time);
		vrs->vrs_pass_bytes_scanned = vr->vr_pass_bytes_scanned;
		vrs->vrs_pass_bytes_issued = vr->vr_pass_bytes_issued;
		mutex_exit(&tvd->vdev_rebuild_lock);
	}

	return (error);
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, rebuild_max_segment, ULONG, ZMOD_RW,
	"Max segment size in bytes of rebuild reads");
/* END CSTYLED */