// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016, 2024 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>

/*
 * Value that is written to disk during initialization.
 */
static uint64_t zfs_initialize_value = 0xdeadbeefdeadbeeeULL;

/* maximum number of I/Os outstanding per leaf vdev */
static const int zfs_initialize_limit = 1;

/* size of initializing writes; default 1 MiB, see zfs_remove_max_segment */
static uint64_t zfs_initialize_chunk_size = 1024 * 1024;

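/*
 * Check whether the initialize thread should terminate early: an explicit
 * stop request, an unwriteable or detached vdev, or a top-level vdev that
 * is being removed or raidz-expanded.
 */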
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing ||
	    vd->vdev_top->vdev_rz_expanding);
}

static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing ||
	    !vdev_is_concrete(vd) || vd->vdev_top->vdev_rz_expanding)
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}
	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}

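/*
 * Sync task which removes the on-disk initialize state from the leaf vdev
 * ZAP, used when the state machine transitions to VDEV_INITIALIZE_NONE.
 */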
static void
vdev_initialize_zap_remove_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t guid = *(uint64_t *)arg;

	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	ASSERT3S(vd->vdev_initialize_state, ==, VDEV_INITIALIZE_NONE);
	ASSERT3U(vd->vdev_leaf_zap, !=, 0);

	vd->vdev_initialize_last_offset = 0;
	vd->vdev_initialize_action_time = 0;

	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	int error;

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, tx);
	VERIFY(error == 0 || error == ENOENT);

	error = zap_remove(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, tx);
	VERIFY(error == 0 || error == ENOENT);
}

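/*
 * Transition the initialize state machine to new_state, persist the change
 * with a sync task, and record it in the pool history log.
 */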
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid; it will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * Update the action time unless we are resuming from a suspension,
	 * in which case we preserve the original time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}

	vdev_initializing_state_t old_state = vd->vdev_initialize_state;
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));

	if (new_state != VDEV_INITIALIZE_NONE) {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_remove_sync, guid, tx);
	}

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		if (old_state == VDEV_INITIALIZE_ACTIVE ||
		    old_state == VDEV_INITIALIZE_SUSPENDED)
			spa_history_log_internal(spa, "initialize", tx,
			    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_NONE:
		spa_history_log_internal(spa, "uninitialize", tx,
		    "vdev=%s", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);

	if (new_state != VDEV_INITIALIZE_ACTIVE)
		spa_notify_waiters(spa);
}

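/*
 * Completion callback for each initializing write ZIO; updates progress
 * counters and wakes any thread waiting on the in-flight I/O limit.
 */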
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}

/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, DMU_TX_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

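	/*
	 * vdev_initialize_offset is a TXG_SIZE array; indexing by
	 * txg & TXG_MASK selects the slot belonging to this open txg.
	 * A zero entry means no initializing write has been issued in
	 * this txg yet.
	 */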
	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}

/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	(void) unused;

	ASSERT0(len % sizeof (uint64_t));
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
	return (0);
}

static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}

static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}

static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	zfs_range_tree_t *rt = vd->vdev_initialize_tree;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;

	for (zfs_range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
	    rs = zfs_btree_next(bt, &where, &where)) {
		uint64_t size = zfs_rs_get_end(rs, rt) -
		    zfs_rs_get_start(rs, rt);

		/* Split range into legally-sized physical chunks */
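		/*
		 * writes_required is a ceiling division: e.g. a 2.5 MiB
		 * segment with the default 1 MiB chunk size needs 3 writes.
		 */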
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + zfs_rs_get_start(rs, rt) +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}
	return (0);
}

static void
vdev_initialize_xlate_last_rs_end(void *arg, zfs_range_seg64_t *physical_rs)
{
	uint64_t *last_rs_end = (uint64_t *)arg;

	if (physical_rs->rs_end > *last_rs_end)
		*last_rs_end = physical_rs->rs_end;
}

static void
vdev_initialize_xlate_progress(void *arg, zfs_range_seg64_t *physical_rs)
{
	vdev_t *vd = (vdev_t *)arg;

	uint64_t size = physical_rs->rs_end - physical_rs->rs_start;
	vd->vdev_initialize_bytes_est += size;

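	/*
	 * A segment entirely below the last offset is fully initialized;
	 * one that straddles it counts only up to that offset.
	 */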
	if (vd->vdev_initialize_last_offset > physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done += size;
	} else if (vd->vdev_initialize_last_offset > physical_rs->rs_start &&
	    vd->vdev_initialize_last_offset < physical_rs->rs_end) {
		vd->vdev_initialize_bytes_done +=
		    vd->vdev_initialize_last_offset - physical_rs->rs_start;
	}
}

static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

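		/*
		 * Approximate this leaf's share of the metaslab's free
		 * space; raidz and draid spread each metaslab across all
		 * of their child disks (vdev_get_ndisks), while other
		 * vdev types report a single disk.
		 */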
		uint64_t ms_free = (msp->ms_size -
		    metaslab_allocated_space(msp)) /
		    vdev_get_ndisks(vd->vdev_top);

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		zfs_range_seg64_t logical_rs, physical_rs, remain_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;

		/* Metaslab space after this offset has not been initialized */
		vdev_xlate(vd, &logical_rs, &physical_rs, &remain_rs);
		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/* Metaslab space before this offset has been initialized */
		uint64_t last_rs_end = physical_rs.rs_end;
		if (!vdev_xlate_is_empty(&remain_rs)) {
			vdev_xlate_walk(vd, &remain_rs,
			    vdev_initialize_xlate_last_rs_end, &last_rs_end);
		}

		if (vd->vdev_initialize_last_offset > last_rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		VERIFY0(metaslab_load(msp));

		zfs_btree_index_t where;
		zfs_range_tree_t *rt = msp->ms_allocatable;
		for (zfs_range_seg_t *rs =
		    zfs_btree_first(&rt->rt_root, &where); rs;
		    rs = zfs_btree_next(&rt->rt_root, &where,
		    &where)) {
			logical_rs.rs_start = zfs_rs_get_start(rs, rt);
			logical_rs.rs_end = zfs_rs_get_end(rs, rt);

			vdev_xlate_walk(vd, &logical_rs,
			    vdev_initialize_xlate_progress, vd);
		}
		mutex_exit(&msp->ms_lock);
	}
}

static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}

static void
vdev_initialize_xlate_range_add(void *arg, zfs_range_seg64_t *physical_rs)
{
	vdev_t *vd = arg;

	/* Only add segments that we have not visited yet */
	if (physical_rs->rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs->rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs->rs_start,
		    (u_longlong_t)physical_rs->rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs->rs_end);
		ASSERT3U(physical_rs->rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs->rs_start = vd->vdev_initialize_last_offset;
	}

	ASSERT3U(physical_rs->rs_end, >, physical_rs->rs_start);

	zfs_range_tree_add(vd->vdev_initialize_tree, physical_rs->rs_start,
	    physical_rs->rs_end - physical_rs->rs_start);
}

/*
 * Convert the logical range into physical ranges and add them to the
 * range tree of segments to be initialized.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	zfs_range_seg64_t logical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate_walk(vd, &logical_rs, vdev_initialize_xlate_range_add, arg);
}

static __attribute__((noreturn)) void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = zfs_range_tree_create(NULL, ZFS_RANGE_SEG64,
	    NULL, 0, 0);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		boolean_t unload_when_done = B_FALSE;

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		spa_config_exit(spa, SCL_CONFIG, FTAG);
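		/*
		 * Disable the metaslab so that no new allocations land in
		 * it while we capture its free ranges and issue the
		 * initializing writes.
		 */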
		metaslab_disable(msp);
		mutex_enter(&msp->ms_lock);
		if (!msp->ms_loaded && !msp->ms_loading)
			unload_when_done = B_TRUE;
		VERIFY0(metaslab_load(msp));

		zfs_range_tree_walk(msp->ms_allocatable,
		    vdev_initialize_range_add, vd);
		mutex_exit(&msp->ms_lock);

		error = vdev_initialize_ranges(vd, deadbeef);
		metaslab_enable(msp, B_TRUE, unload_when_done);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		zfs_range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	zfs_range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted) {
		if (vdev_writeable(vd)) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_COMPLETE);
		} else if (vd->vdev_faulted) {
			vdev_initialize_change_state(vd,
			    VDEV_INITIALIZE_CANCELED);
		}
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);

	thread_exit();
}

/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);
	ASSERT(!vd->vdev_top->vdev_rz_expanding);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}

/*
 * Uninitializes a device. Caller must hold vdev_initialize_lock.
 * Device must be a leaf and not already be initializing.
 */
void
vdev_uninitialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_NONE);
}

/*
 * Wait for the initialize thread to be terminated (cancelled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}

/*
 * Wait for vdev initialize threads which were listed to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
	(void) spa;
	vdev_t *vd;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	while ((vd = list_remove_head(vd_list)) != NULL) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop_wait_impl(vd);
		mutex_exit(&vd->vdev_initialize_lock);
	}
}

/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state.  For blocking behavior pass NULL for vd_list.  Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list.  Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit.  The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;

	if (vd_list == NULL) {
		vdev_initialize_stop_wait_impl(vd);
	} else {
		ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
		    vd->vdev_spa->spa_export_thread == curthread);
		list_insert_tail(vd_list, vd);
	}
}

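/*
 * Recursively stop initialization on every concrete leaf beneath vd,
 * collecting the stopped vdevs in vd_list when one is provided.
 */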
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state, vd_list);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
		    vd_list);
	}
}

/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	list_t vd_list;

	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa->spa_export_thread == curthread);

	list_create(&vd_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_initialize_node));

	vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
	vdev_initialize_stop_wait(spa, &vd_list);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}

	list_destroy(&vd_list);
}

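/*
 * Restore the initialize state after pool import or device online: read
 * the persisted state and action time from the leaf ZAP, then either load
 * the progress for reporting or resume an interrupted initialization.
 */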
void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    vd->vdev_spa->spa_load_thread == curthread);
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = timestamp;

		if ((vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) && !vd->vdev_top->vdev_rz_expanding) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
		    !vd->vdev_top->vdev_removing &&
		    !vd->vdev_top->vdev_rz_expanding &&
		    vd->vdev_initialize_thread == NULL) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}

EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_uninitialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);

ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, U64, ZMOD_RW,
	"Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, U64, ZMOD_RW,
	"Size in bytes of writes by zpool initialize");
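
/*
 * A usage note, not part of the upstream source: the two parameters above
 * are tunable at runtime. On Linux they should appear under
 * /sys/module/zfs/parameters/, and on FreeBSD as vfs.zfs sysctls; e.g.
 * (paths assumed, verify on your platform):
 *
 *	echo 2097152 > /sys/module/zfs/parameters/zfs_initialize_chunk_size
 *	sysctl vfs.zfs.initialize_chunk_size=2097152
 */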
835