xref: /freebsd/sys/contrib/openzfs/module/zfs/metaslab.c (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26  * Copyright (c) 2017, Intel Corporation.
27  */
28 
29 #include <sys/zfs_context.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/vdev_draid.h>
36 #include <sys/zio.h>
37 #include <sys/spa_impl.h>
38 #include <sys/zfeature.h>
39 #include <sys/vdev_indirect_mapping.h>
40 #include <sys/zap.h>
41 #include <sys/btree.h>
42 
43 #define	WITH_DF_BLOCK_ALLOCATOR
44 
45 #define	GANG_ALLOCATION(flags) \
46 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
47 
48 /*
49  * Metaslab granularity, in bytes. This is roughly similar to what would be
50  * referred to as the "stripe size" in traditional RAID arrays. In normal
51  * operation, we will try to write this amount of data to a top-level vdev
52  * before moving on to the next one.
53  */
54 unsigned long metaslab_aliquot = 512 << 10;
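/*
 * Note: when a metaslab group is activated, its per-group aliquot is scaled
 * by the number of children of its top-level vdev (see the mg_aliquot
 * assignment in metaslab_group_activate() below), so wider top-level vdevs
 * receive proportionally more data per pass of the rotor.
 */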
55 
56 /*
57  * For testing, make some blocks above a certain size be gang blocks.
58  */
59 unsigned long metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
60 
61 /*
62  * In pools where the log space map feature is not enabled we touch
63  * multiple metaslabs (and their respective space maps) with each
64  * transaction group. Thus, we benefit from having a small space map
65  * block size since it allows us to issue more I/O operations scattered
66  * around the disk. So a sane default for the space map block size
67  * is 8~16K.
68  */
69 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
70 
71 /*
72  * When the log space map feature is enabled, we accumulate a lot of
73  * changes per metaslab that are flushed once in a while so we benefit
74  * from a bigger block size like 128K for the metaslab space maps.
75  */
76 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
77 
78 /*
79  * The in-core space map representation is more compact than its on-disk form.
80  * The zfs_condense_pct determines how much more compact the in-core
81  * space map representation must be before we compact it on-disk.
82  * Values should be greater than or equal to 100.
83  */
84 int zfs_condense_pct = 200;
85 
86 /*
87  * Condensing a metaslab is not guaranteed to actually reduce the amount of
88  * space used on disk. In particular, a space map uses data in increments of
89  * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
90  * same number of blocks after condensing. Since the goal of condensing is to
91  * reduce the number of IOPs required to read the space map, we only want to
92  * condense when we can be sure we will reduce the number of blocks used by the
93  * space map. Unfortunately, we cannot precisely compute whether or not this is
94  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
95  * we apply the following heuristic: do not condense a spacemap unless the
96  * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
97  * blocks.
98  */
99 int zfs_metaslab_condense_block_threshold = 4;
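/*
 * Illustrative arithmetic for the heuristic above (a sketch, not the exact
 * comparison performed in metaslab_should_condense()): with ashift=12 and a
 * 16K space map block size, records are consumed in MAX(4K, 16K) = 16K
 * increments, so condensing would only be considered once the uncondensed
 * space map occupies more than about 4 * 16K = 64K on disk.
 */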
100 
101 /*
102  * The zfs_mg_noalloc_threshold defines which metaslab groups should
103  * be eligible for allocation. The value is defined as a percentage of
104  * free space. Metaslab groups that have more free space than
105  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
106  * a metaslab group's free space is less than or equal to the
107  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
108  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
109  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
110  * groups are allowed to accept allocations. Gang blocks are always
111  * eligible to allocate on any metaslab group. The default value of 0 means
112  * no metaslab group will be excluded based on this criterion.
113  */
114 int zfs_mg_noalloc_threshold = 0;
115 
116 /*
117  * Metaslab groups are considered eligible for allocations if their
118  * fragmentation metric (measured as a percentage) is less than or
119  * equal to zfs_mg_fragmentation_threshold. If a metaslab group
120  * exceeds this threshold then it will be skipped unless all metaslab
121  * groups within the metaslab class have also crossed this threshold.
122  *
123  * This tunable was introduced to avoid edge cases where we continue
124  * allocating from very fragmented disks in our pool while other, less
125  * fragmented disks exist. On the other hand, if all disks in the
126  * pool are uniformly approaching the threshold, the threshold can
127  * be a speed bump in performance, where we keep switching the disks
128  * that we allocate from (e.g. we allocate some segments from disk A,
129  * pushing it past the threshold, while freeing segments from disk
130  * B, bringing its fragmentation back below the threshold).
131  *
132  * Empirically, we've seen that our vdev selection for allocations is
133  * good enough that fragmentation increases uniformly across all vdevs
134  * the majority of the time. Thus we set the threshold percentage high
135  * enough to avoid hitting the speed bump on pools that are being pushed
136  * to the edge.
137  */
138 int zfs_mg_fragmentation_threshold = 95;
139 
140 /*
141  * Allow metaslabs to keep their active state as long as their fragmentation
142  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
143  * active metaslab that exceeds this threshold will no longer keep its active
144  * status allowing better metaslabs to be selected.
145  */
146 int zfs_metaslab_fragmentation_threshold = 70;
147 
148 /*
149  * When set will load all metaslabs when pool is first opened.
150  */
151 int metaslab_debug_load = 0;
152 
153 /*
154  * When set will prevent metaslabs from being unloaded.
155  */
156 int metaslab_debug_unload = 0;
157 
158 /*
159  * Minimum size which forces the dynamic allocator to change
160  * its allocation strategy.  Once the space map cannot satisfy
161  * an allocation of this size then it switches to using a more
162  * aggressive strategy (i.e. search by size rather than offset).
163  */
164 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
165 
166 /*
167  * The minimum free space, in percent, which must be available
168  * in a space map to continue allocations in a first-fit fashion.
169  * Once the space map's free space drops below this level we dynamically
170  * switch to using best-fit allocations.
171  */
172 int metaslab_df_free_pct = 4;
173 
174 /*
175  * Maximum distance to search forward from the last offset. Without this
176  * limit, fragmented pools can see >100,000 iterations and
177  * metaslab_block_picker() becomes the performance limiting factor on
178  * high-performance storage.
179  *
180  * With the default setting of 16MB, we typically see less than 500
181  * iterations, even with very fragmented, ashift=9 pools. The maximum number
182  * of iterations possible is:
183  *     metaslab_df_max_search / (2 * (1<<ashift))
184  * With the default setting of 16MB this is 16*1024 (with ashift=9) or
185  * 2048 (with ashift=12).
186  */
187 int metaslab_df_max_search = 16 * 1024 * 1024;
188 
189 /*
190  * Forces the metaslab_block_picker function to search for at least this many
191  * segments forwards until giving up on finding a segment that the allocation
192  * will fit into.
193  */
194 uint32_t metaslab_min_search_count = 100;
195 
196 /*
197  * If we are not searching forward (due to metaslab_df_max_search,
198  * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
199  * controls what segment is used.  If it is set, we will use the largest free
200  * segment.  If it is not set, we will use a segment of exactly the requested
201  * size (or larger).
202  */
203 int metaslab_df_use_largest_segment = B_FALSE;
204 
205 /*
206  * Percentage of all cpus that can be used by the metaslab taskq.
207  */
208 int metaslab_load_pct = 50;
209 
210 /*
211  * These tunables control how long a metaslab will remain loaded after the
212  * last allocation from it.  A metaslab can't be unloaded until at least
213  * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
214  * have elapsed.  However, zfs_metaslab_mem_limit may cause it to be
215  * unloaded sooner.  These settings are intended to be generous -- to keep
216  * metaslabs loaded for a long time, reducing the rate of metaslab loading.
217  */
218 int metaslab_unload_delay = 32;
219 int metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
220 
221 /*
222  * Max number of metaslabs per group to preload.
223  */
224 int metaslab_preload_limit = 10;
225 
226 /*
227  * Enable/disable preloading of metaslabs.
228  */
229 int metaslab_preload_enabled = B_TRUE;
230 
231 /*
232  * Enable/disable fragmentation weighting on metaslabs.
233  */
234 int metaslab_fragmentation_factor_enabled = B_TRUE;
235 
236 /*
237  * Enable/disable lba weighting (i.e. outer tracks are given preference).
238  */
239 int metaslab_lba_weighting_enabled = B_TRUE;
240 
241 /*
242  * Enable/disable metaslab group biasing.
243  */
244 int metaslab_bias_enabled = B_TRUE;
245 
246 /*
247  * Enable/disable remapping of indirect DVAs to their concrete vdevs.
248  */
249 boolean_t zfs_remap_blkptr_enable = B_TRUE;
250 
251 /*
252  * Enable/disable segment-based metaslab selection.
253  */
254 int zfs_metaslab_segment_weight_enabled = B_TRUE;
255 
256 /*
257  * When using segment-based metaslab selection, we will continue
258  * allocating from the active metaslab until we have exhausted
259  * zfs_metaslab_switch_threshold of its buckets.
260  */
261 int zfs_metaslab_switch_threshold = 2;
262 
263 /*
264  * Internal switch to enable/disable the metaslab allocation tracing
265  * facility.
266  */
267 boolean_t metaslab_trace_enabled = B_FALSE;
268 
269 /*
270  * Maximum entries that the metaslab allocation tracing facility will keep
271  * in a given list when running in non-debug mode. We limit the number
272  * of entries in non-debug mode to prevent us from using up too much memory.
273  * The limit should be sufficiently large that we don't expect any allocation
274  * to ever exceed this value. In debug mode, the system will panic if this
275  * limit is ever reached, allowing for further investigation.
276  */
277 uint64_t metaslab_trace_max_entries = 5000;
278 
279 /*
280  * Maximum number of metaslabs per group that can be disabled
281  * simultaneously.
282  */
283 int max_disabled_ms = 3;
284 
285 /*
286  * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
287  * To avoid 64-bit overflow, don't set above UINT32_MAX.
288  */
289 unsigned long zfs_metaslab_max_size_cache_sec = 3600; /* 1 hour */
290 
291 /*
292  * Maximum percentage of memory to use on storing loaded metaslabs. If loading
293  * a metaslab would take it over this percentage, the oldest selected metaslab
294  * is automatically unloaded.
295  */
296 int zfs_metaslab_mem_limit = 75;
297 
298 /*
299  * Force the per-metaslab range trees to use 64-bit integers to store
300  * segments. Used for debugging purposes.
301  */
302 boolean_t zfs_metaslab_force_large_segs = B_FALSE;
303 
304 /*
305  * By default we only store segments over a certain size in the size-sorted
306  * metaslab trees (ms_allocatable_by_size and
307  * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
308  * improves load and unload times at the cost of causing us to use slightly
309  * larger segments than we would otherwise in some cases.
310  */
311 uint32_t metaslab_by_size_min_shift = 14;
312 
313 /*
314  * If not set, we will first try normal allocation.  If that fails then
315  * we will do a gang allocation.  If that fails then we will do a "try hard"
316  * gang allocation.  If that fails then we will have a multi-layer gang
317  * block.
318  *
319  * If set, we will first try normal allocation.  If that fails then
320  * we will do a "try hard" allocation.  If that fails we will do a gang
321  * allocation.  If that fails we will do a "try hard" gang allocation.  If
322  * that fails then we will have a multi-layer gang block.
323  */
324 int zfs_metaslab_try_hard_before_gang = B_FALSE;
325 
326 /*
327  * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
328  * metaslabs.  This improves performance, especially when there are many
329  * metaslabs per vdev and the allocation can't actually be satisfied (so we
330  * would otherwise iterate all the metaslabs).  If there is a metaslab with a
331  * worse weight but it can actually satisfy the allocation, we won't find it
332  * until trying hard.  This may happen if the worse metaslab is not loaded
333  * (and the true weight is better than we have calculated), or due to weight
334  * bucketization.  E.g. we are looking for a 60K segment, and the best
335  * metaslabs all have free segments in the 32-63K bucket, but the best
336  * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
337  * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
338  * bucket, and therefore a lower weight).
339  */
340 int zfs_metaslab_find_max_tries = 100;
341 
342 static uint64_t metaslab_weight(metaslab_t *, boolean_t);
343 static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
344 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
345 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
346 
347 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
348 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
349 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
350 static unsigned int metaslab_idx_func(multilist_t *, void *);
351 static void metaslab_evict(metaslab_t *, uint64_t);
352 static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
353 kmem_cache_t *metaslab_alloc_trace_cache;
354 
355 typedef struct metaslab_stats {
356 	kstat_named_t metaslabstat_trace_over_limit;
357 	kstat_named_t metaslabstat_reload_tree;
358 	kstat_named_t metaslabstat_too_many_tries;
359 	kstat_named_t metaslabstat_try_hard;
360 } metaslab_stats_t;
361 
362 static metaslab_stats_t metaslab_stats = {
363 	{ "trace_over_limit",		KSTAT_DATA_UINT64 },
364 	{ "reload_tree",		KSTAT_DATA_UINT64 },
365 	{ "too_many_tries",		KSTAT_DATA_UINT64 },
366 	{ "try_hard",			KSTAT_DATA_UINT64 },
367 };
368 
369 #define	METASLABSTAT_BUMP(stat) \
370 	atomic_inc_64(&metaslab_stats.stat.value.ui64);
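/*
 * Usage sketch: callers bump a named counter directly, e.g. as
 * metaslab_size_tree_full_load() does below:
 *
 *	METASLABSTAT_BUMP(metaslabstat_reload_tree);
 */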
371 
372 
373 kstat_t *metaslab_ksp;
374 
375 void
376 metaslab_stat_init(void)
377 {
378 	ASSERT(metaslab_alloc_trace_cache == NULL);
379 	metaslab_alloc_trace_cache = kmem_cache_create(
380 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
381 	    0, NULL, NULL, NULL, NULL, NULL, 0);
382 	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
383 	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
384 	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
385 	if (metaslab_ksp != NULL) {
386 		metaslab_ksp->ks_data = &metaslab_stats;
387 		kstat_install(metaslab_ksp);
388 	}
389 }
390 
391 void
392 metaslab_stat_fini(void)
393 {
394 	if (metaslab_ksp != NULL) {
395 		kstat_delete(metaslab_ksp);
396 		metaslab_ksp = NULL;
397 	}
398 
399 	kmem_cache_destroy(metaslab_alloc_trace_cache);
400 	metaslab_alloc_trace_cache = NULL;
401 }
402 
403 /*
404  * ==========================================================================
405  * Metaslab classes
406  * ==========================================================================
407  */
408 metaslab_class_t *
409 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
410 {
411 	metaslab_class_t *mc;
412 
413 	mc = kmem_zalloc(offsetof(metaslab_class_t,
414 	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
415 
416 	mc->mc_spa = spa;
417 	mc->mc_ops = ops;
418 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
419 	mc->mc_metaslab_txg_list = multilist_create(sizeof (metaslab_t),
420 	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
421 	for (int i = 0; i < spa->spa_alloc_count; i++) {
422 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
423 		mca->mca_rotor = NULL;
424 		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
425 	}
426 
427 	return (mc);
428 }
429 
430 void
431 metaslab_class_destroy(metaslab_class_t *mc)
432 {
433 	spa_t *spa = mc->mc_spa;
434 
435 	ASSERT(mc->mc_alloc == 0);
436 	ASSERT(mc->mc_deferred == 0);
437 	ASSERT(mc->mc_space == 0);
438 	ASSERT(mc->mc_dspace == 0);
439 
440 	for (int i = 0; i < spa->spa_alloc_count; i++) {
441 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
442 		ASSERT(mca->mca_rotor == NULL);
443 		zfs_refcount_destroy(&mca->mca_alloc_slots);
444 	}
445 	mutex_destroy(&mc->mc_lock);
446 	multilist_destroy(mc->mc_metaslab_txg_list);
447 	kmem_free(mc, offsetof(metaslab_class_t,
448 	    mc_allocator[spa->spa_alloc_count]));
449 }
450 
451 int
452 metaslab_class_validate(metaslab_class_t *mc)
453 {
454 	metaslab_group_t *mg;
455 	vdev_t *vd;
456 
457 	/*
458 	 * Must hold one of the spa_config locks.
459 	 */
460 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
461 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
462 
463 	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
464 		return (0);
465 
466 	do {
467 		vd = mg->mg_vd;
468 		ASSERT(vd->vdev_mg != NULL);
469 		ASSERT3P(vd->vdev_top, ==, vd);
470 		ASSERT3P(mg->mg_class, ==, mc);
471 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
472 	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
473 
474 	return (0);
475 }
476 
477 static void
478 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
479     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
480 {
481 	atomic_add_64(&mc->mc_alloc, alloc_delta);
482 	atomic_add_64(&mc->mc_deferred, defer_delta);
483 	atomic_add_64(&mc->mc_space, space_delta);
484 	atomic_add_64(&mc->mc_dspace, dspace_delta);
485 }
486 
487 uint64_t
488 metaslab_class_get_alloc(metaslab_class_t *mc)
489 {
490 	return (mc->mc_alloc);
491 }
492 
493 uint64_t
494 metaslab_class_get_deferred(metaslab_class_t *mc)
495 {
496 	return (mc->mc_deferred);
497 }
498 
499 uint64_t
500 metaslab_class_get_space(metaslab_class_t *mc)
501 {
502 	return (mc->mc_space);
503 }
504 
505 uint64_t
506 metaslab_class_get_dspace(metaslab_class_t *mc)
507 {
508 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
509 }
510 
511 void
512 metaslab_class_histogram_verify(metaslab_class_t *mc)
513 {
514 	spa_t *spa = mc->mc_spa;
515 	vdev_t *rvd = spa->spa_root_vdev;
516 	uint64_t *mc_hist;
517 	int i;
518 
519 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
520 		return;
521 
522 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
523 	    KM_SLEEP);
524 
525 	for (int c = 0; c < rvd->vdev_children; c++) {
526 		vdev_t *tvd = rvd->vdev_child[c];
527 		metaslab_group_t *mg = tvd->vdev_mg;
528 
529 		/*
530 		 * Skip any holes, uninitialized top-levels, or
531 		 * vdevs that are not in this metaslab class.
532 		 */
533 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
534 		    mg->mg_class != mc) {
535 			continue;
536 		}
537 
538 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
539 			mc_hist[i] += mg->mg_histogram[i];
540 	}
541 
542 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
543 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
544 
545 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
546 }
547 
548 /*
549  * Calculate the metaslab class's fragmentation metric. The metric
550  * is weighted based on the space contribution of each metaslab group.
551  * The return value will be a number between 0 and 100 (inclusive), or
552  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
553  * zfs_frag_table for more information about the metric.
554  */
555 uint64_t
556 metaslab_class_fragmentation(metaslab_class_t *mc)
557 {
558 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
559 	uint64_t fragmentation = 0;
560 
561 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
562 
563 	for (int c = 0; c < rvd->vdev_children; c++) {
564 		vdev_t *tvd = rvd->vdev_child[c];
565 		metaslab_group_t *mg = tvd->vdev_mg;
566 
567 		/*
568 		 * Skip any holes, uninitialized top-levels,
569 		 * or vdevs that are not in this metaslab class.
570 		 */
571 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
572 		    mg->mg_class != mc) {
573 			continue;
574 		}
575 
576 		/*
577 		 * If a metaslab group does not contain a fragmentation
578 		 * metric then just bail out.
579 		 */
580 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
581 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
582 			return (ZFS_FRAG_INVALID);
583 		}
584 
585 		/*
586 		 * Determine how much this metaslab_group is contributing
587 		 * to the overall pool fragmentation metric.
588 		 */
589 		fragmentation += mg->mg_fragmentation *
590 		    metaslab_group_get_space(mg);
591 	}
592 	fragmentation /= metaslab_class_get_space(mc);
593 
594 	ASSERT3U(fragmentation, <=, 100);
595 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
596 	return (fragmentation);
597 }
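/*
 * Illustrative arithmetic for the space-weighted average above: a class with
 * a 1 TB group at 10% fragmentation and a 3 TB group at 50% fragmentation
 * reports (10 * 1 + 50 * 3) / 4 = 40% overall.
 */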
598 
599 /*
600  * Calculate the amount of expandable space that is available in
601  * this metaslab class. If a device is expanded then its expandable
602  * space will be the amount of allocatable space that is currently not
603  * part of this metaslab class.
604  */
605 uint64_t
606 metaslab_class_expandable_space(metaslab_class_t *mc)
607 {
608 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
609 	uint64_t space = 0;
610 
611 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
612 	for (int c = 0; c < rvd->vdev_children; c++) {
613 		vdev_t *tvd = rvd->vdev_child[c];
614 		metaslab_group_t *mg = tvd->vdev_mg;
615 
616 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
617 		    mg->mg_class != mc) {
618 			continue;
619 		}
620 
621 		/*
622 		 * Calculate if we have enough space to add additional
623 		 * metaslabs. We report the expandable space in terms
624 		 * of the metaslab size since that's the unit of expansion.
625 		 */
626 		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
627 		    1ULL << tvd->vdev_ms_shift);
628 	}
629 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
630 	return (space);
631 }
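/*
 * Illustrative arithmetic: with 1 GB metaslabs (vdev_ms_shift == 30), a
 * top-level vdev whose vdev_max_asize exceeds its vdev_asize by 10.5 GB
 * contributes P2ALIGN(10.5 GB, 1 GB) == 10 GB of expandable space.
 */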
632 
633 void
634 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
635 {
636 	multilist_t *ml = mc->mc_metaslab_txg_list;
637 	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
638 		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
639 		metaslab_t *msp = multilist_sublist_head(mls);
640 		multilist_sublist_unlock(mls);
641 		while (msp != NULL) {
642 			mutex_enter(&msp->ms_lock);
643 
644 			/*
645 			 * If the metaslab has been removed from the list
646 			 * (which could happen if we were at the memory limit
647 			 * and it was evicted during this loop), then we can't
648 			 * proceed and we should restart the sublist.
649 			 */
650 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
651 				mutex_exit(&msp->ms_lock);
652 				i--;
653 				break;
654 			}
655 			mls = multilist_sublist_lock(ml, i);
656 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
657 			multilist_sublist_unlock(mls);
658 			if (txg >
659 			    msp->ms_selected_txg + metaslab_unload_delay &&
660 			    gethrtime() > msp->ms_selected_time +
661 			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
662 				metaslab_evict(msp, txg);
663 			} else {
664 				/*
665 				 * Once we've hit a metaslab selected too
666 				 * recently to evict, we're done evicting for
667 				 * now.
668 				 */
669 				mutex_exit(&msp->ms_lock);
670 				break;
671 			}
672 			mutex_exit(&msp->ms_lock);
673 			msp = next_msp;
674 		}
675 	}
676 }
677 
678 static int
679 metaslab_compare(const void *x1, const void *x2)
680 {
681 	const metaslab_t *m1 = (const metaslab_t *)x1;
682 	const metaslab_t *m2 = (const metaslab_t *)x2;
683 
684 	int sort1 = 0;
685 	int sort2 = 0;
686 	if (m1->ms_allocator != -1 && m1->ms_primary)
687 		sort1 = 1;
688 	else if (m1->ms_allocator != -1 && !m1->ms_primary)
689 		sort1 = 2;
690 	if (m2->ms_allocator != -1 && m2->ms_primary)
691 		sort2 = 1;
692 	else if (m2->ms_allocator != -1 && !m2->ms_primary)
693 		sort2 = 2;
694 
695 	/*
696 	 * Sort inactive metaslabs first, then primaries, then secondaries. When
697 	 * selecting a metaslab to allocate from, an allocator first tries its
698 	 * primary, then secondary active metaslab. If it doesn't have active
699 	 * metaslabs, or can't allocate from them, it searches for an inactive
700 	 * metaslab to activate. If it can't find a suitable one, it will steal
701 	 * a primary or secondary metaslab from another allocator.
702 	 */
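	/*
	 * Within the same activity class, ties are broken by weight in
	 * descending order (heaviest metaslab first), and finally by
	 * ms_start.
	 */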
703 	if (sort1 < sort2)
704 		return (-1);
705 	if (sort1 > sort2)
706 		return (1);
707 
708 	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
709 	if (likely(cmp))
710 		return (cmp);
711 
712 	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
713 
714 	return (TREE_CMP(m1->ms_start, m2->ms_start));
715 }
716 
717 /*
718  * ==========================================================================
719  * Metaslab groups
720  * ==========================================================================
721  */
722 /*
723  * Update the allocatable flag and the metaslab group's capacity.
724  * The allocatable flag is set to true if the capacity is below
725  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
726  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
727  * transitions from allocatable to non-allocatable or vice versa then the
728  * metaslab group's class is updated to reflect the transition.
729  */
730 static void
731 metaslab_group_alloc_update(metaslab_group_t *mg)
732 {
733 	vdev_t *vd = mg->mg_vd;
734 	metaslab_class_t *mc = mg->mg_class;
735 	vdev_stat_t *vs = &vd->vdev_stat;
736 	boolean_t was_allocatable;
737 	boolean_t was_initialized;
738 
739 	ASSERT(vd == vd->vdev_top);
740 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
741 	    SCL_ALLOC);
742 
743 	mutex_enter(&mg->mg_lock);
744 	was_allocatable = mg->mg_allocatable;
745 	was_initialized = mg->mg_initialized;
746 
747 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
748 	    (vs->vs_space + 1);
749 
750 	mutex_enter(&mc->mc_lock);
751 
752 	/*
753 	 * If the metaslab group was just added then it won't
754 	 * have any space until we finish syncing out this txg.
755 	 * At that point we will consider it initialized and available
756 	 * for allocations.  We also don't consider non-activated
757 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
758 	 * to be initialized, because they can't be used for allocation.
759 	 */
760 	mg->mg_initialized = metaslab_group_initialized(mg);
761 	if (!was_initialized && mg->mg_initialized) {
762 		mc->mc_groups++;
763 	} else if (was_initialized && !mg->mg_initialized) {
764 		ASSERT3U(mc->mc_groups, >, 0);
765 		mc->mc_groups--;
766 	}
767 	if (mg->mg_initialized)
768 		mg->mg_no_free_space = B_FALSE;
769 
770 	/*
771 	 * A metaslab group is considered allocatable if it has plenty
772 	 * of free space or is not heavily fragmented. We only take
773 	 * fragmentation into account if the metaslab group has a valid
774 	 * fragmentation metric (i.e. a value between 0 and 100).
775 	 */
776 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
777 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
778 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
779 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
780 
781 	/*
782 	 * The mc_alloc_groups maintains a count of the number of
783 	 * groups in this metaslab class that are still above the
784 	 * zfs_mg_noalloc_threshold. This is used by the allocating
785 	 * threads to determine if they should avoid allocations to
786 	 * a given group. The allocator will avoid allocations to a group
787 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
788 	 * and there are still other groups that are above the threshold.
789 	 * When a group transitions from allocatable to non-allocatable or
790 	 * vice versa we update the metaslab class to reflect that change.
791 	 * When the mc_alloc_groups value drops to 0 that means that all
792 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
793 	 * eligible for allocations. This effectively means that all devices
794 	 * are balanced again.
795 	 */
796 	if (was_allocatable && !mg->mg_allocatable)
797 		mc->mc_alloc_groups--;
798 	else if (!was_allocatable && mg->mg_allocatable)
799 		mc->mc_alloc_groups++;
800 	mutex_exit(&mc->mc_lock);
801 
802 	mutex_exit(&mg->mg_lock);
803 }
804 
805 int
806 metaslab_sort_by_flushed(const void *va, const void *vb)
807 {
808 	const metaslab_t *a = va;
809 	const metaslab_t *b = vb;
810 
811 	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
812 	if (likely(cmp))
813 		return (cmp);
814 
815 	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
816 	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
817 	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
818 	if (cmp)
819 		return (cmp);
820 
821 	return (TREE_CMP(a->ms_id, b->ms_id));
822 }
823 
824 metaslab_group_t *
825 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
826 {
827 	metaslab_group_t *mg;
828 
829 	mg = kmem_zalloc(offsetof(metaslab_group_t,
830 	    mg_allocator[allocators]), KM_SLEEP);
831 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
832 	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
833 	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
834 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
835 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
836 	mg->mg_vd = vd;
837 	mg->mg_class = mc;
838 	mg->mg_activation_count = 0;
839 	mg->mg_initialized = B_FALSE;
840 	mg->mg_no_free_space = B_TRUE;
841 	mg->mg_allocators = allocators;
842 
843 	for (int i = 0; i < allocators; i++) {
844 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
845 		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
846 	}
847 
848 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
849 	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
850 
851 	return (mg);
852 }
853 
854 void
855 metaslab_group_destroy(metaslab_group_t *mg)
856 {
857 	ASSERT(mg->mg_prev == NULL);
858 	ASSERT(mg->mg_next == NULL);
859 	/*
860 	 * We may have gone below zero with the activation count
861 	 * either because we never activated in the first place or
862 	 * because we're done, and possibly removing the vdev.
863 	 */
864 	ASSERT(mg->mg_activation_count <= 0);
865 
866 	taskq_destroy(mg->mg_taskq);
867 	avl_destroy(&mg->mg_metaslab_tree);
868 	mutex_destroy(&mg->mg_lock);
869 	mutex_destroy(&mg->mg_ms_disabled_lock);
870 	cv_destroy(&mg->mg_ms_disabled_cv);
871 
872 	for (int i = 0; i < mg->mg_allocators; i++) {
873 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
874 		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
875 	}
876 	kmem_free(mg, offsetof(metaslab_group_t,
877 	    mg_allocator[mg->mg_allocators]));
878 }
879 
880 void
881 metaslab_group_activate(metaslab_group_t *mg)
882 {
883 	metaslab_class_t *mc = mg->mg_class;
884 	spa_t *spa = mc->mc_spa;
885 	metaslab_group_t *mgprev, *mgnext;
886 
887 	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
888 
889 	ASSERT(mg->mg_prev == NULL);
890 	ASSERT(mg->mg_next == NULL);
891 	ASSERT(mg->mg_activation_count <= 0);
892 
893 	if (++mg->mg_activation_count <= 0)
894 		return;
895 
896 	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
897 	metaslab_group_alloc_update(mg);
898 
899 	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
900 		mg->mg_prev = mg;
901 		mg->mg_next = mg;
902 	} else {
903 		mgnext = mgprev->mg_next;
904 		mg->mg_prev = mgprev;
905 		mg->mg_next = mgnext;
906 		mgprev->mg_next = mg;
907 		mgnext->mg_prev = mg;
908 	}
909 	for (int i = 0; i < spa->spa_alloc_count; i++) {
910 		mc->mc_allocator[i].mca_rotor = mg;
911 		mg = mg->mg_next;
912 	}
913 }
914 
915 /*
916  * Passivate a metaslab group and remove it from the allocation rotor.
917  * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
918  * a metaslab group. This function will momentarily drop spa_config_locks
919  * that are lower than the SCL_ALLOC lock (see comment below).
920  */
921 void
922 metaslab_group_passivate(metaslab_group_t *mg)
923 {
924 	metaslab_class_t *mc = mg->mg_class;
925 	spa_t *spa = mc->mc_spa;
926 	metaslab_group_t *mgprev, *mgnext;
927 	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
928 
929 	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
930 	    (SCL_ALLOC | SCL_ZIO));
931 
932 	if (--mg->mg_activation_count != 0) {
933 		for (int i = 0; i < spa->spa_alloc_count; i++)
934 			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
935 		ASSERT(mg->mg_prev == NULL);
936 		ASSERT(mg->mg_next == NULL);
937 		ASSERT(mg->mg_activation_count < 0);
938 		return;
939 	}
940 
941 	/*
942 	 * The spa_config_lock is an array of rwlocks, ordered as
943 	 * follows (from highest to lowest):
944 	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
945 	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
946 	 * (For more information about the spa_config_lock see spa_misc.c)
947 	 * The higher the lock, the broader its coverage. When we passivate
948 	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
949 	 * config locks. However, the metaslab group's taskq might be trying
950 	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
951 	 * lower locks to allow the I/O to complete. At a minimum,
952 	 * we continue to hold the SCL_ALLOC lock, which prevents any future
953 	 * allocations from taking place and any changes to the vdev tree.
954 	 */
955 	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
956 	taskq_wait_outstanding(mg->mg_taskq, 0);
957 	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
958 	metaslab_group_alloc_update(mg);
959 	for (int i = 0; i < mg->mg_allocators; i++) {
960 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
961 		metaslab_t *msp = mga->mga_primary;
962 		if (msp != NULL) {
963 			mutex_enter(&msp->ms_lock);
964 			metaslab_passivate(msp,
965 			    metaslab_weight_from_range_tree(msp));
966 			mutex_exit(&msp->ms_lock);
967 		}
968 		msp = mga->mga_secondary;
969 		if (msp != NULL) {
970 			mutex_enter(&msp->ms_lock);
971 			metaslab_passivate(msp,
972 			    metaslab_weight_from_range_tree(msp));
973 			mutex_exit(&msp->ms_lock);
974 		}
975 	}
976 
977 	mgprev = mg->mg_prev;
978 	mgnext = mg->mg_next;
979 
980 	if (mg == mgnext) {
981 		mgnext = NULL;
982 	} else {
983 		mgprev->mg_next = mgnext;
984 		mgnext->mg_prev = mgprev;
985 	}
986 	for (int i = 0; i < spa->spa_alloc_count; i++) {
987 		if (mc->mc_allocator[i].mca_rotor == mg)
988 			mc->mc_allocator[i].mca_rotor = mgnext;
989 	}
990 
991 	mg->mg_prev = NULL;
992 	mg->mg_next = NULL;
993 }
994 
995 boolean_t
996 metaslab_group_initialized(metaslab_group_t *mg)
997 {
998 	vdev_t *vd = mg->mg_vd;
999 	vdev_stat_t *vs = &vd->vdev_stat;
1000 
1001 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1002 }
1003 
1004 uint64_t
1005 metaslab_group_get_space(metaslab_group_t *mg)
1006 {
1007 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
1008 }
1009 
1010 void
1011 metaslab_group_histogram_verify(metaslab_group_t *mg)
1012 {
1013 	uint64_t *mg_hist;
1014 	vdev_t *vd = mg->mg_vd;
1015 	uint64_t ashift = vd->vdev_ashift;
1016 	int i;
1017 
1018 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1019 		return;
1020 
1021 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
1022 	    KM_SLEEP);
1023 
1024 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
1025 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
1026 
1027 	for (int m = 0; m < vd->vdev_ms_count; m++) {
1028 		metaslab_t *msp = vd->vdev_ms[m];
1029 
1030 		/* skip if not active or not a member */
1031 		if (msp->ms_sm == NULL || msp->ms_group != mg)
1032 			continue;
1033 
1034 		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
1035 			mg_hist[i + ashift] +=
1036 			    msp->ms_sm->sm_phys->smp_histogram[i];
1037 	}
1038 
1039 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
1040 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1041 
1042 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
1043 }
1044 
1045 static void
1046 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1047 {
1048 	metaslab_class_t *mc = mg->mg_class;
1049 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1050 
1051 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1052 	if (msp->ms_sm == NULL)
1053 		return;
1054 
1055 	mutex_enter(&mg->mg_lock);
1056 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1057 		mg->mg_histogram[i + ashift] +=
1058 		    msp->ms_sm->sm_phys->smp_histogram[i];
1059 		mc->mc_histogram[i + ashift] +=
1060 		    msp->ms_sm->sm_phys->smp_histogram[i];
1061 	}
1062 	mutex_exit(&mg->mg_lock);
1063 }
1064 
1065 void
1066 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1067 {
1068 	metaslab_class_t *mc = mg->mg_class;
1069 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1070 
1071 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1072 	if (msp->ms_sm == NULL)
1073 		return;
1074 
1075 	mutex_enter(&mg->mg_lock);
1076 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1077 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
1078 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1079 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
1080 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1081 
1082 		mg->mg_histogram[i + ashift] -=
1083 		    msp->ms_sm->sm_phys->smp_histogram[i];
1084 		mc->mc_histogram[i + ashift] -=
1085 		    msp->ms_sm->sm_phys->smp_histogram[i];
1086 	}
1087 	mutex_exit(&mg->mg_lock);
1088 }
1089 
1090 static void
1091 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1092 {
1093 	ASSERT(msp->ms_group == NULL);
1094 	mutex_enter(&mg->mg_lock);
1095 	msp->ms_group = mg;
1096 	msp->ms_weight = 0;
1097 	avl_add(&mg->mg_metaslab_tree, msp);
1098 	mutex_exit(&mg->mg_lock);
1099 
1100 	mutex_enter(&msp->ms_lock);
1101 	metaslab_group_histogram_add(mg, msp);
1102 	mutex_exit(&msp->ms_lock);
1103 }
1104 
1105 static void
1106 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1107 {
1108 	mutex_enter(&msp->ms_lock);
1109 	metaslab_group_histogram_remove(mg, msp);
1110 	mutex_exit(&msp->ms_lock);
1111 
1112 	mutex_enter(&mg->mg_lock);
1113 	ASSERT(msp->ms_group == mg);
1114 	avl_remove(&mg->mg_metaslab_tree, msp);
1115 
1116 	metaslab_class_t *mc = msp->ms_group->mg_class;
1117 	multilist_sublist_t *mls =
1118 	    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
1119 	if (multilist_link_active(&msp->ms_class_txg_node))
1120 		multilist_sublist_remove(mls, msp);
1121 	multilist_sublist_unlock(mls);
1122 
1123 	msp->ms_group = NULL;
1124 	mutex_exit(&mg->mg_lock);
1125 }
1126 
1127 static void
1128 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1129 {
1130 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1131 	ASSERT(MUTEX_HELD(&mg->mg_lock));
1132 	ASSERT(msp->ms_group == mg);
1133 
1134 	avl_remove(&mg->mg_metaslab_tree, msp);
1135 	msp->ms_weight = weight;
1136 	avl_add(&mg->mg_metaslab_tree, msp);
1137 
1138 }
1139 
1140 static void
1141 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1142 {
1143 	/*
1144 	 * Although in principle the weight can be any value, in
1145 	 * practice we do not use values in the range [1, 511].
1146 	 */
1147 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1148 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1149 
1150 	mutex_enter(&mg->mg_lock);
1151 	metaslab_group_sort_impl(mg, msp, weight);
1152 	mutex_exit(&mg->mg_lock);
1153 }
1154 
1155 /*
1156  * Calculate the fragmentation for a given metaslab group. We can use
1157  * a simple average here since all metaslabs within the group must have
1158  * the same size. The return value will be a value between 0 and 100
1159  * (inclusive), or ZFS_FRAG_INVALID if half or fewer of the metaslabs in this
1160  * group have a fragmentation metric.
1161  */
1162 uint64_t
1163 metaslab_group_fragmentation(metaslab_group_t *mg)
1164 {
1165 	vdev_t *vd = mg->mg_vd;
1166 	uint64_t fragmentation = 0;
1167 	uint64_t valid_ms = 0;
1168 
1169 	for (int m = 0; m < vd->vdev_ms_count; m++) {
1170 		metaslab_t *msp = vd->vdev_ms[m];
1171 
1172 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1173 			continue;
1174 		if (msp->ms_group != mg)
1175 			continue;
1176 
1177 		valid_ms++;
1178 		fragmentation += msp->ms_fragmentation;
1179 	}
1180 
1181 	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1182 		return (ZFS_FRAG_INVALID);
1183 
1184 	fragmentation /= valid_ms;
1185 	ASSERT3U(fragmentation, <=, 100);
1186 	return (fragmentation);
1187 }
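/*
 * Illustrative arithmetic: a vdev with 200 metaslabs in which 120 carry a
 * valid fragmentation metric averaging 30 reports 30; if only 90 were valid
 * (not more than half of 200), ZFS_FRAG_INVALID would be returned instead.
 */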
1188 
1189 /*
1190  * Determine if a given metaslab group should skip allocations. A metaslab
1191  * group should avoid allocations if its free capacity is less than the
1192  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1193  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1194  * that can still handle allocations. If the allocation throttle is enabled
1195  * then we skip allocations to devices that have reached their maximum
1196  * allocation queue depth unless the selected metaslab group is the only
1197  * eligible group remaining.
1198  */
1199 static boolean_t
1200 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
1201     uint64_t psize, int allocator, int d)
1202 {
1203 	spa_t *spa = mg->mg_vd->vdev_spa;
1204 	metaslab_class_t *mc = mg->mg_class;
1205 
1206 	/*
1207 	 * We can only consider skipping this metaslab group if it's
1208 	 * in the normal metaslab class and there are other metaslab
1209 	 * in the normal, special, or dedup metaslab class and there are
1210 	 * other metaslab groups to select from. Otherwise, we always
1211 	 * consider it eligible for allocations.
1212 	if ((mc != spa_normal_class(spa) &&
1213 	    mc != spa_special_class(spa) &&
1214 	    mc != spa_dedup_class(spa)) ||
1215 	    mc->mc_groups <= 1)
1216 		return (B_TRUE);
1217 
1218 	/*
1219 	 * If the metaslab group's mg_allocatable flag is set (see comments
1220 	 * in metaslab_group_alloc_update() for more information) and
1221 	 * the allocation throttle is disabled then allow allocations to this
1222 	 * device. However, if the allocation throttle is enabled then
1223 	 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1224 	 * to determine if we should allow allocations to this metaslab group.
1225 	 * If all metaslab groups are no longer considered allocatable
1226 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1227 	 * gang block size then we allow allocations on this metaslab group
1228 	 * regardless of the mg_allocatable or throttle settings.
1229 	 */
1230 	if (mg->mg_allocatable) {
1231 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1232 		int64_t qdepth;
1233 		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1234 
1235 		if (!mc->mc_alloc_throttle_enabled)
1236 			return (B_TRUE);
1237 
1238 		/*
1239 		 * If this metaslab group does not have any free space, then
1240 		 * there is no point in looking further.
1241 		 */
1242 		if (mg->mg_no_free_space)
1243 			return (B_FALSE);
1244 
1245 		/*
1246 		 * Relax allocation throttling for ditto blocks.  Due to
1247 		 * random imbalances in allocation it tends to push copies
1248 		 * to the one vdev that looks a bit better at the moment.
1249 		 */
1250 		qmax = qmax * (4 + d) / 4;
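		/*
		 * Illustration (assuming d is the DVA/copy index passed in
		 * by the caller): for the second copy (d == 1) the effective
		 * limit becomes qmax * 5 / 4, and for the third (d == 2)
		 * qmax * 6 / 4.
		 */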
1251 
1252 		qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1253 
1254 		/*
1255 		 * If this metaslab group is below its qmax or it's
1256 		 * the only allocatable metaslab group, then attempt
1257 		 * to allocate from it.
1258 		 */
1259 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
1260 			return (B_TRUE);
1261 		ASSERT3U(mc->mc_alloc_groups, >, 1);
1262 
1263 		/*
1264 		 * Since this metaslab group is at or over its qmax, we
1265 		 * need to determine if there are metaslab groups after this
1266 		 * one that might be able to handle this allocation. This is
1267 		 * racy since we can't hold the locks for all metaslab
1268 		 * groups at the same time when we make this check.
1269 		 */
1270 		for (metaslab_group_t *mgp = mg->mg_next;
1271 		    mgp != rotor; mgp = mgp->mg_next) {
1272 			metaslab_group_allocator_t *mgap =
1273 			    &mgp->mg_allocator[allocator];
1274 			qmax = mgap->mga_cur_max_alloc_queue_depth;
1275 			qmax = qmax * (4 + d) / 4;
1276 			qdepth =
1277 			    zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1278 
1279 			/*
1280 			 * If there is another metaslab group that
1281 			 * might be able to handle the allocation, then
1282 			 * we return false so that we skip this group.
1283 			 */
1284 			if (qdepth < qmax && !mgp->mg_no_free_space)
1285 				return (B_FALSE);
1286 		}
1287 
1288 		/*
1289 		 * We didn't find another group to handle the allocation
1290 		 * so we can't skip this metaslab group even though
1291 		 * we are at or over our qmax.
1292 		 */
1293 		return (B_TRUE);
1294 
1295 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1296 		return (B_TRUE);
1297 	}
1298 	return (B_FALSE);
1299 }
1300 
1301 /*
1302  * ==========================================================================
1303  * Range tree callbacks
1304  * ==========================================================================
1305  */
1306 
1307 /*
1308  * Comparison function for the private size-ordered tree using 32-bit
1309  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1310  */
1311 static int
1312 metaslab_rangesize32_compare(const void *x1, const void *x2)
1313 {
1314 	const range_seg32_t *r1 = x1;
1315 	const range_seg32_t *r2 = x2;
1316 
1317 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1318 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1319 
1320 	int cmp = TREE_CMP(rs_size1, rs_size2);
1321 	if (likely(cmp))
1322 		return (cmp);
1323 
1324 	return (TREE_CMP(r1->rs_start, r2->rs_start));
1325 }
1326 
1327 /*
1328  * Comparison function for the private size-ordered tree using 64-bit
1329  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1330  */
1331 static int
1332 metaslab_rangesize64_compare(const void *x1, const void *x2)
1333 {
1334 	const range_seg64_t *r1 = x1;
1335 	const range_seg64_t *r2 = x2;
1336 
1337 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1338 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1339 
1340 	int cmp = TREE_CMP(rs_size1, rs_size2);
1341 	if (likely(cmp))
1342 		return (cmp);
1343 
1344 	return (TREE_CMP(r1->rs_start, r2->rs_start));
1345 }
1346 typedef struct metaslab_rt_arg {
1347 	zfs_btree_t *mra_bt;
1348 	uint32_t mra_floor_shift;
1349 } metaslab_rt_arg_t;
1350 
1351 struct mssa_arg {
1352 	range_tree_t *rt;
1353 	metaslab_rt_arg_t *mra;
1354 };
1355 
1356 static void
1357 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1358 {
1359 	struct mssa_arg *mssap = arg;
1360 	range_tree_t *rt = mssap->rt;
1361 	metaslab_rt_arg_t *mrap = mssap->mra;
1362 	range_seg_max_t seg = {0};
1363 	rs_set_start(&seg, rt, start);
1364 	rs_set_end(&seg, rt, start + size);
1365 	metaslab_rt_add(rt, &seg, mrap);
1366 }
1367 
1368 static void
1369 metaslab_size_tree_full_load(range_tree_t *rt)
1370 {
1371 	metaslab_rt_arg_t *mrap = rt->rt_arg;
1372 	METASLABSTAT_BUMP(metaslabstat_reload_tree);
1373 	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1374 	mrap->mra_floor_shift = 0;
1375 	struct mssa_arg arg = {0};
1376 	arg.rt = rt;
1377 	arg.mra = mrap;
1378 	range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1379 }
1380 
1381 /*
1382  * Create any block allocator specific components. The current allocators
1383  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1384  */
1385 /* ARGSUSED */
1386 static void
1387 metaslab_rt_create(range_tree_t *rt, void *arg)
1388 {
1389 	metaslab_rt_arg_t *mrap = arg;
1390 	zfs_btree_t *size_tree = mrap->mra_bt;
1391 
1392 	size_t size;
1393 	int (*compare) (const void *, const void *);
1394 	switch (rt->rt_type) {
1395 	case RANGE_SEG32:
1396 		size = sizeof (range_seg32_t);
1397 		compare = metaslab_rangesize32_compare;
1398 		break;
1399 	case RANGE_SEG64:
1400 		size = sizeof (range_seg64_t);
1401 		compare = metaslab_rangesize64_compare;
1402 		break;
1403 	default:
1404 		panic("Invalid range seg type %d", rt->rt_type);
1405 	}
1406 	zfs_btree_create(size_tree, compare, size);
1407 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
1408 }
1409 
1410 /* ARGSUSED */
1411 static void
1412 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1413 {
1414 	metaslab_rt_arg_t *mrap = arg;
1415 	zfs_btree_t *size_tree = mrap->mra_bt;
1416 
1417 	zfs_btree_destroy(size_tree);
1418 	kmem_free(mrap, sizeof (*mrap));
1419 }
1420 
1421 /* ARGSUSED */
1422 static void
1423 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1424 {
1425 	metaslab_rt_arg_t *mrap = arg;
1426 	zfs_btree_t *size_tree = mrap->mra_bt;
1427 
1428 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
1429 	    (1 << mrap->mra_floor_shift))
1430 		return;
1431 
1432 	zfs_btree_add(size_tree, rs);
1433 }
1434 
1435 /* ARGSUSED */
1436 static void
1437 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1438 {
1439 	metaslab_rt_arg_t *mrap = arg;
1440 	zfs_btree_t *size_tree = mrap->mra_bt;
1441 
1442 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1 <<
1443 	    mrap->mra_floor_shift))
1444 		return;
1445 
1446 	zfs_btree_remove(size_tree, rs);
1447 }
1448 
1449 /* ARGSUSED */
1450 static void
1451 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1452 {
1453 	metaslab_rt_arg_t *mrap = arg;
1454 	zfs_btree_t *size_tree = mrap->mra_bt;
1455 	zfs_btree_clear(size_tree);
1456 	zfs_btree_destroy(size_tree);
1457 
1458 	metaslab_rt_create(rt, arg);
1459 }
1460 
1461 static range_tree_ops_t metaslab_rt_ops = {
1462 	.rtop_create = metaslab_rt_create,
1463 	.rtop_destroy = metaslab_rt_destroy,
1464 	.rtop_add = metaslab_rt_add,
1465 	.rtop_remove = metaslab_rt_remove,
1466 	.rtop_vacate = metaslab_rt_vacate
1467 };
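/*
 * Note on the callbacks above: segments shorter than 1 << mra_floor_shift
 * (metaslab_by_size_min_shift, 16K by default) are deliberately kept out of
 * the size-sorted B-tree; metaslab_size_tree_full_load() rebuilds the tree
 * with a floor of 0 when a complete size ordering is actually needed.
 */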
1468 
1469 /*
1470  * ==========================================================================
1471  * Common allocator routines
1472  * ==========================================================================
1473  */
1474 
1475 /*
1476  * Return the maximum contiguous segment within the metaslab.
1477  */
1478 uint64_t
1479 metaslab_largest_allocatable(metaslab_t *msp)
1480 {
1481 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1482 	range_seg_t *rs;
1483 
1484 	if (t == NULL)
1485 		return (0);
1486 	if (zfs_btree_numnodes(t) == 0)
1487 		metaslab_size_tree_full_load(msp->ms_allocatable);
1488 
1489 	rs = zfs_btree_last(t, NULL);
1490 	if (rs == NULL)
1491 		return (0);
1492 
1493 	return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
1494 	    msp->ms_allocatable));
1495 }
1496 
1497 /*
1498  * Return the maximum contiguous segment within the unflushed frees of this
1499  * metaslab.
1500  */
1501 static uint64_t
1502 metaslab_largest_unflushed_free(metaslab_t *msp)
1503 {
1504 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1505 
1506 	if (msp->ms_unflushed_frees == NULL)
1507 		return (0);
1508 
1509 	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1510 		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1511 	range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1512 	    NULL);
1513 	if (rs == NULL)
1514 		return (0);
1515 
1516 	/*
1517 	 * When a range is freed from the metaslab, that range is added to
1518 	 * both the unflushed frees and the deferred frees. While the block
1519 	 * will eventually be usable, if the metaslab were loaded the range
1520 	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1521 	 * txgs had passed.  As a result, when attempting to estimate an upper
1522 	 * bound for the largest currently-usable free segment in the
1523 	 * metaslab, we need to not consider any ranges currently in the defer
1524 	 * trees. This algorithm approximates the largest available chunk in
1525 	 * the largest range in the unflushed_frees tree by taking the first
1526 	 * chunk.  While this may be a poor estimate, it should only remain so
1527 	 * briefly and should eventually self-correct as frees are no longer
1528 	 * deferred. Similar logic applies to the ms_freed tree. See
1529 	 * metaslab_load() for more details.
1530 	 *
1531 	 * There are two primary sources of inaccuracy in this estimate. Both
1532 	 * are tolerated for performance reasons. The first source is that we
1533 	 * only check the largest segment for overlaps. Smaller segments may
1534 	 * have more favorable overlaps with the other trees, resulting in
1535 	 * larger usable chunks.  Second, we only look at the first chunk in
1536 	 * the largest segment; there may be other usable chunks in the
1537 	 * largest segment, but we ignore them.
1538 	 */
1539 	uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
1540 	uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1541 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1542 		uint64_t start = 0;
1543 		uint64_t size = 0;
1544 		boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1545 		    rsize, &start, &size);
1546 		if (found) {
1547 			if (rstart == start)
1548 				return (0);
1549 			rsize = start - rstart;
1550 		}
1551 	}
1552 
1553 	uint64_t start = 0;
1554 	uint64_t size = 0;
1555 	boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
1556 	    rsize, &start, &size);
1557 	if (found)
1558 		rsize = start - rstart;
1559 
1560 	return (rsize);
1561 }
1562 
1563 static range_seg_t *
1564 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1565     uint64_t size, zfs_btree_index_t *where)
1566 {
1567 	range_seg_t *rs;
1568 	range_seg_max_t rsearch;
1569 
1570 	rs_set_start(&rsearch, rt, start);
1571 	rs_set_end(&rsearch, rt, start + size);
1572 
1573 	rs = zfs_btree_find(t, &rsearch, where);
1574 	if (rs == NULL) {
1575 		rs = zfs_btree_next(t, where, where);
1576 	}
1577 
1578 	return (rs);
1579 }
1580 
1581 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1582     defined(WITH_CF_BLOCK_ALLOCATOR)
1583 
1584 /*
1585  * This is a helper function that can be used by the allocator to find a
1586  * suitable block to allocate. This will search the specified B-tree looking
1587  * for a block that matches the specified criteria.
1588  */
1589 static uint64_t
1590 metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
1591     uint64_t max_search)
1592 {
1593 	if (*cursor == 0)
1594 		*cursor = rt->rt_start;
1595 	zfs_btree_t *bt = &rt->rt_root;
1596 	zfs_btree_index_t where;
1597 	range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
1598 	uint64_t first_found;
1599 	int count_searched = 0;
1600 
1601 	if (rs != NULL)
1602 		first_found = rs_get_start(rs, rt);
1603 
1604 	while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
1605 	    max_search || count_searched < metaslab_min_search_count)) {
1606 		uint64_t offset = rs_get_start(rs, rt);
1607 		if (offset + size <= rs_get_end(rs, rt)) {
1608 			*cursor = offset + size;
1609 			return (offset);
1610 		}
1611 		rs = zfs_btree_next(bt, &where, &where);
1612 		count_searched++;
1613 	}
1614 
1615 	*cursor = 0;
1616 	return (-1ULL);
1617 }
1618 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1619 
1620 #if defined(WITH_DF_BLOCK_ALLOCATOR)
1621 /*
1622  * ==========================================================================
1623  * Dynamic Fit (df) block allocator
1624  *
1625  * Search for a free chunk of at least this size, starting from the last
1626  * offset (for this alignment of block) looking for up to
1627  * metaslab_df_max_search bytes (16MB).  If a large enough free chunk is not
1628  * found within 16MB, then return a free chunk of exactly the requested size (or
1629  * larger).
1630  *
1631  * If it seems like searching from the last offset will be unproductive, skip
1632  * that and just return a free chunk of exactly the requested size (or larger).
1633  * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct.  This
1634  * mechanism is probably not very useful and may be removed in the future.
1635  *
1636  * The behavior when not searching can be changed to return the largest free
1637  * chunk, instead of a free chunk of exactly the requested size, by setting
1638  * metaslab_df_use_largest_segment.
1639  * ==========================================================================
1640  */
1641 static uint64_t
1642 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1643 {
1644 	/*
1645 	 * Find the largest power of 2 block size that evenly divides the
1646 	 * requested size. This is used to try to allocate blocks with similar
1647 	 * alignment from the same area of the metaslab (i.e. same cursor
1648 	 * bucket), but it does not guarantee that allocations of other
1649 	 * sizes will not exist in the same region.
1650 	 */
1651 	uint64_t align = size & -size;
1652 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1653 	range_tree_t *rt = msp->ms_allocatable;
1654 	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1655 	uint64_t offset;
1656 
1657 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1658 
1659 	/*
1660 	 * If we're running low on space, find a segment based on size,
1661 	 * rather than iterating based on offset.
1662 	 */
1663 	if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1664 	    free_pct < metaslab_df_free_pct) {
1665 		offset = -1;
1666 	} else {
1667 		offset = metaslab_block_picker(rt,
1668 		    cursor, size, metaslab_df_max_search);
1669 	}
1670 
1671 	if (offset == -1) {
1672 		range_seg_t *rs;
1673 		if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1674 			metaslab_size_tree_full_load(msp->ms_allocatable);
1675 
1676 		if (metaslab_df_use_largest_segment) {
1677 			/* use largest free segment */
1678 			rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1679 		} else {
1680 			zfs_btree_index_t where;
1681 			/* use segment of this size, or next largest */
1682 			rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1683 			    rt, msp->ms_start, size, &where);
1684 		}
1685 		if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1686 		    rt)) {
1687 			offset = rs_get_start(rs, rt);
1688 			*cursor = offset + size;
1689 		}
1690 	}
1691 
1692 	return (offset);
1693 }
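
/*
 * Worked example of the cursor-bucket selection above (sizes are
 * illustrative): for a 24 KiB request, size & -size isolates the lowest set
 * bit of the size, so:
 *
 *	size   = 24 << 10;		0x6000
 *	align  = size & -size;		0x2000 (8 KiB)
 *	bucket = highbit64(align) - 1;	13, i.e. the cursor is ms_lbas[13]
 *
 * A 32 KiB request would use ms_lbas[15] instead, so streams of differently
 * aligned allocations advance independent cursors rather than interleaving
 * within one region.
 */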
1694 
1695 static metaslab_ops_t metaslab_df_ops = {
1696 	metaslab_df_alloc
1697 };
1698 
1699 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1700 #endif /* WITH_DF_BLOCK_ALLOCATOR */
1701 
1702 #if defined(WITH_CF_BLOCK_ALLOCATOR)
1703 /*
1704  * ==========================================================================
1705  * Cursor fit block allocator -
1706  * Select the largest region in the metaslab, set the cursor to the beginning
1707  * of the range and the cursor_end to the end of the range. As allocations
1708  * are made advance the cursor. Continue allocating from the cursor until
1709  * the range is exhausted and then find a new range.
1710  * ==========================================================================
1711  */
1712 static uint64_t
1713 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1714 {
1715 	range_tree_t *rt = msp->ms_allocatable;
1716 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1717 	uint64_t *cursor = &msp->ms_lbas[0];
1718 	uint64_t *cursor_end = &msp->ms_lbas[1];
1719 	uint64_t offset = 0;
1720 
1721 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1722 
1723 	ASSERT3U(*cursor_end, >=, *cursor);
1724 
1725 	if ((*cursor + size) > *cursor_end) {
1726 		range_seg_t *rs;
1727 
1728 		if (zfs_btree_numnodes(t) == 0)
1729 			metaslab_size_tree_full_load(msp->ms_allocatable);
1730 		rs = zfs_btree_last(t, NULL);
1731 		if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1732 		    size)
1733 			return (-1ULL);
1734 
1735 		*cursor = rs_get_start(rs, rt);
1736 		*cursor_end = rs_get_end(rs, rt);
1737 	}
1738 
1739 	offset = *cursor;
1740 	*cursor += size;
1741 
1742 	return (offset);
1743 }
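
/*
 * Illustrative trace of the cursor-fit allocator above (addresses assumed):
 * if the largest free region is [0x100000, 0x180000), the first allocation
 * initializes the cursors and later ones simply advance them:
 *
 *	alloc(0x8000) -> 0x100000	cursor = 0x108000, cursor_end = 0x180000
 *	alloc(0x4000) -> 0x108000	cursor = 0x10c000
 *
 * Only when cursor + size would pass cursor_end is ms_allocatable_by_size
 * consulted again for a new (largest) region.
 */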
1744 
1745 static metaslab_ops_t metaslab_cf_ops = {
1746 	metaslab_cf_alloc
1747 };
1748 
1749 metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
1750 #endif /* WITH_CF_BLOCK_ALLOCATOR */
1751 
1752 #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1753 /*
1754  * ==========================================================================
1755  * New dynamic fit allocator -
1756  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1757  * contiguous blocks. If no region is found then just use the largest segment
1758  * that remains.
1759  * ==========================================================================
1760  */
1761 
1762 /*
1763  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1764  * to request from the allocator.
1765  */
1766 uint64_t metaslab_ndf_clump_shift = 4;
1767 
1768 static uint64_t
1769 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1770 {
1771 	zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1772 	range_tree_t *rt = msp->ms_allocatable;
1773 	zfs_btree_index_t where;
1774 	range_seg_t *rs;
1775 	range_seg_max_t rsearch;
1776 	uint64_t hbit = highbit64(size);
1777 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1778 	uint64_t max_size = metaslab_largest_allocatable(msp);
1779 
1780 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1781 
1782 	if (max_size < size)
1783 		return (-1ULL);
1784 
1785 	rs_set_start(&rsearch, rt, *cursor);
1786 	rs_set_end(&rsearch, rt, *cursor + size);
1787 
1788 	rs = zfs_btree_find(t, &rsearch, &where);
1789 	if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1790 		t = &msp->ms_allocatable_by_size;
1791 
1792 		rs_set_start(&rsearch, rt, 0);
1793 		rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1794 		    metaslab_ndf_clump_shift)));
1795 
1796 		rs = zfs_btree_find(t, &rsearch, &where);
1797 		if (rs == NULL)
1798 			rs = zfs_btree_next(t, &where, &where);
1799 		ASSERT(rs != NULL);
1800 	}
1801 
1802 	if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1803 		*cursor = rs_get_start(rs, rt) + size;
1804 		return (rs_get_start(rs, rt));
1805 	}
1806 	return (-1ULL);
1807 }
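
/*
 * Example of the clump sizing above (request size assumed): for an 8 KiB
 * allocation, highbit64(8192) == 14, so with the default
 * metaslab_ndf_clump_shift of 4 the fallback searches the size-sorted tree
 * for a segment of roughly
 *
 *	MIN(max_size, 1ULL << (14 + 4)) = 256 KiB
 *
 * settling for the next larger segment if no exact match exists, so that a
 * clump of similar allocations can be packed contiguously.
 */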
1808 
1809 static metaslab_ops_t metaslab_ndf_ops = {
1810 	metaslab_ndf_alloc
1811 };
1812 
1813 metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
1814 #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1815 
1816 
1817 /*
1818  * ==========================================================================
1819  * Metaslabs
1820  * ==========================================================================
1821  */
1822 
1823 /*
1824  * Wait for any in-progress metaslab loads to complete.
1825  */
1826 static void
1827 metaslab_load_wait(metaslab_t *msp)
1828 {
1829 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1830 
1831 	while (msp->ms_loading) {
1832 		ASSERT(!msp->ms_loaded);
1833 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1834 	}
1835 }
1836 
1837 /*
1838  * Wait for any in-progress flushing to complete.
1839  */
1840 static void
1841 metaslab_flush_wait(metaslab_t *msp)
1842 {
1843 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1844 
1845 	while (msp->ms_flushing)
1846 		cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1847 }
1848 
1849 static unsigned int
1850 metaslab_idx_func(multilist_t *ml, void *arg)
1851 {
1852 	metaslab_t *msp = arg;
1853 	return (msp->ms_id % multilist_get_num_sublists(ml));
1854 }
1855 
1856 uint64_t
1857 metaslab_allocated_space(metaslab_t *msp)
1858 {
1859 	return (msp->ms_allocated_space);
1860 }
1861 
1862 /*
1863  * Verify that the space accounting on disk matches the in-core range_trees.
1864  */
1865 static void
1866 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1867 {
1868 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1869 	uint64_t allocating = 0;
1870 	uint64_t sm_free_space, msp_free_space;
1871 
1872 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1873 	ASSERT(!msp->ms_condensing);
1874 
1875 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1876 		return;
1877 
1878 	/*
1879 	 * We can only verify the metaslab space when we're called
1880 	 * from syncing context with a loaded metaslab that has an
1881 	 * allocated space map. Calling this in non-syncing context
1882 	 * does not provide a consistent view of the metaslab since
1883 	 * we're performing allocations in the future.
1884 	 */
1885 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1886 	    !msp->ms_loaded)
1887 		return;
1888 
1889 	/*
1890 	 * Even though the smp_alloc field can get negative,
1891 	 * when it comes to a metaslab's space map, that should
1892 	 * never be the case.
1893 	 */
1894 	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1895 
1896 	ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1897 	    range_tree_space(msp->ms_unflushed_frees));
1898 
1899 	ASSERT3U(metaslab_allocated_space(msp), ==,
1900 	    space_map_allocated(msp->ms_sm) +
1901 	    range_tree_space(msp->ms_unflushed_allocs) -
1902 	    range_tree_space(msp->ms_unflushed_frees));
1903 
1904 	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1905 
1906 	/*
1907 	 * Account for future allocations since we would have
1908 	 * already deducted that space from the ms_allocatable.
1909 	 */
1910 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1911 		allocating +=
1912 		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1913 	}
1914 	ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
1915 	    msp->ms_allocating_total);
1916 
1917 	ASSERT3U(msp->ms_deferspace, ==,
1918 	    range_tree_space(msp->ms_defer[0]) +
1919 	    range_tree_space(msp->ms_defer[1]));
1920 
1921 	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1922 	    msp->ms_deferspace + range_tree_space(msp->ms_freed);
1923 
1924 	VERIFY3U(sm_free_space, ==, msp_free_space);
1925 }
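
/*
 * The VERIFY above boils down to an identity between two ways of counting
 * the metaslab's free space (a sketch of the bookkeeping, not additional
 * state):
 *
 *	allocated = space_map_allocated(ms_sm)
 *	    + space(ms_unflushed_allocs) - space(ms_unflushed_frees);
 *
 *	ms_size - allocated ==
 *	    space(ms_allocatable) + space(ms_allocating[*])
 *	    + ms_deferspace + space(ms_freed);
 *
 * i.e. every free byte is either already allocatable, being allocated in an
 * open txg, deferred, or freed this txg but not yet returned for reuse.
 */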
1926 
1927 static void
1928 metaslab_aux_histograms_clear(metaslab_t *msp)
1929 {
1930 	/*
1931 	 * Auxiliary histograms are only cleared when resetting them,
1932 	 * which can only happen while the metaslab is loaded.
1933 	 */
1934 	ASSERT(msp->ms_loaded);
1935 
1936 	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
1937 	for (int t = 0; t < TXG_DEFER_SIZE; t++)
1938 		bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
1939 }
1940 
1941 static void
1942 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1943     range_tree_t *rt)
1944 {
1945 	/*
1946 	 * This is modeled after space_map_histogram_add(), so refer to that
1947 	 * function for implementation details. We want this to work like
1948 	 * the space map histogram, and not the range tree histogram, as we
1949 	 * are essentially constructing a delta that will be later subtracted
1950 	 * from the space map histogram.
1951 	 */
1952 	int idx = 0;
1953 	for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1954 		ASSERT3U(i, >=, idx + shift);
1955 		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1956 
1957 		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
1958 			ASSERT3U(idx + shift, ==, i);
1959 			idx++;
1960 			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
1961 		}
1962 	}
1963 }
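
/*
 * Worked example of the folding above, assuming the usual
 * SPACE_MAP_HISTOGRAM_SIZE of 32 and a space map shift of 9: range tree
 * buckets 9..40 map one-to-one onto histogram[0..31], and anything larger
 * is folded into the last slot at the same granularity:
 *
 *	rt_histogram[42] = 1			(one segment of ~4 TiB)
 *	histogram[31] += 1 << (42 - 31 - 9);	(counts as 4 max-sized
 *						 space map segments)
 *
 * mirroring how space_map_histogram_add() collapses the tail of the range
 * tree histogram when the metaslab is synced.
 */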
1964 
1965 /*
1966  * Called at every sync pass in which the metaslab gets synced.
1967  *
1968  * The reason is that we want our auxiliary histograms to be updated
1969  * whenever the metaslab's space map histogram is updated. This way
1970  * we stay consistent on which parts of the metaslab space map's
1971  * histogram are currently not available for allocations (e.g. because
1972  * they are in the defer, freed, and freeing trees).
1973  */
1974 static void
1975 metaslab_aux_histograms_update(metaslab_t *msp)
1976 {
1977 	space_map_t *sm = msp->ms_sm;
1978 	ASSERT(sm != NULL);
1979 
1980 	/*
1981 	 * This is similar to the metaslab's space map histogram updates
1982 	 * that take place in metaslab_sync(). The only difference is that
1983 	 * we only care about segments that haven't made it into the
1984 	 * ms_allocatable tree yet.
1985 	 */
1986 	if (msp->ms_loaded) {
1987 		metaslab_aux_histograms_clear(msp);
1988 
1989 		metaslab_aux_histogram_add(msp->ms_synchist,
1990 		    sm->sm_shift, msp->ms_freed);
1991 
1992 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1993 			metaslab_aux_histogram_add(msp->ms_deferhist[t],
1994 			    sm->sm_shift, msp->ms_defer[t]);
1995 		}
1996 	}
1997 
1998 	metaslab_aux_histogram_add(msp->ms_synchist,
1999 	    sm->sm_shift, msp->ms_freeing);
2000 }
2001 
2002 /*
2003  * Called every time we are done syncing (writing to) the metaslab,
2004  * i.e. at the end of each sync pass.
2005  * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2006  */
2007 static void
2008 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2009 {
2010 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2011 	space_map_t *sm = msp->ms_sm;
2012 
2013 	if (sm == NULL) {
2014 		/*
2015 		 * We came here from metaslab_init() when creating/opening a
2016 		 * pool, looking at a metaslab that hasn't had any allocations
2017 		 * yet.
2018 		 */
2019 		return;
2020 	}
2021 
2022 	/*
2023 	 * This is similar to the actions that we take for the ms_freed
2024 	 * and ms_defer trees in metaslab_sync_done().
2025 	 */
2026 	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2027 	if (defer_allowed) {
2028 		bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
2029 		    sizeof (msp->ms_synchist));
2030 	} else {
2031 		bzero(msp->ms_deferhist[hist_index],
2032 		    sizeof (msp->ms_deferhist[hist_index]));
2033 	}
2034 	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
2035 }
2036 
2037 /*
2038  * Ensure that the metaslab's weight and fragmentation are consistent
2039  * with the contents of the histogram (either the range tree's histogram
2040  * or the space map's, depending on whether the metaslab is loaded).
2041  */
2042 static void
2043 metaslab_verify_weight_and_frag(metaslab_t *msp)
2044 {
2045 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2046 
2047 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2048 		return;
2049 
2050 	/*
2051 	 * We can end up here from vdev_remove_complete(), in which case we
2052 	 * cannot do these assertions because we hold spa config locks and
2053 	 * thus we are not allowed to read from the DMU.
2054 	 *
2055 	 * We check if the metaslab group has been removed and if that's
2056 	 * the case we return immediately as that would mean that we are
2057 	 * here from the aforementioned code path.
2058 	 */
2059 	if (msp->ms_group == NULL)
2060 		return;
2061 
2062 	/*
2063 	 * Devices being removed always return a weight of 0 and leave
2064 	 * fragmentation and ms_max_size as is - there is nothing for
2065 	 * us to verify here.
2066 	 */
2067 	vdev_t *vd = msp->ms_group->mg_vd;
2068 	if (vd->vdev_removing)
2069 		return;
2070 
2071 	/*
2072 	 * If the metaslab is dirty it probably means that we've done
2073 	 * some allocations or frees that have changed our histograms
2074 	 * and thus the weight.
2075 	 */
2076 	for (int t = 0; t < TXG_SIZE; t++) {
2077 		if (txg_list_member(&vd->vdev_ms_list, msp, t))
2078 			return;
2079 	}
2080 
2081 	/*
2082 	 * This verification checks that our in-memory state is consistent
2083 	 * with what's on disk. If the pool is read-only then there aren't
2084 	 * any changes and we just have the initially-loaded state.
2085 	 */
2086 	if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2087 		return;
2088 
2089 	/* some extra verification for in-core tree if you can */
2090 	if (msp->ms_loaded) {
2091 		range_tree_stat_verify(msp->ms_allocatable);
2092 		VERIFY(space_map_histogram_verify(msp->ms_sm,
2093 		    msp->ms_allocatable));
2094 	}
2095 
2096 	uint64_t weight = msp->ms_weight;
2097 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2098 	boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2099 	uint64_t frag = msp->ms_fragmentation;
2100 	uint64_t max_segsize = msp->ms_max_size;
2101 
2102 	msp->ms_weight = 0;
2103 	msp->ms_fragmentation = 0;
2104 
2105 	/*
2106 	 * This function is used for verification purposes and thus should
2107 	 * not introduce any side-effects/mutations on the system's state.
2108 	 *
2109 	 * Regardless of whether metaslab_weight() thinks this metaslab
2110 	 * should be active or not, we want to ensure that the actual weight
2111 	 * (and therefore the value of ms_weight) would be the same if it
2112 	 * were to be recalculated at this point.
2113 	 *
2114 	 * In addition we set the nodirty flag so metaslab_weight() does
2115 	 * not dirty the metaslab for future TXGs (e.g. when trying to
2116 	 * force condensing to upgrade the metaslab spacemaps).
2117 	 */
2118 	msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2119 
2120 	VERIFY3U(max_segsize, ==, msp->ms_max_size);
2121 
2122 	/*
2123 	 * If the weight type changed then there is no point in doing
2124 	 * verification. Revert fields to their original values.
2125 	 */
2126 	if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2127 	    (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2128 		msp->ms_fragmentation = frag;
2129 		msp->ms_weight = weight;
2130 		return;
2131 	}
2132 
2133 	VERIFY3U(msp->ms_fragmentation, ==, frag);
2134 	VERIFY3U(msp->ms_weight, ==, weight);
2135 }
2136 
2137 /*
2138  * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2139  * this class that was used longest ago, and attempt to unload it.  To avoid
2140  * degrading performance we don't want to spend too much time in this loop,
2141  * and we expect that most of the time this operation will
2142  * succeed. Between that and the normal unloading processing during txg sync,
2143  * we expect this to keep the metaslab memory usage under control.
2144  */
2145 static void
2146 metaslab_potentially_evict(metaslab_class_t *mc)
2147 {
2148 #ifdef _KERNEL
2149 	uint64_t allmem = arc_all_memory();
2150 	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2151 	uint64_t size =	spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2152 	int tries = 0;
2153 	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2154 	    tries < multilist_get_num_sublists(mc->mc_metaslab_txg_list) * 2;
2155 	    tries++) {
2156 		unsigned int idx = multilist_get_random_index(
2157 		    mc->mc_metaslab_txg_list);
2158 		multilist_sublist_t *mls =
2159 		    multilist_sublist_lock(mc->mc_metaslab_txg_list, idx);
2160 		metaslab_t *msp = multilist_sublist_head(mls);
2161 		multilist_sublist_unlock(mls);
2162 		while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2163 		    inuse * size) {
2164 			VERIFY3P(mls, ==, multilist_sublist_lock(
2165 			    mc->mc_metaslab_txg_list, idx));
2166 			ASSERT3U(idx, ==,
2167 			    metaslab_idx_func(mc->mc_metaslab_txg_list, msp));
2168 
2169 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
2170 				multilist_sublist_unlock(mls);
2171 				break;
2172 			}
2173 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2174 			multilist_sublist_unlock(mls);
2175 			/*
2176 			 * If the metaslab is currently loading there are two
2177 			 * cases. If it's the metaslab we're evicting, we
2178 			 * can't continue on or we'll panic when we attempt to
2179 			 * recursively lock the mutex. If it's another
2180 			 * metaslab that's loading, it can be safely skipped,
2181 			 * since we know it's very new and therefore not a
2182 			 * good eviction candidate. We check later once the
2183 			 * lock is held that the metaslab is fully loaded
2184 			 * before actually unloading it.
2185 			 */
2186 			if (msp->ms_loading) {
2187 				msp = next_msp;
2188 				inuse =
2189 				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2190 				continue;
2191 			}
2192 			/*
2193 			 * We can't unload metaslabs with no spacemap because
2194 			 * they're not ready to be unloaded yet. We can't
2195 			 * unload metaslabs with outstanding allocations
2196 			 * because doing so could cause the metaslab's weight
2197 			 * to decrease while it's unloaded, which violates an
2198 			 * invariant that we use to prevent unnecessary
2199 			 * loading. We also don't unload metaslabs that are
2200 			 * currently active because they are high-weight
2201 			 * metaslabs that are likely to be used in the near
2202 			 * future.
2203 			 */
2204 			mutex_enter(&msp->ms_lock);
2205 			if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2206 			    msp->ms_allocating_total == 0) {
2207 				metaslab_unload(msp);
2208 			}
2209 			mutex_exit(&msp->ms_lock);
2210 			msp = next_msp;
2211 			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2212 		}
2213 	}
2214 #endif
2215 }
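
/*
 * The trigger above is plain arithmetic on the btree leaf cache.  For
 * illustration (all numbers assumed): with 8 GiB reported by
 * arc_all_memory(), a zfs_metaslab_mem_limit of 25 and 4 KiB cache entries,
 *
 *	threshold = 8 GiB * 25 / 100 = 2 GiB
 *	usage     = inuse * 4 KiB
 *
 * eviction attempts start once more than ~524288 leaf entries are in use
 * and continue, bounded by twice the number of sublists, until usage drops
 * back under the threshold.
 */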
2216 
2217 static int
2218 metaslab_load_impl(metaslab_t *msp)
2219 {
2220 	int error = 0;
2221 
2222 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2223 	ASSERT(msp->ms_loading);
2224 	ASSERT(!msp->ms_condensing);
2225 
2226 	/*
2227 	 * We temporarily drop the lock to unblock other operations while we
2228 	 * are reading the space map. Therefore, metaslab_sync() and
2229 	 * metaslab_sync_done() can run at the same time as we do.
2230 	 *
2231 	 * If we are using the log space maps, metaslab_sync() can't write to
2232 	 * the metaslab's space map while we are loading as we only write to
2233 	 * it when we are flushing the metaslab, and that can't happen while
2234 	 * we are loading it.
2235 	 *
2236 	 * If we are not using log space maps though, metaslab_sync() can
2237 	 * append to the space map while we are loading. Therefore we load
2238 	 * only entries that existed when we started the load. Additionally,
2239 	 * metaslab_sync_done() has to wait for the load to complete because
2240 	 * there are potential races like metaslab_load() loading parts of the
2241 	 * space map that are currently being appended by metaslab_sync(). If
2242 	 * we didn't, the ms_allocatable would have entries that
2243 	 * metaslab_sync_done() would try to re-add later.
2244 	 *
2245 	 * That's why before dropping the lock we remember the synced length
2246 	 * of the metaslab and read up to that point of the space map,
2247 	 * ignoring entries appended by metaslab_sync() that happen after we
2248 	 * drop the lock.
2249 	 */
2250 	uint64_t length = msp->ms_synced_length;
2251 	mutex_exit(&msp->ms_lock);
2252 
2253 	hrtime_t load_start = gethrtime();
2254 	metaslab_rt_arg_t *mrap;
2255 	if (msp->ms_allocatable->rt_arg == NULL) {
2256 		mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2257 	} else {
2258 		mrap = msp->ms_allocatable->rt_arg;
2259 		msp->ms_allocatable->rt_ops = NULL;
2260 		msp->ms_allocatable->rt_arg = NULL;
2261 	}
2262 	mrap->mra_bt = &msp->ms_allocatable_by_size;
2263 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2264 
2265 	if (msp->ms_sm != NULL) {
2266 		error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2267 		    SM_FREE, length);
2268 
2269 		/* Now, populate the size-sorted tree. */
2270 		metaslab_rt_create(msp->ms_allocatable, mrap);
2271 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2272 		msp->ms_allocatable->rt_arg = mrap;
2273 
2274 		struct mssa_arg arg = {0};
2275 		arg.rt = msp->ms_allocatable;
2276 		arg.mra = mrap;
2277 		range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2278 		    &arg);
2279 	} else {
2280 		/*
2281 		 * Add the size-sorted tree first, since we don't need to load
2282 		 * the metaslab from the spacemap.
2283 		 */
2284 		metaslab_rt_create(msp->ms_allocatable, mrap);
2285 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2286 		msp->ms_allocatable->rt_arg = mrap;
2287 		/*
2288 		 * The space map has not been allocated yet, so treat
2289 		 * all the space in the metaslab as free and add it to the
2290 		 * ms_allocatable tree.
2291 		 */
2292 		range_tree_add(msp->ms_allocatable,
2293 		    msp->ms_start, msp->ms_size);
2294 
2295 		if (msp->ms_freed != NULL) {
2296 			/*
2297 			 * If the ms_sm doesn't exist, this means that this
2298 			 * metaslab hasn't gone through metaslab_sync() and
2299 			 * thus has never been dirtied. So we shouldn't
2300 			 * expect any unflushed allocs or frees from previous
2301 			 * TXGs.
2302 			 *
2303 			 * Note: ms_freed and all the other trees except for
2304 			 * the ms_allocatable, can be NULL at this point only
2305 			 * if this is a new metaslab of a vdev that just got
2306 			 * expanded.
2307 			 */
2308 			ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2309 			ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2310 		}
2311 	}
2312 
2313 	/*
2314 	 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2315 	 * changing the ms_sm (or log_sm) and the metaslab's range trees
2316 	 * while we are about to use them and populate the ms_allocatable.
2317 	 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2318 	 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2319 	 */
2320 	mutex_enter(&msp->ms_sync_lock);
2321 	mutex_enter(&msp->ms_lock);
2322 
2323 	ASSERT(!msp->ms_condensing);
2324 	ASSERT(!msp->ms_flushing);
2325 
2326 	if (error != 0) {
2327 		mutex_exit(&msp->ms_sync_lock);
2328 		return (error);
2329 	}
2330 
2331 	ASSERT3P(msp->ms_group, !=, NULL);
2332 	msp->ms_loaded = B_TRUE;
2333 
2334 	/*
2335 	 * Apply all the unflushed changes to ms_allocatable right
2336 	 * away so any manipulations we do below have a clear view
2337 	 * of what is allocated and what is free.
2338 	 */
2339 	range_tree_walk(msp->ms_unflushed_allocs,
2340 	    range_tree_remove, msp->ms_allocatable);
2341 	range_tree_walk(msp->ms_unflushed_frees,
2342 	    range_tree_add, msp->ms_allocatable);
2343 
2346 	ASSERT3P(msp->ms_group, !=, NULL);
2347 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2348 	if (spa_syncing_log_sm(spa) != NULL) {
2349 		ASSERT(spa_feature_is_enabled(spa,
2350 		    SPA_FEATURE_LOG_SPACEMAP));
2351 
2352 		/*
2353 		 * If we use a log space map we add all the segments
2354 		 * that are in ms_unflushed_frees so they are available
2355 		 * for allocation.
2356 		 *
2357 		 * ms_allocatable needs to contain all free segments
2358 		 * that are ready for allocations (thus not segments
2359 		 * from ms_freeing, ms_freed, and the ms_defer trees).
2360 		 * But if we grab the lock in this code path at a sync
2361 		 * pass later than 1, then it also contains the
2362 		 * segments of ms_freed (they were added to it earlier
2363 		 * in this path through ms_unflushed_frees). So we
2364 		 * need to remove all the segments that exist in
2365 		 * ms_freed from ms_allocatable as they will be added
2366 		 * later in metaslab_sync_done().
2367 		 *
2368 		 * When there's no log space map, the ms_allocatable
2369 		 * correctly doesn't contain any segments that exist
2370 		 * in ms_freed [see ms_synced_length].
2371 		 */
2372 		range_tree_walk(msp->ms_freed,
2373 		    range_tree_remove, msp->ms_allocatable);
2374 	}
2375 
2376 	/*
2377 	 * If we are not using the log space map, ms_allocatable
2378 	 * contains the segments that exist in the ms_defer trees
2379 	 * [see ms_synced_length]. Thus we need to remove them
2380 	 * from ms_allocatable as they will be added again in
2381 	 * metaslab_sync_done().
2382 	 *
2383 	 * If we are using the log space map, ms_allocatable still
2384 	 * contains the segments that exist in the ms_defer trees.
2385 	 * Not because it read them through the ms_sm though. But
2386 	 * because these segments are part of ms_unflushed_frees
2387 	 * whose segments we add to ms_allocatable earlier in this
2388 	 * code path.
2389 	 */
2390 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2391 		range_tree_walk(msp->ms_defer[t],
2392 		    range_tree_remove, msp->ms_allocatable);
2393 	}
2394 
2395 	/*
2396 	 * Call metaslab_recalculate_weight_and_sort() now that the
2397 	 * metaslab is loaded so we get the metaslab's real weight.
2398 	 *
2399 	 * Unless this metaslab was created with older software and
2400 	 * has not yet been converted to use segment-based weight, we
2401 	 * expect the new weight to be better or equal to the weight
2402 	 * that the metaslab had while it was not loaded. This is
2403 	 * because the old weight does not take into account the
2404 	 * consolidation of adjacent segments between TXGs. [see
2405 	 * comment for ms_synchist and ms_deferhist[] for more info]
2406 	 */
2407 	uint64_t weight = msp->ms_weight;
2408 	uint64_t max_size = msp->ms_max_size;
2409 	metaslab_recalculate_weight_and_sort(msp);
2410 	if (!WEIGHT_IS_SPACEBASED(weight))
2411 		ASSERT3U(weight, <=, msp->ms_weight);
2412 	msp->ms_max_size = metaslab_largest_allocatable(msp);
2413 	ASSERT3U(max_size, <=, msp->ms_max_size);
2414 	hrtime_t load_end = gethrtime();
2415 	msp->ms_load_time = load_end;
2416 	zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2417 	    "ms_id %llu, smp_length %llu, "
2418 	    "unflushed_allocs %llu, unflushed_frees %llu, "
2419 	    "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2420 	    "loading_time %lld ms, ms_max_size %llu, "
2421 	    "max size error %lld, "
2422 	    "old_weight %llx, new_weight %llx",
2423 	    spa_syncing_txg(spa), spa_name(spa),
2424 	    msp->ms_group->mg_vd->vdev_id, msp->ms_id,
2425 	    space_map_length(msp->ms_sm),
2426 	    range_tree_space(msp->ms_unflushed_allocs),
2427 	    range_tree_space(msp->ms_unflushed_frees),
2428 	    range_tree_space(msp->ms_freed),
2429 	    range_tree_space(msp->ms_defer[0]),
2430 	    range_tree_space(msp->ms_defer[1]),
2431 	    (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2432 	    (longlong_t)((load_end - load_start) / 1000000),
2433 	    msp->ms_max_size, msp->ms_max_size - max_size,
2434 	    weight, msp->ms_weight);
2435 
2436 	metaslab_verify_space(msp, spa_syncing_txg(spa));
2437 	mutex_exit(&msp->ms_sync_lock);
2438 	return (0);
2439 }
2440 
2441 int
2442 metaslab_load(metaslab_t *msp)
2443 {
2444 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2445 
2446 	/*
2447 	 * There may be another thread loading the same metaslab, if that's
2448 	 * the case just wait until the other thread is done and return.
2449 	 */
2450 	metaslab_load_wait(msp);
2451 	if (msp->ms_loaded)
2452 		return (0);
2453 	VERIFY(!msp->ms_loading);
2454 	ASSERT(!msp->ms_condensing);
2455 
2456 	/*
2457 	 * We set the loading flag BEFORE potentially dropping the lock to
2458 	 * wait for an ongoing flush (see ms_flushing below). This way other
2459 	 * threads know that there is already a thread that is loading this
2460 	 * metaslab.
2461 	 */
2462 	msp->ms_loading = B_TRUE;
2463 
2464 	/*
2465 	 * Wait for any in-progress flushing to finish as we drop the ms_lock
2466 	 * both here (during space_map_load()) and in metaslab_flush() (when
2467 	 * we flush our changes to the ms_sm).
2468 	 */
2469 	if (msp->ms_flushing)
2470 		metaslab_flush_wait(msp);
2471 
2472 	/*
2473 	 * In the event that we were waiting for the metaslab to be
2474 	 * flushed (where we temporarily dropped the ms_lock), ensure that
2475 	 * no one else loaded the metaslab somehow.
2476 	 */
2477 	ASSERT(!msp->ms_loaded);
2478 
2479 	/*
2480 	 * If we're loading a metaslab in the normal class, consider evicting
2481 	 * another one to keep our memory usage under the limit defined by the
2482 	 * zfs_metaslab_mem_limit tunable.
2483 	 */
2484 	if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2485 	    msp->ms_group->mg_class) {
2486 		metaslab_potentially_evict(msp->ms_group->mg_class);
2487 	}
2488 
2489 	int error = metaslab_load_impl(msp);
2490 
2491 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2492 	msp->ms_loading = B_FALSE;
2493 	cv_broadcast(&msp->ms_load_cv);
2494 
2495 	return (error);
2496 }
2497 
2498 void
2499 metaslab_unload(metaslab_t *msp)
2500 {
2501 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2502 
2503 	/*
2504 	 * This can happen if a metaslab is selected for eviction (in
2505 	 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2506 	 * metaslab_class_evict_old).
2507 	 */
2508 	if (!msp->ms_loaded)
2509 		return;
2510 
2511 	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2512 	msp->ms_loaded = B_FALSE;
2513 	msp->ms_unload_time = gethrtime();
2514 
2515 	msp->ms_activation_weight = 0;
2516 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2517 
2518 	if (msp->ms_group != NULL) {
2519 		metaslab_class_t *mc = msp->ms_group->mg_class;
2520 		multilist_sublist_t *mls =
2521 		    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
2522 		if (multilist_link_active(&msp->ms_class_txg_node))
2523 			multilist_sublist_remove(mls, msp);
2524 		multilist_sublist_unlock(mls);
2525 
2526 		spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2527 		zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2528 		    "ms_id %llu, weight %llx, "
2529 		    "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2530 		    "loaded %llu ms ago, max_size %llu",
2531 		    spa_syncing_txg(spa), spa_name(spa),
2532 		    msp->ms_group->mg_vd->vdev_id, msp->ms_id,
2533 		    msp->ms_weight,
2534 		    msp->ms_selected_txg,
2535 		    (msp->ms_unload_time - msp->ms_selected_time) / 1000 / 1000,
2536 		    msp->ms_alloc_txg,
2537 		    (msp->ms_unload_time - msp->ms_load_time) / 1000 / 1000,
2538 		    msp->ms_max_size);
2539 	}
2540 
2541 	/*
2542 	 * We explicitly recalculate the metaslab's weight based on its space
2543 	 * map (as it is now not loaded). We want unloaded metaslabs to always
2544 	 * have their weights calculated from the space map histograms, while
2545 	 * loaded ones have it calculated from their in-core range tree
2546 	 * [see metaslab_load()]. This way, the weight reflects the information
2547 	 * available in-core, whether it is loaded or not.
2548 	 *
2549 	 * If ms_group == NULL, it means that we came here from metaslab_fini(),
2550 	 * at which point it doesn't make sense for us to do the recalculation
2551 	 * and the sorting.
2552 	 */
2553 	if (msp->ms_group != NULL)
2554 		metaslab_recalculate_weight_and_sort(msp);
2555 }
2556 
2557 /*
2558  * We want to optimize the memory use of the per-metaslab range
2559  * trees. To do this, we store the segments in the range trees in
2560  * units of sectors, zero-indexing from the start of the metaslab. If
2561  * units of sectors, zero-indexed from the start of the metaslab. If
2562  * vdev_ms_shift - vdev_ashift is less than 32, we can store
2563  */
2564 range_seg_type_t
2565 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2566     uint64_t *start, uint64_t *shift)
2567 {
2568 	if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2569 	    !zfs_metaslab_force_large_segs) {
2570 		*shift = vdev->vdev_ashift;
2571 		*start = msp->ms_start;
2572 		return (RANGE_SEG32);
2573 	} else {
2574 		*shift = 0;
2575 		*start = 0;
2576 		return (RANGE_SEG64);
2577 	}
2578 }
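
/*
 * Example of the 32-bit check above (shifts assumed): a 16 GiB metaslab
 * (vdev_ms_shift = 34) on a 4 KiB-sector vdev (vdev_ashift = 12) gives
 * 34 - 12 = 22 < 32, so RANGE_SEG32 is chosen and offsets are stored as
 *
 *	(offset - ms_start) >> 12
 *
 * which never exceeds 2^22 and easily fits in a uint32_t, roughly halving
 * the memory needed per range tree segment compared to RANGE_SEG64.
 */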
2579 
2580 void
2581 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2582 {
2583 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2584 	metaslab_class_t *mc = msp->ms_group->mg_class;
2585 	multilist_sublist_t *mls =
2586 	    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
2587 	if (multilist_link_active(&msp->ms_class_txg_node))
2588 		multilist_sublist_remove(mls, msp);
2589 	msp->ms_selected_txg = txg;
2590 	msp->ms_selected_time = gethrtime();
2591 	multilist_sublist_insert_tail(mls, msp);
2592 	multilist_sublist_unlock(mls);
2593 }
2594 
2595 void
2596 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2597     int64_t defer_delta, int64_t space_delta)
2598 {
2599 	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2600 
2601 	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2602 	ASSERT(vd->vdev_ms_count != 0);
2603 
2604 	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2605 	    vdev_deflated_space(vd, space_delta));
2606 }
2607 
2608 int
2609 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2610     uint64_t txg, metaslab_t **msp)
2611 {
2612 	vdev_t *vd = mg->mg_vd;
2613 	spa_t *spa = vd->vdev_spa;
2614 	objset_t *mos = spa->spa_meta_objset;
2615 	metaslab_t *ms;
2616 	int error;
2617 
2618 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2619 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2620 	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2621 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2622 	cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2623 	multilist_link_init(&ms->ms_class_txg_node);
2624 
2625 	ms->ms_id = id;
2626 	ms->ms_start = id << vd->vdev_ms_shift;
2627 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
2628 	ms->ms_allocator = -1;
2629 	ms->ms_new = B_TRUE;
2630 
2631 	vdev_ops_t *ops = vd->vdev_ops;
2632 	if (ops->vdev_op_metaslab_init != NULL)
2633 		ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2634 
2635 	/*
2636 	 * We only open space map objects that already exist. All others
2637 	 * will be opened when we finally allocate an object for it.
2638 	 *
2639 	 * Note:
2640 	 * When called from vdev_expand(), we can't call into the DMU as
2641 	 * we are holding the spa_config_lock as a writer and we would
2642 	 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2643 	 * that case, the object parameter is zero though, so we won't
2644 	 * call into the DMU.
2645 	 */
2646 	if (object != 0) {
2647 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2648 		    ms->ms_size, vd->vdev_ashift);
2649 
2650 		if (error != 0) {
2651 			kmem_free(ms, sizeof (metaslab_t));
2652 			return (error);
2653 		}
2654 
2655 		ASSERT(ms->ms_sm != NULL);
2656 		ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2657 	}
2658 
2659 	range_seg_type_t type;
2660 	uint64_t shift, start;
2661 	type = metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2662 
2663 	/*
2664 	 * We create the ms_allocatable here, but we don't create the
2665 	 * other range trees until metaslab_sync_done().  This serves
2666 	 * two purposes: it allows metaslab_sync_done() to detect the
2667 	 * addition of new space; and for debugging, it ensures that
2668 	 * we'd data fault on any attempt to use this metaslab before
2669 	 * it's ready.
2670 	 */
2671 	ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2672 
2673 	ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2674 
2675 	metaslab_group_add(mg, ms);
2676 	metaslab_set_fragmentation(ms, B_FALSE);
2677 
2678 	/*
2679 	 * If we're opening an existing pool (txg == 0) or creating
2680 	 * a new one (txg == TXG_INITIAL), all space is available now.
2681 	 * If we're adding space to an existing pool, the new space
2682 	 * does not become available until after this txg has synced.
2683 	 * The metaslab's weight will also be initialized when we sync
2684 	 * out this txg. This ensures that we don't attempt to allocate
2685 	 * from it before we have initialized it completely.
2686 	 */
2687 	if (txg <= TXG_INITIAL) {
2688 		metaslab_sync_done(ms, 0);
2689 		metaslab_space_update(vd, mg->mg_class,
2690 		    metaslab_allocated_space(ms), 0, 0);
2691 	}
2692 
2693 	if (txg != 0) {
2694 		vdev_dirty(vd, 0, NULL, txg);
2695 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
2696 	}
2697 
2698 	*msp = ms;
2699 
2700 	return (0);
2701 }
2702 
2703 static void
2704 metaslab_fini_flush_data(metaslab_t *msp)
2705 {
2706 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2707 
2708 	if (metaslab_unflushed_txg(msp) == 0) {
2709 		ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2710 		    ==, NULL);
2711 		return;
2712 	}
2713 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2714 
2715 	mutex_enter(&spa->spa_flushed_ms_lock);
2716 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2717 	mutex_exit(&spa->spa_flushed_ms_lock);
2718 
2719 	spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2720 	spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2721 }
2722 
2723 uint64_t
2724 metaslab_unflushed_changes_memused(metaslab_t *ms)
2725 {
2726 	return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2727 	    range_tree_numsegs(ms->ms_unflushed_frees)) *
2728 	    ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2729 }
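
/*
 * For illustration (segment counts assumed): a metaslab with 10000
 * unflushed allocation segments and 6000 unflushed free segments, stored as
 * 8-byte RANGE_SEG32 btree elements, is charged
 *
 *	(10000 + 6000) * 8 = 128000 bytes
 *
 * against spa_unflushed_stats.sus_memused, which the log space map code
 * consults when deciding how aggressively metaslabs need to be flushed.
 */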
2730 
2731 void
2732 metaslab_fini(metaslab_t *msp)
2733 {
2734 	metaslab_group_t *mg = msp->ms_group;
2735 	vdev_t *vd = mg->mg_vd;
2736 	spa_t *spa = vd->vdev_spa;
2737 
2738 	metaslab_fini_flush_data(msp);
2739 
2740 	metaslab_group_remove(mg, msp);
2741 
2742 	mutex_enter(&msp->ms_lock);
2743 	VERIFY(msp->ms_group == NULL);
2744 	metaslab_space_update(vd, mg->mg_class,
2745 	    -metaslab_allocated_space(msp), 0, -msp->ms_size);
2746 
2747 	space_map_close(msp->ms_sm);
2748 	msp->ms_sm = NULL;
2749 
2750 	metaslab_unload(msp);
2751 	range_tree_destroy(msp->ms_allocatable);
2752 	range_tree_destroy(msp->ms_freeing);
2753 	range_tree_destroy(msp->ms_freed);
2754 
2755 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2756 	    metaslab_unflushed_changes_memused(msp));
2757 	spa->spa_unflushed_stats.sus_memused -=
2758 	    metaslab_unflushed_changes_memused(msp);
2759 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2760 	range_tree_destroy(msp->ms_unflushed_allocs);
2761 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2762 	range_tree_destroy(msp->ms_unflushed_frees);
2763 
2764 	for (int t = 0; t < TXG_SIZE; t++) {
2765 		range_tree_destroy(msp->ms_allocating[t]);
2766 	}
2767 
2768 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2769 		range_tree_destroy(msp->ms_defer[t]);
2770 	}
2771 	ASSERT0(msp->ms_deferspace);
2772 
2773 	range_tree_destroy(msp->ms_checkpointing);
2774 
2775 	for (int t = 0; t < TXG_SIZE; t++)
2776 		ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2777 
2778 	range_tree_vacate(msp->ms_trim, NULL, NULL);
2779 	range_tree_destroy(msp->ms_trim);
2780 
2781 	mutex_exit(&msp->ms_lock);
2782 	cv_destroy(&msp->ms_load_cv);
2783 	cv_destroy(&msp->ms_flush_cv);
2784 	mutex_destroy(&msp->ms_lock);
2785 	mutex_destroy(&msp->ms_sync_lock);
2786 	ASSERT3U(msp->ms_allocator, ==, -1);
2787 
2788 	kmem_free(msp, sizeof (metaslab_t));
2789 }
2790 
2791 #define	FRAGMENTATION_TABLE_SIZE	17
2792 
2793 /*
2794  * This table defines a segment size based fragmentation metric that will
2795  * allow each metaslab to derive its own fragmentation value. This is done
2796  * by calculating the space in each bucket of the spacemap histogram and
2797  * multiplying that by the fragmentation metric in this table. Doing
2798  * this for all buckets and dividing it by the total amount of free
2799  * space in this metaslab (i.e. the total free space in all buckets) gives
2800  * us the fragmentation metric. This means that a high fragmentation metric
2801  * equates to most of the free space being comprised of small segments.
2802  * Conversely, if the metric is low, then most of the free space is in
2803  * large segments. A 10% change in fragmentation equates to approximately
2804  * double the number of segments.
2805  *
2806  * This table defines 0% fragmented space using 16MB segments. Testing has
2807  * shown that segments that are greater than or equal to 16MB do not suffer
2808  * from drastic performance problems. Using this value, we derive the rest
2809  * of the table. Since the fragmentation value is never stored on disk, it
2810  * is possible to change these calculations in the future.
2811  */
2812 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2813 	100,	/* 512B	*/
2814 	100,	/* 1K	*/
2815 	98,	/* 2K	*/
2816 	95,	/* 4K	*/
2817 	90,	/* 8K	*/
2818 	80,	/* 16K	*/
2819 	70,	/* 32K	*/
2820 	60,	/* 64K	*/
2821 	50,	/* 128K	*/
2822 	40,	/* 256K	*/
2823 	30,	/* 512K	*/
2824 	20,	/* 1M	*/
2825 	15,	/* 2M	*/
2826 	10,	/* 4M	*/
2827 	5,	/* 8M	*/
2828 	0	/* 16M	*/
2829 };
2830 
2831 /*
2832  * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2833  * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2834  * been upgraded and does not support this metric. Otherwise, the
2835  * computed value should be in the range [0, 100].
2836  */
2837 static void
2838 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2839 {
2840 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2841 	uint64_t fragmentation = 0;
2842 	uint64_t total = 0;
2843 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
2844 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
2845 
2846 	if (!feature_enabled) {
2847 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2848 		return;
2849 	}
2850 
2851 	/*
2852 	 * A null space map means that the entire metaslab is free
2853 	 * and thus is not fragmented.
2854 	 */
2855 	if (msp->ms_sm == NULL) {
2856 		msp->ms_fragmentation = 0;
2857 		return;
2858 	}
2859 
2860 	/*
2861 	 * If this metaslab's space map has not been upgraded, flag it
2862 	 * so that we upgrade next time we encounter it.
2863 	 */
2864 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2865 		uint64_t txg = spa_syncing_txg(spa);
2866 		vdev_t *vd = msp->ms_group->mg_vd;
2867 
2868 		/*
2869 		 * If we've reached the final dirty txg, then we must
2870 		 * be shutting down the pool. We don't want to dirty
2871 		 * any data past this point so skip setting the condense
2872 		 * flag. We can retry this action the next time the pool
2873 		 * is imported. We also skip marking this metaslab for
2874 		 * condensing if the caller has explicitly set nodirty.
2875 		 */
2876 		if (!nodirty &&
2877 		    spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2878 			msp->ms_condense_wanted = B_TRUE;
2879 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2880 			zfs_dbgmsg("txg %llu, requesting force condense: "
2881 			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
2882 			    vd->vdev_id);
2883 		}
2884 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2885 		return;
2886 	}
2887 
2888 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2889 		uint64_t space = 0;
2890 		uint8_t shift = msp->ms_sm->sm_shift;
2891 
2892 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2893 		    FRAGMENTATION_TABLE_SIZE - 1);
2894 
2895 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2896 			continue;
2897 
2898 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2899 		total += space;
2900 
2901 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2902 		fragmentation += space * zfs_frag_table[idx];
2903 	}
2904 
2905 	if (total > 0)
2906 		fragmentation /= total;
2907 	ASSERT3U(fragmentation, <=, 100);
2908 
2909 	msp->ms_fragmentation = fragmentation;
2910 }
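
/*
 * Worked example of the metric above (histogram assumed, sm_shift = 9 so
 * idx == i): say the space map histogram holds 100 segments in the 8 KiB
 * bucket (i = 4) and one segment in the 16 MiB bucket (i = 15):
 *
 *	space(8K)  = 100 << (4 + 9)  = 800 KiB	factor 90
 *	space(16M) =   1 << (15 + 9) = 16 MiB	factor 0
 *
 *	fragmentation = (800K * 90 + 16M * 0) / (800K + 16M) ~= 4
 *
 * Most of the free space sits in the one large segment, so the metric stays
 * low even though small segments dominate by count.
 */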
2911 
2912 /*
2913  * Compute a weight -- a selection preference value -- for the given metaslab.
2914  * This is based on the amount of free space, the level of fragmentation,
2915  * the LBA range, and whether the metaslab is loaded.
2916  */
2917 static uint64_t
2918 metaslab_space_weight(metaslab_t *msp)
2919 {
2920 	metaslab_group_t *mg = msp->ms_group;
2921 	vdev_t *vd = mg->mg_vd;
2922 	uint64_t weight, space;
2923 
2924 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2925 
2926 	/*
2927 	 * The baseline weight is the metaslab's free space.
2928 	 */
2929 	space = msp->ms_size - metaslab_allocated_space(msp);
2930 
2931 	if (metaslab_fragmentation_factor_enabled &&
2932 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2933 		/*
2934 		 * Use the fragmentation information to inversely scale
2935 		 * down the baseline weight. We need to ensure that we
2936 		 * don't exclude this metaslab completely when it's 100%
2937 		 * fragmented. To avoid this we reduce the fragmented value
2938 		 * by 1.
2939 		 */
2940 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2941 
2942 		/*
2943 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2944 		 * this metaslab again. The fragmentation metric may have
2945 		 * decreased the space to something smaller than
2946 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2947 		 * so that we can consume any remaining space.
2948 		 */
2949 		if (space > 0 && space < SPA_MINBLOCKSIZE)
2950 			space = SPA_MINBLOCKSIZE;
2951 	}
2952 	weight = space;
2953 
2954 	/*
2955 	 * Modern disks have uniform bit density and constant angular velocity.
2956 	 * Therefore, the outer recording zones are faster (higher bandwidth)
2957 	 * than the inner zones by the ratio of outer to inner track diameter,
2958 	 * which is typically around 2:1.  We account for this by assigning
2959 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2960 	 * In effect, this means that we'll select the metaslab with the most
2961 	 * free bandwidth rather than simply the one with the most free space.
2962 	 */
2963 	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
2964 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
2965 		ASSERT(weight >= space && weight <= 2 * space);
2966 	}
2967 
2968 	/*
2969 	 * If this metaslab is one we're actively using, adjust its
2970 	 * weight to make it preferable to any inactive metaslab so
2971 	 * we'll polish it off. If the fragmentation on this metaslab
2972 	 * has exceeded our threshold, then don't mark it active.
2973 	 */
2974 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
2975 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
2976 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
2977 	}
2978 
2979 	WEIGHT_SET_SPACEBASED(weight);
2980 	return (weight);
2981 }
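
/*
 * Example of the LBA weighting above (counts assumed): on a rotational vdev
 * with vdev_ms_count = 200 and a metaslab with 1 GiB of free space,
 *
 *	weight = 2 * space - (ms_id * space) / ms_count
 *
 *	ms_id =   0  ->  2 GiB		(outer, fastest tracks)
 *	ms_id = 100  ->  1.5 GiB
 *	ms_id = 199  ->  ~1 GiB		(inner, slowest tracks)
 *
 * so between equally empty metaslabs the lower-numbered one is preferred.
 */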
2982 
2983 /*
2984  * Return the weight of the specified metaslab, according to the segment-based
2985  * weighting algorithm. The metaslab must be loaded. This function can
2986  * be called within a sync pass since it relies only on the metaslab's
2987  * range tree which is always accurate when the metaslab is loaded.
2988  */
2989 static uint64_t
2990 metaslab_weight_from_range_tree(metaslab_t *msp)
2991 {
2992 	uint64_t weight = 0;
2993 	uint32_t segments = 0;
2994 
2995 	ASSERT(msp->ms_loaded);
2996 
2997 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
2998 	    i--) {
2999 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3000 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3001 
3002 		segments <<= 1;
3003 		segments += msp->ms_allocatable->rt_histogram[i];
3004 
3005 		/*
3006 		 * The range tree provides more precision than the space map
3007 		 * and must be downgraded so that all values fit within the
3008 		 * space map's histogram. This allows us to compare loaded
3009 		 * vs. unloaded metaslabs to determine which metaslab is
3010 		 * considered "best".
3011 		 */
3012 		if (i > max_idx)
3013 			continue;
3014 
3015 		if (segments != 0) {
3016 			WEIGHT_SET_COUNT(weight, segments);
3017 			WEIGHT_SET_INDEX(weight, i);
3018 			WEIGHT_SET_ACTIVE(weight, 0);
3019 			break;
3020 		}
3021 	}
3022 	return (weight);
3023 }
3024 
3025 /*
3026  * Calculate the weight based on the on-disk histogram. Should be applied
3027  * only to unloaded metaslabs (i.e. no incoming allocations) in order to
3028  * give results consistent with the on-disk state.
3029  */
3030 static uint64_t
3031 metaslab_weight_from_spacemap(metaslab_t *msp)
3032 {
3033 	space_map_t *sm = msp->ms_sm;
3034 	ASSERT(!msp->ms_loaded);
3035 	ASSERT(sm != NULL);
3036 	ASSERT3U(space_map_object(sm), !=, 0);
3037 	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3038 
3039 	/*
3040 	 * Create a joint histogram from all the segments that have made
3041 	 * it to the metaslab's space map histogram, that are not yet
3042 	 * available for allocation because they are still in the freeing
3043 	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3044 	 * these segments from the space map's histogram to get a more
3045 	 * accurate weight.
3046 	 */
3047 	uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3048 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3049 		deferspace_histogram[i] += msp->ms_synchist[i];
3050 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3051 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3052 			deferspace_histogram[i] += msp->ms_deferhist[t][i];
3053 		}
3054 	}
3055 
3056 	uint64_t weight = 0;
3057 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3058 		ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3059 		    deferspace_histogram[i]);
3060 		uint64_t count =
3061 		    sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3062 		if (count != 0) {
3063 			WEIGHT_SET_COUNT(weight, count);
3064 			WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3065 			WEIGHT_SET_ACTIVE(weight, 0);
3066 			break;
3067 		}
3068 	}
3069 	return (weight);
3070 }
3071 
3072 /*
3073  * Compute a segment-based weight for the specified metaslab. The weight
3074  * is determined by highest bucket in the histogram. The information
3075  * for the highest bucket is encoded into the weight value.
3076  */
3077 static uint64_t
3078 metaslab_segment_weight(metaslab_t *msp)
3079 {
3080 	metaslab_group_t *mg = msp->ms_group;
3081 	uint64_t weight = 0;
3082 	uint8_t shift = mg->mg_vd->vdev_ashift;
3083 
3084 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3085 
3086 	/*
3087 	 * The metaslab is completely free.
3088 	 */
3089 	if (metaslab_allocated_space(msp) == 0) {
3090 		int idx = highbit64(msp->ms_size) - 1;
3091 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3092 
3093 		if (idx < max_idx) {
3094 			WEIGHT_SET_COUNT(weight, 1ULL);
3095 			WEIGHT_SET_INDEX(weight, idx);
3096 		} else {
3097 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3098 			WEIGHT_SET_INDEX(weight, max_idx);
3099 		}
3100 		WEIGHT_SET_ACTIVE(weight, 0);
3101 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3102 		return (weight);
3103 	}
3104 
3105 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3106 
3107 	/*
3108 	 * If the metaslab is fully allocated then just make the weight 0.
3109 	 */
3110 	if (metaslab_allocated_space(msp) == msp->ms_size)
3111 		return (0);
3112 	/*
3113 	 * If the metaslab is already loaded, then use the range tree to
3114 	 * determine the weight. Otherwise, we rely on the space map information
3115 	 * to generate the weight.
3116 	 */
3117 	if (msp->ms_loaded) {
3118 		weight = metaslab_weight_from_range_tree(msp);
3119 	} else {
3120 		weight = metaslab_weight_from_spacemap(msp);
3121 	}
3122 
3123 	/*
3124 	 * If the metaslab was active the last time we calculated its weight
3125 	 * then keep it active. We want to consume the entire region that
3126 	 * is associated with this weight.
3127 	 */
3128 	if (msp->ms_activation_weight != 0 && weight != 0)
3129 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3130 	return (weight);
3131 }
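
/*
 * Example of the encoding above (sizes assumed): a completely free 1 GiB
 * metaslab has idx = highbit64(1ULL << 30) - 1 = 30, and with the usual
 * SPACE_MAP_HISTOGRAM_SIZE of 32 and an ashift of 12, max_idx = 43, so the
 * weight encodes "one segment of 2^30 bytes":
 *
 *	count = 1, index = 30, active = 0
 *
 * A metaslab with allocations instead derives the same (count, index) pair
 * from its range tree or space map histogram as shown above.
 */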
3132 
3133 /*
3134  * Determine if we should attempt to allocate from this metaslab. If the
3135  * metaslab is loaded, then we can determine if the desired allocation
3136  * can be satisfied by looking at the size of the maximum free segment
3137  * on that metaslab. Otherwise, we make our decision based on the metaslab's
3138  * weight. For segment-based weighting we can determine the maximum
3139  * allocation based on the index encoded in its value. For space-based
3140  * weights we rely on the entire weight (excluding the weight-type bit).
3141  */
3142 static boolean_t
3143 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3144 {
3145 	/*
3146 	 * If the metaslab is loaded, ms_max_size is definitive and we can use
3147 	 * the fast check. If it's not, the ms_max_size is a lower bound (once
3148 	 * set), and we should use the fast check as long as we're not in
3149 	 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3150 	 * seconds since the metaslab was unloaded.
3151 	 */
3152 	if (msp->ms_loaded ||
3153 	    (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3154 	    msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3155 		return (msp->ms_max_size >= asize);
3156 
3157 	boolean_t should_allocate;
3158 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3159 		/*
3160 		 * The metaslab segment weight indicates segments in the
3161 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
3162 		 * Since the asize might be in the middle of the range, we
3163 		 * should attempt the allocation if asize < 2^(i+1).
3164 		 */
3165 		should_allocate = (asize <
3166 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3167 	} else {
3168 		should_allocate = (asize <=
3169 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3170 	}
3171 
3172 	return (should_allocate);
3173 }
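
/*
 * For illustration of metaslab_should_allocate() above: with segment-based
 * weighting, a weight index of e.g. 20 means the metaslab is known to have
 * free segments in the range [1MB, 2MB), so any asize strictly less than
 * 2^21 (2MB) is worth attempting. With space-based weighting, the weight
 * (minus the type bit) is the amount of free space itself, so asize is
 * compared against it directly.
 */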
3174 
3175 static uint64_t
3176 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3177 {
3178 	vdev_t *vd = msp->ms_group->mg_vd;
3179 	spa_t *spa = vd->vdev_spa;
3180 	uint64_t weight;
3181 
3182 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3183 
3184 	metaslab_set_fragmentation(msp, nodirty);
3185 
3186 	/*
3187 	 * Update the maximum size. If the metaslab is loaded, this will
3188 	 * ensure that we get an accurate maximum size if newly freed space
3189 	 * has been added back into the free tree. If the metaslab is
3190 	 * unloaded, we check if there's a larger free segment in the
3191 	 * unflushed frees. This is a lower bound on the largest allocatable
3192 	 * segment size. Coalescing of adjacent entries may reveal larger
3193 	 * allocatable segments, but we aren't aware of those until loading
3194 	 * the space map into a range tree.
3195 	 */
3196 	if (msp->ms_loaded) {
3197 		msp->ms_max_size = metaslab_largest_allocatable(msp);
3198 	} else {
3199 		msp->ms_max_size = MAX(msp->ms_max_size,
3200 		    metaslab_largest_unflushed_free(msp));
3201 	}
3202 
3203 	/*
3204 	 * Segment-based weighting requires space map histogram support.
3205 	 */
3206 	if (zfs_metaslab_segment_weight_enabled &&
3207 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3208 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3209 	    sizeof (space_map_phys_t))) {
3210 		weight = metaslab_segment_weight(msp);
3211 	} else {
3212 		weight = metaslab_space_weight(msp);
3213 	}
3214 	return (weight);
3215 }
3216 
3217 void
3218 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3219 {
3220 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3221 
3222 	/* note: we preserve the mask (e.g. indication of primary, etc..) */
3223 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3224 	metaslab_group_sort(msp->ms_group, msp,
3225 	    metaslab_weight(msp, B_FALSE) | was_active);
3226 }
3227 
3228 static int
3229 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3230     int allocator, uint64_t activation_weight)
3231 {
3232 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3233 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3234 
3235 	/*
3236 	 * If we're activating for the claim code, we don't want to actually
3237 	 * set the metaslab up for a specific allocator.
3238 	 */
3239 	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3240 		ASSERT0(msp->ms_activation_weight);
3241 		msp->ms_activation_weight = msp->ms_weight;
3242 		metaslab_group_sort(mg, msp, msp->ms_weight |
3243 		    activation_weight);
3244 		return (0);
3245 	}
3246 
3247 	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3248 	    &mga->mga_primary : &mga->mga_secondary);
3249 
3250 	mutex_enter(&mg->mg_lock);
3251 	if (*mspp != NULL) {
3252 		mutex_exit(&mg->mg_lock);
3253 		return (EEXIST);
3254 	}
3255 
3256 	*mspp = msp;
3257 	ASSERT3S(msp->ms_allocator, ==, -1);
3258 	msp->ms_allocator = allocator;
3259 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3260 
3261 	ASSERT0(msp->ms_activation_weight);
3262 	msp->ms_activation_weight = msp->ms_weight;
3263 	metaslab_group_sort_impl(mg, msp,
3264 	    msp->ms_weight | activation_weight);
3265 	mutex_exit(&mg->mg_lock);
3266 
3267 	return (0);
3268 }
3269 
3270 static int
3271 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3272 {
3273 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3274 
3275 	/*
3276 	 * The current metaslab is already activated for us so there
3277 	 * is nothing to do. Being active, though, doesn't mean that
3278 	 * this metaslab is activated for our allocator or with our
3279 	 * requested activation weight. The metaslab could have started
3280 	 * as an active one for our allocator but changed allocators
3281 	 * while we were waiting to grab its ms_lock or we stole it
3282 	 * [see find_valid_metaslab()]. This means that this thread may
3283 	 * end up passivating a metaslab that belongs to another
3284 	 * allocator or that was activated with a different mask.
3285 	 */
3286 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3287 		ASSERT(msp->ms_loaded);
3288 		return (0);
3289 	}
3290 
3291 	int error = metaslab_load(msp);
3292 	if (error != 0) {
3293 		metaslab_group_sort(msp->ms_group, msp, 0);
3294 		return (error);
3295 	}
3296 
3297 	/*
3298 	 * When entering metaslab_load() we may have dropped the
3299 	 * ms_lock because we were loading this metaslab, or we
3300 	 * were waiting for another thread to load it for us. In
3301 	 * that scenario, we recheck the weight of the metaslab
3302 	 * to see if it was activated by another thread.
3303 	 *
3304 	 * If the metaslab was activated for another allocator or
3305 	 * it was activated with a different activation weight (e.g.
3306 	 * we wanted to make it a primary but it was activated as
3307 	 * secondary) we return error (EBUSY).
3308 	 *
3309 	 * If the metaslab was activated for the same allocator
3310 	 * and requested activation mask, skip activating it.
3311 	 */
3312 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3313 		if (msp->ms_allocator != allocator)
3314 			return (EBUSY);
3315 
3316 		if ((msp->ms_weight & activation_weight) == 0)
3317 			return (SET_ERROR(EBUSY));
3318 
3319 		EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3320 		    msp->ms_primary);
3321 		return (0);
3322 	}
3323 
3324 	/*
3325 	 * If the metaslab has literally 0 space, it will have weight 0. In
3326 	 * that case, don't bother activating it. This can happen if the
3327 	 * metaslab had space during find_valid_metaslab, but another thread
3328 	 * loaded it and used all that space while we were waiting to grab the
3329 	 * lock.
3330 	 */
3331 	if (msp->ms_weight == 0) {
3332 		ASSERT0(range_tree_space(msp->ms_allocatable));
3333 		return (SET_ERROR(ENOSPC));
3334 	}
3335 
3336 	if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3337 	    allocator, activation_weight)) != 0) {
3338 		return (error);
3339 	}
3340 
3341 	ASSERT(msp->ms_loaded);
3342 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3343 
3344 	return (0);
3345 }
3346 
3347 static void
3348 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3349     uint64_t weight)
3350 {
3351 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3352 	ASSERT(msp->ms_loaded);
3353 
3354 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3355 		metaslab_group_sort(mg, msp, weight);
3356 		return;
3357 	}
3358 
3359 	mutex_enter(&mg->mg_lock);
3360 	ASSERT3P(msp->ms_group, ==, mg);
3361 	ASSERT3S(0, <=, msp->ms_allocator);
3362 	ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3363 
3364 	metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3365 	if (msp->ms_primary) {
3366 		ASSERT3P(mga->mga_primary, ==, msp);
3367 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3368 		mga->mga_primary = NULL;
3369 	} else {
3370 		ASSERT3P(mga->mga_secondary, ==, msp);
3371 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3372 		mga->mga_secondary = NULL;
3373 	}
3374 	msp->ms_allocator = -1;
3375 	metaslab_group_sort_impl(mg, msp, weight);
3376 	mutex_exit(&mg->mg_lock);
3377 }
3378 
3379 static void
3380 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3381 {
3382 	uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3383 
3384 	/*
3385 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3386 	 * this metaslab again.  In that case, it had better be empty,
3387 	 * or we would be leaving space on the table.
3388 	 */
3389 	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3390 	    size >= SPA_MINBLOCKSIZE ||
3391 	    range_tree_space(msp->ms_allocatable) == 0);
3392 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
3393 
3394 	ASSERT(msp->ms_activation_weight != 0);
3395 	msp->ms_activation_weight = 0;
3396 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
3397 	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3398 }
3399 
3400 /*
3401  * Segment-based metaslabs are activated once and remain active until
3402  * we either fail an allocation attempt (similar to space-based metaslabs)
3403  * or have exhausted the free space in zfs_metaslab_switch_threshold
3404  * buckets since the metaslab was activated. This function checks to see
3405  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3406  * metaslab and passivates it proactively. This will allow us to select a
3407  * metaslab with a larger contiguous region, if any, remaining within this
3408  * metaslab group. If we're in sync pass > 1, then we continue using this
3409  * metaslab so that we don't dirty more blocks and cause more sync passes.
3410  */
3411 static void
3412 metaslab_segment_may_passivate(metaslab_t *msp)
3413 {
3414 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3415 
3416 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3417 		return;
3418 
3419 	/*
3420 	 * Since we are in the middle of a sync pass, the most accurate
3421 	 * information that is accessible to us is the in-core range tree
3422 	 * histogram; calculate the new weight based on that information.
3423 	 */
3424 	uint64_t weight = metaslab_weight_from_range_tree(msp);
3425 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3426 	int current_idx = WEIGHT_GET_INDEX(weight);
3427 
3428 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3429 		metaslab_passivate(msp, weight);
3430 }
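
/*
 * For illustration, assuming zfs_metaslab_switch_threshold is 2: a metaslab
 * activated with a weight index of 30 (largest free segments around 1GB)
 * is proactively passivated once its recomputed index drops to 28 or below
 * (largest free segments around 256MB), letting the allocator move on to a
 * metaslab with larger contiguous regions, if one exists in the group.
 */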
3431 
3432 static void
3433 metaslab_preload(void *arg)
3434 {
3435 	metaslab_t *msp = arg;
3436 	metaslab_class_t *mc = msp->ms_group->mg_class;
3437 	spa_t *spa = mc->mc_spa;
3438 	fstrans_cookie_t cookie = spl_fstrans_mark();
3439 
3440 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3441 
3442 	mutex_enter(&msp->ms_lock);
3443 	(void) metaslab_load(msp);
3444 	metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3445 	mutex_exit(&msp->ms_lock);
3446 	spl_fstrans_unmark(cookie);
3447 }
3448 
3449 static void
3450 metaslab_group_preload(metaslab_group_t *mg)
3451 {
3452 	spa_t *spa = mg->mg_vd->vdev_spa;
3453 	metaslab_t *msp;
3454 	avl_tree_t *t = &mg->mg_metaslab_tree;
3455 	int m = 0;
3456 
3457 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
3458 		taskq_wait_outstanding(mg->mg_taskq, 0);
3459 		return;
3460 	}
3461 
3462 	mutex_enter(&mg->mg_lock);
3463 
3464 	/*
3465 	 * Load the next potential metaslabs
3466 	 */
3467 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3468 		ASSERT3P(msp->ms_group, ==, mg);
3469 
3470 		/*
3471 		 * We preload only the maximum number of metaslabs specified
3472 		 * by metaslab_preload_limit. If a metaslab is being forced
3473 		 * to condense then we preload it too. This will ensure
3474 		 * that force condensing happens in the next txg.
3475 		 */
3476 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3477 			continue;
3478 		}
3479 
3480 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
3481 		    msp, TQ_SLEEP) != TASKQID_INVALID);
3482 	}
3483 	mutex_exit(&mg->mg_lock);
3484 }
3485 
3486 /*
3487  * Determine if the space map's on-disk footprint is past our tolerance for
3488  * inefficiency. We would like to use the following criteria to make our
3489  * decision:
3490  *
3491  * 1. Do not condense if the size of the space map object would dramatically
3492  *    increase as a result of writing out the free space range tree.
3493  *
3494  * 2. Condense if the on-disk space map representation is at least
3495  *    zfs_condense_pct/100 times the size of the optimal representation
3496  *    (e.g. zfs_condense_pct = 110, optimal = 1MB: condense at >= 1.1MB).
3497  *
3498  * 3. Do not condense if the on-disk size of the space map does not actually
3499  *    decrease.
3500  *
3501  * Unfortunately, we cannot compute the on-disk size of the space map in this
3502  * context because we cannot accurately compute the effects of compression, etc.
3503  * Instead, we apply the heuristic described in the block comment for
3504  * zfs_metaslab_condense_block_threshold - we only condense if the space used
3505  * is greater than a threshold number of blocks.
3506  */
3507 static boolean_t
3508 metaslab_should_condense(metaslab_t *msp)
3509 {
3510 	space_map_t *sm = msp->ms_sm;
3511 	vdev_t *vd = msp->ms_group->mg_vd;
3512 	uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
3513 
3514 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3515 	ASSERT(msp->ms_loaded);
3516 	ASSERT(sm != NULL);
3517 	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3518 
3519 	/*
3520 	 * We always condense metaslabs that are empty and metaslabs for
3521 	 * which a condense request has been made.
3522 	 */
3523 	if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3524 	    msp->ms_condense_wanted)
3525 		return (B_TRUE);
3526 
3527 	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3528 	uint64_t object_size = space_map_length(sm);
3529 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3530 	    msp->ms_allocatable, SM_NO_VDEVID);
3531 
3532 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3533 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
3534 }
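
/*
 * A worked example of the check above, assuming zfs_condense_pct is 200 and
 * zfs_metaslab_condense_block_threshold is 4: with a 128K space map block
 * size and ashift = 12, record_size is 128K, so we condense only if the
 * on-disk space map is both at least twice its estimated optimal size and
 * larger than 4 * 128K = 512K.
 */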
3535 
3536 /*
3537  * Condense the on-disk space map representation to its minimized form.
3538  * The minimized form consists of a small number of allocations followed
3539  * by the entries of the free range tree (ms_allocatable). The condensed
3540  * spacemap contains all the entries of previous TXGs (including those in
3541  * the pool-wide log spacemaps; thus this is effectively a superset of
3542  * metaslab_flush()), but this TXG's entries still need to be written.
3543  */
3544 static void
3545 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3546 {
3547 	range_tree_t *condense_tree;
3548 	space_map_t *sm = msp->ms_sm;
3549 	uint64_t txg = dmu_tx_get_txg(tx);
3550 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3551 
3552 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3553 	ASSERT(msp->ms_loaded);
3554 	ASSERT(msp->ms_sm != NULL);
3555 
3556 	/*
3557 	 * In order to condense the space map, we need to change it so it
3558 	 * only describes which segments are currently allocated and free.
3559 	 *
3560 	 * All the current free space resides in the ms_allocatable, all
3561 	 * the ms_defer trees, and all the ms_allocating trees. We ignore
3562 	 * ms_freed because it is empty, since we're in sync pass 1. We
3563 	 * ignore ms_freeing because these changes are not yet reflected
3564 	 * in the spacemap (they will be written later this txg).
3565 	 *
3566 	 * So to truncate the space map to represent all the entries of
3567 	 * previous TXGs we do the following:
3568 	 *
3569 	 * 1] We create a range tree (condense tree) that is 100% empty.
3570 	 * 2] We add to it all segments found in the ms_defer trees
3571 	 *    as those segments are marked as free in the original space
3572 	 *    map. We do the same with the ms_allocating trees for the same
3573 	 *    reason. Adding these segments should be a relatively
3574 	 *    inexpensive operation since we expect these trees to have a
3575 	 *    small number of nodes.
3576 	 * 3] We vacate any unflushed allocs, since they are not frees we
3577 	 *    need to add to the condense tree. Then we vacate any
3578 	 *    unflushed frees as they should already be part of ms_allocatable.
3579 	 * 4] At this point, we would ideally like to add all segments
3580 	 *    in the ms_allocatable tree to the condense tree. This way
3581 	 *    we would write all the entries of the condense tree as the
3582 	 *    condensed space map, which would only contain freed
3583 	 *    segments with everything else assumed to be allocated.
3584 	 *
3585 	 *    Doing so can be prohibitively expensive as ms_allocatable can
3586 	 *    be large, and therefore computationally expensive to add to
3587 	 *    the condense_tree. Instead we first sync out an entry marking
3588 	 *    everything as allocated, then the ms_allocatable and then the
3589 	 *    condense_tree, in the condensed space map. While this is not
3590 	 *    optimal, it is typically close to optimal and more importantly
3591 	 *    much cheaper to compute.
3592 	 *
3593 	 * 5] Finally, as the effects of both unflushed trees are now
3594 	 *    reflected in our new and condensed metaslab space map, we have
3595 	 *    effectively flushed all the unflushed changes to disk, thus we
3596 	 *    call metaslab_flush_update().
3597 	 */
3598 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3599 	ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3600 
3601 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3602 	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
3603 	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
3604 	    spa->spa_name, space_map_length(msp->ms_sm),
3605 	    range_tree_numsegs(msp->ms_allocatable),
3606 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
3607 
3608 	msp->ms_condense_wanted = B_FALSE;
3609 
3610 	range_seg_type_t type;
3611 	uint64_t shift, start;
3612 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3613 	    &start, &shift);
3614 
3615 	condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3616 
3617 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3618 		range_tree_walk(msp->ms_defer[t],
3619 		    range_tree_add, condense_tree);
3620 	}
3621 
3622 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3623 		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3624 		    range_tree_add, condense_tree);
3625 	}
3626 
3627 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3628 	    metaslab_unflushed_changes_memused(msp));
3629 	spa->spa_unflushed_stats.sus_memused -=
3630 	    metaslab_unflushed_changes_memused(msp);
3631 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3632 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3633 
3634 	/*
3635 	 * We're about to drop the metaslab's lock, thus allowing other
3636 	 * consumers to change its content. Set the metaslab's ms_condensing
3637 	 * flag to ensure that allocations on this metaslab do not occur
3638 	 * while we're in the middle of committing it to disk. This is only
3639 	 * critical for ms_allocatable as all other range trees use per TXG
3640 	 * views of their content.
3641 	 */
3642 	msp->ms_condensing = B_TRUE;
3643 
3644 	mutex_exit(&msp->ms_lock);
3645 	uint64_t object = space_map_object(msp->ms_sm);
3646 	space_map_truncate(sm,
3647 	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3648 	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3649 
3650 	/*
3651 	 * space_map_truncate() may have reallocated the spacemap object.
3652 	 * If so, update the vdev_ms_array.
3653 	 */
3654 	if (space_map_object(msp->ms_sm) != object) {
3655 		object = space_map_object(msp->ms_sm);
3656 		dmu_write(spa->spa_meta_objset,
3657 		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3658 		    msp->ms_id, sizeof (uint64_t), &object, tx);
3659 	}
3660 
3661 	/*
3662 	 * Note:
3663 	 * When the log space map feature is enabled, each space map will
3664 	 * always have ALLOCS followed by FREES for each sync pass. This is
3665 	 * typically true even when the log space map feature is disabled,
3666 	 * except for the case where a metaslab goes through metaslab_sync()
3667 	 * and gets condensed. In that case the metaslab's space map will have
3668 	 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3669 	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3670 	 * sync pass 1.
3671 	 */
3672 	range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3673 	    shift);
3674 	range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3675 	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3676 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3677 	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3678 
3679 	range_tree_vacate(condense_tree, NULL, NULL);
3680 	range_tree_destroy(condense_tree);
3681 	range_tree_vacate(tmp_tree, NULL, NULL);
3682 	range_tree_destroy(tmp_tree);
3683 	mutex_enter(&msp->ms_lock);
3684 
3685 	msp->ms_condensing = B_FALSE;
3686 	metaslab_flush_update(msp, tx);
3687 }
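
/*
 * After the writes at the end of metaslab_condense() above, the condensed
 * space map consists of (in order) one ALLOC entry covering the whole
 * metaslab, FREE entries for everything in ms_allocatable, and FREE entries
 * for the condense_tree (the ms_defer and ms_allocating contents), i.e.
 * ranges that are free from the perspective of previous TXGs but are not
 * currently allocatable.
 */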
3688 
3689 /*
3690  * Called when the metaslab has been flushed (its own spacemap now reflects
3691  * all the contents of the pool-wide spacemap log). Updates the metaslab's
3692  * metadata and any pool-wide related log space map data (e.g. summary,
3693  * metadata and any related pool-wide log space map data (e.g. summary,
3694  * obsolete logs, etc.) to reflect that.
3695 static void
3696 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3697 {
3698 	metaslab_group_t *mg = msp->ms_group;
3699 	spa_t *spa = mg->mg_vd->vdev_spa;
3700 
3701 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3702 
3703 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3704 	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3705 	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3706 
3707 	/*
3708 	 * Just because a metaslab got flushed, that doesn't mean that
3709 	 * it will pass through metaslab_sync_done(). Thus, make sure to
3710 	 * update ms_synced_length here in case it doesn't.
3711 	 */
3712 	msp->ms_synced_length = space_map_length(msp->ms_sm);
3713 
3714 	/*
3715 	 * We may end up here from metaslab_condense() without the
3716 	 * feature being active. In that case this is a no-op.
3717 	 */
3718 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
3719 		return;
3720 
3721 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3722 	ASSERT(msp->ms_sm != NULL);
3723 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3724 	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3725 
3726 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3727 
3728 	/* update metaslab's position in our flushing tree */
3729 	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3730 	mutex_enter(&spa->spa_flushed_ms_lock);
3731 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3732 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3733 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3734 	mutex_exit(&spa->spa_flushed_ms_lock);
3735 
3736 	/* update metaslab counts of spa_log_sm_t nodes */
3737 	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3738 	spa_log_sm_increment_current_mscount(spa);
3739 
3740 	/* cleanup obsolete logs if any */
3741 	uint64_t log_blocks_before = spa_log_sm_nblocks(spa);
3742 	spa_cleanup_old_sm_logs(spa, tx);
3743 	uint64_t log_blocks_after = spa_log_sm_nblocks(spa);
3744 	VERIFY3U(log_blocks_after, <=, log_blocks_before);
3745 
3746 	/* update log space map summary */
3747 	uint64_t blocks_gone = log_blocks_before - log_blocks_after;
3748 	spa_log_summary_add_flushed_metaslab(spa);
3749 	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg);
3750 	spa_log_summary_decrement_blkcount(spa, blocks_gone);
3751 }
3752 
3753 boolean_t
3754 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3755 {
3756 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3757 
3758 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3759 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3760 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3761 
3762 	ASSERT(msp->ms_sm != NULL);
3763 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3764 	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3765 
3766 	/*
3767 	 * There is nothing wrong with flushing the same metaslab twice, as
3768 	 * this codepath should work in that case. However, the current
3769 	 * flushing scheme makes sure to avoid this situation as we would be
3770 	 * making all these calls without having anything meaningful to write
3771 	 * to disk. We assert this behavior here.
3772 	 */
3773 	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3774 
3775 	/*
3776 	 * We cannot flush while loading, because then we would
3777 	 * not load the ms_unflushed_{allocs,frees}.
3778 	 */
3779 	if (msp->ms_loading)
3780 		return (B_FALSE);
3781 
3782 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3783 	metaslab_verify_weight_and_frag(msp);
3784 
3785 	/*
3786 	 * Metaslab condensing is effectively flushing. Therefore if the
3787 	 * metaslab can be condensed we can just condense it instead of
3788 	 * flushing it.
3789 	 *
3790 	 * Note that metaslab_condense() does call metaslab_flush_update()
3791 	 * so we can just return immediately after condensing. We also
3792 	 * don't need to care about setting ms_flushing or broadcasting
3793 	 * ms_flush_cv, even if we temporarily drop the ms_lock in
3794 	 * metaslab_condense(), as the metaslab is already loaded.
3795 	 */
3796 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
3797 		metaslab_group_t *mg = msp->ms_group;
3798 
3799 		/*
3800 		 * For all histogram operations below refer to the
3801 		 * comments of metaslab_sync() where we follow a
3802 		 * similar procedure.
3803 		 */
3804 		metaslab_group_histogram_verify(mg);
3805 		metaslab_class_histogram_verify(mg->mg_class);
3806 		metaslab_group_histogram_remove(mg, msp);
3807 
3808 		metaslab_condense(msp, tx);
3809 
3810 		space_map_histogram_clear(msp->ms_sm);
3811 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3812 		ASSERT(range_tree_is_empty(msp->ms_freed));
3813 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3814 			space_map_histogram_add(msp->ms_sm,
3815 			    msp->ms_defer[t], tx);
3816 		}
3817 		metaslab_aux_histograms_update(msp);
3818 
3819 		metaslab_group_histogram_add(mg, msp);
3820 		metaslab_group_histogram_verify(mg);
3821 		metaslab_class_histogram_verify(mg->mg_class);
3822 
3823 		metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3824 
3825 		/*
3826 		 * Since we recreated the histogram (and potentially
3827 		 * the ms_sm too while condensing) ensure that the
3828 		 * weight is updated too because we are not guaranteed
3829 		 * that this metaslab is dirty and will go through
3830 		 * metaslab_sync_done().
3831 		 */
3832 		metaslab_recalculate_weight_and_sort(msp);
3833 		return (B_TRUE);
3834 	}
3835 
3836 	msp->ms_flushing = B_TRUE;
3837 	uint64_t sm_len_before = space_map_length(msp->ms_sm);
3838 
3839 	mutex_exit(&msp->ms_lock);
3840 	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3841 	    SM_NO_VDEVID, tx);
3842 	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3843 	    SM_NO_VDEVID, tx);
3844 	mutex_enter(&msp->ms_lock);
3845 
3846 	uint64_t sm_len_after = space_map_length(msp->ms_sm);
3847 	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3848 		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3849 		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
3850 		    "appended %llu bytes", dmu_tx_get_txg(tx), spa_name(spa),
3851 		    msp->ms_group->mg_vd->vdev_id, msp->ms_id,
3852 		    range_tree_space(msp->ms_unflushed_allocs),
3853 		    range_tree_space(msp->ms_unflushed_frees),
3854 		    (sm_len_after - sm_len_before));
3855 	}
3856 
3857 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3858 	    metaslab_unflushed_changes_memused(msp));
3859 	spa->spa_unflushed_stats.sus_memused -=
3860 	    metaslab_unflushed_changes_memused(msp);
3861 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3862 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3863 
3864 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3865 	metaslab_verify_weight_and_frag(msp);
3866 
3867 	metaslab_flush_update(msp, tx);
3868 
3869 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3870 	metaslab_verify_weight_and_frag(msp);
3871 
3872 	msp->ms_flushing = B_FALSE;
3873 	cv_broadcast(&msp->ms_flush_cv);
3874 	return (B_TRUE);
3875 }
3876 
3877 /*
3878  * Write a metaslab to disk in the context of the specified transaction group.
3879  */
3880 void
3881 metaslab_sync(metaslab_t *msp, uint64_t txg)
3882 {
3883 	metaslab_group_t *mg = msp->ms_group;
3884 	vdev_t *vd = mg->mg_vd;
3885 	spa_t *spa = vd->vdev_spa;
3886 	objset_t *mos = spa_meta_objset(spa);
3887 	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3888 	dmu_tx_t *tx;
3889 
3890 	ASSERT(!vd->vdev_ishole);
3891 
3892 	/*
3893 	 * This metaslab has just been added so there's no work to do now.
3894 	 */
3895 	if (msp->ms_freeing == NULL) {
3896 		ASSERT3P(alloctree, ==, NULL);
3897 		return;
3898 	}
3899 
3900 	ASSERT3P(alloctree, !=, NULL);
3901 	ASSERT3P(msp->ms_freeing, !=, NULL);
3902 	ASSERT3P(msp->ms_freed, !=, NULL);
3903 	ASSERT3P(msp->ms_checkpointing, !=, NULL);
3904 	ASSERT3P(msp->ms_trim, !=, NULL);
3905 
3906 	/*
3907 	 * Normally, we don't want to process a metaslab if there are no
3908 	 * allocations or frees to perform. However, if the metaslab is being
3909 	 * forced to condense, it's loaded and we're not beyond the final
3910 	 * dirty txg, we need to let it through. Not condensing beyond the
3911 	 * final dirty txg prevents an issue where metaslabs that need to be
3912 	 * condensed but were loaded for other reasons could cause a panic
3913 	 * here. By only checking the txg in that branch of the conditional,
3914 	 * we preserve the utility of the VERIFY statements in all other
3915 	 * cases.
3916 	 */
3917 	if (range_tree_is_empty(alloctree) &&
3918 	    range_tree_is_empty(msp->ms_freeing) &&
3919 	    range_tree_is_empty(msp->ms_checkpointing) &&
3920 	    !(msp->ms_loaded && msp->ms_condense_wanted &&
3921 	    txg <= spa_final_dirty_txg(spa)))
3922 		return;
3923 
3924 
3925 	VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
3926 
3927 	/*
3928 	 * The only state that can actually be changing concurrently
3929 	 * with metaslab_sync() is the metaslab's ms_allocatable. No
3930 	 * other thread can be modifying this txg's alloc, freeing,
3931 	 * freed, or space_map_phys_t.  We drop ms_lock whenever we
3932 	 * could call into the DMU, because the DMU can call down to
3933 	 * us (e.g. via zio_free()) at any time.
3934 	 *
3935 	 * The spa_vdev_remove_thread() can be reading metaslab state
3936 	 * concurrently, and it is locked out by the ms_sync_lock.
3937 	 * Note that the ms_lock is insufficient for this, because it
3938 	 * is dropped by space_map_write().
3939 	 */
3940 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
3941 
3942 	/*
3943 	 * Generate a log space map if one doesn't exist already.
3944 	 */
3945 	spa_generate_syncing_log_sm(spa, tx);
3946 
3947 	if (msp->ms_sm == NULL) {
3948 		uint64_t new_object = space_map_alloc(mos,
3949 		    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3950 		    zfs_metaslab_sm_blksz_with_log :
3951 		    zfs_metaslab_sm_blksz_no_log, tx);
3952 		VERIFY3U(new_object, !=, 0);
3953 
3954 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
3955 		    msp->ms_id, sizeof (uint64_t), &new_object, tx);
3956 
3957 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
3958 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
3959 		ASSERT(msp->ms_sm != NULL);
3960 
3961 		ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3962 		ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3963 		ASSERT0(metaslab_allocated_space(msp));
3964 	}
3965 
3966 	if (metaslab_unflushed_txg(msp) == 0 &&
3967 	    spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
3968 		ASSERT(spa_syncing_log_sm(spa) != NULL);
3969 
3970 		metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3971 		spa_log_sm_increment_current_mscount(spa);
3972 		spa_log_summary_add_flushed_metaslab(spa);
3973 
3974 		ASSERT(msp->ms_sm != NULL);
3975 		mutex_enter(&spa->spa_flushed_ms_lock);
3976 		avl_add(&spa->spa_metaslabs_by_flushed, msp);
3977 		mutex_exit(&spa->spa_flushed_ms_lock);
3978 
3979 		ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3980 		ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3981 	}
3982 
3983 	if (!range_tree_is_empty(msp->ms_checkpointing) &&
3984 	    vd->vdev_checkpoint_sm == NULL) {
3985 		ASSERT(spa_has_checkpoint(spa));
3986 
3987 		uint64_t new_object = space_map_alloc(mos,
3988 		    zfs_vdev_standard_sm_blksz, tx);
3989 		VERIFY3U(new_object, !=, 0);
3990 
3991 		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
3992 		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
3993 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
3994 
3995 		/*
3996 		 * We save the space map object as an entry in vdev_top_zap
3997 		 * so it can be retrieved when the pool is reopened after an
3998 		 * export or through zdb.
3999 		 */
4000 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4001 		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4002 		    sizeof (new_object), 1, &new_object, tx));
4003 	}
4004 
4005 	mutex_enter(&msp->ms_sync_lock);
4006 	mutex_enter(&msp->ms_lock);
4007 
4008 	/*
4009 	 * Note: metaslab_condense() clears the space map's histogram.
4010 	 * Therefore we must verify and remove this histogram before
4011 	 * condensing.
4012 	 */
4013 	metaslab_group_histogram_verify(mg);
4014 	metaslab_class_histogram_verify(mg->mg_class);
4015 	metaslab_group_histogram_remove(mg, msp);
4016 
4017 	if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4018 	    metaslab_should_condense(msp))
4019 		metaslab_condense(msp, tx);
4020 
4021 	/*
4022 	 * We'll be going to disk to sync our space accounting, thus we
4023 	 * drop the ms_lock during that time so allocations coming from
4024 	 * open-context (ZIL) for future TXGs do not block.
4025 	 */
4026 	mutex_exit(&msp->ms_lock);
4027 	space_map_t *log_sm = spa_syncing_log_sm(spa);
4028 	if (log_sm != NULL) {
4029 		ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4030 
4031 		space_map_write(log_sm, alloctree, SM_ALLOC,
4032 		    vd->vdev_id, tx);
4033 		space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4034 		    vd->vdev_id, tx);
4035 		mutex_enter(&msp->ms_lock);
4036 
4037 		ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4038 		    metaslab_unflushed_changes_memused(msp));
4039 		spa->spa_unflushed_stats.sus_memused -=
4040 		    metaslab_unflushed_changes_memused(msp);
4041 		range_tree_remove_xor_add(alloctree,
4042 		    msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4043 		range_tree_remove_xor_add(msp->ms_freeing,
4044 		    msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4045 		spa->spa_unflushed_stats.sus_memused +=
4046 		    metaslab_unflushed_changes_memused(msp);
4047 	} else {
4048 		ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4049 
4050 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4051 		    SM_NO_VDEVID, tx);
4052 		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4053 		    SM_NO_VDEVID, tx);
4054 		mutex_enter(&msp->ms_lock);
4055 	}
4056 
4057 	msp->ms_allocated_space += range_tree_space(alloctree);
4058 	ASSERT3U(msp->ms_allocated_space, >=,
4059 	    range_tree_space(msp->ms_freeing));
4060 	msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4061 
4062 	if (!range_tree_is_empty(msp->ms_checkpointing)) {
4063 		ASSERT(spa_has_checkpoint(spa));
4064 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4065 
4066 		/*
4067 		 * Since we are doing writes to disk and the ms_checkpointing
4068 		 * tree won't be changing during that time, we drop the
4069 		 * ms_lock while writing to the checkpoint space map, for the
4070 		 * same reason mentioned above.
4071 		 */
4072 		mutex_exit(&msp->ms_lock);
4073 		space_map_write(vd->vdev_checkpoint_sm,
4074 		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4075 		mutex_enter(&msp->ms_lock);
4076 
4077 		spa->spa_checkpoint_info.sci_dspace +=
4078 		    range_tree_space(msp->ms_checkpointing);
4079 		vd->vdev_stat.vs_checkpoint_space +=
4080 		    range_tree_space(msp->ms_checkpointing);
4081 		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4082 		    -space_map_allocated(vd->vdev_checkpoint_sm));
4083 
4084 		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4085 	}
4086 
4087 	if (msp->ms_loaded) {
4088 		/*
4089 		 * When the space map is loaded, we have an accurate
4090 		 * histogram in the range tree. This gives us an opportunity
4091 		 * to bring the space map's histogram up-to-date so we clear
4092 		 * it first before updating it.
4093 		 */
4094 		space_map_histogram_clear(msp->ms_sm);
4095 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4096 
4097 		/*
4098 		 * Since we've cleared the histogram we need to add back
4099 		 * any free space that has already been processed, plus
4100 		 * any deferred space. This allows the on-disk histogram
4101 		 * to accurately reflect all free space even if some space
4102 		 * is not yet available for allocation (i.e. deferred).
4103 		 */
4104 		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4105 
4106 		/*
4107 		 * Add back any deferred free space that has not been
4108 		 * added back into the in-core free tree yet. This will
4109 		 * ensure that we don't end up with a space map histogram
4110 		 * that is completely empty unless the metaslab is fully
4111 		 * allocated.
4112 		 */
4113 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4114 			space_map_histogram_add(msp->ms_sm,
4115 			    msp->ms_defer[t], tx);
4116 		}
4117 	}
4118 
4119 	/*
4120 	 * Always add the free space from this sync pass to the space
4121 	 * map histogram. We want to make sure that the on-disk histogram
4122 	 * accounts for all free space. If the space map is not loaded,
4123 	 * then we will lose some accuracy but will correct it the next
4124 	 * time we load the space map.
4125 	 */
4126 	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4127 	metaslab_aux_histograms_update(msp);
4128 
4129 	metaslab_group_histogram_add(mg, msp);
4130 	metaslab_group_histogram_verify(mg);
4131 	metaslab_class_histogram_verify(mg->mg_class);
4132 
4133 	/*
4134 	 * For sync pass 1, we avoid traversing this txg's free range tree
4135 	 * and instead will just swap the pointers for freeing and freed.
4136 	 * We can safely do this since the freed_tree is guaranteed to be
4137 	 * empty on the initial pass.
4138 	 *
4139 	 * Keep in mind that even if we are currently using a log spacemap
4140 	 * we want current frees to end up in the ms_allocatable (but not
4141 	 * get appended to the ms_sm) so their ranges can be reused as usual.
4142 	 */
4143 	if (spa_sync_pass(spa) == 1) {
4144 		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4145 		ASSERT0(msp->ms_allocated_this_txg);
4146 	} else {
4147 		range_tree_vacate(msp->ms_freeing,
4148 		    range_tree_add, msp->ms_freed);
4149 	}
4150 	msp->ms_allocated_this_txg += range_tree_space(alloctree);
4151 	range_tree_vacate(alloctree, NULL, NULL);
4152 
4153 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4154 	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4155 	    & TXG_MASK]));
4156 	ASSERT0(range_tree_space(msp->ms_freeing));
4157 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4158 
4159 	mutex_exit(&msp->ms_lock);
4160 
4161 	/*
4162 	 * Verify that the space map object ID has been recorded in the
4163 	 * vdev_ms_array.
4164 	 */
4165 	uint64_t object;
4166 	VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4167 	    msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4168 	VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4169 
4170 	mutex_exit(&msp->ms_sync_lock);
4171 	dmu_tx_commit(tx);
4172 }
4173 
4174 static void
4175 metaslab_evict(metaslab_t *msp, uint64_t txg)
4176 {
4177 	if (!msp->ms_loaded || msp->ms_disabled != 0)
4178 		return;
4179 
4180 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4181 		VERIFY0(range_tree_space(
4182 		    msp->ms_allocating[(txg + t) & TXG_MASK]));
4183 	}
4184 	if (msp->ms_allocator != -1)
4185 		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4186 
4187 	if (!metaslab_debug_unload)
4188 		metaslab_unload(msp);
4189 }
4190 
4191 /*
4192  * Called after a transaction group has completely synced to mark
4193  * all of the metaslab's free space as usable.
4194  */
4195 void
4196 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4197 {
4198 	metaslab_group_t *mg = msp->ms_group;
4199 	vdev_t *vd = mg->mg_vd;
4200 	spa_t *spa = vd->vdev_spa;
4201 	range_tree_t **defer_tree;
4202 	int64_t alloc_delta, defer_delta;
4203 	boolean_t defer_allowed = B_TRUE;
4204 
4205 	ASSERT(!vd->vdev_ishole);
4206 
4207 	mutex_enter(&msp->ms_lock);
4208 
4209 	/*
4210 	 * If this metaslab is just becoming available, initialize its
4211 	 * range trees and add its capacity to the vdev.
4212 	 */
4213 	if (msp->ms_freed == NULL) {
4214 		range_seg_type_t type;
4215 		uint64_t shift, start;
4216 		type = metaslab_calculate_range_tree_type(vd, msp, &start,
4217 		    &shift);
4218 
4219 		for (int t = 0; t < TXG_SIZE; t++) {
4220 			ASSERT(msp->ms_allocating[t] == NULL);
4221 
4222 			msp->ms_allocating[t] = range_tree_create(NULL, type,
4223 			    NULL, start, shift);
4224 		}
4225 
4226 		ASSERT3P(msp->ms_freeing, ==, NULL);
4227 		msp->ms_freeing = range_tree_create(NULL, type, NULL, start,
4228 		    shift);
4229 
4230 		ASSERT3P(msp->ms_freed, ==, NULL);
4231 		msp->ms_freed = range_tree_create(NULL, type, NULL, start,
4232 		    shift);
4233 
4234 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4235 			ASSERT3P(msp->ms_defer[t], ==, NULL);
4236 			msp->ms_defer[t] = range_tree_create(NULL, type, NULL,
4237 			    start, shift);
4238 		}
4239 
4240 		ASSERT3P(msp->ms_checkpointing, ==, NULL);
4241 		msp->ms_checkpointing = range_tree_create(NULL, type, NULL,
4242 		    start, shift);
4243 
4244 		ASSERT3P(msp->ms_unflushed_allocs, ==, NULL);
4245 		msp->ms_unflushed_allocs = range_tree_create(NULL, type, NULL,
4246 		    start, shift);
4247 
4248 		metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
4249 		mrap->mra_bt = &msp->ms_unflushed_frees_by_size;
4250 		mrap->mra_floor_shift = metaslab_by_size_min_shift;
4251 		ASSERT3P(msp->ms_unflushed_frees, ==, NULL);
4252 		msp->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
4253 		    type, mrap, start, shift);
4254 
4255 		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4256 	}
4257 	ASSERT0(range_tree_space(msp->ms_freeing));
4258 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4259 
4260 	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4261 
4262 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4263 	    metaslab_class_get_alloc(spa_normal_class(spa));
4264 	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4265 		defer_allowed = B_FALSE;
4266 	}
4267 
4268 	defer_delta = 0;
4269 	alloc_delta = msp->ms_allocated_this_txg -
4270 	    range_tree_space(msp->ms_freed);
4271 
4272 	if (defer_allowed) {
4273 		defer_delta = range_tree_space(msp->ms_freed) -
4274 		    range_tree_space(*defer_tree);
4275 	} else {
4276 		defer_delta -= range_tree_space(*defer_tree);
4277 	}
4278 	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4279 	    defer_delta, 0);
4280 
4281 	if (spa_syncing_log_sm(spa) == NULL) {
4282 		/*
4283 		 * If there's a metaslab_load() in progress and we don't have
4284 		 * a log space map, it means that we probably wrote to the
4285 		 * metaslab's space map. If this is the case, we need to
4286 		 * make sure that we wait for the load to complete so that we
4287 		 * have a consistent view at the in-core side of the metaslab.
4288 		 */
4289 		metaslab_load_wait(msp);
4290 	} else {
4291 		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4292 	}
4293 
4294 	/*
4295 	 * When auto-trimming is enabled, free ranges which are added to
4296 	 * ms_allocatable are also added to ms_trim.  The ms_trim tree is
4297 	 * periodically consumed by the vdev_autotrim_thread() which issues
4298 	 * trims for all ranges and then vacates the tree.  The ms_trim tree
4299 	 * can be discarded at any time with the sole consequence of recent
4300 	 * frees not being trimmed.
4301 	 */
4302 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4303 		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4304 		if (!defer_allowed) {
4305 			range_tree_walk(msp->ms_freed, range_tree_add,
4306 			    msp->ms_trim);
4307 		}
4308 	} else {
4309 		range_tree_vacate(msp->ms_trim, NULL, NULL);
4310 	}
4311 
4312 	/*
4313 	 * Move the frees from the defer_tree back to the free
4314 	 * range tree (if it's loaded). Swap the freed_tree and
4315 	 * the defer_tree -- this is safe to do because we've
4316 	 * just emptied out the defer_tree.
4317 	 */
4318 	range_tree_vacate(*defer_tree,
4319 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4320 	if (defer_allowed) {
4321 		range_tree_swap(&msp->ms_freed, defer_tree);
4322 	} else {
4323 		range_tree_vacate(msp->ms_freed,
4324 		    msp->ms_loaded ? range_tree_add : NULL,
4325 		    msp->ms_allocatable);
4326 	}
4327 
4328 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4329 
4330 	msp->ms_deferspace += defer_delta;
4331 	ASSERT3S(msp->ms_deferspace, >=, 0);
4332 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4333 	if (msp->ms_deferspace != 0) {
4334 		/*
4335 		 * Keep syncing this metaslab until all deferred frees
4336 		 * are back in circulation.
4337 		 */
4338 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4339 	}
4340 	metaslab_aux_histograms_update_done(msp, defer_allowed);
4341 
4342 	if (msp->ms_new) {
4343 		msp->ms_new = B_FALSE;
4344 		mutex_enter(&mg->mg_lock);
4345 		mg->mg_ms_ready++;
4346 		mutex_exit(&mg->mg_lock);
4347 	}
4348 
4349 	/*
4350 	 * Re-sort metaslab within its group now that we've adjusted
4351 	 * its allocatable space.
4352 	 */
4353 	metaslab_recalculate_weight_and_sort(msp);
4354 
4355 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4356 	ASSERT0(range_tree_space(msp->ms_freeing));
4357 	ASSERT0(range_tree_space(msp->ms_freed));
4358 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4359 	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4360 	msp->ms_allocated_this_txg = 0;
4361 	mutex_exit(&msp->ms_lock);
4362 }
4363 
4364 void
4365 metaslab_sync_reassess(metaslab_group_t *mg)
4366 {
4367 	spa_t *spa = mg->mg_class->mc_spa;
4368 
4369 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4370 	metaslab_group_alloc_update(mg);
4371 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4372 
4373 	/*
4374 	 * Preload the next potential metaslabs but only on active
4375 	 * metaslab groups. We can get into a state where the metaslab
4376 	 * is no longer active since we dirty metaslabs as we remove a
4377 	 * device, thus potentially making the metaslab group eligible
4378 	 * for preloading.
4379 	 */
4380 	if (mg->mg_activation_count > 0) {
4381 		metaslab_group_preload(mg);
4382 	}
4383 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4384 }
4385 
4386 /*
4387  * When writing a ditto block (i.e. more than one DVA for a given BP) on
4388  * the same vdev as an existing DVA of this BP, try to allocate it
4389  * on a different metaslab than the existing DVAs (i.e. a unique metaslab).
4390  */
4391 static boolean_t
4392 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4393 {
4394 	uint64_t dva_ms_id;
4395 
4396 	if (DVA_GET_ASIZE(dva) == 0)
4397 		return (B_TRUE);
4398 
4399 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4400 		return (B_TRUE);
4401 
4402 	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4403 
4404 	return (msp->ms_id != dva_ms_id);
4405 }
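
/*
 * For illustration: the metaslab holding an existing DVA is found by
 * shifting the DVA's offset down by the vdev's ms_shift. Assuming 16GB
 * metaslabs (vdev_ms_shift = 34), a DVA offset of 0x900000000 (36GB) maps
 * to metaslab id 2, so a new DVA for the same BP on this vdev is unique
 * only if it lands in a metaslab other than id 2.
 */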
4406 
4407 /*
4408  * ==========================================================================
4409  * Metaslab allocation tracing facility
4410  * ==========================================================================
4411  */
4412 
4413 /*
4414  * Add an allocation trace element to the allocation tracing list.
4415  */
4416 static void
4417 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4418     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4419     int allocator)
4420 {
4421 	metaslab_alloc_trace_t *mat;
4422 
4423 	if (!metaslab_trace_enabled)
4424 		return;
4425 
4426 	/*
4427 	 * When the tracing list reaches its maximum we remove
4428 	 * the second element in the list before adding a new one.
4429 	 * By removing the second element we preserve the original
4430 	 * entry as a clue to what allocation steps have already been
4431 	 * performed.
4432 	 */
4433 	if (zal->zal_size == metaslab_trace_max_entries) {
4434 		metaslab_alloc_trace_t *mat_next;
4435 #ifdef ZFS_DEBUG
4436 		panic("too many entries in allocation list");
4437 #endif
4438 		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4439 		zal->zal_size--;
4440 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4441 		list_remove(&zal->zal_list, mat_next);
4442 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4443 	}
4444 
4445 	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4446 	list_link_init(&mat->mat_list_node);
4447 	mat->mat_mg = mg;
4448 	mat->mat_msp = msp;
4449 	mat->mat_size = psize;
4450 	mat->mat_dva_id = dva_id;
4451 	mat->mat_offset = offset;
4452 	mat->mat_weight = 0;
4453 	mat->mat_allocator = allocator;
4454 
4455 	if (msp != NULL)
4456 		mat->mat_weight = msp->ms_weight;
4457 
4458 	/*
4459 	 * The list is part of the zio so locking is not required. Only
4460 	 * a single thread will perform allocations for a given zio.
4461 	 */
4462 	list_insert_tail(&zal->zal_list, mat);
4463 	zal->zal_size++;
4464 
4465 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4466 }
4467 
4468 void
4469 metaslab_trace_init(zio_alloc_list_t *zal)
4470 {
4471 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4472 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
4473 	zal->zal_size = 0;
4474 }
4475 
4476 void
4477 metaslab_trace_fini(zio_alloc_list_t *zal)
4478 {
4479 	metaslab_alloc_trace_t *mat;
4480 
4481 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4482 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
4483 	list_destroy(&zal->zal_list);
4484 	zal->zal_size = 0;
4485 }
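
/*
 * A sketch of the intended usage of the tracing interfaces above (the
 * surrounding call sites, e.g. in the zio pipeline, may differ):
 *
 *	zio_alloc_list_t zal;
 *	metaslab_trace_init(&zal);
 *	... allocation attempts record entries via metaslab_trace_add() ...
 *	metaslab_trace_fini(&zal);	(frees all recorded trace entries)
 */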
4486 
4487 /*
4488  * ==========================================================================
4489  * Metaslab block operations
4490  * ==========================================================================
4491  */
4492 
4493 static void
4494 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
4495     int allocator)
4496 {
4497 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4498 	    (flags & METASLAB_DONT_THROTTLE))
4499 		return;
4500 
4501 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4502 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4503 		return;
4504 
4505 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4506 	(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
4507 }
4508 
4509 static void
4510 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4511 {
4512 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4513 	metaslab_class_allocator_t *mca =
4514 	    &mg->mg_class->mc_allocator[allocator];
4515 	uint64_t max = mg->mg_max_alloc_queue_depth;
4516 	uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4517 	while (cur < max) {
4518 		if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4519 		    cur, cur + 1) == cur) {
4520 			atomic_inc_64(&mca->mca_alloc_max_slots);
4521 			return;
4522 		}
4523 		cur = mga->mga_cur_max_alloc_queue_depth;
4524 	}
4525 }
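
/*
 * The CAS loop above ramps the per-allocator queue depth back up by one
 * slot per completed async allocation, never exceeding
 * mg_max_alloc_queue_depth, and mirrors each successful bump into the
 * class-wide mca_alloc_max_slots so the allocation throttle sees the
 * increased capacity.
 */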
4526 
4527 void
4528 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
4529     int allocator, boolean_t io_complete)
4530 {
4531 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4532 	    (flags & METASLAB_DONT_THROTTLE))
4533 		return;
4534 
4535 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4536 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4537 		return;
4538 
4539 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4540 	(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4541 	if (io_complete)
4542 		metaslab_group_increment_qdepth(mg, allocator);
4543 }
4544 
4545 void
4546 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
4547     int allocator)
4548 {
4549 #ifdef ZFS_DEBUG
4550 	const dva_t *dva = bp->blk_dva;
4551 	int ndvas = BP_GET_NDVAS(bp);
4552 
4553 	for (int d = 0; d < ndvas; d++) {
4554 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4555 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4556 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4557 		VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
4558 	}
4559 #endif
4560 }
4561 
4562 static uint64_t
4563 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4564 {
4565 	uint64_t start;
4566 	range_tree_t *rt = msp->ms_allocatable;
4567 	metaslab_class_t *mc = msp->ms_group->mg_class;
4568 
4569 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4570 	VERIFY(!msp->ms_condensing);
4571 	VERIFY0(msp->ms_disabled);
4572 
4573 	start = mc->mc_ops->msop_alloc(msp, size);
4574 	if (start != -1ULL) {
4575 		metaslab_group_t *mg = msp->ms_group;
4576 		vdev_t *vd = mg->mg_vd;
4577 
4578 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4579 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4580 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4581 		range_tree_remove(rt, start, size);
4582 		range_tree_clear(msp->ms_trim, start, size);
4583 
4584 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4585 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4586 
4587 		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4588 		msp->ms_allocating_total += size;
4589 
4590 		/* Track the last successful allocation */
4591 		msp->ms_alloc_txg = txg;
4592 		metaslab_verify_space(msp, txg);
4593 	}
4594 
4595 	/*
4596 	 * Now that we've attempted the allocation we need to update the
4597 	 * metaslab's maximum block size since it may have changed.
4598 	 */
4599 	msp->ms_max_size = metaslab_largest_allocatable(msp);
4600 	return (start);
4601 }
4602 
4603 /*
4604  * Find the metaslab with the highest weight that is less than what we've
4605  * already tried.  In the common case, this means that we will examine each
4606  * metaslab at most once. Note that concurrent callers could reorder metaslabs
4607  * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4608  * activated by another thread, and we fail to allocate from the metaslab we
4609  * have selected, we may not try the newly-activated metaslab, and instead
4610  * activate another metaslab.  This is not optimal, but generally does not cause
4611  * any problems (a possible exception being if every metaslab is completely full
4612  * except for the newly-activated metaslab which we fail to examine).
4613  */
4614 static metaslab_t *
4615 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4616     dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4617     boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4618     boolean_t *was_active)
4619 {
4620 	avl_index_t idx;
4621 	avl_tree_t *t = &mg->mg_metaslab_tree;
4622 	metaslab_t *msp = avl_find(t, search, &idx);
4623 	if (msp == NULL)
4624 		msp = avl_nearest(t, idx, AVL_AFTER);
4625 
4626 	int tries = 0;
4627 	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4628 		int i;
4629 
4630 		if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4631 			METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4632 			return (NULL);
4633 		}
4634 		tries++;
4635 
4636 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4637 			metaslab_trace_add(zal, mg, msp, asize, d,
4638 			    TRACE_TOO_SMALL, allocator);
4639 			continue;
4640 		}
4641 
4642 		/*
4643 		 * If the selected metaslab is condensing or disabled,
4644 		 * skip it.
4645 		 */
4646 		if (msp->ms_condensing || msp->ms_disabled > 0)
4647 			continue;
4648 
4649 		*was_active = msp->ms_allocator != -1;
4650 		/*
4651 		 * If we're activating as primary, this is our first allocation
4652 		 * from this disk, so we don't need to check how close we are.
4653 		 * If the metaslab under consideration was already active,
4654 		 * we're getting desperate enough to steal another allocator's
4655 		 * metaslab, so we still don't care about distances.
4656 		 */
4657 		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4658 			break;
4659 
4660 		for (i = 0; i < d; i++) {
4661 			if (want_unique &&
4662 			    !metaslab_is_unique(msp, &dva[i]))
4663 				break;  /* try another metaslab */
4664 		}
4665 		if (i == d)
4666 			break;
4667 	}
4668 
4669 	if (msp != NULL) {
4670 		search->ms_weight = msp->ms_weight;
4671 		search->ms_start = msp->ms_start + 1;
4672 		search->ms_allocator = msp->ms_allocator;
4673 		search->ms_primary = msp->ms_primary;
4674 	}
4675 	return (msp);
4676 }
4677 
4678 static void
4679 metaslab_active_mask_verify(metaslab_t *msp)
4680 {
4681 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4682 
4683 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4684 		return;
4685 
4686 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4687 		return;
4688 
4689 	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4690 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4691 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4692 		VERIFY3S(msp->ms_allocator, !=, -1);
4693 		VERIFY(msp->ms_primary);
4694 		return;
4695 	}
4696 
4697 	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4698 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4699 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4700 		VERIFY3S(msp->ms_allocator, !=, -1);
4701 		VERIFY(!msp->ms_primary);
4702 		return;
4703 	}
4704 
4705 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4706 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4707 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4708 		VERIFY3S(msp->ms_allocator, ==, -1);
4709 		return;
4710 	}
4711 }
4712 
4713 /* ARGSUSED */
4714 static uint64_t
4715 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4716     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4717     int allocator, boolean_t try_hard)
4718 {
4719 	metaslab_t *msp = NULL;
4720 	uint64_t offset = -1ULL;
4721 
4722 	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4723 	for (int i = 0; i < d; i++) {
4724 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4725 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4726 			activation_weight = METASLAB_WEIGHT_SECONDARY;
4727 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4728 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4729 			activation_weight = METASLAB_WEIGHT_CLAIM;
4730 			break;
4731 		}
4732 	}
4733 
4734 	/*
4735 	 * If we don't have enough metaslabs active to fill the entire array, we
4736 	 * just use the 0th slot.
4737 	 */
4738 	if (mg->mg_ms_ready < mg->mg_allocators * 3)
4739 		allocator = 0;
4740 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4741 
4742 	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4743 
4744 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4745 	search->ms_weight = UINT64_MAX;
4746 	search->ms_start = 0;
4747 	/*
4748 	 * At the end of the metaslab tree are the already-active metaslabs,
4749 	 * first the primaries, then the secondaries. When we resume searching
4750 	 * through the tree, we need to consider ms_allocator and ms_primary so
4751 	 * we start in the location right after where we left off, and don't
4752 	 * accidentally loop forever considering the same metaslabs.
4753 	 */
4754 	search->ms_allocator = -1;
4755 	search->ms_primary = B_TRUE;
4756 	for (;;) {
4757 		boolean_t was_active = B_FALSE;
4758 
4759 		mutex_enter(&mg->mg_lock);
4760 
4761 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4762 		    mga->mga_primary != NULL) {
4763 			msp = mga->mga_primary;
4764 
4765 			/*
4766 			 * Even though we don't hold the ms_lock for the
4767 			 * primary metaslab, those fields should not
4768 			 * change while we hold the mg_lock. Thus it is
4769 			 * safe to make assertions on them.
4770 			 */
4771 			ASSERT(msp->ms_primary);
4772 			ASSERT3S(msp->ms_allocator, ==, allocator);
4773 			ASSERT(msp->ms_loaded);
4774 
4775 			was_active = B_TRUE;
4776 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4777 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4778 		    mga->mga_secondary != NULL) {
4779 			msp = mga->mga_secondary;
4780 
4781 			/*
4782 			 * See comment above about the similar assertions
4783 			 * for the primary metaslab.
4784 			 */
4785 			ASSERT(!msp->ms_primary);
4786 			ASSERT3S(msp->ms_allocator, ==, allocator);
4787 			ASSERT(msp->ms_loaded);
4788 
4789 			was_active = B_TRUE;
4790 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4791 		} else {
4792 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
4793 			    want_unique, asize, allocator, try_hard, zal,
4794 			    search, &was_active);
4795 		}
4796 
4797 		mutex_exit(&mg->mg_lock);
4798 		if (msp == NULL) {
4799 			kmem_free(search, sizeof (*search));
4800 			return (-1ULL);
4801 		}
4802 		mutex_enter(&msp->ms_lock);
4803 
4804 		metaslab_active_mask_verify(msp);
4805 
4806 		/*
4807 		 * This code is disabled because of issues with
4808 		 * tracepoints in non-gpl kernel modules.
4809 		 */
4810 #if 0
4811 		DTRACE_PROBE3(ms__activation__attempt,
4812 		    metaslab_t *, msp, uint64_t, activation_weight,
4813 		    boolean_t, was_active);
4814 #endif
4815 
4816 		/*
4817 		 * Ensure that the metaslab we have selected is still
4818 		 * capable of handling our request. It's possible that
4819 		 * another thread may have changed the weight while we
4820 		 * were blocked on the metaslab lock. We check the
4821 		 * active status first to see if we need to go back and
4822 		 * select a new metaslab.
4823 		 */
4824 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4825 			ASSERT3S(msp->ms_allocator, ==, -1);
4826 			mutex_exit(&msp->ms_lock);
4827 			continue;
4828 		}
4829 
4830 		/*
4831 		 * If the metaslab was activated for another allocator
4832 		 * while we were waiting on the ms_lock above, or it's
4833 		 * a primary and we're seeking a secondary (or vice versa),
4834 		 * we go back and select a new metaslab.
4835 		 */
4836 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4837 		    (msp->ms_allocator != -1) &&
4838 		    (msp->ms_allocator != allocator || ((activation_weight ==
4839 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4840 			ASSERT(msp->ms_loaded);
4841 			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4842 			    msp->ms_allocator != -1);
4843 			mutex_exit(&msp->ms_lock);
4844 			continue;
4845 		}
4846 
4847 		/*
4848 		 * This metaslab was used for claiming regions allocated
4849 		 * by the ZIL during pool import. Once these regions are
4850 		 * claimed we don't need to keep the CLAIM bit set
4851 		 * anymore. Passivate this metaslab to zero its activation
4852 		 * mask.
4853 		 */
4854 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4855 		    activation_weight != METASLAB_WEIGHT_CLAIM) {
4856 			ASSERT(msp->ms_loaded);
4857 			ASSERT3S(msp->ms_allocator, ==, -1);
4858 			metaslab_passivate(msp, msp->ms_weight &
4859 			    ~METASLAB_WEIGHT_CLAIM);
4860 			mutex_exit(&msp->ms_lock);
4861 			continue;
4862 		}
4863 
4864 		metaslab_set_selected_txg(msp, txg);
4865 
4866 		int activation_error =
4867 		    metaslab_activate(msp, allocator, activation_weight);
4868 		metaslab_active_mask_verify(msp);
4869 
4870 		/*
4871 		 * If the metaslab was activated by another thread for
4872 		 * another allocator or activation_weight (EBUSY), or it
4873 		 * failed because another metaslab was assigned as primary
4874 		 * for this allocator (EEXIST), we continue using this
4875 		 * metaslab for our allocation, rather than going on to a
4876 		 * worse metaslab (we waited for that metaslab to be loaded
4877 		 * after all).
4878 		 *
4879 		 * If the activation failed due to an I/O error or ENOSPC we
4880 		 * skip to the next metaslab.
4881 		 */
4882 		boolean_t activated;
4883 		if (activation_error == 0) {
4884 			activated = B_TRUE;
4885 		} else if (activation_error == EBUSY ||
4886 		    activation_error == EEXIST) {
4887 			activated = B_FALSE;
4888 		} else {
4889 			mutex_exit(&msp->ms_lock);
4890 			continue;
4891 		}
4892 		ASSERT(msp->ms_loaded);
4893 
4894 		/*
4895 		 * Now that we have the lock, recheck to see if we should
4896 		 * continue to use this metaslab for this allocation. The
4897 		 * metaslab is now loaded so metaslab_should_allocate()
4898 		 * can accurately determine if the allocation attempt should
4899 		 * proceed.
4900 		 */
4901 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4902 			/* Passivate this metaslab and select a new one. */
4903 			metaslab_trace_add(zal, mg, msp, asize, d,
4904 			    TRACE_TOO_SMALL, allocator);
4905 			goto next;
4906 		}
4907 
4908 		/*
4909 		 * If this metaslab is currently condensing then pick again
4910 		 * as we can't manipulate this metaslab until it's committed
4911 		 * to disk. If this metaslab is being initialized, we shouldn't
4912 		 * allocate from it since the allocated region might be
4913 		 * overwritten after allocation.
4914 		 */
4915 		if (msp->ms_condensing) {
4916 			metaslab_trace_add(zal, mg, msp, asize, d,
4917 			    TRACE_CONDENSING, allocator);
4918 			if (activated) {
4919 				metaslab_passivate(msp, msp->ms_weight &
4920 				    ~METASLAB_ACTIVE_MASK);
4921 			}
4922 			mutex_exit(&msp->ms_lock);
4923 			continue;
4924 		} else if (msp->ms_disabled > 0) {
4925 			metaslab_trace_add(zal, mg, msp, asize, d,
4926 			    TRACE_DISABLED, allocator);
4927 			if (activated) {
4928 				metaslab_passivate(msp, msp->ms_weight &
4929 				    ~METASLAB_ACTIVE_MASK);
4930 			}
4931 			mutex_exit(&msp->ms_lock);
4932 			continue;
4933 		}
4934 
4935 		offset = metaslab_block_alloc(msp, asize, txg);
4936 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4937 
4938 		if (offset != -1ULL) {
4939 			/* Proactively passivate the metaslab, if needed */
4940 			if (activated)
4941 				metaslab_segment_may_passivate(msp);
4942 			break;
4943 		}
4944 next:
4945 		ASSERT(msp->ms_loaded);
4946 
4947 		/*
4948 		 * This code is disabled because of issues with
4949 		 * tracepoints in non-gpl kernel modules.
4950 		 */
4951 #if 0
4952 		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4953 		    uint64_t, asize);
4954 #endif
4955 
4956 		/*
4957 		 * We were unable to allocate from this metaslab so determine
4958 		 * a new weight for this metaslab. Now that we have loaded
4959 		 * the metaslab we can provide a better hint to the metaslab
4960 		 * selector.
4961 		 *
4962 		 * For space-based metaslabs, we use the maximum block size.
4963 		 * This information is only available when the metaslab
4964 		 * is loaded and is more accurate than the generic free
4965 		 * space weight that was calculated by metaslab_weight().
4966 		 * This information allows us to quickly compare the maximum
4967 		 * available allocation in the metaslab to the allocation
4968 		 * size being requested.
4969 		 *
4970 		 * For segment-based metaslabs, determine the new weight
4971 		 * based on the highest bucket in the range tree. We
4972 		 * explicitly use the loaded segment weight (i.e. the range
4973 		 * tree histogram) since it contains the space that is
4974 		 * currently available for allocation and is accurate
4975 		 * even within a sync pass.
4976 		 */
4977 		uint64_t weight;
4978 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
4979 			weight = metaslab_largest_allocatable(msp);
4980 			WEIGHT_SET_SPACEBASED(weight);
4981 		} else {
4982 			weight = metaslab_weight_from_range_tree(msp);
4983 		}
4984 
4985 		if (activated) {
4986 			metaslab_passivate(msp, weight);
4987 		} else {
4988 			/*
4989 			 * For the case where we use the metaslab that is
4990 			 * active for another allocator we want to make
4991 			 * sure that we retain the activation mask.
4992 			 *
4993 			 * Note that we could attempt to use something like
4994 			 * metaslab_recalculate_weight_and_sort() that
4995 			 * retains the activation mask here. That function
4996 			 * uses metaslab_weight() to set the weight though
4997 			 * which is not as accurate as the calculations
4998 			 * above.
4999 			 */
5000 			weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5001 			metaslab_group_sort(mg, msp, weight);
5002 		}
5003 		metaslab_active_mask_verify(msp);
5004 
5005 		/*
5006 		 * We have just failed an allocation attempt, check
5007 		 * that metaslab_should_allocate() agrees. Otherwise,
5008 		 * we may end up in an infinite loop retrying the same
5009 		 * metaslab.
5010 		 */
5011 		ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5012 
5013 		mutex_exit(&msp->ms_lock);
5014 	}
5015 	mutex_exit(&msp->ms_lock);
5016 	kmem_free(search, sizeof (*search));
5017 	return (offset);
5018 }
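
/*
 * Illustrative summary of how the activation errors above are handled by
 * metaslab_group_alloc_normal(); the helper below is hypothetical and is
 * kept under #if 0, like other code in this file that is not built.
 */
#if 0
static boolean_t
metaslab_keep_using_sketch(int activation_error, boolean_t *activated)
{
	if (activation_error == 0) {
		/* We own the activation and must passivate when done. */
		*activated = B_TRUE;
		return (B_TRUE);
	}
	if (activation_error == EBUSY || activation_error == EEXIST) {
		/*
		 * Already activated by another thread, or another metaslab
		 * is already primary for this allocator: keep using it,
		 * but do not passivate it on the way out.
		 */
		*activated = B_FALSE;
		return (B_TRUE);
	}
	/* I/O error or ENOSPC: skip this metaslab entirely. */
	return (B_FALSE);
}
#endif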
5019 
5020 static uint64_t
5021 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5022     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5023     int allocator, boolean_t try_hard)
5024 {
5025 	uint64_t offset;
5026 	ASSERT(mg->mg_initialized);
5027 
5028 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5029 	    dva, d, allocator, try_hard);
5030 
5031 	mutex_enter(&mg->mg_lock);
5032 	if (offset == -1ULL) {
5033 		mg->mg_failed_allocations++;
5034 		metaslab_trace_add(zal, mg, NULL, asize, d,
5035 		    TRACE_GROUP_FAILURE, allocator);
5036 		if (asize == SPA_GANGBLOCKSIZE) {
5037 			/*
5038 			 * This metaslab group was unable to allocate
5039 			 * the minimum gang block size so it must be out of
5040 			 * space. We must notify the allocation throttle
5041 			 * to start skipping allocation attempts to this
5042 			 * metaslab group until more space becomes available.
5043 			 * Note: this failure cannot be caused by the
5044 			 * allocation throttle since the allocation throttle
5045 			 * is only responsible for skipping devices and
5046 			 * not failing block allocations.
5047 			 */
5048 			mg->mg_no_free_space = B_TRUE;
5049 		}
5050 	}
5051 	mg->mg_allocations++;
5052 	mutex_exit(&mg->mg_lock);
5053 	return (offset);
5054 }
5055 
5056 /*
5057  * Allocate a block for the specified i/o.
5058  */
5059 int
5060 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5061     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5062     zio_alloc_list_t *zal, int allocator)
5063 {
5064 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5065 	metaslab_group_t *mg, *fast_mg, *rotor;
5066 	vdev_t *vd;
5067 	boolean_t try_hard = B_FALSE;
5068 
5069 	ASSERT(!DVA_IS_VALID(&dva[d]));
5070 
5071 	/*
5072 	 * For testing, make some blocks above a certain size be gang blocks.
5073 	 * This will result in more split blocks when using device removal,
5074 	 * and a large number of split blocks coupled with ztest-induced
5075 	 * damage can result in extremely long reconstruction times.  This
5076 	 * will also test spilling from special to normal.
5077 	 */
5078 	if (psize >= metaslab_force_ganging && (spa_get_random(100) < 3)) {
5079 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5080 		    allocator);
5081 		return (SET_ERROR(ENOSPC));
5082 	}
5083 
5084 	/*
5085 	 * Start at the rotor and loop through all mgs until we find something.
5086 	 * Note that there's no locking on mca_rotor or mca_aliquot because
5087 	 * nothing actually breaks if we miss a few updates -- we just won't
5088 	 * allocate quite as evenly.  It all balances out over time.
5089 	 *
5090 	 * If we are doing ditto or log blocks, try to spread them across
5091 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
5092 	 * allocated all of our ditto blocks, then try to spread them out on
5093 	 * that vdev as much as possible.  If that turns out not to be possible,
5094 	 * gradually lower our standards until anything becomes acceptable.
5095 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5096 	 * gives us hope of containing our fault domains to something we're
5097 	 * able to reason about.  Otherwise, any two top-level vdev failures
5098 	 * will guarantee the loss of data.  With consecutive allocation,
5099 	 * only two adjacent top-level vdev failures will result in data loss.
5100 	 *
5101 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5102 	 * ourselves on the same vdev as our gang block header.  That
5103 	 * way, we can hope for locality in vdev_cache, plus it makes our
5104 	 * fault domains something tractable.
5105 	 */
5106 	if (hintdva) {
5107 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5108 
5109 		/*
5110 		 * It's possible the vdev we're using as the hint no
5111 		 * longer exists or its mg has been closed (e.g. by
5112 		 * device removal).  Consult the rotor when
5113 		 * all else fails.
5114 		 */
5115 		if (vd != NULL && vd->vdev_mg != NULL) {
5116 			mg = vd->vdev_mg;
5117 
5118 			if (flags & METASLAB_HINTBP_AVOID &&
5119 			    mg->mg_next != NULL)
5120 				mg = mg->mg_next;
5121 		} else {
5122 			mg = mca->mca_rotor;
5123 		}
5124 	} else if (d != 0) {
5125 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5126 		mg = vd->vdev_mg->mg_next;
5127 	} else if (flags & METASLAB_FASTWRITE) {
5128 		mg = fast_mg = mca->mca_rotor;
5129 
5130 		do {
5131 			if (fast_mg->mg_vd->vdev_pending_fastwrite <
5132 			    mg->mg_vd->vdev_pending_fastwrite)
5133 				mg = fast_mg;
5134 		} while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor);
5135 
5136 	} else {
5137 		ASSERT(mca->mca_rotor != NULL);
5138 		mg = mca->mca_rotor;
5139 	}
5140 
5141 	/*
5142 	 * If the hint put us into the wrong metaslab class, or into a
5143 	 * metaslab group that has been passivated, just follow the rotor.
5144 	 */
5145 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
5146 		mg = mca->mca_rotor;
5147 
5148 	rotor = mg;
5149 top:
5150 	do {
5151 		boolean_t allocatable;
5152 
5153 		ASSERT(mg->mg_activation_count == 1);
5154 		vd = mg->mg_vd;
5155 
5156 		/*
5157 		 * Don't allocate from faulted devices.
5158 		 */
5159 		if (try_hard) {
5160 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5161 			allocatable = vdev_allocatable(vd);
5162 			spa_config_exit(spa, SCL_ZIO, FTAG);
5163 		} else {
5164 			allocatable = vdev_allocatable(vd);
5165 		}
5166 
5167 		/*
5168 		 * Determine if the selected metaslab group is eligible
5169 		 * for allocations. If we're ganging then don't allow
5170 		 * this metaslab group to skip allocations since that would
5171 		 * inadvertently return ENOSPC and suspend the pool
5172 		 * even though space is still available.
5173 		 */
5174 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5175 			allocatable = metaslab_group_allocatable(mg, rotor,
5176 			    psize, allocator, d);
5177 		}
5178 
5179 		if (!allocatable) {
5180 			metaslab_trace_add(zal, mg, NULL, psize, d,
5181 			    TRACE_NOT_ALLOCATABLE, allocator);
5182 			goto next;
5183 		}
5184 
5185 		ASSERT(mg->mg_initialized);
5186 
5187 		/*
5188 		 * Avoid writing single-copy data to a failing,
5189 		 * non-redundant vdev, unless we've already tried all
5190 		 * other vdevs.
5191 		 */
5192 		if ((vd->vdev_stat.vs_write_errors > 0 ||
5193 		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
5194 		    d == 0 && !try_hard && vd->vdev_children == 0) {
5195 			metaslab_trace_add(zal, mg, NULL, psize, d,
5196 			    TRACE_VDEV_ERROR, allocator);
5197 			goto next;
5198 		}
5199 
5200 		ASSERT(mg->mg_class == mc);
5201 
5202 		uint64_t asize = vdev_psize_to_asize(vd, psize);
5203 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5204 
5205 		/*
5206 		 * If we don't need to try hard, then require that the
5207 		 * block be on a different metaslab from any other DVAs
5208 		 * in this BP (unique=true).  If we are trying hard, then
5209 		 * allow any metaslab to be used (unique=false).
5210 		 */
5211 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5212 		    !try_hard, dva, d, allocator, try_hard);
5213 
5214 		if (offset != -1ULL) {
5215 			/*
5216 			 * If we've just selected this metaslab group,
5217 			 * figure out whether the corresponding vdev is
5218 			 * over- or under-used relative to the pool,
5219 			 * and set an allocation bias to even it out.
5220 			 *
5221 			 * Bias is also used to compensate for unequally
5222 			 * sized vdevs so that space is allocated fairly.
5223 			 */
5224 			if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5225 				vdev_stat_t *vs = &vd->vdev_stat;
5226 				int64_t vs_free = vs->vs_space - vs->vs_alloc;
5227 				int64_t mc_free = mc->mc_space - mc->mc_alloc;
5228 				int64_t ratio;
5229 
5230 				/*
5231 				 * Calculate how much more or less we should
5232 				 * try to allocate from this device during
5233 				 * this iteration around the rotor.
5234 				 *
5235 				 * This basically introduces a zero-centered
5236 				 * bias towards the devices with the most
5237 				 * free space, while compensating for vdev
5238 				 * size differences.
5239 				 *
5240 				 * Examples:
5241 				 *  vdev V1 = 16M/128M
5242 				 *  vdev V2 = 16M/128M
5243 				 *  ratio(V1) = 100% ratio(V2) = 100%
5244 				 *
5245 				 *  vdev V1 = 16M/128M
5246 				 *  vdev V2 = 64M/128M
5247 				 *  ratio(V1) = 127% ratio(V2) =  72%
5248 				 *
5249 				 *  vdev V1 = 16M/128M
5250 				 *  vdev V2 = 64M/512M
5251 				 *  ratio(V1) =  40% ratio(V2) = 160%
5252 				 */
5253 				ratio = (vs_free * mc->mc_alloc_groups * 100) /
5254 				    (mc_free + 1);
5255 				mg->mg_bias = ((ratio - 100) *
5256 				    (int64_t)mg->mg_aliquot) / 100;
5257 			} else if (!metaslab_bias_enabled) {
5258 				mg->mg_bias = 0;
5259 			}
5260 
5261 			if ((flags & METASLAB_FASTWRITE) ||
5262 			    atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5263 			    mg->mg_aliquot + mg->mg_bias) {
5264 				mca->mca_rotor = mg->mg_next;
5265 				mca->mca_aliquot = 0;
5266 			}
5267 
5268 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
5269 			DVA_SET_OFFSET(&dva[d], offset);
5270 			DVA_SET_GANG(&dva[d],
5271 			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5272 			DVA_SET_ASIZE(&dva[d], asize);
5273 
5274 			if (flags & METASLAB_FASTWRITE) {
5275 				atomic_add_64(&vd->vdev_pending_fastwrite,
5276 				    psize);
5277 			}
5278 
5279 			return (0);
5280 		}
5281 next:
5282 		mca->mca_rotor = mg->mg_next;
5283 		mca->mca_aliquot = 0;
5284 	} while ((mg = mg->mg_next) != rotor);
5285 
5286 	/*
5287 	 * If we haven't tried hard, perhaps do so now.
5288 	 */
5289 	if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5290 	    GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5291 	    psize <= 1 << spa->spa_min_ashift)) {
5292 		METASLABSTAT_BUMP(metaslabstat_try_hard);
5293 		try_hard = B_TRUE;
5294 		goto top;
5295 	}
5296 
5297 	bzero(&dva[d], sizeof (dva_t));
5298 
5299 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5300 	return (SET_ERROR(ENOSPC));
5301 }
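
/*
 * Illustrative sketch of the bias arithmetic used in metaslab_alloc_dva()
 * above, with the V1/V2 figures from that comment as a worked example.
 * The helper name and its parameters are hypothetical, and the block is
 * kept under #if 0, like other code in this file that is not built.
 */
#if 0
static int64_t
metaslab_bias_sketch(int64_t vs_space, int64_t vs_alloc, int64_t mc_space,
    int64_t mc_alloc, int64_t mc_alloc_groups, int64_t mg_aliquot)
{
	int64_t vs_free = vs_space - vs_alloc;	/* free space on this vdev */
	int64_t mc_free = mc_space - mc_alloc;	/* free space in the class */

	/*
	 * A ratio of 100 means this vdev holds exactly its proportional
	 * share of the class's free space.  With two groups, V1 = 16M/128M
	 * and V2 = 64M/128M give ratio(V1) = 127 and ratio(V2) = 72.
	 */
	int64_t ratio = (vs_free * mc_alloc_groups * 100) / (mc_free + 1);

	/* A positive bias lets the rotor linger longer on this vdev. */
	return (((ratio - 100) * mg_aliquot) / 100);
}
#endif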
5302 
5303 void
5304 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5305     boolean_t checkpoint)
5306 {
5307 	metaslab_t *msp;
5308 	spa_t *spa = vd->vdev_spa;
5309 
5310 	ASSERT(vdev_is_concrete(vd));
5311 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5312 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5313 
5314 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5315 
5316 	VERIFY(!msp->ms_condensing);
5317 	VERIFY3U(offset, >=, msp->ms_start);
5318 	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5319 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5320 	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5321 
5322 	metaslab_check_free_impl(vd, offset, asize);
5323 
5324 	mutex_enter(&msp->ms_lock);
5325 	if (range_tree_is_empty(msp->ms_freeing) &&
5326 	    range_tree_is_empty(msp->ms_checkpointing)) {
5327 		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5328 	}
5329 
5330 	if (checkpoint) {
5331 		ASSERT(spa_has_checkpoint(spa));
5332 		range_tree_add(msp->ms_checkpointing, offset, asize);
5333 	} else {
5334 		range_tree_add(msp->ms_freeing, offset, asize);
5335 	}
5336 	mutex_exit(&msp->ms_lock);
5337 }
5338 
5339 /* ARGSUSED */
5340 void
5341 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5342     uint64_t size, void *arg)
5343 {
5344 	boolean_t *checkpoint = arg;
5345 
5346 	ASSERT3P(checkpoint, !=, NULL);
5347 
5348 	if (vd->vdev_ops->vdev_op_remap != NULL)
5349 		vdev_indirect_mark_obsolete(vd, offset, size);
5350 	else
5351 		metaslab_free_impl(vd, offset, size, *checkpoint);
5352 }
5353 
5354 static void
5355 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5356     boolean_t checkpoint)
5357 {
5358 	spa_t *spa = vd->vdev_spa;
5359 
5360 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5361 
5362 	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5363 		return;
5364 
5365 	if (spa->spa_vdev_removal != NULL &&
5366 	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5367 	    vdev_is_concrete(vd)) {
5368 		/*
5369 		 * Note: we check if the vdev is concrete because when
5370 		 * we complete the removal, we first change the vdev to be
5371 		 * an indirect vdev (in open context), and then (in syncing
5372 		 * context) clear spa_vdev_removal.
5373 		 */
5374 		free_from_removing_vdev(vd, offset, size);
5375 	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
5376 		vdev_indirect_mark_obsolete(vd, offset, size);
5377 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5378 		    metaslab_free_impl_cb, &checkpoint);
5379 	} else {
5380 		metaslab_free_concrete(vd, offset, size, checkpoint);
5381 	}
5382 }
5383 
5384 typedef struct remap_blkptr_cb_arg {
5385 	blkptr_t *rbca_bp;
5386 	spa_remap_cb_t rbca_cb;
5387 	vdev_t *rbca_remap_vd;
5388 	uint64_t rbca_remap_offset;
5389 	void *rbca_cb_arg;
5390 } remap_blkptr_cb_arg_t;
5391 
5392 static void
5393 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5394     uint64_t size, void *arg)
5395 {
5396 	remap_blkptr_cb_arg_t *rbca = arg;
5397 	blkptr_t *bp = rbca->rbca_bp;
5398 
5399 	/* We can not remap split blocks. */
5400 	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5401 		return;
5402 	ASSERT0(inner_offset);
5403 
5404 	if (rbca->rbca_cb != NULL) {
5405 		/*
5406 		 * At this point we know that we are not handling split
5407 		 * blocks and we invoke the callback on the previous
5408 		 * vdev which must be indirect.
5409 		 */
5410 		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5411 
5412 		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5413 		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5414 
5415 		/* set up remap_blkptr_cb_arg for the next call */
5416 		rbca->rbca_remap_vd = vd;
5417 		rbca->rbca_remap_offset = offset;
5418 	}
5419 
5420 	/*
5421 	 * The phys birth time is that of dva[0].  This ensures that we know
5422 	 * when each dva was written, so that resilver can determine which
5423 	 * blocks need to be scrubbed (i.e. those written during the time
5424 	 * the vdev was offline).  It also ensures that the key used in
5425 	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
5426 	 * we didn't change the phys_birth, a lookup in the ARC for a
5427 	 * remapped BP could find the data that was previously stored at
5428 	 * this vdev + offset.
5429 	 */
5430 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5431 	    DVA_GET_VDEV(&bp->blk_dva[0]));
5432 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5433 	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5434 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5435 
5436 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5437 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5438 }
5439 
5440 /*
5441  * If the block pointer contains any indirect DVAs, modify them to refer to
5442  * concrete DVAs.  Note that this will sometimes not be possible, leaving
5443  * the indirect DVA in place.  This happens if the indirect DVA spans multiple
5444  * segments in the mapping (i.e. it is a "split block").
5445  *
5446  * If the BP was remapped, calls the callback on the original dva (note the
5447  * callback can be called multiple times if the original indirect DVA refers
5448  * to another indirect DVA, etc).
5449  *
5450  * Returns TRUE if the BP was remapped.
5451  */
5452 boolean_t
5453 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5454 {
5455 	remap_blkptr_cb_arg_t rbca;
5456 
5457 	if (!zfs_remap_blkptr_enable)
5458 		return (B_FALSE);
5459 
5460 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5461 		return (B_FALSE);
5462 
5463 	/*
5464 	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5465 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5466 	 */
5467 	if (BP_GET_DEDUP(bp))
5468 		return (B_FALSE);
5469 
5470 	/*
5471 	 * Gang blocks can not be remapped, because
5472 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5473 	 * the BP used to read the gang block header (GBH) being the same
5474 	 * as the DVA[0] that we allocated for the GBH.
5475 	 */
5476 	if (BP_IS_GANG(bp))
5477 		return (B_FALSE);
5478 
5479 	/*
5480 	 * Embedded BP's have no DVA to remap.
5481 	 */
5482 	if (BP_GET_NDVAS(bp) < 1)
5483 		return (B_FALSE);
5484 
5485 	/*
5486 	 * Note: we only remap dva[0].  If we remapped other dvas, we
5487 	 * would no longer know what their phys birth txg is.
5488 	 */
5489 	dva_t *dva = &bp->blk_dva[0];
5490 
5491 	uint64_t offset = DVA_GET_OFFSET(dva);
5492 	uint64_t size = DVA_GET_ASIZE(dva);
5493 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5494 
5495 	if (vd->vdev_ops->vdev_op_remap == NULL)
5496 		return (B_FALSE);
5497 
5498 	rbca.rbca_bp = bp;
5499 	rbca.rbca_cb = callback;
5500 	rbca.rbca_remap_vd = vd;
5501 	rbca.rbca_remap_offset = offset;
5502 	rbca.rbca_cb_arg = arg;
5503 
5504 	/*
5505 	 * remap_blkptr_cb() will be called in order for each level of
5506 	 * indirection, until a concrete vdev is reached or a split block is
5507 	 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
5508 	 * the callback as we go from one indirect vdev to the next (either concrete
5509 	 * or indirect again) in that order.
5510 	 */
5511 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5512 
5513 	/* Check if the DVA wasn't remapped because it is a split block */
5514 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5515 		return (B_FALSE);
5516 
5517 	return (B_TRUE);
5518 }
5519 
5520 /*
5521  * Undo the allocation of a DVA which happened in the given transaction group.
5522  */
5523 void
5524 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5525 {
5526 	metaslab_t *msp;
5527 	vdev_t *vd;
5528 	uint64_t vdev = DVA_GET_VDEV(dva);
5529 	uint64_t offset = DVA_GET_OFFSET(dva);
5530 	uint64_t size = DVA_GET_ASIZE(dva);
5531 
5532 	ASSERT(DVA_IS_VALID(dva));
5533 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5534 
5535 	if (txg > spa_freeze_txg(spa))
5536 		return;
5537 
5538 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5539 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5540 		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5541 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5542 		    (u_longlong_t)size);
5543 		return;
5544 	}
5545 
5546 	ASSERT(!vd->vdev_removing);
5547 	ASSERT(vdev_is_concrete(vd));
5548 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5549 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5550 
5551 	if (DVA_GET_GANG(dva))
5552 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5553 
5554 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5555 
5556 	mutex_enter(&msp->ms_lock);
5557 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5558 	    offset, size);
5559 	msp->ms_allocating_total -= size;
5560 
5561 	VERIFY(!msp->ms_condensing);
5562 	VERIFY3U(offset, >=, msp->ms_start);
5563 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5564 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5565 	    msp->ms_size);
5566 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5567 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5568 	range_tree_add(msp->ms_allocatable, offset, size);
5569 	mutex_exit(&msp->ms_lock);
5570 }
5571 
5572 /*
5573  * Free the block represented by the given DVA.
5574  */
5575 void
5576 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5577 {
5578 	uint64_t vdev = DVA_GET_VDEV(dva);
5579 	uint64_t offset = DVA_GET_OFFSET(dva);
5580 	uint64_t size = DVA_GET_ASIZE(dva);
5581 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5582 
5583 	ASSERT(DVA_IS_VALID(dva));
5584 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5585 
5586 	if (DVA_GET_GANG(dva)) {
5587 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5588 	}
5589 
5590 	metaslab_free_impl(vd, offset, size, checkpoint);
5591 }
5592 
5593 /*
5594  * Reserve some allocation slots. The reservation system must be called
5595  * before we call into the allocator. If there aren't any available slots
5596  * then the I/O will be throttled until an I/O completes and its slots are
5597  * freed up. The function returns true if it was successful in placing
5598  * the reservation.
5599  */
5600 boolean_t
5601 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5602     zio_t *zio, int flags)
5603 {
5604 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5605 	uint64_t available_slots = 0;
5606 	boolean_t slot_reserved = B_FALSE;
5607 	uint64_t max = mca->mca_alloc_max_slots;
5608 
5609 	ASSERT(mc->mc_alloc_throttle_enabled);
5610 	mutex_enter(&mc->mc_lock);
5611 
5612 	uint64_t reserved_slots = zfs_refcount_count(&mca->mca_alloc_slots);
5613 	if (reserved_slots < max)
5614 		available_slots = max - reserved_slots;
5615 
5616 	if (slots <= available_slots || GANG_ALLOCATION(flags) ||
5617 	    flags & METASLAB_MUST_RESERVE) {
5618 		/*
5619 		 * We reserve the slots individually so that we can unreserve
5620 		 * them individually when an I/O completes.
5621 		 */
5622 		for (int d = 0; d < slots; d++)
5623 			zfs_refcount_add(&mca->mca_alloc_slots, zio);
5624 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5625 		slot_reserved = B_TRUE;
5626 	}
5627 
5628 	mutex_exit(&mc->mc_lock);
5629 	return (slot_reserved);
5630 }
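
/*
 * Minimal sketch of the slot accounting performed above for a single
 * allocator.  The helper and its parameters are hypothetical and the block
 * is kept under #if 0; it is not part of the throttle itself.
 */
#if 0
static boolean_t
metaslab_throttle_sketch(uint64_t max_slots, uint64_t reserved_slots,
    int slots, boolean_t bypass_limit)
{
	uint64_t available = 0;

	if (reserved_slots < max_slots)
		available = max_slots - reserved_slots;

	/*
	 * bypass_limit models gang and METASLAB_MUST_RESERVE allocations,
	 * which are allowed to exceed the configured maximum.
	 */
	return (((uint64_t)slots <= available || bypass_limit) ?
	    B_TRUE : B_FALSE);
}
#endif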
5631 
5632 void
5633 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5634     int allocator, zio_t *zio)
5635 {
5636 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5637 
5638 	ASSERT(mc->mc_alloc_throttle_enabled);
5639 	mutex_enter(&mc->mc_lock);
5640 	for (int d = 0; d < slots; d++)
5641 		zfs_refcount_remove(&mca->mca_alloc_slots, zio);
5642 	mutex_exit(&mc->mc_lock);
5643 }
5644 
5645 static int
5646 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5647     uint64_t txg)
5648 {
5649 	metaslab_t *msp;
5650 	spa_t *spa = vd->vdev_spa;
5651 	int error = 0;
5652 
5653 	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5654 		return (SET_ERROR(ENXIO));
5655 
5656 	ASSERT3P(vd->vdev_ms, !=, NULL);
5657 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5658 
5659 	mutex_enter(&msp->ms_lock);
5660 
5661 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5662 		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5663 		if (error == EBUSY) {
5664 			ASSERT(msp->ms_loaded);
5665 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5666 			error = 0;
5667 		}
5668 	}
5669 
5670 	if (error == 0 &&
5671 	    !range_tree_contains(msp->ms_allocatable, offset, size))
5672 		error = SET_ERROR(ENOENT);
5673 
5674 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
5675 		mutex_exit(&msp->ms_lock);
5676 		return (error);
5677 	}
5678 
5679 	VERIFY(!msp->ms_condensing);
5680 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5681 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5682 	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5683 	    msp->ms_size);
5684 	range_tree_remove(msp->ms_allocatable, offset, size);
5685 	range_tree_clear(msp->ms_trim, offset, size);
5686 
5687 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(8) */
5688 		metaslab_class_t *mc = msp->ms_group->mg_class;
5689 		multilist_sublist_t *mls =
5690 		    multilist_sublist_lock_obj(mc->mc_metaslab_txg_list, msp);
5691 		if (!multilist_link_active(&msp->ms_class_txg_node)) {
5692 			msp->ms_selected_txg = txg;
5693 			multilist_sublist_insert_head(mls, msp);
5694 		}
5695 		multilist_sublist_unlock(mls);
5696 
5697 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5698 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
5699 		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5700 		    offset, size);
5701 		msp->ms_allocating_total += size;
5702 	}
5703 
5704 	mutex_exit(&msp->ms_lock);
5705 
5706 	return (0);
5707 }
5708 
5709 typedef struct metaslab_claim_cb_arg_t {
5710 	uint64_t	mcca_txg;
5711 	int		mcca_error;
5712 } metaslab_claim_cb_arg_t;
5713 
5714 /* ARGSUSED */
5715 static void
5716 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5717     uint64_t size, void *arg)
5718 {
5719 	metaslab_claim_cb_arg_t *mcca_arg = arg;
5720 
5721 	if (mcca_arg->mcca_error == 0) {
5722 		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5723 		    size, mcca_arg->mcca_txg);
5724 	}
5725 }
5726 
5727 int
5728 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5729 {
5730 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5731 		metaslab_claim_cb_arg_t arg;
5732 
5733 		/*
5734 		 * Only zdb(8) can claim on indirect vdevs.  This is used
5735 		 * to detect leaks of mapped space (that are not accounted
5736 		 * for in the obsolete counts, spacemap, or bpobj).
5737 		 */
5738 		ASSERT(!spa_writeable(vd->vdev_spa));
5739 		arg.mcca_error = 0;
5740 		arg.mcca_txg = txg;
5741 
5742 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5743 		    metaslab_claim_impl_cb, &arg);
5744 
5745 		if (arg.mcca_error == 0) {
5746 			arg.mcca_error = metaslab_claim_concrete(vd,
5747 			    offset, size, txg);
5748 		}
5749 		return (arg.mcca_error);
5750 	} else {
5751 		return (metaslab_claim_concrete(vd, offset, size, txg));
5752 	}
5753 }
5754 
5755 /*
5756  * Intent log support: upon opening the pool after a crash, notify the SPA
5757  * of blocks that the intent log has allocated for immediate write, but
5758  * which are still considered free by the SPA because the last transaction
5759  * group didn't commit yet.
5760  */
5761 static int
5762 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5763 {
5764 	uint64_t vdev = DVA_GET_VDEV(dva);
5765 	uint64_t offset = DVA_GET_OFFSET(dva);
5766 	uint64_t size = DVA_GET_ASIZE(dva);
5767 	vdev_t *vd;
5768 
5769 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5770 		return (SET_ERROR(ENXIO));
5771 	}
5772 
5773 	ASSERT(DVA_IS_VALID(dva));
5774 
5775 	if (DVA_GET_GANG(dva))
5776 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
5777 
5778 	return (metaslab_claim_impl(vd, offset, size, txg));
5779 }
5780 
5781 int
5782 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5783     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5784     zio_alloc_list_t *zal, zio_t *zio, int allocator)
5785 {
5786 	dva_t *dva = bp->blk_dva;
5787 	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5788 	int error = 0;
5789 
5790 	ASSERT(bp->blk_birth == 0);
5791 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5792 
5793 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5794 
5795 	if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5796 		/* no vdevs in this class */
5797 		spa_config_exit(spa, SCL_ALLOC, FTAG);
5798 		return (SET_ERROR(ENOSPC));
5799 	}
5800 
5801 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5802 	ASSERT(BP_GET_NDVAS(bp) == 0);
5803 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5804 	ASSERT3P(zal, !=, NULL);
5805 
5806 	for (int d = 0; d < ndvas; d++) {
5807 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5808 		    txg, flags, zal, allocator);
5809 		if (error != 0) {
5810 			for (d--; d >= 0; d--) {
5811 				metaslab_unalloc_dva(spa, &dva[d], txg);
5812 				metaslab_group_alloc_decrement(spa,
5813 				    DVA_GET_VDEV(&dva[d]), zio, flags,
5814 				    allocator, B_FALSE);
5815 				bzero(&dva[d], sizeof (dva_t));
5816 			}
5817 			spa_config_exit(spa, SCL_ALLOC, FTAG);
5818 			return (error);
5819 		} else {
5820 			/*
5821 			 * Update the metaslab group's queue depth
5822 			 * based on the newly allocated dva.
5823 			 */
5824 			metaslab_group_alloc_increment(spa,
5825 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5826 		}
5827 	}
5828 	ASSERT(error == 0);
5829 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5830 
5831 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5832 
5833 	BP_SET_BIRTH(bp, txg, 0);
5834 
5835 	return (0);
5836 }
5837 
5838 void
5839 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5840 {
5841 	const dva_t *dva = bp->blk_dva;
5842 	int ndvas = BP_GET_NDVAS(bp);
5843 
5844 	ASSERT(!BP_IS_HOLE(bp));
5845 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5846 
5847 	/*
5848 	 * If we have a checkpoint for the pool we need to make sure that
5849 	 * the blocks that we free that are part of the checkpoint won't be
5850 	 * reused until the checkpoint is discarded or we revert to it.
5851 	 *
5852 	 * The checkpoint flag is passed down the metaslab_free code path
5853 	 * and is set whenever we want to add a block to the checkpoint's
5854 	 * accounting. That is, we "checkpoint" blocks that existed at the
5855 	 * time the checkpoint was created and are therefore referenced by
5856 	 * the checkpointed uberblock.
5857 	 *
5858 	 * Note that we don't checkpoint any blocks if the current
5859 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5860 	 * normally as they will be referenced by the checkpointed uberblock.
5861 	 */
5862 	boolean_t checkpoint = B_FALSE;
5863 	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5864 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5865 		/*
5866 		 * At this point, if the block is part of the checkpoint
5867 		 * there is no way it was created in the current txg.
5868 		 */
5869 		ASSERT(!now);
5870 		ASSERT3U(spa_syncing_txg(spa), ==, txg);
5871 		checkpoint = B_TRUE;
5872 	}
5873 
5874 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5875 
5876 	for (int d = 0; d < ndvas; d++) {
5877 		if (now) {
5878 			metaslab_unalloc_dva(spa, &dva[d], txg);
5879 		} else {
5880 			ASSERT3U(txg, ==, spa_syncing_txg(spa));
5881 			metaslab_free_dva(spa, &dva[d], checkpoint);
5882 		}
5883 	}
5884 
5885 	spa_config_exit(spa, SCL_FREE, FTAG);
5886 }
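
/*
 * Small sketch of the checkpoint decision made in metaslab_free() above;
 * illustrative only (hypothetical helper name), kept under #if 0.
 */
#if 0
static boolean_t
metaslab_free_to_checkpoint_sketch(uint64_t blk_birth, uint64_t checkpoint_txg,
    uint64_t syncing_txg)
{
	/*
	 * Only blocks born at or before the checkpoint txg are added to the
	 * checkpoint accounting, and only once the pool is syncing past
	 * that txg.
	 */
	return ((blk_birth <= checkpoint_txg &&
	    syncing_txg > checkpoint_txg) ? B_TRUE : B_FALSE);
}
#endif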
5887 
5888 int
5889 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5890 {
5891 	const dva_t *dva = bp->blk_dva;
5892 	int ndvas = BP_GET_NDVAS(bp);
5893 	int error = 0;
5894 
5895 	ASSERT(!BP_IS_HOLE(bp));
5896 
5897 	if (txg != 0) {
5898 		/*
5899 		 * First do a dry run to make sure all DVAs are claimable,
5900 		 * so we don't have to unwind from partial failures below.
5901 		 */
5902 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
5903 			return (error);
5904 	}
5905 
5906 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5907 
5908 	for (int d = 0; d < ndvas; d++) {
5909 		error = metaslab_claim_dva(spa, &dva[d], txg);
5910 		if (error != 0)
5911 			break;
5912 	}
5913 
5914 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5915 
5916 	ASSERT(error == 0 || txg == 0);
5917 
5918 	return (error);
5919 }
5920 
5921 void
5922 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
5923 {
5924 	const dva_t *dva = bp->blk_dva;
5925 	int ndvas = BP_GET_NDVAS(bp);
5926 	uint64_t psize = BP_GET_PSIZE(bp);
5927 	int d;
5928 	vdev_t *vd;
5929 
5930 	ASSERT(!BP_IS_HOLE(bp));
5931 	ASSERT(!BP_IS_EMBEDDED(bp));
5932 	ASSERT(psize > 0);
5933 
5934 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5935 
5936 	for (d = 0; d < ndvas; d++) {
5937 		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5938 			continue;
5939 		atomic_add_64(&vd->vdev_pending_fastwrite, psize);
5940 	}
5941 
5942 	spa_config_exit(spa, SCL_VDEV, FTAG);
5943 }
5944 
5945 void
5946 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
5947 {
5948 	const dva_t *dva = bp->blk_dva;
5949 	int ndvas = BP_GET_NDVAS(bp);
5950 	uint64_t psize = BP_GET_PSIZE(bp);
5951 	int d;
5952 	vdev_t *vd;
5953 
5954 	ASSERT(!BP_IS_HOLE(bp));
5955 	ASSERT(!BP_IS_EMBEDDED(bp));
5956 	ASSERT(psize > 0);
5957 
5958 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
5959 
5960 	for (d = 0; d < ndvas; d++) {
5961 		if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
5962 			continue;
5963 		ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
5964 		atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
5965 	}
5966 
5967 	spa_config_exit(spa, SCL_VDEV, FTAG);
5968 }
5969 
5970 /* ARGSUSED */
5971 static void
5972 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5973     uint64_t size, void *arg)
5974 {
5975 	if (vd->vdev_ops == &vdev_indirect_ops)
5976 		return;
5977 
5978 	metaslab_check_free_impl(vd, offset, size);
5979 }
5980 
5981 static void
5982 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5983 {
5984 	metaslab_t *msp;
5985 	spa_t *spa __maybe_unused = vd->vdev_spa;
5986 
5987 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5988 		return;
5989 
5990 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5991 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5992 		    metaslab_check_free_impl_cb, NULL);
5993 		return;
5994 	}
5995 
5996 	ASSERT(vdev_is_concrete(vd));
5997 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5998 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5999 
6000 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6001 
6002 	mutex_enter(&msp->ms_lock);
6003 	if (msp->ms_loaded) {
6004 		range_tree_verify_not_present(msp->ms_allocatable,
6005 		    offset, size);
6006 	}
6007 
6008 	/*
6009 	 * Check all segments that currently exist in the freeing pipeline.
6010 	 *
6011 	 * It would intuitively make sense to also check the current allocating
6012 	 * tree since metaslab_unalloc_dva() exists for extents that are
6013 	 * allocated and freed in the same sync pass within the same txg.
6014 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6015 	 * segment but then we free part of it within the same txg
6016 	 * [see zil_sync()]. Thus, we don't call range_tree_verify_not_present()
6017 	 * on the current allocating tree.
6018 	 */
6019 	range_tree_verify_not_present(msp->ms_freeing, offset, size);
6020 	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6021 	range_tree_verify_not_present(msp->ms_freed, offset, size);
6022 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
6023 		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
6024 	range_tree_verify_not_present(msp->ms_trim, offset, size);
6025 	mutex_exit(&msp->ms_lock);
6026 }
6027 
6028 void
6029 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6030 {
6031 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6032 		return;
6033 
6034 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6035 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6036 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6037 		vdev_t *vd = vdev_lookup_top(spa, vdev);
6038 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6039 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6040 
6041 		if (DVA_GET_GANG(&bp->blk_dva[i]))
6042 			size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
6043 
6044 		ASSERT3P(vd, !=, NULL);
6045 
6046 		metaslab_check_free_impl(vd, offset, size);
6047 	}
6048 	spa_config_exit(spa, SCL_VDEV, FTAG);
6049 }
6050 
6051 static void
6052 metaslab_group_disable_wait(metaslab_group_t *mg)
6053 {
6054 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6055 	while (mg->mg_disabled_updating) {
6056 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6057 	}
6058 }
6059 
6060 static void
6061 metaslab_group_disabled_increment(metaslab_group_t *mg)
6062 {
6063 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6064 	ASSERT(mg->mg_disabled_updating);
6065 
6066 	while (mg->mg_ms_disabled >= max_disabled_ms) {
6067 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6068 	}
6069 	mg->mg_ms_disabled++;
6070 	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6071 }
6072 
6073 /*
6074  * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6075  * We must also track how many metaslabs are currently disabled within a
6076  * metaslab group and limit them to prevent allocation failures from
6077  * occurring because all metaslabs are disabled.
6078  */
6079 void
6080 metaslab_disable(metaslab_t *msp)
6081 {
6082 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6083 	metaslab_group_t *mg = msp->ms_group;
6084 
6085 	mutex_enter(&mg->mg_ms_disabled_lock);
6086 
6087 	/*
6088 	 * To keep an accurate count of how many threads have disabled
6089 	 * a specific metaslab group, we only allow one thread to mark
6090 	 * the metaslab group at a time. This ensures that the value of
6091 	 * ms_disabled will be accurate when we decide to mark a metaslab
6092 	 * group as disabled. To do this we force all other threads
6093 	 * to wait until the metaslab group's mg_disabled_updating flag is no
6094 	 * longer set.
6095 	 */
6096 	metaslab_group_disable_wait(mg);
6097 	mg->mg_disabled_updating = B_TRUE;
6098 	if (msp->ms_disabled == 0) {
6099 		metaslab_group_disabled_increment(mg);
6100 	}
6101 	mutex_enter(&msp->ms_lock);
6102 	msp->ms_disabled++;
6103 	mutex_exit(&msp->ms_lock);
6104 
6105 	mg->mg_disabled_updating = B_FALSE;
6106 	cv_broadcast(&mg->mg_ms_disabled_cv);
6107 	mutex_exit(&mg->mg_ms_disabled_lock);
6108 }
6109 
6110 void
6111 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6112 {
6113 	metaslab_group_t *mg = msp->ms_group;
6114 	spa_t *spa = mg->mg_vd->vdev_spa;
6115 
6116 	/*
6117 	 * Wait for the outstanding IO to be synced to prevent newly
6118 	 * allocated blocks from being overwritten.  This is used by
6119 	 * initialize and TRIM which are modifying unallocated space.
6120 	 */
6121 	if (sync)
6122 		txg_wait_synced(spa_get_dsl(spa), 0);
6123 
6124 	mutex_enter(&mg->mg_ms_disabled_lock);
6125 	mutex_enter(&msp->ms_lock);
6126 	if (--msp->ms_disabled == 0) {
6127 		mg->mg_ms_disabled--;
6128 		cv_broadcast(&mg->mg_ms_disabled_cv);
6129 		if (unload)
6130 			metaslab_unload(msp);
6131 	}
6132 	mutex_exit(&msp->ms_lock);
6133 	mutex_exit(&mg->mg_ms_disabled_lock);
6134 }
6135 
6136 static void
6137 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6138 {
6139 	vdev_t *vd = ms->ms_group->mg_vd;
6140 	spa_t *spa = vd->vdev_spa;
6141 	objset_t *mos = spa_meta_objset(spa);
6142 
6143 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6144 
6145 	metaslab_unflushed_phys_t entry = {
6146 		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
6147 	};
6148 	uint64_t entry_size = sizeof (entry);
6149 	uint64_t entry_offset = ms->ms_id * entry_size;
6150 
6151 	uint64_t object = 0;
6152 	int err = zap_lookup(mos, vd->vdev_top_zap,
6153 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6154 	    &object);
6155 	if (err == ENOENT) {
6156 		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6157 		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6158 		VERIFY0(zap_add(mos, vd->vdev_top_zap,
6159 		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6160 		    &object, tx));
6161 	} else {
6162 		VERIFY0(err);
6163 	}
6164 
6165 	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6166 	    &entry, tx);
6167 }
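
/*
 * metaslab_update_ondisk_flush_data() above stores one
 * metaslab_unflushed_phys_t per metaslab in a flat MOS object indexed by
 * ms_id.  The sketch below spells out that layout; it is illustrative only
 * (hypothetical helper name) and kept under #if 0.
 */
#if 0
static uint64_t
metaslab_unflushed_phys_offset_sketch(uint64_t ms_id)
{
	/* Entry i starts at byte offset i * sizeof (metaslab_unflushed_phys_t). */
	return (ms_id * sizeof (metaslab_unflushed_phys_t));
}
#endif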
6168 
6169 void
6170 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6171 {
6172 	spa_t *spa = ms->ms_group->mg_vd->vdev_spa;
6173 
6174 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
6175 		return;
6176 
6177 	ms->ms_unflushed_txg = txg;
6178 	metaslab_update_ondisk_flush_data(ms, tx);
6179 }
6180 
6181 uint64_t
6182 metaslab_unflushed_txg(metaslab_t *ms)
6183 {
6184 	return (ms->ms_unflushed_txg);
6185 }
6186 
6187 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, ULONG, ZMOD_RW,
6188 	"Allocation granularity (a.k.a. stripe size)");
6189 
6190 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6191 	"Load all metaslabs when pool is first opened");
6192 
6193 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6194 	"Prevent metaslabs from being unloaded");
6195 
6196 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6197 	"Preload potential metaslabs during reassessment");
6198 
6199 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, INT, ZMOD_RW,
6200 	"Delay in txgs after metaslab was last used before unloading");
6201 
6202 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, INT, ZMOD_RW,
6203 	"Delay in milliseconds after metaslab was last used before unloading");
6204 
6205 /* BEGIN CSTYLED */
6206 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, INT, ZMOD_RW,
6207 	"Percentage of metaslab group size that should be free to make it "
6208 	"eligible for allocation");
6209 
6210 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, INT, ZMOD_RW,
6211 	"Percentage of metaslab group size that should be considered eligible "
6212 	"for allocations unless all metaslab groups within the metaslab class "
6213 	"have also crossed this threshold");
6214 
6215 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, INT,
6216 	 ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6217 
6218 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, ZMOD_RW,
6219 	"Use the fragmentation metric to prefer less fragmented metaslabs");
6220 /* END CSTYLED */
6221 
6222 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6223 	"Prefer metaslabs with lower LBAs");
6224 
6225 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6226 	"Enable metaslab group biasing");
6227 
6228 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6229 	ZMOD_RW, "Enable segment-based metaslab selection");
6230 
6231 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6232 	"Segment-based metaslab selection maximum buckets before switching");
6233 
6234 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, ULONG, ZMOD_RW,
6235 	"Blocks larger than this size are forced to be gang blocks");
6236 
6237 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, INT, ZMOD_RW,
6238 	"Max distance (bytes) to search forward before using size tree");
6239 
6240 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6241 	"When looking in size tree, use largest segment instead of exact fit");
6242 
6243 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, ULONG,
6244 	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6245 
6246 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, INT, ZMOD_RW,
6247 	"Percentage of memory that can be used to store metaslab range trees");
6248 
6249 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6250 	ZMOD_RW, "Try hard to allocate before ganging");
6251 
6252 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, INT, ZMOD_RW,
6253 	"Normally only consider this many of the best metaslabs in each vdev");
6254