xref: /freebsd/sys/contrib/openzfs/module/zfs/metaslab.c (revision b670c9bafc0e31c7609969bf374b2e80bdc00211)
1 // SPDX-License-Identifier: CDDL-1.0
2 /*
3  * CDDL HEADER START
4  *
5  * The contents of this file are subject to the terms of the
6  * Common Development and Distribution License (the "License").
7  * You may not use this file except in compliance with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or https://opensource.org/licenses/CDDL-1.0.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
27  * Copyright (c) 2017, Intel Corporation.
28  */
29 
30 #include <sys/zfs_context.h>
31 #include <sys/brt.h>
32 #include <sys/dmu.h>
33 #include <sys/dmu_tx.h>
34 #include <sys/space_map.h>
35 #include <sys/metaslab_impl.h>
36 #include <sys/vdev_impl.h>
37 #include <sys/vdev_draid.h>
38 #include <sys/zio.h>
39 #include <sys/spa_impl.h>
40 #include <sys/zfeature.h>
41 #include <sys/vdev_indirect_mapping.h>
42 #include <sys/zap.h>
43 #include <sys/btree.h>
44 
45 #define	GANG_ALLOCATION(flags) \
46 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
47 
48 /*
49  * Metaslab group's per child vdev granularity, in bytes.  This is roughly
50  * similar to what would be referred to as the "stripe size" in traditional
51  * RAID arrays. In normal operation, we will try to write this amount of
52  * data to each disk before moving on to the next top-level vdev.
53  */
54 static uint64_t metaslab_aliquot = 2 * 1024 * 1024;
55 
56 /*
57  * For testing, make some blocks above a certain size be gang blocks.
58  */
59 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
60 
61 /*
62  * Of blocks of size >= metaslab_force_ganging, gang this percentage of them.
63  */
64 uint_t metaslab_force_ganging_pct = 3;
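
/*
 * A rough sketch of how the two tunables above are intended to interact;
 * the exact check lives in the allocation path and may differ in detail:
 *
 *	if (psize >= metaslab_force_ganging &&
 *	    random_in_range(100) < metaslab_force_ganging_pct)
 *		force a gang allocation;
 *
 * With the defaults (SPA_MAXBLOCKSIZE + 1 and 3%) nothing is ever forced
 * to gang; lowering metaslab_force_ganging makes roughly 3% of larger
 * writes gang, which is useful for exercising the gang-block code paths.
 */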
65 
66 /*
67  * In pools where the log space map feature is not enabled we touch
68  * multiple metaslabs (and their respective space maps) with each
69  * transaction group. Thus, we benefit from having a small space map
70  * block size since it allows us to issue more I/O operations scattered
71  * around the disk. So a sane default for the space map block size
72  * is 8~16K.
73  */
74 int zfs_metaslab_sm_blksz_no_log = (1 << 14);
75 
76 /*
77  * When the log space map feature is enabled, we accumulate a lot of
78  * changes per metaslab that are flushed once in a while so we benefit
79  * from a bigger block size like 128K for the metaslab space maps.
80  */
81 int zfs_metaslab_sm_blksz_with_log = (1 << 17);
82 
83 /*
84  * The in-core space map representation is more compact than its on-disk form.
85  * The zfs_condense_pct determines how much more compact the in-core
86  * space map representation must be before we compact it on-disk.
87  * Values should be greater than or equal to 100.
88  */
89 uint_t zfs_condense_pct = 200;
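
/*
 * For example, with the default of 200 a space map is only worth
 * condensing once its on-disk representation is at least twice the size
 * of the optimal in-core representation, i.e. roughly:
 *
 *	condense when  on_disk_size >= in_core_optimal_size * 200 / 100
 *
 * (an illustrative reading; metaslab_should_condense() applies the exact
 * check, together with the block threshold described below).
 */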
90 
91 /*
92  * Condensing a metaslab is not guaranteed to actually reduce the amount of
93  * space used on disk. In particular, a space map uses data in increments of
94  * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
95  * same number of blocks after condensing. Since the goal of condensing is to
96  * reduce the number of IOPs required to read the space map, we only want to
97  * condense when we can be sure we will reduce the number of blocks used by the
98  * space map. Unfortunately, we cannot precisely compute whether or not this is
99  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
100  * we apply the following heuristic: do not condense a spacemap unless the
101  * uncondensed size consumes more than zfs_metaslab_condense_block_threshold
102  * blocks.
103  */
104 static const int zfs_metaslab_condense_block_threshold = 4;
105 
106 /*
107  * The zfs_mg_noalloc_threshold defines which metaslab groups should
108  * be eligible for allocation. The value is defined as a percentage of
109  * free space. Metaslab groups that have more free space than
110  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
111  * a metaslab group's free space is less than or equal to the
112  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
113  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
114  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
115  * groups are allowed to accept allocations. Gang blocks are always
116  * eligible to allocate on any metaslab group. The default value of 0 means
117  * no metaslab group will be excluded based on this criterion.
118  */
119 static uint_t zfs_mg_noalloc_threshold = 0;
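
/*
 * For example, if zfs_mg_noalloc_threshold were set to 30 (a hypothetical
 * value; the default of 0 disables this behavior), a metaslab group with
 * only 25% free space would be avoided for normal allocations as long as
 * some other group in the pool still had more than 30% free; once every
 * group dropped to 30% or below, all groups would become eligible again.
 */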
120 
121 /*
122  * Metaslab groups are considered eligible for allocations if their
123  * fragmentation metric (measured as a percentage) is less than or
124  * equal to zfs_mg_fragmentation_threshold. If a metaslab group
125  * exceeds this threshold then it will be skipped unless all metaslab
126  * groups within the metaslab class have also crossed this threshold.
127  *
128  * This tunable was introduced to avoid edge cases where we continue
129  * allocating from very fragmented disks in our pool while other, less
130  * fragmented disks exist. On the other hand, if all disks in the
131  * pool are uniformly approaching the threshold, the threshold can
132  * be a speed bump in performance, where we keep switching the disks
133  * that we allocate from (e.g. allocating some segments from disk A
134  * pushes it past the threshold, while freeing segments from disk
135  * B brings its fragmentation back below the threshold).
136  *
137  * Empirically, we've seen that our vdev selection for allocations is
138  * good enough that fragmentation increases uniformly across all vdevs
139  * the majority of the time. Thus we set the threshold percentage high
140  * enough to avoid hitting the speed bump on pools that are being pushed
141  * to the edge.
142  */
143 static uint_t zfs_mg_fragmentation_threshold = 95;
144 
145 /*
146  * Allow metaslabs to keep their active state as long as their fragmentation
147  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
148  * active metaslab that exceeds this threshold will no longer keep its active
149  * status allowing better metaslabs to be selected.
150  */
151 static uint_t zfs_metaslab_fragmentation_threshold = 77;
152 
153 /*
154  * When set will load all metaslabs when pool is first opened.
155  */
156 int metaslab_debug_load = B_FALSE;
157 
158 /*
159  * When set will prevent metaslabs from being unloaded.
160  */
161 static int metaslab_debug_unload = B_FALSE;
162 
163 /*
164  * Minimum size which forces the dynamic allocator to change
165  * its allocation strategy.  Once the space map cannot satisfy
166  * an allocation of this size then it switches to using a more
167  * aggressive strategy (i.e. search by size rather than offset).
168  */
169 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
170 
171 /*
172  * The minimum free space, in percent, which must be available
173  * in a space map to continue allocations in a first-fit fashion.
174  * Once the space map's free space drops below this level we dynamically
175  * switch to using best-fit allocations.
176  */
177 uint_t metaslab_df_free_pct = 4;
178 
179 /*
180  * Maximum distance to search forward from the last offset. Without this
181  * limit, fragmented pools can see >100,000 iterations and
182  * metaslab_block_picker() becomes the performance limiting factor on
183  * high-performance storage.
184  *
185  * With the default setting of 16MB, we typically see less than 500
186  * iterations, even with very fragmented, ashift=9 pools. The maximum number
187  * of iterations possible is:
188  *     metaslab_df_max_search / (2 * (1<<ashift))
189  * With the default setting of 16MB this is 16*1024 (with ashift=9) or
190  * 2048 (with ashift=12).
191  */
192 static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
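
/*
 * Working the bound above with the default of 16MB:
 *
 *	ashift=9:   16MB / (2 * 512B)  = 16384 iterations at most
 *	ashift=12:  16MB / (2 * 4096B) =  2048 iterations at most
 */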
193 
194 /*
195  * Forces the metaslab_block_picker function to search forward through at
196  * least this many segments before giving up on finding a segment that the
197  * allocation will fit into.
198  */
199 static const uint32_t metaslab_min_search_count = 100;
200 
201 /*
202  * If we are not searching forward (due to metaslab_df_max_search,
203  * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
204  * controls what segment is used.  If it is set, we will use the largest free
205  * segment.  If it is not set, we will use a segment of exactly the requested
206  * size (or larger).
207  */
208 static int metaslab_df_use_largest_segment = B_FALSE;
209 
210 /*
211  * These tunables control how long a metaslab will remain loaded after the
212  * last allocation from it.  A metaslab can't be unloaded until at least
213  * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
214  * have elapsed.  However, zfs_metaslab_mem_limit may cause it to be
215  * unloaded sooner.  These settings are intended to be generous -- to keep
216  * metaslabs loaded for a long time, reducing the rate of metaslab loading.
217  */
218 static uint_t metaslab_unload_delay = 32;
219 static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
220 
221 /*
222  * Max number of metaslabs per group to preload.
223  */
224 uint_t metaslab_preload_limit = 10;
225 
226 /*
227  * Enable/disable preloading of metaslabs.
228  */
229 static int metaslab_preload_enabled = B_TRUE;
230 
231 /*
232  * Enable/disable fragmentation weighting on metaslabs.
233  */
234 static int metaslab_fragmentation_factor_enabled = B_TRUE;
235 
236 /*
237  * Enable/disable lba weighting (i.e. outer tracks are given preference).
238  */
239 static int metaslab_lba_weighting_enabled = B_TRUE;
240 
241 /*
242  * Enable/disable space-based metaslab group biasing.
243  */
244 static int metaslab_bias_enabled = B_TRUE;
245 
246 /*
247  * Control performance-based metaslab group biasing.
248  */
249 static int metaslab_perf_bias = 1;
250 
251 /*
252  * Enable/disable remapping of indirect DVAs to their concrete vdevs.
253  */
254 static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
255 
256 /*
257  * Enable/disable segment-based metaslab selection.
258  */
259 static int zfs_metaslab_segment_weight_enabled = B_TRUE;
260 
261 /*
262  * When using segment-based metaslab selection, we will continue
263  * allocating from the active metaslab until we have exhausted
264  * zfs_metaslab_switch_threshold of its buckets.
265  */
266 static int zfs_metaslab_switch_threshold = 2;
267 
268 /*
269  * Internal switch to enable/disable the metaslab allocation tracing
270  * facility.
271  */
272 static const boolean_t metaslab_trace_enabled = B_FALSE;
273 
274 /*
275  * Maximum entries that the metaslab allocation tracing facility will keep
276  * in a given list when running in non-debug mode. We limit the number
277  * of entries in non-debug mode to prevent us from using up too much memory.
278  * The limit should be sufficiently large that we don't expect any allocation
279  * to ever exceed this value. In debug mode, the system will panic if this
280  * limit is ever reached, allowing for further investigation.
281  */
282 static const uint64_t metaslab_trace_max_entries = 5000;
283 
284 /*
285  * Maximum number of metaslabs per group that can be disabled
286  * simultaneously.
287  */
288 static const int max_disabled_ms = 3;
289 
290 /*
291  * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
292  * To avoid 64-bit overflow, don't set above UINT32_MAX.
293  */
294 static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
295 
296 /*
297  * Maximum percentage of memory to use for storing loaded metaslabs. If loading
298  * a metaslab would take it over this percentage, the oldest selected metaslab
299  * is automatically unloaded.
300  */
301 static uint_t zfs_metaslab_mem_limit = 25;
302 
303 /*
304  * Force the per-metaslab range trees to use 64-bit integers to store
305  * segments. Used for debugging purposes.
306  */
307 static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
308 
309 /*
310  * By default we only store segments over a certain size in the size-sorted
311  * metaslab trees (ms_allocatable_by_size and
312  * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
313  * improves load and unload times at the cost of causing us to use slightly
314  * larger segments than we would otherwise in some cases.
315  */
316 static const uint32_t metaslab_by_size_min_shift = 14;
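
/*
 * With the default shift of 14, only segments of at least 1 << 14 = 16K
 * are inserted into the size-sorted trees (see metaslab_rt_add() below);
 * smaller segments remain visible only through the offset-sorted trees.
 */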
317 
318 /*
319  * If not set, we will first try normal allocation.  If that fails then
320  * we will do a gang allocation.  If that fails then we will do a "try hard"
321  * gang allocation.  If that fails then we will have a multi-layer gang
322  * block.
323  *
324  * If set, we will first try normal allocation.  If that fails then
325  * we will do a "try hard" allocation.  If that fails we will do a gang
326  * allocation.  If that fails we will do a "try hard" gang allocation.  If
327  * that fails then we will have a multi-layer gang block.
328  */
329 static int zfs_metaslab_try_hard_before_gang = B_FALSE;
330 
331 /*
332  * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
333  * metaslabs.  This improves performance, especially when there are many
334  * metaslabs per vdev and the allocation can't actually be satisfied (so we
335  * would otherwise iterate all the metaslabs).  If there is a metaslab with a
336  * worse weight but it can actually satisfy the allocation, we won't find it
337  * until trying hard.  This may happen if the worse metaslab is not loaded
338  * (and the true weight is better than we have calculated), or due to weight
339  * bucketization.  E.g. we are looking for a 60K segment, and the best
340  * metaslabs all have free segments in the 32-63K bucket, but the best
341  * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
342  * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
343  * bucket, and therefore a lower weight).
344  */
345 static uint_t zfs_metaslab_find_max_tries = 100;
346 
347 static uint64_t metaslab_weight(metaslab_t *, boolean_t);
348 static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
349 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
350 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
351 
352 static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
353 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
354 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
355 static unsigned int metaslab_idx_func(multilist_t *, void *);
356 static void metaslab_evict(metaslab_t *, uint64_t);
357 static void metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
358     void *arg);
359 kmem_cache_t *metaslab_alloc_trace_cache;
360 
361 typedef struct metaslab_stats {
362 	kstat_named_t metaslabstat_trace_over_limit;
363 	kstat_named_t metaslabstat_reload_tree;
364 	kstat_named_t metaslabstat_too_many_tries;
365 	kstat_named_t metaslabstat_try_hard;
366 } metaslab_stats_t;
367 
368 static metaslab_stats_t metaslab_stats = {
369 	{ "trace_over_limit",		KSTAT_DATA_UINT64 },
370 	{ "reload_tree",		KSTAT_DATA_UINT64 },
371 	{ "too_many_tries",		KSTAT_DATA_UINT64 },
372 	{ "try_hard",			KSTAT_DATA_UINT64 },
373 };
374 
375 #define	METASLABSTAT_BUMP(stat) \
376 	atomic_inc_64(&metaslab_stats.stat.value.ui64);
377 
378 
379 static kstat_t *metaslab_ksp;
380 
381 void
382 metaslab_stat_init(void)
383 {
384 	ASSERT(metaslab_alloc_trace_cache == NULL);
385 	metaslab_alloc_trace_cache = kmem_cache_create(
386 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
387 	    0, NULL, NULL, NULL, NULL, NULL, 0);
388 	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
389 	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
390 	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
391 	if (metaslab_ksp != NULL) {
392 		metaslab_ksp->ks_data = &metaslab_stats;
393 		kstat_install(metaslab_ksp);
394 	}
395 }
396 
397 void
398 metaslab_stat_fini(void)
399 {
400 	if (metaslab_ksp != NULL) {
401 		kstat_delete(metaslab_ksp);
402 		metaslab_ksp = NULL;
403 	}
404 
405 	kmem_cache_destroy(metaslab_alloc_trace_cache);
406 	metaslab_alloc_trace_cache = NULL;
407 }
408 
409 /*
410  * ==========================================================================
411  * Metaslab classes
412  * ==========================================================================
413  */
414 metaslab_class_t *
415 metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops, boolean_t is_log)
416 {
417 	metaslab_class_t *mc;
418 
419 	mc = kmem_zalloc(offsetof(metaslab_class_t,
420 	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
421 
422 	mc->mc_spa = spa;
423 	mc->mc_ops = ops;
424 	mc->mc_is_log = is_log;
425 	mc->mc_alloc_io_size = SPA_OLD_MAXBLOCKSIZE;
426 	mc->mc_alloc_max = UINT64_MAX;
427 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
428 	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
429 	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
430 	for (int i = 0; i < spa->spa_alloc_count; i++) {
431 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
432 		mutex_init(&mca->mca_lock, NULL, MUTEX_DEFAULT, NULL);
433 		avl_create(&mca->mca_tree, zio_bookmark_compare,
434 		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
435 		mca->mca_rotor = NULL;
436 		mca->mca_reserved = 0;
437 	}
438 
439 	return (mc);
440 }
441 
442 void
443 metaslab_class_destroy(metaslab_class_t *mc)
444 {
445 	spa_t *spa = mc->mc_spa;
446 
447 	ASSERT(mc->mc_alloc == 0);
448 	ASSERT(mc->mc_deferred == 0);
449 	ASSERT(mc->mc_space == 0);
450 	ASSERT(mc->mc_dspace == 0);
451 
452 	for (int i = 0; i < spa->spa_alloc_count; i++) {
453 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
454 		avl_destroy(&mca->mca_tree);
455 		mutex_destroy(&mca->mca_lock);
456 		ASSERT(mca->mca_rotor == NULL);
457 		ASSERT0(mca->mca_reserved);
458 	}
459 	mutex_destroy(&mc->mc_lock);
460 	multilist_destroy(&mc->mc_metaslab_txg_list);
461 	kmem_free(mc, offsetof(metaslab_class_t,
462 	    mc_allocator[spa->spa_alloc_count]));
463 }
464 
465 void
466 metaslab_class_validate(metaslab_class_t *mc)
467 {
468 #ifdef ZFS_DEBUG
469 	spa_t *spa = mc->mc_spa;
470 
471 	/*
472 	 * Must hold one of the spa_config locks.
473 	 */
474 	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) ||
475 	    spa_config_held(spa, SCL_ALL, RW_WRITER));
476 
477 	for (int i = 0; i < spa->spa_alloc_count; i++) {
478 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
479 		metaslab_group_t *mg, *rotor;
480 
481 		ASSERT0(avl_numnodes(&mca->mca_tree));
482 		ASSERT0(mca->mca_reserved);
483 
484 		if ((mg = rotor = mca->mca_rotor) == NULL)
485 			continue;
486 		do {
487 			metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
488 			vdev_t *vd = mg->mg_vd;
489 
490 			ASSERT3P(vd->vdev_top, ==, vd);
491 			ASSERT(vd->vdev_mg == mg || vd->vdev_log_mg == mg);
492 			ASSERT3P(mg->mg_class, ==, mc);
493 			ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
494 			ASSERT0(zfs_refcount_count(&mga->mga_queue_depth));
495 		} while ((mg = mg->mg_next) != rotor);
496 	}
497 #endif
498 }
499 
500 /*
501  * For each metaslab group in a class pre-calculate allocation quota and
502  * target queue depth to balance their space usage and write performance.
503  * Based on those, pre-calculate the class allocation throttle threshold for
504  * optimal saturation.  onsync is true once per TXG to enable/disable
505  * allocation throttling and update the moving average of maximum I/O size.
506  */
507 void
508 metaslab_class_balance(metaslab_class_t *mc, boolean_t onsync)
509 {
510 	metaslab_group_t *mg, *first;
511 
512 	/*
513 	 * Must hold one of the spa_config locks.
514 	 */
515 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
516 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
517 
518 	if (onsync)
519 		metaslab_class_validate(mc);
520 
521 	if (mc->mc_groups == 0) {
522 		if (onsync)
523 			mc->mc_alloc_throttle_enabled = B_FALSE;
524 		mc->mc_alloc_max = UINT64_MAX;
525 		return;
526 	}
527 
528 	if (onsync) {
529 		/*
530 		 * Moving average of maximum allocation size, in absence of
531 		 * large allocations shrinking to 1/8 of metaslab_aliquot.
532 		 */
533 		mc->mc_alloc_io_size = (3 * mc->mc_alloc_io_size +
534 		    metaslab_aliquot / 8) / 4;
535 		mc->mc_alloc_throttle_enabled = mc->mc_is_log ? 0 :
536 		    zio_dva_throttle_enabled;
537 	}
538 
539 	mg = first = mc->mc_allocator[0].mca_rotor;
540 	uint64_t children = 0;
541 	do {
542 		children += vdev_get_ndisks(mg->mg_vd) -
543 		    vdev_get_nparity(mg->mg_vd);
544 	} while ((mg = mg->mg_next) != first);
545 
546 	uint64_t sum_aliquot = 0;
547 	do {
548 		vdev_stat_t *vs = &mg->mg_vd->vdev_stat;
549 		uint_t ratio;
550 
551 		/*
552 		 * Scale allocations per iteration with average number of
553 		 * children.  Wider vdevs need more sequential allocations
554 		 * to keep decent per-child I/O size.
555 		 */
556 		uint64_t mg_aliquot = MAX(metaslab_aliquot * children /
557 		    mc->mc_groups, mc->mc_alloc_io_size * 4);
558 
559 		/*
560 		 * Scale allocations per iteration with the vdev capacity,
561 		 * relative to average.  Bigger vdevs should get more to
562 		 * fill up at the same time as smaller ones.
563 		 */
564 		if (mc->mc_space > 0 && vs->vs_space > 0) {
565 			ratio = vs->vs_space / (mc->mc_space / (mc->mc_groups *
566 			    256) + 1);
567 			mg_aliquot = mg_aliquot * ratio / 256;
568 		}
569 
570 		/*
571 		 * Scale allocations per iteration with the vdev's free space
572 		 * fraction, relative to average. Despite the above, vdevs' free
573 		 * space fractions may get imbalanced, for example due to new
574 		 * vdev addition or different performance.  We want free space
575 		 * fractions to be similar to postpone fragmentation.
576 		 *
577 		 * At the same time we don't want to throttle vdevs that still have
578 		 * plenty of free space and appear faster than others, even
579 		 * if that causes a temporary imbalance.  Allow them to allocate
580 		 * more by keeping their allocation queue depth equivalent to
581 		 * 2.5 full iterations, even if they repeatedly drain it. Later,
582 		 * as free space shrinks, gradually reduce the target
583 		 * queue depth, enforcing the free space balance more strongly.
584 		 */
585 		if (metaslab_bias_enabled &&
586 		    mc->mc_space > 0 && vs->vs_space > 0) {
587 			uint64_t vs_free = vs->vs_space > vs->vs_alloc ?
588 			    vs->vs_space - vs->vs_alloc : 0;
589 			uint64_t mc_free = mc->mc_space > mc->mc_alloc ?
590 			    mc->mc_space - mc->mc_alloc : 0;
591 			/*
592 			 * vs_fr is 16 bit fixed-point free space fraction.
593 			 * mc_fr is 8 bit fixed-point free space fraction.
594 			 * ratio as their quotient is 8 bit fixed-point.
595 			 */
596 			uint_t vs_fr = vs_free / (vs->vs_space / 65536 + 1);
597 			uint_t mc_fr = mc_free / (mc->mc_space / 256 + 1);
598 			ratio = vs_fr / (mc_fr + 1);
599 			mg->mg_aliquot = mg_aliquot * ratio / 256;
600 			/* From 2.5x at 25% full to 1x at 75%. */
601 			ratio = MIN(163840, vs_fr * 3 + 16384);
602 			mg->mg_queue_target = MAX(mg->mg_aliquot,
603 			    mg->mg_aliquot * ratio / 65536);
604 		} else {
605 			mg->mg_aliquot = mg_aliquot;
606 			mg->mg_queue_target = mg->mg_aliquot * 2;
607 		}
608 		sum_aliquot += mg->mg_aliquot;
609 	} while ((mg = mg->mg_next) != first);
610 
611 	/*
612 	 * Set per-class allocation throttle threshold to 4 iterations through
613 	 * all the vdevs.  This should keep all vdevs busy even if some are
614 	 * allocating more than we planned for them due to bigger blocks or
615 	 * better performance.
616 	 */
617 	mc->mc_alloc_max = sum_aliquot * 4;
618 }
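
/*
 * Worked example of the free space bias above, with illustrative numbers:
 * a vdev that is 25% full has vs_fr ~= 0.75 * 65536 = 49152, so
 * ratio = MIN(163840, 3 * 49152 + 16384) = 163840 and the queue target is
 * 163840 / 65536 = 2.5x the aliquot; at 75% full, vs_fr ~= 16384 gives
 * ratio = 65536 and a 1x target.  When a vdev's free fraction matches the
 * class average, vs_fr / (mc_fr + 1) is about 256, so mg->mg_aliquot ends
 * up approximately equal to the unbiased mg_aliquot.
 */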
619 
620 static void
621 metaslab_class_rotate(metaslab_group_t *mg, int allocator, uint64_t psize,
622     boolean_t success)
623 {
624 	metaslab_class_t *mc = mg->mg_class;
625 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
626 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
627 
628 	/*
629 	 * Exit fast if there is nothing to rotate, we are not following
630 	 * the rotor (copies, gangs, etc.) or somebody already rotated it.
631 	 */
632 	if (mc->mc_groups < 2 || mca->mca_rotor != mg)
633 		return;
634 
635 	/*
636 	 * Always rotate in case of allocation error or a log class.
637 	 */
638 	if (!success || mc->mc_is_log)
639 		goto rotate;
640 
641 	/*
642 	 * Allocate from this group if we expect next I/O of the same size to
643 	 * mostly fit within the allocation quota.  Rotate if we expect it to
644 	 * mostly go over the target queue depth.  Meanwhile, to stripe between
645 	 * groups in configured amounts per child even if we can't reach the
646 	 * target queue depth, i.e. can't saturate the group write performance,
647 	 * always rotate after allocating the queue target bytes.
648 	 */
649 	uint64_t naq = atomic_add_64_nv(&mca->mca_aliquot, psize) + psize / 2;
650 	if (naq < mg->mg_aliquot)
651 		return;
652 	if (naq >= mg->mg_queue_target)
653 		goto rotate;
654 	if (zfs_refcount_count(&mga->mga_queue_depth) + psize + psize / 2 >=
655 	    mg->mg_queue_target)
656 		goto rotate;
657 
658 	/*
659 	 * When the pool is not too busy, prefer restoring the vdev free space
660 	 * balance instead of getting maximum speed we might not need, so that
661 	 * we could have more flexibility during more busy times later.
662 	 */
663 	if (metaslab_perf_bias <= 0)
664 		goto rotate;
665 	if (metaslab_perf_bias >= 2)
666 		return;
667 	spa_t *spa = mc->mc_spa;
668 	dsl_pool_t *dp = spa_get_dsl(spa);
669 	if (dp == NULL)
670 		return;
671 	uint64_t busy_thresh = zfs_dirty_data_max *
672 	    (zfs_vdev_async_write_active_min_dirty_percent +
673 	    zfs_vdev_async_write_active_max_dirty_percent) / 200;
674 	if (dp->dp_dirty_total > busy_thresh || spa_has_pending_synctask(spa))
675 		return;
676 
677 rotate:
678 	mca->mca_rotor = mg->mg_next;
679 	mca->mca_aliquot = 0;
680 }
681 
682 static void
683 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
684     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
685 {
686 	atomic_add_64(&mc->mc_alloc, alloc_delta);
687 	atomic_add_64(&mc->mc_deferred, defer_delta);
688 	atomic_add_64(&mc->mc_space, space_delta);
689 	atomic_add_64(&mc->mc_dspace, dspace_delta);
690 }
691 
692 uint64_t
693 metaslab_class_get_alloc(metaslab_class_t *mc)
694 {
695 	return (mc->mc_alloc);
696 }
697 
698 uint64_t
699 metaslab_class_get_deferred(metaslab_class_t *mc)
700 {
701 	return (mc->mc_deferred);
702 }
703 
704 uint64_t
705 metaslab_class_get_space(metaslab_class_t *mc)
706 {
707 	return (mc->mc_space);
708 }
709 
710 uint64_t
711 metaslab_class_get_dspace(metaslab_class_t *mc)
712 {
713 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
714 }
715 
716 void
717 metaslab_class_histogram_verify(metaslab_class_t *mc)
718 {
719 	spa_t *spa = mc->mc_spa;
720 	vdev_t *rvd = spa->spa_root_vdev;
721 	uint64_t *mc_hist;
722 	int i;
723 
724 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
725 		return;
726 
727 	mc_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
728 	    KM_SLEEP);
729 
730 	mutex_enter(&mc->mc_lock);
731 	for (int c = 0; c < rvd->vdev_children; c++) {
732 		vdev_t *tvd = rvd->vdev_child[c];
733 		metaslab_group_t *mg = vdev_get_mg(tvd, mc);
734 
735 		/*
736 		 * Skip any holes, uninitialized top-levels, or
737 		 * vdevs that are not in this metaslab class.
738 		 */
739 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
740 		    mg->mg_class != mc) {
741 			continue;
742 		}
743 
744 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
745 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
746 
747 		for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
748 			mc_hist[i] += mg->mg_histogram[i];
749 	}
750 
751 	for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
752 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
753 	}
754 
755 	mutex_exit(&mc->mc_lock);
756 	kmem_free(mc_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
757 }
758 
759 /*
760  * Calculate the metaslab class's fragmentation metric. The metric
761  * is weighted based on the space contribution of each metaslab group.
762  * The return value will be a number between 0 and 100 (inclusive), or
763  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
764  * zfs_frag_table for more information about the metric.
765  */
766 uint64_t
767 metaslab_class_fragmentation(metaslab_class_t *mc)
768 {
769 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
770 	uint64_t fragmentation = 0;
771 
772 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
773 
774 	for (int c = 0; c < rvd->vdev_children; c++) {
775 		vdev_t *tvd = rvd->vdev_child[c];
776 		metaslab_group_t *mg = tvd->vdev_mg;
777 
778 		/*
779 		 * Skip any holes, uninitialized top-levels,
780 		 * or vdevs that are not in this metaslab class.
781 		 */
782 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
783 		    mg->mg_class != mc) {
784 			continue;
785 		}
786 
787 		/*
788 		 * If a metaslab group does not contain a fragmentation
789 		 * metric then just bail out.
790 		 */
791 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
792 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
793 			return (ZFS_FRAG_INVALID);
794 		}
795 
796 		/*
797 		 * Determine how much this metaslab_group is contributing
798 		 * to the overall pool fragmentation metric.
799 		 */
800 		fragmentation += mg->mg_fragmentation *
801 		    metaslab_group_get_space(mg);
802 	}
803 	fragmentation /= metaslab_class_get_space(mc);
804 
805 	ASSERT3U(fragmentation, <=, 100);
806 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
807 	return (fragmentation);
808 }
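
/*
 * For example, with hypothetical numbers, a class consisting of a 1TB
 * group at 10% fragmentation and a 3TB group at 50% fragmentation reports
 * (10 * 1T + 50 * 3T) / 4T = 40 for the class as a whole.
 */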
809 
810 /*
811  * Calculate the amount of expandable space that is available in
812  * this metaslab class. If a device is expanded then its expandable
813  * space will be the amount of allocatable space that is currently not
814  * part of this metaslab class.
815  */
816 uint64_t
817 metaslab_class_expandable_space(metaslab_class_t *mc)
818 {
819 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
820 	uint64_t space = 0;
821 
822 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
823 	for (int c = 0; c < rvd->vdev_children; c++) {
824 		vdev_t *tvd = rvd->vdev_child[c];
825 		metaslab_group_t *mg = tvd->vdev_mg;
826 
827 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
828 		    mg->mg_class != mc) {
829 			continue;
830 		}
831 
832 		/*
833 		 * Calculate if we have enough space to add additional
834 		 * metaslabs. We report the expandable space in terms
835 		 * of the metaslab size since that's the unit of expansion.
836 		 */
837 		space += P2ALIGN_TYPED(tvd->vdev_max_asize - tvd->vdev_asize,
838 		    1ULL << tvd->vdev_ms_shift, uint64_t);
839 	}
840 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
841 	return (space);
842 }
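
/*
 * For example, with hypothetical numbers, a top-level vdev whose
 * vdev_max_asize exceeds vdev_asize by 10.3GB and whose metaslabs are 1GB
 * (vdev_ms_shift = 30) contributes 10GB here: the partial metaslab is not
 * counted since expansion happens in whole metaslabs.
 */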
843 
844 void
845 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
846 {
847 	multilist_t *ml = &mc->mc_metaslab_txg_list;
848 	uint64_t now = gethrestime_sec();
849 	/* Round delay up to next second. */
850 	uint_t delay = (metaslab_unload_delay_ms + 999) / 1000;
851 	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
852 		multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
853 		metaslab_t *msp = multilist_sublist_head(mls);
854 		multilist_sublist_unlock(mls);
855 		while (msp != NULL) {
856 			mutex_enter(&msp->ms_lock);
857 
858 			/*
859 			 * If the metaslab has been removed from the list
860 			 * (which could happen if we were at the memory limit
861 			 * and it was evicted during this loop), then we can't
862 			 * proceed and we should restart the sublist.
863 			 */
864 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
865 				mutex_exit(&msp->ms_lock);
866 				i--;
867 				break;
868 			}
869 			mls = multilist_sublist_lock_idx(ml, i);
870 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
871 			multilist_sublist_unlock(mls);
872 			if (txg >
873 			    msp->ms_selected_txg + metaslab_unload_delay &&
874 			    now > msp->ms_selected_time + delay &&
875 			    (msp->ms_allocator == -1 ||
876 			    !metaslab_preload_enabled)) {
877 				metaslab_evict(msp, txg);
878 			} else {
879 				/*
880 				 * Once we've hit a metaslab selected too
881 				 * recently to evict, we're done evicting for
882 				 * now.
883 				 */
884 				mutex_exit(&msp->ms_lock);
885 				break;
886 			}
887 			mutex_exit(&msp->ms_lock);
888 			msp = next_msp;
889 		}
890 	}
891 }
892 
893 static int
894 metaslab_compare(const void *x1, const void *x2)
895 {
896 	const metaslab_t *m1 = (const metaslab_t *)x1;
897 	const metaslab_t *m2 = (const metaslab_t *)x2;
898 
899 	int sort1 = 0;
900 	int sort2 = 0;
901 	if (m1->ms_allocator != -1 && m1->ms_primary)
902 		sort1 = 1;
903 	else if (m1->ms_allocator != -1 && !m1->ms_primary)
904 		sort1 = 2;
905 	if (m2->ms_allocator != -1 && m2->ms_primary)
906 		sort2 = 1;
907 	else if (m2->ms_allocator != -1 && !m2->ms_primary)
908 		sort2 = 2;
909 
910 	/*
911 	 * Sort inactive metaslabs first, then primaries, then secondaries. When
912 	 * selecting a metaslab to allocate from, an allocator first tries its
913 	 * primary, then secondary active metaslab. If it doesn't have active
914 	 * metaslabs, or can't allocate from them, it searches for an inactive
915 	 * metaslab to activate. If it can't find a suitable one, it will steal
916 	 * a primary or secondary metaslab from another allocator.
917 	 */
918 	if (sort1 < sort2)
919 		return (-1);
920 	if (sort1 > sort2)
921 		return (1);
922 
923 	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
924 	if (likely(cmp))
925 		return (cmp);
926 
927 	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
928 
929 	return (TREE_CMP(m1->ms_start, m2->ms_start));
930 }
931 
932 /*
933  * ==========================================================================
934  * Metaslab groups
935  * ==========================================================================
936  */
937 /*
938  * Update the allocatable flag and the metaslab group's capacity.
939  * The allocatable flag is set to true if the group's free capacity is
940  * above zfs_mg_noalloc_threshold and its fragmentation is at or below
941  * zfs_mg_fragmentation_threshold (or invalid). If a metaslab group
942  * transitions from allocatable to non-allocatable or vice versa then the
943  * metaslab group's class is updated to reflect the transition.
944  */
945 static void
946 metaslab_group_alloc_update(metaslab_group_t *mg)
947 {
948 	vdev_t *vd = mg->mg_vd;
949 	metaslab_class_t *mc = mg->mg_class;
950 	vdev_stat_t *vs = &vd->vdev_stat;
951 	boolean_t was_allocatable;
952 	boolean_t was_initialized;
953 
954 	ASSERT(vd == vd->vdev_top);
955 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
956 	    SCL_ALLOC);
957 
958 	mutex_enter(&mg->mg_lock);
959 	was_allocatable = mg->mg_allocatable;
960 	was_initialized = mg->mg_initialized;
961 
962 	uint64_t free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
963 	    (vs->vs_space + 1);
964 
965 	mutex_enter(&mc->mc_lock);
966 
967 	/*
968 	 * If the metaslab group was just added then it won't
969 	 * have any space until we finish syncing out this txg.
970 	 * At that point we will consider it initialized and available
971 	 * for allocations.  We also don't consider non-activated
972 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
973 	 * to be initialized, because they can't be used for allocation.
974 	 */
975 	mg->mg_initialized = metaslab_group_initialized(mg);
976 	if (!was_initialized && mg->mg_initialized) {
977 		mc->mc_groups++;
978 	} else if (was_initialized && !mg->mg_initialized) {
979 		ASSERT3U(mc->mc_groups, >, 0);
980 		mc->mc_groups--;
981 	}
982 	if (mg->mg_initialized)
983 		mg->mg_no_free_space = B_FALSE;
984 
985 	/*
986 	 * A metaslab group is considered allocatable if it has plenty
987 	 * of free space or is not heavily fragmented. We only take
988 	 * fragmentation into account if the metaslab group has a valid
989 	 * fragmentation metric (i.e. a value between 0 and 100).
990 	 */
991 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
992 	    free_capacity > zfs_mg_noalloc_threshold &&
993 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
994 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
995 
996 	/*
997 	 * The mc_alloc_groups maintains a count of the number of
998 	 * groups in this metaslab class that are still above the
999 	 * zfs_mg_noalloc_threshold. This is used by the allocating
1000 	 * threads to determine if they should avoid allocations to
1001 	 * a given group. The allocator will avoid allocations to a group
1002 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
1003 	 * and there are still other groups that are above the threshold.
1004 	 * When a group transitions from allocatable to non-allocatable or
1005 	 * vice versa we update the metaslab class to reflect that change.
1006 	 * When the mc_alloc_groups value drops to 0 that means that all
1007 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
1008 	 * eligible for allocations. This effectively means that all devices
1009 	 * are balanced again.
1010 	 */
1011 	if (was_allocatable && !mg->mg_allocatable)
1012 		mc->mc_alloc_groups--;
1013 	else if (!was_allocatable && mg->mg_allocatable)
1014 		mc->mc_alloc_groups++;
1015 	mutex_exit(&mc->mc_lock);
1016 
1017 	mutex_exit(&mg->mg_lock);
1018 }
1019 
1020 int
1021 metaslab_sort_by_flushed(const void *va, const void *vb)
1022 {
1023 	const metaslab_t *a = va;
1024 	const metaslab_t *b = vb;
1025 
1026 	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
1027 	if (likely(cmp))
1028 		return (cmp);
1029 
1030 	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
1031 	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
1032 	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
1033 	if (cmp)
1034 		return (cmp);
1035 
1036 	return (TREE_CMP(a->ms_id, b->ms_id));
1037 }
1038 
1039 metaslab_group_t *
1040 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
1041 {
1042 	spa_t *spa = mc->mc_spa;
1043 	metaslab_group_t *mg;
1044 
1045 	mg = kmem_zalloc(offsetof(metaslab_group_t,
1046 	    mg_allocator[spa->spa_alloc_count]), KM_SLEEP);
1047 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
1048 	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
1049 	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
1050 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
1051 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
1052 	mg->mg_vd = vd;
1053 	mg->mg_class = mc;
1054 	mg->mg_activation_count = 0;
1055 	mg->mg_initialized = B_FALSE;
1056 	mg->mg_no_free_space = B_TRUE;
1057 
1058 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1059 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
1060 		zfs_refcount_create_tracked(&mga->mga_queue_depth);
1061 	}
1062 
1063 	return (mg);
1064 }
1065 
1066 void
1067 metaslab_group_destroy(metaslab_group_t *mg)
1068 {
1069 	spa_t *spa = mg->mg_class->mc_spa;
1070 
1071 	ASSERT(mg->mg_prev == NULL);
1072 	ASSERT(mg->mg_next == NULL);
1073 	/*
1074 	 * We may have gone below zero with the activation count
1075 	 * either because we never activated in the first place or
1076 	 * because we're done, and possibly removing the vdev.
1077 	 */
1078 	ASSERT(mg->mg_activation_count <= 0);
1079 
1080 	avl_destroy(&mg->mg_metaslab_tree);
1081 	mutex_destroy(&mg->mg_lock);
1082 	mutex_destroy(&mg->mg_ms_disabled_lock);
1083 	cv_destroy(&mg->mg_ms_disabled_cv);
1084 
1085 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1086 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
1087 		zfs_refcount_destroy(&mga->mga_queue_depth);
1088 	}
1089 	kmem_free(mg, offsetof(metaslab_group_t,
1090 	    mg_allocator[spa->spa_alloc_count]));
1091 }
1092 
1093 void
1094 metaslab_group_activate(metaslab_group_t *mg)
1095 {
1096 	metaslab_class_t *mc = mg->mg_class;
1097 	spa_t *spa = mc->mc_spa;
1098 	metaslab_group_t *mgprev, *mgnext;
1099 
1100 	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
1101 
1102 	ASSERT(mg->mg_prev == NULL);
1103 	ASSERT(mg->mg_next == NULL);
1104 	ASSERT(mg->mg_activation_count <= 0);
1105 
1106 	if (++mg->mg_activation_count <= 0)
1107 		return;
1108 
1109 	metaslab_group_alloc_update(mg);
1110 
1111 	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
1112 		mg->mg_prev = mg;
1113 		mg->mg_next = mg;
1114 	} else {
1115 		mgnext = mgprev->mg_next;
1116 		mg->mg_prev = mgprev;
1117 		mg->mg_next = mgnext;
1118 		mgprev->mg_next = mg;
1119 		mgnext->mg_prev = mg;
1120 	}
1121 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1122 		mc->mc_allocator[i].mca_rotor = mg;
1123 		mg = mg->mg_next;
1124 	}
1125 	metaslab_class_balance(mc, B_FALSE);
1126 }
1127 
1128 /*
1129  * Passivate a metaslab group and remove it from the allocation rotor.
1130  * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
1131  * a metaslab group. This function will momentarily drop spa_config_locks
1132  * that are lower than the SCL_ALLOC lock (see comment below).
1133  */
1134 void
1135 metaslab_group_passivate(metaslab_group_t *mg)
1136 {
1137 	metaslab_class_t *mc = mg->mg_class;
1138 	spa_t *spa = mc->mc_spa;
1139 	metaslab_group_t *mgprev, *mgnext;
1140 	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
1141 
1142 	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
1143 	    (SCL_ALLOC | SCL_ZIO));
1144 
1145 	if (--mg->mg_activation_count != 0) {
1146 		for (int i = 0; i < spa->spa_alloc_count; i++)
1147 			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
1148 		ASSERT(mg->mg_prev == NULL);
1149 		ASSERT(mg->mg_next == NULL);
1150 		ASSERT(mg->mg_activation_count < 0);
1151 		return;
1152 	}
1153 
1154 	/*
1155 	 * The spa_config_lock is an array of rwlocks, ordered as
1156 	 * follows (from highest to lowest):
1157 	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
1158 	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
1159 	 * (For more information about the spa_config_lock see spa_misc.c)
1160 	 * The higher the lock, the broader its coverage. When we passivate
1161 	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
1162 	 * config locks. However, the metaslab group's taskq might be trying
1163 	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
1164 	 * lower locks to allow the I/O to complete. At a minimum,
1165 	 * we continue to hold the SCL_ALLOC lock, which prevents any future
1166 	 * allocations from taking place and any changes to the vdev tree.
1167 	 */
1168 	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
1169 	taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
1170 	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
1171 	metaslab_group_alloc_update(mg);
1172 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1173 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
1174 		metaslab_t *msp = mga->mga_primary;
1175 		if (msp != NULL) {
1176 			mutex_enter(&msp->ms_lock);
1177 			metaslab_passivate(msp,
1178 			    metaslab_weight_from_range_tree(msp));
1179 			mutex_exit(&msp->ms_lock);
1180 		}
1181 		msp = mga->mga_secondary;
1182 		if (msp != NULL) {
1183 			mutex_enter(&msp->ms_lock);
1184 			metaslab_passivate(msp,
1185 			    metaslab_weight_from_range_tree(msp));
1186 			mutex_exit(&msp->ms_lock);
1187 		}
1188 	}
1189 
1190 	mgprev = mg->mg_prev;
1191 	mgnext = mg->mg_next;
1192 
1193 	if (mg == mgnext) {
1194 		mgnext = NULL;
1195 	} else {
1196 		mgprev->mg_next = mgnext;
1197 		mgnext->mg_prev = mgprev;
1198 	}
1199 	for (int i = 0; i < spa->spa_alloc_count; i++) {
1200 		if (mc->mc_allocator[i].mca_rotor == mg)
1201 			mc->mc_allocator[i].mca_rotor = mgnext;
1202 	}
1203 
1204 	mg->mg_prev = NULL;
1205 	mg->mg_next = NULL;
1206 	metaslab_class_balance(mc, B_FALSE);
1207 }
1208 
1209 boolean_t
1210 metaslab_group_initialized(metaslab_group_t *mg)
1211 {
1212 	vdev_t *vd = mg->mg_vd;
1213 	vdev_stat_t *vs = &vd->vdev_stat;
1214 
1215 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1216 }
1217 
1218 uint64_t
1219 metaslab_group_get_space(metaslab_group_t *mg)
1220 {
1221 	/*
1222 	 * Note that the number of nodes in mg_metaslab_tree may be one less
1223 	 * than vdev_ms_count, due to the embedded log metaslab.
1224 	 */
1225 	mutex_enter(&mg->mg_lock);
1226 	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
1227 	mutex_exit(&mg->mg_lock);
1228 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
1229 }
1230 
1231 void
1232 metaslab_group_histogram_verify(metaslab_group_t *mg)
1233 {
1234 	uint64_t *mg_hist;
1235 	avl_tree_t *t = &mg->mg_metaslab_tree;
1236 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1237 
1238 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1239 		return;
1240 
1241 	mg_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
1242 	    KM_SLEEP);
1243 
1244 	ASSERT3U(ZFS_RANGE_TREE_HISTOGRAM_SIZE, >=,
1245 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
1246 
1247 	mutex_enter(&mg->mg_lock);
1248 	for (metaslab_t *msp = avl_first(t);
1249 	    msp != NULL; msp = AVL_NEXT(t, msp)) {
1250 		VERIFY3P(msp->ms_group, ==, mg);
1251 		/* skip if not active */
1252 		if (msp->ms_sm == NULL)
1253 			continue;
1254 
1255 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1256 			mg_hist[i + ashift] +=
1257 			    msp->ms_sm->sm_phys->smp_histogram[i];
1258 		}
1259 	}
1260 
1261 	for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i ++)
1262 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1263 
1264 	mutex_exit(&mg->mg_lock);
1265 
1266 	kmem_free(mg_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
1267 }
1268 
1269 static void
1270 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1271 {
1272 	metaslab_class_t *mc = mg->mg_class;
1273 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1274 
1275 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1276 	if (msp->ms_sm == NULL)
1277 		return;
1278 
1279 	mutex_enter(&mg->mg_lock);
1280 	mutex_enter(&mc->mc_lock);
1281 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1282 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1283 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1284 		mg->mg_histogram[i + ashift] +=
1285 		    msp->ms_sm->sm_phys->smp_histogram[i];
1286 		mc->mc_histogram[i + ashift] +=
1287 		    msp->ms_sm->sm_phys->smp_histogram[i];
1288 	}
1289 	mutex_exit(&mc->mc_lock);
1290 	mutex_exit(&mg->mg_lock);
1291 }
1292 
1293 void
1294 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1295 {
1296 	metaslab_class_t *mc = mg->mg_class;
1297 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1298 
1299 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1300 	if (msp->ms_sm == NULL)
1301 		return;
1302 
1303 	mutex_enter(&mg->mg_lock);
1304 	mutex_enter(&mc->mc_lock);
1305 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1306 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
1307 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1308 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
1309 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1310 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1311 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1312 
1313 		mg->mg_histogram[i + ashift] -=
1314 		    msp->ms_sm->sm_phys->smp_histogram[i];
1315 		mc->mc_histogram[i + ashift] -=
1316 		    msp->ms_sm->sm_phys->smp_histogram[i];
1317 	}
1318 	mutex_exit(&mc->mc_lock);
1319 	mutex_exit(&mg->mg_lock);
1320 }
1321 
1322 static void
1323 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1324 {
1325 	ASSERT(msp->ms_group == NULL);
1326 	mutex_enter(&mg->mg_lock);
1327 	msp->ms_group = mg;
1328 	msp->ms_weight = 0;
1329 	avl_add(&mg->mg_metaslab_tree, msp);
1330 	mutex_exit(&mg->mg_lock);
1331 
1332 	mutex_enter(&msp->ms_lock);
1333 	metaslab_group_histogram_add(mg, msp);
1334 	mutex_exit(&msp->ms_lock);
1335 }
1336 
1337 static void
1338 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1339 {
1340 	mutex_enter(&msp->ms_lock);
1341 	metaslab_group_histogram_remove(mg, msp);
1342 	mutex_exit(&msp->ms_lock);
1343 
1344 	mutex_enter(&mg->mg_lock);
1345 	ASSERT(msp->ms_group == mg);
1346 	avl_remove(&mg->mg_metaslab_tree, msp);
1347 
1348 	metaslab_class_t *mc = msp->ms_group->mg_class;
1349 	multilist_sublist_t *mls =
1350 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
1351 	if (multilist_link_active(&msp->ms_class_txg_node))
1352 		multilist_sublist_remove(mls, msp);
1353 	multilist_sublist_unlock(mls);
1354 
1355 	msp->ms_group = NULL;
1356 	mutex_exit(&mg->mg_lock);
1357 }
1358 
1359 static void
1360 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1361 {
1362 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1363 	ASSERT(MUTEX_HELD(&mg->mg_lock));
1364 	ASSERT(msp->ms_group == mg);
1365 
1366 	avl_remove(&mg->mg_metaslab_tree, msp);
1367 	msp->ms_weight = weight;
1368 	avl_add(&mg->mg_metaslab_tree, msp);
1369 
1370 }
1371 
1372 static void
1373 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1374 {
1375 	/*
1376 	 * Although in principle the weight can be any value, in
1377 	 * practice we do not use values in the range [1, 511].
1378 	 */
1379 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1380 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1381 
1382 	mutex_enter(&mg->mg_lock);
1383 	metaslab_group_sort_impl(mg, msp, weight);
1384 	mutex_exit(&mg->mg_lock);
1385 }
1386 
1387 /*
1388  * Calculate the fragmentation for a given metaslab group.  Weight metaslabs
1389  * by the amount of free space.  The return value will be between 0 and 100
1390  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
1391  * group have a fragmentation metric.
1392  */
1393 uint64_t
1394 metaslab_group_fragmentation(metaslab_group_t *mg)
1395 {
1396 	vdev_t *vd = mg->mg_vd;
1397 	uint64_t fragmentation = 0;
1398 	uint64_t valid_ms = 0, total_ms = 0;
1399 	uint64_t free, total_free = 0;
1400 
1401 	for (int m = 0; m < vd->vdev_ms_count; m++) {
1402 		metaslab_t *msp = vd->vdev_ms[m];
1403 
1404 		if (msp->ms_group != mg)
1405 			continue;
1406 		total_ms++;
1407 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1408 			continue;
1409 
1410 		valid_ms++;
1411 		free = (msp->ms_size - metaslab_allocated_space(msp)) /
1412 		    SPA_MINBLOCKSIZE;  /* To prevent overflows. */
1413 		total_free += free;
1414 		fragmentation += msp->ms_fragmentation * free;
1415 	}
1416 
1417 	if (valid_ms < (total_ms + 1) / 2 || total_free == 0)
1418 		return (ZFS_FRAG_INVALID);
1419 
1420 	fragmentation /= total_free;
1421 	ASSERT3U(fragmentation, <=, 100);
1422 	return (fragmentation);
1423 }
1424 
1425 /*
1426  * ==========================================================================
1427  * Range tree callbacks
1428  * ==========================================================================
1429  */
1430 
1431 /*
1432  * Comparison function for the private size-ordered tree using 32-bit
1433  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1434  */
1435 __attribute__((always_inline)) inline
1436 static int
1437 metaslab_rangesize32_compare(const void *x1, const void *x2)
1438 {
1439 	const zfs_range_seg32_t *r1 = x1;
1440 	const zfs_range_seg32_t *r2 = x2;
1441 
1442 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1443 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1444 
1445 	int cmp = TREE_CMP(rs_size1, rs_size2);
1446 
1447 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1448 }
1449 
1450 /*
1451  * Comparison function for the private size-ordered tree using 64-bit
1452  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1453  */
1454 __attribute__((always_inline)) inline
1455 static int
1456 metaslab_rangesize64_compare(const void *x1, const void *x2)
1457 {
1458 	const zfs_range_seg64_t *r1 = x1;
1459 	const zfs_range_seg64_t *r2 = x2;
1460 
1461 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1462 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1463 
1464 	int cmp = TREE_CMP(rs_size1, rs_size2);
1465 
1466 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1467 }
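
/*
 * The "cmp + !cmp * TREE_CMP(...)" form used by both comparators above is
 * a branch-free spelling of the usual two-key comparison: when the sizes
 * differ, cmp is returned unchanged (the !cmp term is zero); when they are
 * equal, cmp is zero and the result falls through to comparing the start
 * offsets, keeping equal-sized segments in a stable, deterministic order.
 * An equivalent branching form would be:
 *
 *	if (cmp != 0)
 *		return (cmp);
 *	return (TREE_CMP(r1->rs_start, r2->rs_start));
 */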
1468 
1469 typedef struct metaslab_rt_arg {
1470 	zfs_btree_t *mra_bt;
1471 	uint32_t mra_floor_shift;
1472 } metaslab_rt_arg_t;
1473 
1474 struct mssa_arg {
1475 	zfs_range_tree_t *rt;
1476 	metaslab_rt_arg_t *mra;
1477 };
1478 
1479 static void
1480 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1481 {
1482 	struct mssa_arg *mssap = arg;
1483 	zfs_range_tree_t *rt = mssap->rt;
1484 	metaslab_rt_arg_t *mrap = mssap->mra;
1485 	zfs_range_seg_max_t seg = {0};
1486 	zfs_rs_set_start(&seg, rt, start);
1487 	zfs_rs_set_end(&seg, rt, start + size);
1488 	metaslab_rt_add(rt, &seg, mrap);
1489 }
1490 
1491 static void
1492 metaslab_size_tree_full_load(zfs_range_tree_t *rt)
1493 {
1494 	metaslab_rt_arg_t *mrap = rt->rt_arg;
1495 	METASLABSTAT_BUMP(metaslabstat_reload_tree);
1496 	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1497 	mrap->mra_floor_shift = 0;
1498 	struct mssa_arg arg = {0};
1499 	arg.rt = rt;
1500 	arg.mra = mrap;
1501 	zfs_range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1502 }
1503 
1504 
1505 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
1506     zfs_range_seg32_t, metaslab_rangesize32_compare)
1507 
1508 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
1509     zfs_range_seg64_t, metaslab_rangesize64_compare)
1510 
1511 /*
1512  * Create any block allocator specific components. The current allocators
1513  * rely on using both a size-ordered zfs_range_tree_t and an array of
1514  * uint64_t's.
1515  */
1516 static void
1517 metaslab_rt_create(zfs_range_tree_t *rt, void *arg)
1518 {
1519 	metaslab_rt_arg_t *mrap = arg;
1520 	zfs_btree_t *size_tree = mrap->mra_bt;
1521 
1522 	size_t size;
1523 	int (*compare) (const void *, const void *);
1524 	bt_find_in_buf_f bt_find;
1525 	switch (rt->rt_type) {
1526 	case ZFS_RANGE_SEG32:
1527 		size = sizeof (zfs_range_seg32_t);
1528 		compare = metaslab_rangesize32_compare;
1529 		bt_find = metaslab_rt_find_rangesize32_in_buf;
1530 		break;
1531 	case ZFS_RANGE_SEG64:
1532 		size = sizeof (zfs_range_seg64_t);
1533 		compare = metaslab_rangesize64_compare;
1534 		bt_find = metaslab_rt_find_rangesize64_in_buf;
1535 		break;
1536 	default:
1537 		panic("Invalid range seg type %d", rt->rt_type);
1538 	}
1539 	zfs_btree_create(size_tree, compare, bt_find, size);
1540 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
1541 }
1542 
1543 static void
1544 metaslab_rt_destroy(zfs_range_tree_t *rt, void *arg)
1545 {
1546 	(void) rt;
1547 	metaslab_rt_arg_t *mrap = arg;
1548 	zfs_btree_t *size_tree = mrap->mra_bt;
1549 
1550 	zfs_btree_destroy(size_tree);
1551 	kmem_free(mrap, sizeof (*mrap));
1552 }
1553 
1554 static void
1555 metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
1556 {
1557 	metaslab_rt_arg_t *mrap = arg;
1558 	zfs_btree_t *size_tree = mrap->mra_bt;
1559 
1560 	if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) <
1561 	    (1ULL << mrap->mra_floor_shift))
1562 		return;
1563 
1564 	zfs_btree_add(size_tree, rs);
1565 }
1566 
1567 static void
1568 metaslab_rt_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
1569 {
1570 	metaslab_rt_arg_t *mrap = arg;
1571 	zfs_btree_t *size_tree = mrap->mra_bt;
1572 
1573 	if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) < (1ULL <<
1574 	    mrap->mra_floor_shift))
1575 		return;
1576 
1577 	zfs_btree_remove(size_tree, rs);
1578 }
1579 
1580 static void
1581 metaslab_rt_vacate(zfs_range_tree_t *rt, void *arg)
1582 {
1583 	metaslab_rt_arg_t *mrap = arg;
1584 	zfs_btree_t *size_tree = mrap->mra_bt;
1585 	zfs_btree_clear(size_tree);
1586 	zfs_btree_destroy(size_tree);
1587 
1588 	metaslab_rt_create(rt, arg);
1589 }
1590 
1591 static const zfs_range_tree_ops_t metaslab_rt_ops = {
1592 	.rtop_create = metaslab_rt_create,
1593 	.rtop_destroy = metaslab_rt_destroy,
1594 	.rtop_add = metaslab_rt_add,
1595 	.rtop_remove = metaslab_rt_remove,
1596 	.rtop_vacate = metaslab_rt_vacate
1597 };
1598 
1599 /*
1600  * ==========================================================================
1601  * Common allocator routines
1602  * ==========================================================================
1603  */
1604 
1605 /*
1606  * Return the maximum contiguous segment within the metaslab.
1607  */
1608 uint64_t
1609 metaslab_largest_allocatable(metaslab_t *msp)
1610 {
1611 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1612 	zfs_range_seg_t *rs;
1613 
1614 	if (t == NULL)
1615 		return (0);
1616 	if (zfs_btree_numnodes(t) == 0)
1617 		metaslab_size_tree_full_load(msp->ms_allocatable);
1618 
1619 	rs = zfs_btree_last(t, NULL);
1620 	if (rs == NULL)
1621 		return (0);
1622 
1623 	return (zfs_rs_get_end(rs, msp->ms_allocatable) - zfs_rs_get_start(rs,
1624 	    msp->ms_allocatable));
1625 }
1626 
1627 /*
1628  * Return the maximum contiguous segment within the unflushed frees of this
1629  * metaslab.
1630  */
1631 static uint64_t
1632 metaslab_largest_unflushed_free(metaslab_t *msp)
1633 {
1634 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1635 
1636 	if (msp->ms_unflushed_frees == NULL)
1637 		return (0);
1638 
1639 	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1640 		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1641 	zfs_range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1642 	    NULL);
1643 	if (rs == NULL)
1644 		return (0);
1645 
1646 	/*
1647 	 * When a range is freed from the metaslab, that range is added to
1648 	 * both the unflushed frees and the deferred frees. While the block
1649 	 * will eventually be usable, if the metaslab were loaded the range
1650 	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1651 	 * txgs had passed.  As a result, when attempting to estimate an upper
1652 	 * bound for the largest currently-usable free segment in the
1653 	 * metaslab, we need to not consider any ranges currently in the defer
1654 	 * trees. This algorithm approximates the largest available chunk in
1655 	 * the largest range in the unflushed_frees tree by taking the first
1656 	 * chunk.  While this may be a poor estimate, it should only remain so
1657 	 * briefly and should eventually self-correct as frees are no longer
1658 	 * deferred. Similar logic applies to the ms_freed tree. See
1659 	 * metaslab_load() for more details.
1660 	 *
1661 	 * There are two primary sources of inaccuracy in this estimate. Both
1662 	 * are tolerated for performance reasons. The first source is that we
1663 	 * only check the largest segment for overlaps. Smaller segments may
1664 	 * have more favorable overlaps with the other trees, resulting in
1665 	 * larger usable chunks.  Second, we only look at the first chunk in
1666 	 * the largest segment; there may be other usable chunks in the
1667 	 * largest segment, but we ignore them.
1668 	 */
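	/*
	 * For illustration (hypothetical numbers): if the largest segment in
	 * ms_unflushed_frees is [0x40000, 0x80000), then rstart = 0x40000 and
	 * rsize = 0x40000 below.  Should one of the ms_defer trees overlap it
	 * starting at 0x50000, the estimate is clipped to
	 * rsize = 0x50000 - 0x40000 = 0x10000; had the overlap begun at
	 * rstart itself, the estimate would drop to 0.
	 */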
1669 	uint64_t rstart = zfs_rs_get_start(rs, msp->ms_unflushed_frees);
1670 	uint64_t rsize = zfs_rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1671 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1672 		uint64_t start = 0;
1673 		uint64_t size = 0;
1674 		boolean_t found = zfs_range_tree_find_in(msp->ms_defer[t],
1675 		    rstart, rsize, &start, &size);
1676 		if (found) {
1677 			if (rstart == start)
1678 				return (0);
1679 			rsize = start - rstart;
1680 		}
1681 	}
1682 
1683 	uint64_t start = 0;
1684 	uint64_t size = 0;
1685 	boolean_t found = zfs_range_tree_find_in(msp->ms_freed, rstart,
1686 	    rsize, &start, &size);
1687 	if (found)
1688 		rsize = start - rstart;
1689 
1690 	return (rsize);
1691 }
1692 
1693 static zfs_range_seg_t *
1694 metaslab_block_find(zfs_btree_t *t, zfs_range_tree_t *rt, uint64_t start,
1695     uint64_t size, uint64_t max_size, zfs_btree_index_t *where)
1696 {
1697 	zfs_range_seg_t *rs;
1698 	zfs_range_seg_max_t rsearch;
1699 
1700 	zfs_rs_set_start(&rsearch, rt, start);
1701 	zfs_rs_set_end(&rsearch, rt, start + max_size);
1702 
1703 	rs = zfs_btree_find(t, &rsearch, where);
1704 	if (rs == NULL) {
1705 		if (size == max_size) {
1706 			rs = zfs_btree_next(t, where, where);
1707 		} else {
1708 			/*
1709 			 * If we're searching for a range, get the largest
1710 			 * segment in that range, or the smallest one bigger
1711 			 * than it.
1712 			 */
1713 			rs = zfs_btree_prev(t, where, where);
1714 			if (rs == NULL || zfs_rs_get_end(rs, rt) -
1715 			    zfs_rs_get_start(rs, rt) < size) {
1716 				rs = zfs_btree_next(t, where, where);
1717 			}
1718 		}
1719 	}
1720 
1721 	return (rs);
1722 }
1723 
1724 /*
1725  * This is a helper function that can be used by the allocator to find a
1726  * suitable block to allocate. This will search the specified B-tree looking
1727  * for a block that matches the specified criteria.
1728  */
1729 static uint64_t
1730 metaslab_block_picker(zfs_range_tree_t *rt, uint64_t *cursor, uint64_t size,
1731     uint64_t max_size, uint64_t max_search, uint64_t *found_size)
1732 {
1733 	if (*cursor == 0)
1734 		*cursor = rt->rt_start;
1735 	zfs_btree_t *bt = &rt->rt_root;
1736 	zfs_btree_index_t where;
1737 	zfs_range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size,
1738 	    max_size, &where);
1739 	uint64_t first_found;
1740 	int count_searched = 0;
1741 
1742 	if (rs != NULL)
1743 		first_found = zfs_rs_get_start(rs, rt);
1744 
1745 	while (rs != NULL && (zfs_rs_get_start(rs, rt) - first_found <=
1746 	    max_search || count_searched < metaslab_min_search_count)) {
1747 		uint64_t offset = zfs_rs_get_start(rs, rt);
1748 		if (offset + size <= zfs_rs_get_end(rs, rt)) {
1749 			*found_size = MIN(zfs_rs_get_end(rs, rt) - offset,
1750 			    max_size);
1751 			*cursor = offset + *found_size;
1752 			return (offset);
1753 		}
1754 		rs = zfs_btree_next(bt, &where, &where);
1755 		count_searched++;
1756 	}
1757 
1758 	*cursor = 0;
1759 	*found_size = 0;
1760 	return (-1ULL);
1761 }
1762 
1763 static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size,
1764     uint64_t max_size, uint64_t *found_size);
1765 static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size,
1766     uint64_t max_size, uint64_t *found_size);
1767 static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size,
1768     uint64_t max_size, uint64_t *found_size);
1769 metaslab_ops_t *metaslab_allocator(spa_t *spa);
1770 
1771 static metaslab_ops_t metaslab_allocators[] = {
1772 	{ "dynamic", metaslab_df_alloc },
1773 	{ "cursor", metaslab_cf_alloc },
1774 	{ "new-dynamic", metaslab_ndf_alloc },
1775 };
1776 
1777 static int
1778 spa_find_allocator_byname(const char *val)
1779 {
1780 	int a = ARRAY_SIZE(metaslab_allocators) - 1;
1781 	if (strcmp("new-dynamic", val) == 0)
1782 		return (-1); /* remove when ndf is working */
1783 	for (; a >= 0; a--) {
1784 		if (strcmp(val, metaslab_allocators[a].msop_name) == 0)
1785 			return (a);
1786 	}
1787 	return (-1);
1788 }
1789 
1790 void
1791 spa_set_allocator(spa_t *spa, const char *allocator)
1792 {
1793 	int a = spa_find_allocator_byname(allocator);
1794 	if (a < 0) a = 0;
1795 	spa->spa_active_allocator = a;
1796 	zfs_dbgmsg("spa allocator: %s", metaslab_allocators[a].msop_name);
1797 }
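
/*
 * For example, spa_set_allocator(spa, "cursor") selects index 1 above
 * (metaslab_cf_alloc), while an unrecognized name, or "new-dynamic" (which
 * spa_find_allocator_byname() currently rejects), falls back to index 0,
 * the "dynamic" allocator.
 */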
1798 
1799 int
1800 spa_get_allocator(spa_t *spa)
1801 {
1802 	return (spa->spa_active_allocator);
1803 }
1804 
1805 #if defined(_KERNEL)
1806 int
1807 param_set_active_allocator_common(const char *val)
1808 {
1809 	char *p;
1810 
1811 	if (val == NULL)
1812 		return (SET_ERROR(EINVAL));
1813 
1814 	if ((p = strchr(val, '\n')) != NULL)
1815 		*p = '\0';
1816 
1817 	int a = spa_find_allocator_byname(val);
1818 	if (a < 0)
1819 		return (SET_ERROR(EINVAL));
1820 
1821 	zfs_active_allocator = metaslab_allocators[a].msop_name;
1822 	return (0);
1823 }
1824 #endif
1825 
1826 metaslab_ops_t *
1827 metaslab_allocator(spa_t *spa)
1828 {
1829 	int allocator = spa_get_allocator(spa);
1830 	return (&metaslab_allocators[allocator]);
1831 }
1832 
1833 /*
1834  * ==========================================================================
1835  * Dynamic Fit (df) block allocator
1836  *
1837  * Search for a free chunk of at least this size, starting from the last
1838  * offset (for this alignment of block) looking for up to
1839  * metaslab_df_max_search bytes (16MB).  If a large enough free chunk is not
1840  * found within 16MB, then return a free chunk of exactly the requested size (or
1841  * larger).
1842  *
1843  * If it seems like searching from the last offset will be unproductive, skip
1844  * that and just return a free chunk of exactly the requested size (or larger).
1845  * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct.  This
1846  * mechanism is probably not very useful and may be removed in the future.
1847  *
1848  * The behavior when not searching can be changed to return the largest free
1849  * chunk, instead of a free chunk of exactly the requested size, by setting
1850  * metaslab_df_use_largest_segment.
1851  * ==========================================================================
1852  */
1853 static uint64_t
1854 metaslab_df_alloc(metaslab_t *msp, uint64_t size, uint64_t max_size,
1855     uint64_t *found_size)
1856 {
1857 	/*
1858 	 * Find the largest power of 2 block size that evenly divides the
1859 	 * requested size. This is used to try to allocate blocks with similar
1860 	 * alignment from the same area of the metaslab (i.e. same cursor
1861 	 * bucket), but it does not prevent allocations of other sizes from
1862 	 * landing in the same region.
1863 	 */
1864 	uint64_t align = max_size & -max_size;
1865 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
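	/*
	 * For illustration (hypothetical values): with max_size = 0x6000
	 * (24K), max_size & -max_size = 0x2000 (8K), so this allocation
	 * shares the 8K-alignment cursor, i.e. ms_lbas[13] given the
	 * 1-based highbit64().
	 */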
1866 	zfs_range_tree_t *rt = msp->ms_allocatable;
1867 	uint_t free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
1868 	uint64_t offset;
1869 
1870 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1871 
1872 	/*
1873 	 * If we're running low on space, find a segment based on size,
1874 	 * rather than iterating based on offset.
1875 	 */
1876 	if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1877 	    free_pct < metaslab_df_free_pct) {
1878 		align = size & -size;
1879 		cursor = &msp->ms_lbas[highbit64(align) - 1];
1880 		offset = -1;
1881 	} else {
1882 		offset = metaslab_block_picker(rt, cursor, size, max_size,
1883 		    metaslab_df_max_search, found_size);
1884 		if (max_size != size && offset == -1) {
1885 			align = size & -size;
1886 			cursor = &msp->ms_lbas[highbit64(align) - 1];
1887 			offset = metaslab_block_picker(rt, cursor, size,
1888 			    max_size, metaslab_df_max_search, found_size);
1889 		}
1890 	}
1891 
1892 	if (offset == -1) {
1893 		zfs_range_seg_t *rs;
1894 		if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1895 			metaslab_size_tree_full_load(msp->ms_allocatable);
1896 
1897 		if (metaslab_df_use_largest_segment) {
1898 			/* use largest free segment */
1899 			rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1900 		} else {
1901 			zfs_btree_index_t where;
1902 			/* use segment of this size, or next largest */
1903 			rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1904 			    rt, msp->ms_start, size, max_size, &where);
1905 		}
1906 		if (rs != NULL && zfs_rs_get_start(rs, rt) + size <=
1907 		    zfs_rs_get_end(rs, rt)) {
1908 			offset = zfs_rs_get_start(rs, rt);
1909 			*found_size = MIN(zfs_rs_get_end(rs, rt) - offset,
1910 			    max_size);
1911 			*cursor = offset + *found_size;
1912 		}
1913 	}
1914 
1915 	return (offset);
1916 }
1917 
1918 /*
1919  * ==========================================================================
1920  * Cursor fit block allocator -
1921  * Select the largest region in the metaslab, set the cursor to the beginning
1922  * of the range and the cursor_end to the end of the range. As allocations
1923  * are made advance the cursor. Continue allocating from the cursor until
1924  * the range is exhausted and then find a new range.
1925  * ==========================================================================
1926  */
1927 static uint64_t
1928 metaslab_cf_alloc(metaslab_t *msp, uint64_t size, uint64_t max_size,
1929     uint64_t *found_size)
1930 {
1931 	zfs_range_tree_t *rt = msp->ms_allocatable;
1932 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1933 	uint64_t *cursor = &msp->ms_lbas[0];
1934 	uint64_t *cursor_end = &msp->ms_lbas[1];
1935 	uint64_t offset = 0;
1936 
1937 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1938 
1939 	ASSERT3U(*cursor_end, >=, *cursor);
1940 
1941 	if ((*cursor + size) > *cursor_end) {
1942 		zfs_range_seg_t *rs;
1943 
1944 		if (zfs_btree_numnodes(t) == 0)
1945 			metaslab_size_tree_full_load(msp->ms_allocatable);
1946 		rs = zfs_btree_last(t, NULL);
1947 		if (rs == NULL || (zfs_rs_get_end(rs, rt) -
1948 		    zfs_rs_get_start(rs, rt)) < size)
1949 			return (-1ULL);
1950 
1951 		*cursor = zfs_rs_get_start(rs, rt);
1952 		*cursor_end = zfs_rs_get_end(rs, rt);
1953 	}
1954 
1955 	offset = *cursor;
1956 	*found_size = MIN(*cursor_end - offset, max_size);
1957 	*cursor = offset + *found_size;
1958 
1959 	return (offset);
1960 }
1961 
1962 /*
1963  * ==========================================================================
1964  * New dynamic fit allocator -
1965  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1966  * contiguous blocks. If no region is found then just use the largest segment
1967  * that remains.
1968  * ==========================================================================
1969  */
1970 
1971 /*
1972  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1973  * to request from the allocator.
1974  */
1975 uint64_t metaslab_ndf_clump_shift = 4;
1976 
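/*
 * For illustration: with the default metaslab_ndf_clump_shift of 4, a 128K
 * request (for which highbit64() is 18) makes the final fallback in
 * metaslab_ndf_alloc() below search the size-sorted tree for a segment of
 * up to 1 << (18 + 4) = 4M, capped at the largest allocatable segment.
 */
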
1977 static uint64_t
1978 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size, uint64_t max_size,
1979     uint64_t *found_size)
1980 {
1981 	zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1982 	zfs_range_tree_t *rt = msp->ms_allocatable;
1983 	zfs_btree_index_t where;
1984 	zfs_range_seg_t *rs;
1985 	zfs_range_seg_max_t rsearch;
1986 	uint64_t hbit = highbit64(max_size);
1987 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1988 	uint64_t max_possible_size = metaslab_largest_allocatable(msp);
1989 
1990 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1991 
1992 	if (max_possible_size < size)
1993 		return (-1ULL);
1994 
1995 	zfs_rs_set_start(&rsearch, rt, *cursor);
1996 	zfs_rs_set_end(&rsearch, rt, *cursor + max_size);
1997 
1998 	rs = zfs_btree_find(t, &rsearch, &where);
1999 	if (rs == NULL || (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) <
2000 	    max_size) {
2001 		hbit = highbit64(size);
2002 		cursor = &msp->ms_lbas[hbit - 1];
2003 		zfs_rs_set_start(&rsearch, rt, *cursor);
2004 		zfs_rs_set_end(&rsearch, rt, *cursor + size);
2005 
2006 		rs = zfs_btree_find(t, &rsearch, &where);
2007 	}
2008 	if (rs == NULL || (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) <
2009 	    size) {
2010 		t = &msp->ms_allocatable_by_size;
2011 
2012 		zfs_rs_set_start(&rsearch, rt, 0);
2013 		zfs_rs_set_end(&rsearch, rt, MIN(max_possible_size,
2014 		    1ULL << (hbit + metaslab_ndf_clump_shift)));
2015 
2016 		rs = zfs_btree_find(t, &rsearch, &where);
2017 		if (rs == NULL)
2018 			rs = zfs_btree_next(t, &where, &where);
2019 		ASSERT(rs != NULL);
2020 	}
2021 
2022 	if ((zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) >= size) {
2023 		*found_size = MIN(zfs_rs_get_end(rs, rt) -
2024 		    zfs_rs_get_start(rs, rt), max_size);
2025 		*cursor = zfs_rs_get_start(rs, rt) + *found_size;
2026 		return (zfs_rs_get_start(rs, rt));
2027 	}
2028 	return (-1ULL);
2029 }
2030 
2031 /*
2032  * ==========================================================================
2033  * Metaslabs
2034  * ==========================================================================
2035  */
2036 
2037 /*
2038  * Wait for any in-progress metaslab loads to complete.
2039  */
2040 static void
2041 metaslab_load_wait(metaslab_t *msp)
2042 {
2043 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2044 
2045 	while (msp->ms_loading) {
2046 		ASSERT(!msp->ms_loaded);
2047 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
2048 	}
2049 }
2050 
2051 /*
2052  * Wait for any in-progress flushing to complete.
2053  */
2054 static void
2055 metaslab_flush_wait(metaslab_t *msp)
2056 {
2057 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2058 
2059 	while (msp->ms_flushing)
2060 		cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
2061 }
2062 
2063 static unsigned int
2064 metaslab_idx_func(multilist_t *ml, void *arg)
2065 {
2066 	metaslab_t *msp = arg;
2067 
2068 	/*
2069 	 * ms_id values are allocated sequentially, so a full 64-bit
2070 	 * division would be a waste of time; limit it to 32 bits.
2071 	 */
2072 	return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
2073 }
2074 
2075 uint64_t
2076 metaslab_allocated_space(metaslab_t *msp)
2077 {
2078 	return (msp->ms_allocated_space);
2079 }
2080 
2081 /*
2082  * Verify that the space accounting on disk matches the in-core range_trees.
2083  */
2084 static void
2085 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
2086 {
2087 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2088 	uint64_t allocating = 0;
2089 	uint64_t sm_free_space, msp_free_space;
2090 
2091 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2092 	ASSERT(!msp->ms_condensing);
2093 
2094 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2095 		return;
2096 
2097 	/*
2098 	 * We can only verify the metaslab space when we're called
2099 	 * from syncing context with a loaded metaslab that has an
2100 	 * allocated space map. Calling this in non-syncing context
2101 	 * does not provide a consistent view of the metaslab since
2102 	 * we're performing allocations in the future.
2103 	 */
2104 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
2105 	    !msp->ms_loaded)
2106 		return;
2107 
2108 	/*
2109 	 * Even though the smp_alloc field can go negative in general,
2110 	 * when it comes to a metaslab's space map, that should never
2111 	 * be the case.
2112 	 */
2113 	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
2114 
2115 	ASSERT3U(space_map_allocated(msp->ms_sm), >=,
2116 	    zfs_range_tree_space(msp->ms_unflushed_frees));
2117 
2118 	ASSERT3U(metaslab_allocated_space(msp), ==,
2119 	    space_map_allocated(msp->ms_sm) +
2120 	    zfs_range_tree_space(msp->ms_unflushed_allocs) -
2121 	    zfs_range_tree_space(msp->ms_unflushed_frees));
2122 
2123 	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
2124 
2125 	/*
2126 	 * Account for future allocations since we would have
2127 	 * already deducted that space from the ms_allocatable.
2128 	 */
2129 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
2130 		allocating +=
2131 		    zfs_range_tree_space(msp->ms_allocating[(txg + t) &
2132 		    TXG_MASK]);
2133 	}
2134 	ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
2135 	    msp->ms_allocating_total);
2136 
2137 	ASSERT3U(msp->ms_deferspace, ==,
2138 	    zfs_range_tree_space(msp->ms_defer[0]) +
2139 	    zfs_range_tree_space(msp->ms_defer[1]));
2140 
2141 	msp_free_space = zfs_range_tree_space(msp->ms_allocatable) +
2142 	    allocating + msp->ms_deferspace +
2143 	    zfs_range_tree_space(msp->ms_freed);
2144 
2145 	VERIFY3U(sm_free_space, ==, msp_free_space);
2146 }
2147 
2148 static void
2149 metaslab_aux_histograms_clear(metaslab_t *msp)
2150 {
2151 	/*
2152 	 * Auxiliary histograms are only cleared when resetting them,
2153 	 * which can only happen while the metaslab is loaded.
2154 	 */
2155 	ASSERT(msp->ms_loaded);
2156 
2157 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2158 	for (int t = 0; t < TXG_DEFER_SIZE; t++)
2159 		memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
2160 }
2161 
2162 static void
2163 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
2164     zfs_range_tree_t *rt)
2165 {
2166 	/*
2167 	 * This is modeled after space_map_histogram_add(), so refer to that
2168 	 * function for implementation details. We want this to work like
2169 	 * the space map histogram, and not the range tree histogram, as we
2170 	 * are essentially constructing a delta that will be later subtracted
2171 	 * from the space map histogram.
2172 	 */
2173 	int idx = 0;
2174 	for (int i = shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
2175 		ASSERT3U(i, >=, idx + shift);
2176 		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
2177 
2178 		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
2179 			ASSERT3U(idx + shift, ==, i);
2180 			idx++;
2181 			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
2182 		}
2183 	}
2184 }
2185 
2186 /*
2187  * Called at every sync pass that the metaslab gets synced.
2188  *
2189  * The reason is that we want our auxiliary histograms to be updated
2190  * whenever the metaslab's space map histogram is updated. This way
2191  * we maintain a consistent view of which parts of the metaslab space
2192  * map's histogram are currently not available for allocations (e.g.
2193  * because they are in the defer, freed, and freeing trees).
2194  */
2195 static void
2196 metaslab_aux_histograms_update(metaslab_t *msp)
2197 {
2198 	space_map_t *sm = msp->ms_sm;
2199 	ASSERT(sm != NULL);
2200 
2201 	/*
2202 	 * This is similar to the metaslab's space map histogram updates
2203 	 * that take place in metaslab_sync(). The only difference is that
2204 	 * we only care about segments that haven't made it into the
2205 	 * ms_allocatable tree yet.
2206 	 */
2207 	if (msp->ms_loaded) {
2208 		metaslab_aux_histograms_clear(msp);
2209 
2210 		metaslab_aux_histogram_add(msp->ms_synchist,
2211 		    sm->sm_shift, msp->ms_freed);
2212 
2213 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2214 			metaslab_aux_histogram_add(msp->ms_deferhist[t],
2215 			    sm->sm_shift, msp->ms_defer[t]);
2216 		}
2217 	}
2218 
2219 	metaslab_aux_histogram_add(msp->ms_synchist,
2220 	    sm->sm_shift, msp->ms_freeing);
2221 }
2222 
2223 /*
2224  * Called every time we are done syncing (writing to) the metaslab,
2225  * i.e. at the end of each sync pass.
2226  * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2227  */
2228 static void
2229 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2230 {
2231 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2232 	space_map_t *sm = msp->ms_sm;
2233 
2234 	if (sm == NULL) {
2235 		/*
2236 		 * We came here from metaslab_init() when creating/opening a
2237 		 * pool, looking at a metaslab that hasn't had any allocations
2238 		 * yet.
2239 		 */
2240 		return;
2241 	}
2242 
2243 	/*
2244 	 * This is similar to the actions that we take for the ms_freed
2245 	 * and ms_defer trees in metaslab_sync_done().
2246 	 */
2247 	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2248 	if (defer_allowed) {
2249 		memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2250 		    sizeof (msp->ms_synchist));
2251 	} else {
2252 		memset(msp->ms_deferhist[hist_index], 0,
2253 		    sizeof (msp->ms_deferhist[hist_index]));
2254 	}
2255 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2256 }
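
/*
 * For illustration: with TXG_DEFER_SIZE of 2, hist_index above simply
 * alternates between 0 and 1 from one syncing txg to the next, mirroring
 * how freed ranges rotate through the two ms_defer trees.
 */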
2257 
2258 /*
2259  * Ensure that the metaslab's weight and fragmentation are consistent
2260  * with the contents of the histogram (either the range tree's histogram
2261  * or the space map's depending whether the metaslab is loaded).
2262  */
2263 static void
2264 metaslab_verify_weight_and_frag(metaslab_t *msp)
2265 {
2266 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2267 
2268 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2269 		return;
2270 
2271 	/*
2272 	 * We can end up here from vdev_remove_complete(), in which case we
2273 	 * cannot do these assertions because we hold spa config locks and
2274 	 * thus we are not allowed to read from the DMU.
2275 	 *
2276 	 * We check if the metaslab group has been removed and if that's
2277 	 * the case we return immediately as that would mean that we are
2278 	 * here from the aforementioned code path.
2279 	 */
2280 	if (msp->ms_group == NULL)
2281 		return;
2282 
2283 	/*
2284 	 * Devices being removed always return a weight of 0 and leave
2285 	 * fragmentation and ms_max_size as is - there is nothing for
2286 	 * us to verify here.
2287 	 */
2288 	vdev_t *vd = msp->ms_group->mg_vd;
2289 	if (vd->vdev_removing)
2290 		return;
2291 
2292 	/*
2293 	 * If the metaslab is dirty it probably means that we've done
2294 	 * some allocations or frees that have changed our histograms
2295 	 * and thus the weight.
2296 	 */
2297 	for (int t = 0; t < TXG_SIZE; t++) {
2298 		if (txg_list_member(&vd->vdev_ms_list, msp, t))
2299 			return;
2300 	}
2301 
2302 	/*
2303 	 * This verification checks that our in-memory state is consistent
2304 	 * with what's on disk. If the pool is read-only then there aren't
2305 	 * any changes and we just have the initially-loaded state.
2306 	 */
2307 	if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2308 		return;
2309 
2310 	/* Do some extra verification on the in-core tree when possible. */
2311 	if (msp->ms_loaded) {
2312 		zfs_range_tree_stat_verify(msp->ms_allocatable);
2313 		VERIFY(space_map_histogram_verify(msp->ms_sm,
2314 		    msp->ms_allocatable));
2315 	}
2316 
2317 	uint64_t weight = msp->ms_weight;
2318 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2319 	boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2320 	uint64_t frag = msp->ms_fragmentation;
2321 	uint64_t max_segsize = msp->ms_max_size;
2322 
2323 	msp->ms_weight = 0;
2324 	msp->ms_fragmentation = 0;
2325 
2326 	/*
2327 	 * This function is used for verification purposes and thus should
2328 	 * not introduce any side-effects/mutations on the system's state.
2329 	 *
2330 	 * Regardless of whether metaslab_weight() thinks this metaslab
2331 	 * should be active or not, we want to ensure that the actual weight
2332 	 * (and therefore the value of ms_weight) would be the same if it
2333 	 * was to be recalculated at this point.
2334 	 *
2335 	 * In addition we set the nodirty flag so metaslab_weight() does
2336 	 * not dirty the metaslab for future TXGs (e.g. when trying to
2337 	 * force condensing to upgrade the metaslab spacemaps).
2338 	 */
2339 	msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2340 
2341 	VERIFY3U(max_segsize, ==, msp->ms_max_size);
2342 
2343 	/*
2344 	 * If the weight type changed then there is no point in doing
2345 	 * verification. Revert fields to their original values.
2346 	 */
2347 	if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2348 	    (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2349 		msp->ms_fragmentation = frag;
2350 		msp->ms_weight = weight;
2351 		return;
2352 	}
2353 
2354 	VERIFY3U(msp->ms_fragmentation, ==, frag);
2355 	VERIFY3U(msp->ms_weight, ==, weight);
2356 }
2357 
2358 /*
2359  * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2360  * this class that was used longest ago, and attempt to unload it.  To avoid
2361  * degrading performance we don't want to spend too much time in this loop,
2362  * and we expect that most of the time this operation will
2363  * succeed. Between that and the normal unloading processing during txg sync,
2364  * we expect this to keep the metaslab memory usage under control.
2365  */
2366 static void
2367 metaslab_potentially_evict(metaslab_class_t *mc)
2368 {
2369 #ifdef _KERNEL
2370 	uint64_t allmem = arc_all_memory();
2371 	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2372 	uint64_t size =	spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2373 	uint_t tries = 0;
2374 	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2375 	    tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2376 	    tries++) {
2377 		unsigned int idx = multilist_get_random_index(
2378 		    &mc->mc_metaslab_txg_list);
2379 		multilist_sublist_t *mls =
2380 		    multilist_sublist_lock_idx(&mc->mc_metaslab_txg_list, idx);
2381 		metaslab_t *msp = multilist_sublist_head(mls);
2382 		multilist_sublist_unlock(mls);
2383 		while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2384 		    inuse * size) {
2385 			VERIFY3P(mls, ==, multilist_sublist_lock_idx(
2386 			    &mc->mc_metaslab_txg_list, idx));
2387 			ASSERT3U(idx, ==,
2388 			    metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2389 
2390 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
2391 				multilist_sublist_unlock(mls);
2392 				break;
2393 			}
2394 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2395 			multilist_sublist_unlock(mls);
2396 			/*
2397 			 * If the metaslab is currently loading there are two
2398 			 * cases. If it's the metaslab we're evicting, we
2399 			 * can't continue on or we'll panic when we attempt to
2400 			 * recursively lock the mutex. If it's another
2401 			 * metaslab that's loading, it can be safely skipped,
2402 			 * since we know it's very new and therefore not a
2403 			 * good eviction candidate. We check later once the
2404 			 * lock is held that the metaslab is fully loaded
2405 			 * before actually unloading it.
2406 			 */
2407 			if (msp->ms_loading) {
2408 				msp = next_msp;
2409 				inuse =
2410 				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2411 				continue;
2412 			}
2413 			/*
2414 			 * We can't unload metaslabs with no spacemap because
2415 			 * they're not ready to be unloaded yet. We can't
2416 			 * unload metaslabs with outstanding allocations
2417 			 * because doing so could cause the metaslab's weight
2418 			 * to decrease while it's unloaded, which violates an
2419 			 * invariant that we use to prevent unnecessary
2420 			 * loading. We also don't unload metaslabs that are
2421 			 * currently active because they are high-weight
2422 			 * metaslabs that are likely to be used in the near
2423 			 * future.
2424 			 */
2425 			mutex_enter(&msp->ms_lock);
2426 			if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2427 			    msp->ms_allocating_total == 0) {
2428 				metaslab_unload(msp);
2429 			}
2430 			mutex_exit(&msp->ms_lock);
2431 			msp = next_msp;
2432 			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2433 		}
2434 	}
2435 #else
2436 	(void) mc, (void) zfs_metaslab_mem_limit;
2437 #endif
2438 }
2439 
2440 static int
2441 metaslab_load_impl(metaslab_t *msp)
2442 {
2443 	int error = 0;
2444 
2445 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2446 	ASSERT(msp->ms_loading);
2447 	ASSERT(!msp->ms_condensing);
2448 
2449 	/*
2450 	 * We temporarily drop the lock to unblock other operations while we
2451 	 * are reading the space map. Therefore, metaslab_sync() and
2452 	 * metaslab_sync_done() can run at the same time as we do.
2453 	 *
2454 	 * If we are using the log space maps, metaslab_sync() can't write to
2455 	 * the metaslab's space map while we are loading as we only write to
2456 	 * it when we are flushing the metaslab, and that can't happen while
2457 	 * we are loading it.
2458 	 *
2459 	 * If we are not using log space maps though, metaslab_sync() can
2460 	 * append to the space map while we are loading. Therefore we load
2461 	 * only entries that existed when we started the load. Additionally,
2462 	 * metaslab_sync_done() has to wait for the load to complete because
2463 	 * there are potential races like metaslab_load() loading parts of the
2464 	 * space map that are currently being appended by metaslab_sync(). If
2465 	 * we didn't, the ms_allocatable would have entries that
2466 	 * metaslab_sync_done() would try to re-add later.
2467 	 *
2468 	 * That's why before dropping the lock we remember the synced length
2469 	 * of the metaslab and read up to that point of the space map,
2470 	 * ignoring entries appended by metaslab_sync() that happen after we
2471 	 * drop the lock.
2472 	 */
2473 	uint64_t length = msp->ms_synced_length;
2474 	mutex_exit(&msp->ms_lock);
2475 
2476 	hrtime_t load_start = gethrtime();
2477 	metaslab_rt_arg_t *mrap;
2478 	if (msp->ms_allocatable->rt_arg == NULL) {
2479 		mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2480 	} else {
2481 		mrap = msp->ms_allocatable->rt_arg;
2482 		msp->ms_allocatable->rt_ops = NULL;
2483 		msp->ms_allocatable->rt_arg = NULL;
2484 	}
2485 	mrap->mra_bt = &msp->ms_allocatable_by_size;
2486 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2487 
2488 	if (msp->ms_sm != NULL) {
2489 		error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2490 		    SM_FREE, length);
2491 
2492 		/* Now, populate the size-sorted tree. */
2493 		metaslab_rt_create(msp->ms_allocatable, mrap);
2494 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2495 		msp->ms_allocatable->rt_arg = mrap;
2496 
2497 		struct mssa_arg arg = {0};
2498 		arg.rt = msp->ms_allocatable;
2499 		arg.mra = mrap;
2500 		zfs_range_tree_walk(msp->ms_allocatable,
2501 		    metaslab_size_sorted_add, &arg);
2502 	} else {
2503 		/*
2504 		 * Add the size-sorted tree first, since we don't need to load
2505 		 * the metaslab from the spacemap.
2506 		 */
2507 		metaslab_rt_create(msp->ms_allocatable, mrap);
2508 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2509 		msp->ms_allocatable->rt_arg = mrap;
2510 		/*
2511 		 * The space map has not been allocated yet, so treat
2512 		 * all the space in the metaslab as free and add it to the
2513 		 * ms_allocatable tree.
2514 		 */
2515 		zfs_range_tree_add(msp->ms_allocatable,
2516 		    msp->ms_start, msp->ms_size);
2517 
2518 		if (msp->ms_new) {
2519 			/*
2520 			 * If the ms_sm doesn't exist, this means that this
2521 			 * metaslab hasn't gone through metaslab_sync() and
2522 			 * thus has never been dirtied. So we shouldn't
2523 			 * expect any unflushed allocs or frees from previous
2524 			 * TXGs.
2525 			 */
2526 			ASSERT(zfs_range_tree_is_empty(
2527 			    msp->ms_unflushed_allocs));
2528 			ASSERT(zfs_range_tree_is_empty(
2529 			    msp->ms_unflushed_frees));
2530 		}
2531 	}
2532 
2533 	/*
2534 	 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2535 	 * changing the ms_sm (or log_sm) and the metaslab's range trees
2536 	 * while we are about to use them and populate the ms_allocatable.
2537 	 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2538 	 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2539 	 */
2540 	mutex_enter(&msp->ms_sync_lock);
2541 	mutex_enter(&msp->ms_lock);
2542 
2543 	ASSERT(!msp->ms_condensing);
2544 	ASSERT(!msp->ms_flushing);
2545 
2546 	if (error != 0) {
2547 		mutex_exit(&msp->ms_sync_lock);
2548 		return (error);
2549 	}
2550 
2551 	ASSERT3P(msp->ms_group, !=, NULL);
2552 	msp->ms_loaded = B_TRUE;
2553 
2554 	/*
2555 	 * Apply all the unflushed changes to ms_allocatable right
2556 	 * away so any manipulations we do below have a clear view
2557 	 * of what is allocated and what is free.
2558 	 */
2559 	zfs_range_tree_walk(msp->ms_unflushed_allocs,
2560 	    zfs_range_tree_remove, msp->ms_allocatable);
2561 	zfs_range_tree_walk(msp->ms_unflushed_frees,
2562 	    zfs_range_tree_add, msp->ms_allocatable);
2563 
2564 	ASSERT3P(msp->ms_group, !=, NULL);
2565 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2566 	if (spa_syncing_log_sm(spa) != NULL) {
2567 		ASSERT(spa_feature_is_enabled(spa,
2568 		    SPA_FEATURE_LOG_SPACEMAP));
2569 
2570 		/*
2571 		 * If we use a log space map we add all the segments
2572 		 * that are in ms_unflushed_frees so they are available
2573 		 * for allocation.
2574 		 *
2575 		 * ms_allocatable needs to contain all free segments
2576 		 * that are ready for allocations (thus not segments
2577 		 * from ms_freeing, ms_freed, and the ms_defer trees).
2578 		 * But if we grab the lock in this code path at a sync
2579 		 * pass later than 1, then it also contains the
2580 		 * segments of ms_freed (they were added to it earlier
2581 		 * in this path through ms_unflushed_frees). So we
2582 		 * need to remove all the segments that exist in
2583 		 * ms_freed from ms_allocatable as they will be added
2584 		 * later in metaslab_sync_done().
2585 		 *
2586 		 * When there's no log space map, the ms_allocatable
2587 		 * correctly doesn't contain any segments that exist
2588 		 * in ms_freed [see ms_synced_length].
2589 		 */
2590 		zfs_range_tree_walk(msp->ms_freed,
2591 		    zfs_range_tree_remove, msp->ms_allocatable);
2592 	}
2593 
2594 	/*
2595 	 * If we are not using the log space map, ms_allocatable
2596 	 * contains the segments that exist in the ms_defer trees
2597 	 * [see ms_synced_length]. Thus we need to remove them
2598 	 * from ms_allocatable as they will be added again in
2599 	 * metaslab_sync_done().
2600 	 *
2601 	 * If we are using the log space map, ms_allocatable still
2602 	 * contains the segments that exist in the ms_defer trees.
2603 	 * This is not because it read them through the ms_sm, but
2604 	 * because these segments are part of ms_unflushed_frees,
2605 	 * which we added to ms_allocatable earlier in this
2606 	 * code path.
2607 	 */
2608 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2609 		zfs_range_tree_walk(msp->ms_defer[t],
2610 		    zfs_range_tree_remove, msp->ms_allocatable);
2611 	}
2612 
2613 	/*
2614 	 * Call metaslab_recalculate_weight_and_sort() now that the
2615 	 * metaslab is loaded so we get the metaslab's real weight.
2616 	 *
2617 	 * Unless this metaslab was created with older software and
2618 	 * has not yet been converted to use segment-based weight, we
2619 	 * expect the new weight to be better or equal to the weight
2620 	 * that the metaslab had while it was not loaded. This is
2621 	 * because the old weight does not take into account the
2622 	 * consolidation of adjacent segments between TXGs. [see
2623 	 * comment for ms_synchist and ms_deferhist[] for more info]
2624 	 */
2625 	uint64_t weight = msp->ms_weight;
2626 	uint64_t max_size = msp->ms_max_size;
2627 	metaslab_recalculate_weight_and_sort(msp);
2628 	if (!WEIGHT_IS_SPACEBASED(weight))
2629 		ASSERT3U(weight, <=, msp->ms_weight);
2630 	msp->ms_max_size = metaslab_largest_allocatable(msp);
2631 	ASSERT3U(max_size, <=, msp->ms_max_size);
2632 	hrtime_t load_end = gethrtime();
2633 	msp->ms_load_time = load_end;
2634 	zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2635 	    "ms_id %llu, smp_length %llu, "
2636 	    "unflushed_allocs %llu, unflushed_frees %llu, "
2637 	    "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2638 	    "loading_time %lld ms, ms_max_size %llu, "
2639 	    "max size error %lld, "
2640 	    "old_weight %llx, new_weight %llx",
2641 	    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2642 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2643 	    (u_longlong_t)msp->ms_id,
2644 	    (u_longlong_t)space_map_length(msp->ms_sm),
2645 	    (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_allocs),
2646 	    (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_frees),
2647 	    (u_longlong_t)zfs_range_tree_space(msp->ms_freed),
2648 	    (u_longlong_t)zfs_range_tree_space(msp->ms_defer[0]),
2649 	    (u_longlong_t)zfs_range_tree_space(msp->ms_defer[1]),
2650 	    (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2651 	    (longlong_t)((load_end - load_start) / 1000000),
2652 	    (u_longlong_t)msp->ms_max_size,
2653 	    (u_longlong_t)msp->ms_max_size - max_size,
2654 	    (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2655 
2656 	metaslab_verify_space(msp, spa_syncing_txg(spa));
2657 	mutex_exit(&msp->ms_sync_lock);
2658 	return (0);
2659 }
2660 
2661 int
2662 metaslab_load(metaslab_t *msp)
2663 {
2664 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2665 
2666 	/*
2667 	 * There may be another thread loading the same metaslab, if that's
2668 	 * the case just wait until the other thread is done and return.
2669 	 */
2670 	metaslab_load_wait(msp);
2671 	if (msp->ms_loaded)
2672 		return (0);
2673 	VERIFY(!msp->ms_loading);
2674 	ASSERT(!msp->ms_condensing);
2675 
2676 	/*
2677 	 * We set the loading flag BEFORE potentially dropping the lock to
2678 	 * wait for an ongoing flush (see ms_flushing below). This way other
2679 	 * threads know that there is already a thread that is loading this
2680 	 * metaslab.
2681 	 */
2682 	msp->ms_loading = B_TRUE;
2683 
2684 	/*
2685 	 * Wait for any in-progress flushing to finish as we drop the ms_lock
2686 	 * both here (during space_map_load()) and in metaslab_flush() (when
2687 	 * we flush our changes to the ms_sm).
2688 	 */
2689 	if (msp->ms_flushing)
2690 		metaslab_flush_wait(msp);
2691 
2692 	/*
2693 	 * In case we were waiting for the metaslab to be
2694 	 * flushed (where we temporarily dropped the ms_lock), ensure that
2695 	 * no one else loaded the metaslab somehow.
2696 	 */
2697 	ASSERT(!msp->ms_loaded);
2698 
2699 	/*
2700 	 * If we're loading a metaslab in the normal class, consider evicting
2701 	 * another one to keep our memory usage under the limit defined by the
2702 	 * zfs_metaslab_mem_limit tunable.
2703 	 */
2704 	if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2705 	    msp->ms_group->mg_class) {
2706 		metaslab_potentially_evict(msp->ms_group->mg_class);
2707 	}
2708 
2709 	int error = metaslab_load_impl(msp);
2710 
2711 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2712 	msp->ms_loading = B_FALSE;
2713 	cv_broadcast(&msp->ms_load_cv);
2714 
2715 	return (error);
2716 }
2717 
2718 void
2719 metaslab_unload(metaslab_t *msp)
2720 {
2721 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2722 
2723 	/*
2724 	 * This can happen if a metaslab is selected for eviction (in
2725 	 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2726 	 * metaslab_class_evict_old).
2727 	 */
2728 	if (!msp->ms_loaded)
2729 		return;
2730 
2731 	zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2732 	msp->ms_loaded = B_FALSE;
2733 	msp->ms_unload_time = gethrtime();
2734 
2735 	msp->ms_activation_weight = 0;
2736 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2737 
2738 	if (msp->ms_group != NULL) {
2739 		metaslab_class_t *mc = msp->ms_group->mg_class;
2740 		multilist_sublist_t *mls =
2741 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2742 		if (multilist_link_active(&msp->ms_class_txg_node))
2743 			multilist_sublist_remove(mls, msp);
2744 		multilist_sublist_unlock(mls);
2745 
2746 		spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2747 		zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2748 		    "ms_id %llu, weight %llx, "
2749 		    "selected txg %llu (%llu s ago), alloc_txg %llu, "
2750 		    "loaded %llu ms ago, max_size %llu",
2751 		    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2752 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2753 		    (u_longlong_t)msp->ms_id,
2754 		    (u_longlong_t)msp->ms_weight,
2755 		    (u_longlong_t)msp->ms_selected_txg,
2756 		    (u_longlong_t)(NSEC2SEC(msp->ms_unload_time) -
2757 		    msp->ms_selected_time),
2758 		    (u_longlong_t)msp->ms_alloc_txg,
2759 		    (u_longlong_t)(msp->ms_unload_time -
2760 		    msp->ms_load_time) / 1000 / 1000,
2761 		    (u_longlong_t)msp->ms_max_size);
2762 	}
2763 
2764 	/*
2765 	 * We explicitly recalculate the metaslab's weight based on its space
2766 	 * map (as it is now not loaded). We want unloaded metaslabs to always
2767 	 * have their weights calculated from the space map histograms, while
2768 	 * loaded ones have it calculated from their in-core range tree
2769 	 * [see metaslab_load()]. This way, the weight reflects the information
2770 	 * available in-core, whether it is loaded or not.
2771 	 *
2772 	 * If ms_group == NULL, it means that we came here from metaslab_fini(),
2773 	 * at which point it doesn't make sense for us to do the recalculation
2774 	 * and the sorting.
2775 	 */
2776 	if (msp->ms_group != NULL)
2777 		metaslab_recalculate_weight_and_sort(msp);
2778 }
2779 
2780 /*
2781  * We want to optimize the memory use of the per-metaslab range
2782  * trees. To do this, we store the segments in the range trees in
2783  * units of sectors, zero-indexing from the start of the metaslab. If
2784  * vdev_ms_shift - vdev_ashift is less than 32, we can store
2785  * the ranges using two uint32_ts, rather than two uint64_ts.
2786  */
2787 zfs_range_seg_type_t
2788 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2789     uint64_t *start, uint64_t *shift)
2790 {
2791 	if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2792 	    !zfs_metaslab_force_large_segs) {
2793 		*shift = vdev->vdev_ashift;
2794 		*start = msp->ms_start;
2795 		return (ZFS_RANGE_SEG32);
2796 	} else {
2797 		*shift = 0;
2798 		*start = 0;
2799 		return (ZFS_RANGE_SEG64);
2800 	}
2801 }
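
/*
 * For example, a 16G metaslab (vdev_ms_shift of 34) on an ashift=12 vdev
 * gives 34 - 12 = 22, so offsets within the metaslab fit in 22 bits worth
 * of 4K sectors and each segment can be stored as a pair of uint32_ts
 * (ZFS_RANGE_SEG32); otherwise two uint64_ts are used (ZFS_RANGE_SEG64).
 */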
2802 
2803 void
2804 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2805 {
2806 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2807 	metaslab_class_t *mc = msp->ms_group->mg_class;
2808 	multilist_sublist_t *mls =
2809 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2810 	if (multilist_link_active(&msp->ms_class_txg_node))
2811 		multilist_sublist_remove(mls, msp);
2812 	msp->ms_selected_txg = txg;
2813 	msp->ms_selected_time = gethrestime_sec();
2814 	multilist_sublist_insert_tail(mls, msp);
2815 	multilist_sublist_unlock(mls);
2816 }
2817 
2818 void
2819 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2820     int64_t defer_delta, int64_t space_delta)
2821 {
2822 	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2823 
2824 	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2825 	ASSERT(vd->vdev_ms_count != 0);
2826 
2827 	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2828 	    vdev_deflated_space(vd, space_delta));
2829 }
2830 
2831 int
2832 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2833     uint64_t txg, metaslab_t **msp)
2834 {
2835 	vdev_t *vd = mg->mg_vd;
2836 	spa_t *spa = vd->vdev_spa;
2837 	objset_t *mos = spa->spa_meta_objset;
2838 	metaslab_t *ms;
2839 	int error;
2840 
2841 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2842 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2843 	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2844 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2845 	cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2846 	multilist_link_init(&ms->ms_class_txg_node);
2847 
2848 	ms->ms_id = id;
2849 	ms->ms_start = id << vd->vdev_ms_shift;
2850 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
2851 	ms->ms_allocator = -1;
2852 	ms->ms_new = B_TRUE;
2853 
2854 	vdev_ops_t *ops = vd->vdev_ops;
2855 	if (ops->vdev_op_metaslab_init != NULL)
2856 		ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2857 
2858 	/*
2859 	 * We only open space map objects that already exist. All others
2860 	 * will be opened when we finally allocate an object for them. For
2861 	 * readonly pools there is no need to open the space map object.
2862 	 *
2863 	 * Note:
2864 	 * When called from vdev_expand(), we can't call into the DMU as
2865 	 * we are holding the spa_config_lock as a writer and we would
2866 	 * deadlock [see relevant comment in vdev_metaslab_init()]. in
2867 	 * that case, the object parameter is zero though, so we won't
2868 	 * call into the DMU.
2869 	 */
2870 	if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
2871 	    !spa->spa_read_spacemaps)) {
2872 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2873 		    ms->ms_size, vd->vdev_ashift);
2874 
2875 		if (error != 0) {
2876 			kmem_free(ms, sizeof (metaslab_t));
2877 			return (error);
2878 		}
2879 
2880 		ASSERT(ms->ms_sm != NULL);
2881 		ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2882 	}
2883 
2884 	uint64_t shift, start;
2885 	zfs_range_seg_type_t type =
2886 	    metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2887 
2888 	ms->ms_allocatable = zfs_range_tree_create(NULL, type, NULL, start,
2889 	    shift);
2890 	for (int t = 0; t < TXG_SIZE; t++) {
2891 		ms->ms_allocating[t] = zfs_range_tree_create(NULL, type,
2892 		    NULL, start, shift);
2893 	}
2894 	ms->ms_freeing = zfs_range_tree_create(NULL, type, NULL, start, shift);
2895 	ms->ms_freed = zfs_range_tree_create(NULL, type, NULL, start, shift);
2896 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2897 		ms->ms_defer[t] = zfs_range_tree_create(NULL, type, NULL,
2898 		    start, shift);
2899 	}
2900 	ms->ms_checkpointing =
2901 	    zfs_range_tree_create(NULL, type, NULL, start, shift);
2902 	ms->ms_unflushed_allocs =
2903 	    zfs_range_tree_create(NULL, type, NULL, start, shift);
2904 
2905 	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2906 	mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2907 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2908 	ms->ms_unflushed_frees = zfs_range_tree_create(&metaslab_rt_ops,
2909 	    type, mrap, start, shift);
2910 
2911 	ms->ms_trim = zfs_range_tree_create(NULL, type, NULL, start, shift);
2912 
2913 	metaslab_group_add(mg, ms);
2914 	metaslab_set_fragmentation(ms, B_FALSE);
2915 
2916 	/*
2917 	 * If we're opening an existing pool (txg == 0) or creating
2918 	 * a new one (txg == TXG_INITIAL), all space is available now.
2919 	 * If we're adding space to an existing pool, the new space
2920 	 * does not become available until after this txg has synced.
2921 	 * The metaslab's weight will also be initialized when we sync
2922 	 * out this txg. This ensures that we don't attempt to allocate
2923 	 * from it before we have initialized it completely.
2924 	 */
2925 	if (txg <= TXG_INITIAL) {
2926 		metaslab_sync_done(ms, 0);
2927 		metaslab_space_update(vd, mg->mg_class,
2928 		    metaslab_allocated_space(ms), 0, 0);
2929 	}
2930 
2931 	if (txg != 0) {
2932 		vdev_dirty(vd, 0, NULL, txg);
2933 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
2934 	}
2935 
2936 	*msp = ms;
2937 
2938 	return (0);
2939 }
2940 
2941 static void
2942 metaslab_fini_flush_data(metaslab_t *msp)
2943 {
2944 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2945 
2946 	if (metaslab_unflushed_txg(msp) == 0) {
2947 		ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2948 		    ==, NULL);
2949 		return;
2950 	}
2951 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2952 
2953 	mutex_enter(&spa->spa_flushed_ms_lock);
2954 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2955 	mutex_exit(&spa->spa_flushed_ms_lock);
2956 
2957 	spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2958 	spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2959 	    metaslab_unflushed_dirty(msp));
2960 }
2961 
2962 uint64_t
2963 metaslab_unflushed_changes_memused(metaslab_t *ms)
2964 {
2965 	return ((zfs_range_tree_numsegs(ms->ms_unflushed_allocs) +
2966 	    zfs_range_tree_numsegs(ms->ms_unflushed_frees)) *
2967 	    ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2968 }
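
/*
 * For illustration: with 32-bit range segments (8 bytes per B-tree element),
 * a metaslab carrying 10,000 unflushed allocs and frees in total accounts
 * for roughly 80K of the spa_unflushed_stats.sus_memused charge.
 */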
2969 
2970 void
2971 metaslab_fini(metaslab_t *msp)
2972 {
2973 	metaslab_group_t *mg = msp->ms_group;
2974 	vdev_t *vd = mg->mg_vd;
2975 	spa_t *spa = vd->vdev_spa;
2976 
2977 	metaslab_fini_flush_data(msp);
2978 
2979 	metaslab_group_remove(mg, msp);
2980 
2981 	mutex_enter(&msp->ms_lock);
2982 	VERIFY(msp->ms_group == NULL);
2983 
2984 	/*
2985 	 * If this metaslab hasn't been through metaslab_sync_done() yet, its
2986 	 * space hasn't been accounted for in its vdev and doesn't need to be
2987 	 * subtracted.
2988 	 */
2989 	if (!msp->ms_new) {
2990 		metaslab_space_update(vd, mg->mg_class,
2991 		    -metaslab_allocated_space(msp), 0, -msp->ms_size);
2992 
2993 	}
2994 	space_map_close(msp->ms_sm);
2995 	msp->ms_sm = NULL;
2996 
2997 	metaslab_unload(msp);
2998 
2999 	zfs_range_tree_destroy(msp->ms_allocatable);
3000 	zfs_range_tree_destroy(msp->ms_freeing);
3001 	zfs_range_tree_destroy(msp->ms_freed);
3002 
3003 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3004 	    metaslab_unflushed_changes_memused(msp));
3005 	spa->spa_unflushed_stats.sus_memused -=
3006 	    metaslab_unflushed_changes_memused(msp);
3007 	zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3008 	zfs_range_tree_destroy(msp->ms_unflushed_allocs);
3009 	zfs_range_tree_destroy(msp->ms_checkpointing);
3010 	zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3011 	zfs_range_tree_destroy(msp->ms_unflushed_frees);
3012 
3013 	for (int t = 0; t < TXG_SIZE; t++) {
3014 		zfs_range_tree_destroy(msp->ms_allocating[t]);
3015 	}
3016 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3017 		zfs_range_tree_destroy(msp->ms_defer[t]);
3018 	}
3019 	ASSERT0(msp->ms_deferspace);
3020 
3021 	for (int t = 0; t < TXG_SIZE; t++)
3022 		ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
3023 
3024 	zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
3025 	zfs_range_tree_destroy(msp->ms_trim);
3026 
3027 	mutex_exit(&msp->ms_lock);
3028 	cv_destroy(&msp->ms_load_cv);
3029 	cv_destroy(&msp->ms_flush_cv);
3030 	mutex_destroy(&msp->ms_lock);
3031 	mutex_destroy(&msp->ms_sync_lock);
3032 	ASSERT3U(msp->ms_allocator, ==, -1);
3033 
3034 	kmem_free(msp, sizeof (metaslab_t));
3035 }
3036 
3037 /*
3038  * This table defines a segment size based fragmentation metric that will
3039  * allow each metaslab to derive its own fragmentation value. This is done
3040  * by calculating the space in each bucket of the spacemap histogram and
3041  * multiplying that by the fragmentation metric in this table. Doing
3042  * this for all buckets and dividing it by the total amount of free
3043  * space in this metaslab (i.e. the total free space in all buckets) gives
3044  * us the fragmentation metric. This means that a high fragmentation metric
3045  * equates to most of the free space being comprised of small segments.
3046  * Conversely, if the metric is low, then most of the free space is in
3047  * large segments.
3048  *
3049  * This table defines 0% fragmented space using 512M segments. Using this value,
3050  * we derive the rest of the table. This table originally went up to 16MB, but
3051  * with larger recordsizes, larger ashifts, and use of raidz3, it is possible
3052  * to have significantly larger allocations than were previously possible.
3053  * Since the fragmentation value is never stored on disk, it is possible to
3054  * change these calculations in the future.
3055  */
3056 static const int zfs_frag_table[] = {
3057 	100,	/* 512B	*/
3058 	99,	/* 1K	*/
3059 	97,	/* 2K	*/
3060 	93,	/* 4K	*/
3061 	88,	/* 8K	*/
3062 	83,	/* 16K	*/
3063 	77,	/* 32K	*/
3064 	71,	/* 64K	*/
3065 	64,	/* 128K	*/
3066 	57,	/* 256K	*/
3067 	50,	/* 512K	*/
3068 	43,	/* 1M	*/
3069 	36,	/* 2M	*/
3070 	29,	/* 4M	*/
3071 	23,	/* 8M	*/
3072 	17,	/* 16M	*/
3073 	12,	/* 32M	*/
3074 	7,	/* 64M	*/
3075 	3,	/* 128M	*/
3076 	1,	/* 256M	*/
3077 	0,	/* 512M	*/
3078 };
3079 #define	FRAGMENTATION_TABLE_SIZE \
3080 	(sizeof (zfs_frag_table)/(sizeof (zfs_frag_table[0])))
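
/*
 * For illustration: if a metaslab's free space were split evenly between
 * 512K segments (table value 50) and 4M segments (table value 29), the
 * space-weighted metric computed below would come out around
 * (50 + 29) / 2, i.e. about 39, which is moderately fragmented.
 */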
3081 
3082 /*
3083  * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
3084  * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
3085  * been upgraded and does not support this metric. Otherwise, the return
3086  * value should be in the range [0, 100].
3087  */
3088 static void
3089 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
3090 {
3091 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3092 	uint64_t fragmentation = 0;
3093 	uint64_t total = 0;
3094 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
3095 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
3096 
3097 	if (!feature_enabled) {
3098 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
3099 		return;
3100 	}
3101 
3102 	/*
3103 	 * A null space map means that the entire metaslab is free
3104 	 * and thus is not fragmented.
3105 	 */
3106 	if (msp->ms_sm == NULL) {
3107 		msp->ms_fragmentation = 0;
3108 		return;
3109 	}
3110 
3111 	/*
3112 	 * If this metaslab's space map has not been upgraded, flag it
3113 	 * so that we upgrade next time we encounter it.
3114 	 */
3115 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
3116 		uint64_t txg = spa_syncing_txg(spa);
3117 		vdev_t *vd = msp->ms_group->mg_vd;
3118 
3119 		/*
3120 		 * If we've reached the final dirty txg, then we must
3121 		 * be shutting down the pool. We don't want to dirty
3122 		 * any data past this point so skip setting the condense
3123 		 * flag. We can retry this action the next time the pool
3124 		 * is imported. We also skip marking this metaslab for
3125 		 * condensing if the caller has explicitly set nodirty.
3126 		 */
3127 		if (!nodirty &&
3128 		    spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
3129 			msp->ms_condense_wanted = B_TRUE;
3130 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
3131 			zfs_dbgmsg("txg %llu, requesting force condense: "
3132 			    "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
3133 			    (u_longlong_t)msp->ms_id,
3134 			    (u_longlong_t)vd->vdev_id);
3135 		}
3136 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
3137 		return;
3138 	}
3139 
3140 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3141 		uint64_t space = 0;
3142 		uint8_t shift = msp->ms_sm->sm_shift;
3143 
3144 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
3145 		    FRAGMENTATION_TABLE_SIZE - 1);
3146 
3147 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
3148 			continue;
3149 
3150 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
3151 		total += space;
3152 
3153 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
3154 		fragmentation += space * zfs_frag_table[idx];
3155 	}
3156 
3157 	if (total > 0)
3158 		fragmentation /= total;
3159 	ASSERT3U(fragmentation, <=, 100);
3160 
3161 	msp->ms_fragmentation = fragmentation;
3162 }
3163 
3164 /*
3165  * Compute a weight -- a selection preference value -- for the given metaslab.
3166  * This is based on the amount of free space, the level of fragmentation,
3167  * the LBA range, and whether the metaslab is loaded.
3168  */
3169 static uint64_t
3170 metaslab_space_weight(metaslab_t *msp)
3171 {
3172 	metaslab_group_t *mg = msp->ms_group;
3173 	vdev_t *vd = mg->mg_vd;
3174 	uint64_t weight, space;
3175 
3176 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3177 
3178 	/*
3179 	 * The baseline weight is the metaslab's free space.
3180 	 */
3181 	space = msp->ms_size - metaslab_allocated_space(msp);
3182 
3183 	if (metaslab_fragmentation_factor_enabled &&
3184 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
3185 		/*
3186 		 * Use the fragmentation information to inversely scale
3187 		 * down the baseline weight. We need to ensure that we
3188 		 * don't exclude this metaslab completely when it's 100%
3189 		 * fragmented. To avoid this we reduce the fragmentation value
3190 		 * by 1.
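		 * For example (illustrative figures): a metaslab that is 50%
		 * fragmented keeps (100 - 49)% = 51% of its baseline weight,
		 * while a 100% fragmented one still keeps 1% rather than 0%.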
3191 		 */
3192 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
3193 
3194 		/*
3195 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
3196 		 * this metaslab again. The fragmentation metric may have
3197 		 * decreased the space to something smaller than
3198 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
3199 		 * so that we can consume any remaining space.
3200 		 */
3201 		if (space > 0 && space < SPA_MINBLOCKSIZE)
3202 			space = SPA_MINBLOCKSIZE;
3203 	}
3204 	weight = space;
3205 
3206 	/*
3207 	 * Modern disks have uniform bit density and constant angular velocity.
3208 	 * Therefore, the outer recording zones are faster (higher bandwidth)
3209 	 * than the inner zones by the ratio of outer to inner track diameter,
3210 	 * which is typically around 2:1.  We account for this by assigning
3211 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3212 	 * In effect, this means that we'll select the metaslab with the most
3213 	 * free bandwidth rather than simply the one with the most free space.
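	 *
	 * As an illustrative example, on a vdev with 200 metaslabs the
	 * expression below weights metaslab 0 at twice its baseline,
	 * metaslab 100 at roughly 1.5x, and the last metaslab just above 1x.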
3214 	 */
3215 	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3216 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3217 		ASSERT(weight >= space && weight <= 2 * space);
3218 	}
3219 
3220 	/*
3221 	 * If this metaslab is one we're actively using, adjust its
3222 	 * weight to make it preferable to any inactive metaslab so
3223 	 * we'll polish it off. If the fragmentation on this metaslab
3224 	 * has exceeded our threshold, then don't mark it active.
3225 	 */
3226 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3227 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3228 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3229 	}
3230 
3231 	WEIGHT_SET_SPACEBASED(weight);
3232 	return (weight);
3233 }
3234 
3235 /*
3236  * Return the weight of the specified metaslab, according to the segment-based
3237  * weighting algorithm. The metaslab must be loaded. This function can
3238  * be called within a sync pass since it relies only on the metaslab's
3239  * range tree which is always accurate when the metaslab is loaded.
3240  */
3241 static uint64_t
3242 metaslab_weight_from_range_tree(metaslab_t *msp)
3243 {
3244 	uint64_t weight = 0;
3245 	uint32_t segments = 0;
3246 
3247 	ASSERT(msp->ms_loaded);
3248 
3249 	for (int i = ZFS_RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3250 	    i--) {
3251 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3252 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3253 
3254 		segments <<= 1;
3255 		segments += msp->ms_allocatable->rt_histogram[i];
3256 
3257 		/*
3258 		 * The range tree provides more precision than the space map
3259 		 * and must be downgraded so that all values fit within the
3260 		 * space map's histogram. This allows us to compare loaded
3261 		 * vs. unloaded metaslabs to determine which metaslab is
3262 		 * considered "best".
3263 		 */
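		 *
		 * For instance, assuming a 32-bucket space map histogram and
		 * an ashift of 12, max_idx would be 43; free segments counted
		 * in higher range tree buckets are folded into bucket 43 by
		 * the doubling of "segments" above, so the weight never
		 * exceeds what the on-disk histogram could express.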
3264 		if (i > max_idx)
3265 			continue;
3266 
3267 		if (segments != 0) {
3268 			WEIGHT_SET_COUNT(weight, segments);
3269 			WEIGHT_SET_INDEX(weight, i);
3270 			WEIGHT_SET_ACTIVE(weight, 0);
3271 			break;
3272 		}
3273 	}
3274 	return (weight);
3275 }
3276 
3277 /*
3278  * Calculate the weight based on the on-disk histogram. Should be applied
3279  * only to unloaded metaslabs  (i.e no incoming allocations) in-order to
3280  * give results consistent with the on-disk state
3281  */
3282 static uint64_t
3283 metaslab_weight_from_spacemap(metaslab_t *msp)
3284 {
3285 	space_map_t *sm = msp->ms_sm;
3286 	ASSERT(!msp->ms_loaded);
3287 	ASSERT(sm != NULL);
3288 	ASSERT3U(space_map_object(sm), !=, 0);
3289 	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3290 
3291 	/*
3292 	 * Create a joint histogram from all the segments that have made
3293 	 * it to the metaslab's space map histogram but are not yet
3294 	 * available for allocation because they are still in the freeing
3295 	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3296 	 * these segments from the space map's histogram to get a more
3297 	 * accurate weight.
3298 	 */
3299 	uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3300 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3301 		deferspace_histogram[i] += msp->ms_synchist[i];
3302 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3303 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3304 			deferspace_histogram[i] += msp->ms_deferhist[t][i];
3305 		}
3306 	}
3307 
3308 	uint64_t weight = 0;
3309 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3310 		ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3311 		    deferspace_histogram[i]);
3312 		uint64_t count =
3313 		    sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3314 		if (count != 0) {
3315 			WEIGHT_SET_COUNT(weight, count);
3316 			WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3317 			WEIGHT_SET_ACTIVE(weight, 0);
3318 			break;
3319 		}
3320 	}
3321 	return (weight);
3322 }
3323 
3324 /*
3325  * Compute a segment-based weight for the specified metaslab. The weight
3326  * is determined by the highest bucket in the histogram. The information
3327  * for the highest bucket is encoded into the weight value.
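 *
 * For example (hypothetical numbers): a metaslab whose largest free segments
 * fall in the 512K bucket, with 1000 such segments, would encode an index of
 * 19 (2^19 = 512K) and a count of 1000 in the resulting weight.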
3328  */
3329 static uint64_t
3330 metaslab_segment_weight(metaslab_t *msp)
3331 {
3332 	metaslab_group_t *mg = msp->ms_group;
3333 	uint64_t weight = 0;
3334 	uint8_t shift = mg->mg_vd->vdev_ashift;
3335 
3336 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3337 
3338 	/*
3339 	 * The metaslab is completely free.
3340 	 */
3341 	if (metaslab_allocated_space(msp) == 0) {
3342 		int idx = highbit64(msp->ms_size) - 1;
3343 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3344 
3345 		if (idx < max_idx) {
3346 			WEIGHT_SET_COUNT(weight, 1ULL);
3347 			WEIGHT_SET_INDEX(weight, idx);
3348 		} else {
3349 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3350 			WEIGHT_SET_INDEX(weight, max_idx);
3351 		}
3352 		WEIGHT_SET_ACTIVE(weight, 0);
3353 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3354 		return (weight);
3355 	}
3356 
3357 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3358 
3359 	/*
3360 	 * If the metaslab is fully allocated then just make the weight 0.
3361 	 */
3362 	if (metaslab_allocated_space(msp) == msp->ms_size)
3363 		return (0);
3364 	/*
3365 	 * If the metaslab is already loaded, then use the range tree to
3366 	 * determine the weight. Otherwise, we rely on the space map information
3367 	 * to generate the weight.
3368 	 */
3369 	if (msp->ms_loaded) {
3370 		weight = metaslab_weight_from_range_tree(msp);
3371 	} else {
3372 		weight = metaslab_weight_from_spacemap(msp);
3373 	}
3374 
3375 	/*
3376 	 * If the metaslab was active the last time we calculated its weight
3377 	 * then keep it active. We want to consume the entire region that
3378 	 * is associated with this weight.
3379 	 */
3380 	if (msp->ms_activation_weight != 0 && weight != 0)
3381 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3382 	return (weight);
3383 }
3384 
3385 /*
3386  * Determine if we should attempt to allocate from this metaslab. If the
3387  * metaslab is loaded, then we can determine if the desired allocation
3388  * can be satisfied by looking at the size of the maximum free segment
3389  * on that metaslab. Otherwise, we make our decision based on the metaslab's
3390  * weight. For segment-based weighting we can determine the maximum
3391  * allocation based on the index encoded in its value. For space-based
3392  * weights we rely on the entire weight (excluding the weight-type bit).
3393  */
3394 static boolean_t
3395 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3396 {
3397 	/*
3398 	 * This case will usually but not always get caught by the checks below;
3399 	 * metaslabs can be loaded by various means, including the trim and
3400 	 * initialize code. Once that happens, without this check they are
3401 	 * allocatable even before they finish their first txg sync.
3402 	 */
3403 	if (unlikely(msp->ms_new))
3404 		return (B_FALSE);
3405 
3406 	/*
3407 	 * If the metaslab is loaded, ms_max_size is definitive and we can use
3408 	 * the fast check. If it's not, the ms_max_size is a lower bound (once
3409 	 * set), and we should use the fast check as long as we're not in
3410 	 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3411 	 * seconds since the metaslab was unloaded.
3412 	 */
3413 	if (msp->ms_loaded ||
3414 	    (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3415 	    msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3416 		return (msp->ms_max_size >= asize);
3417 
3418 	boolean_t should_allocate;
3419 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3420 		/*
3421 		 * The metaslab segment weight indicates segments in the
3422 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
3423 		 * Since the asize might be in the middle of the range, we
3424 		 * should attempt the allocation if asize < 2^(i+1).
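		 * For example, a weight index of 17 (128K) means we attempt
		 * any allocation smaller than 256K.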
3425 		 */
3426 		should_allocate = (asize <
3427 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3428 	} else {
3429 		should_allocate = (asize <=
3430 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3431 	}
3432 
3433 	return (should_allocate);
3434 }
3435 
3436 static uint64_t
3437 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3438 {
3439 	vdev_t *vd = msp->ms_group->mg_vd;
3440 	spa_t *spa = vd->vdev_spa;
3441 	uint64_t weight;
3442 
3443 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3444 
3445 	metaslab_set_fragmentation(msp, nodirty);
3446 
3447 	/*
3448 	 * Update the maximum size. If the metaslab is loaded, this will
3449 	 * ensure that we get an accurate maximum size if newly freed space
3450 	 * has been added back into the free tree. If the metaslab is
3451 	 * unloaded, we check if there's a larger free segment in the
3452 	 * unflushed frees. This is a lower bound on the largest allocatable
3453 	 * segment size. Coalescing of adjacent entries may reveal larger
3454 	 * allocatable segments, but we aren't aware of those until loading
3455 	 * the space map into a range tree.
3456 	 */
3457 	if (msp->ms_loaded) {
3458 		msp->ms_max_size = metaslab_largest_allocatable(msp);
3459 	} else {
3460 		msp->ms_max_size = MAX(msp->ms_max_size,
3461 		    metaslab_largest_unflushed_free(msp));
3462 	}
3463 
3464 	/*
3465 	 * Segment-based weighting requires space map histogram support.
3466 	 */
3467 	if (zfs_metaslab_segment_weight_enabled &&
3468 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3469 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3470 	    sizeof (space_map_phys_t))) {
3471 		weight = metaslab_segment_weight(msp);
3472 	} else {
3473 		weight = metaslab_space_weight(msp);
3474 	}
3475 	return (weight);
3476 }
3477 
3478 void
3479 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3480 {
3481 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3482 
3483 	/* note: we preserve the mask (e.g. indication of primary, etc..) */
3484 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3485 	metaslab_group_sort(msp->ms_group, msp,
3486 	    metaslab_weight(msp, B_FALSE) | was_active);
3487 }
3488 
3489 static int
3490 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3491     int allocator, uint64_t activation_weight)
3492 {
3493 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3494 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3495 
3496 	/*
3497 	 * If we're activating for the claim code, we don't want to actually
3498 	 * set the metaslab up for a specific allocator.
3499 	 */
3500 	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3501 		ASSERT0(msp->ms_activation_weight);
3502 		msp->ms_activation_weight = msp->ms_weight;
3503 		metaslab_group_sort(mg, msp, msp->ms_weight |
3504 		    activation_weight);
3505 		return (0);
3506 	}
3507 
3508 	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3509 	    &mga->mga_primary : &mga->mga_secondary);
3510 
3511 	mutex_enter(&mg->mg_lock);
3512 	if (*mspp != NULL) {
3513 		mutex_exit(&mg->mg_lock);
3514 		return (EEXIST);
3515 	}
3516 
3517 	*mspp = msp;
3518 	ASSERT3S(msp->ms_allocator, ==, -1);
3519 	msp->ms_allocator = allocator;
3520 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3521 
3522 	ASSERT0(msp->ms_activation_weight);
3523 	msp->ms_activation_weight = msp->ms_weight;
3524 	metaslab_group_sort_impl(mg, msp,
3525 	    msp->ms_weight | activation_weight);
3526 	mutex_exit(&mg->mg_lock);
3527 
3528 	return (0);
3529 }
3530 
3531 static int
3532 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3533 {
3534 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3535 
3536 	/*
3537 	 * The current metaslab is already activated for us so there
3538 	 * is nothing to do. Being activated, though, doesn't mean that
3539 	 * this metaslab is activated for our allocator or with our
3540 	 * requested activation weight. The metaslab could have started
3541 	 * as an active one for our allocator but changed allocators
3542 	 * while we were waiting to grab its ms_lock, or we stole it
3543 	 * [see find_valid_metaslab()]. This means that this thread may
3544 	 * end up passivating a metaslab that belongs to another
3545 	 * allocator or that was activated with a different mask.
3546 	 */
3547 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3548 		ASSERT(msp->ms_loaded);
3549 		return (0);
3550 	}
3551 
3552 	int error = metaslab_load(msp);
3553 	if (error != 0) {
3554 		metaslab_group_sort(msp->ms_group, msp, 0);
3555 		return (error);
3556 	}
3557 
3558 	/*
3559 	 * When entering metaslab_load() we may have dropped the
3560 	 * ms_lock because we were loading this metaslab, or we
3561 	 * were waiting for another thread to load it for us. In
3562 	 * that scenario, we recheck the weight of the metaslab
3563 	 * to see if it was activated by another thread.
3564 	 *
3565 	 * If the metaslab was activated for another allocator or
3566 	 * it was activated with a different activation weight (e.g.
3567 	 * we wanted to make it a primary but it was activated as
3568 	 * secondary) we return error (EBUSY).
3569 	 *
3570 	 * If the metaslab was activated for the same allocator
3571 	 * and requested activation mask, skip activating it.
3572 	 */
3573 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3574 		if (msp->ms_allocator != allocator)
3575 			return (EBUSY);
3576 
3577 		if ((msp->ms_weight & activation_weight) == 0)
3578 			return (SET_ERROR(EBUSY));
3579 
3580 		EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3581 		    msp->ms_primary);
3582 		return (0);
3583 	}
3584 
3585 	/*
3586 	 * If the metaslab has literally 0 space, it will have weight 0. In
3587 	 * that case, don't bother activating it. This can happen if the
3588 	 * metaslab had space during find_valid_metaslab, but another thread
3589 	 * loaded it and used all that space while we were waiting to grab the
3590 	 * lock.
3591 	 */
3592 	if (msp->ms_weight == 0) {
3593 		ASSERT0(zfs_range_tree_space(msp->ms_allocatable));
3594 		return (SET_ERROR(ENOSPC));
3595 	}
3596 
3597 	if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3598 	    allocator, activation_weight)) != 0) {
3599 		return (error);
3600 	}
3601 
3602 	ASSERT(msp->ms_loaded);
3603 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3604 
3605 	return (0);
3606 }
3607 
3608 static void
3609 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3610     uint64_t weight)
3611 {
3612 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3613 	ASSERT(msp->ms_loaded);
3614 
3615 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3616 		metaslab_group_sort(mg, msp, weight);
3617 		return;
3618 	}
3619 
3620 	mutex_enter(&mg->mg_lock);
3621 	ASSERT3P(msp->ms_group, ==, mg);
3622 	ASSERT3S(0, <=, msp->ms_allocator);
3623 	ASSERT3U(msp->ms_allocator, <, mg->mg_class->mc_spa->spa_alloc_count);
3624 
3625 	metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3626 	if (msp->ms_primary) {
3627 		ASSERT3P(mga->mga_primary, ==, msp);
3628 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3629 		mga->mga_primary = NULL;
3630 	} else {
3631 		ASSERT3P(mga->mga_secondary, ==, msp);
3632 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3633 		mga->mga_secondary = NULL;
3634 	}
3635 	msp->ms_allocator = -1;
3636 	metaslab_group_sort_impl(mg, msp, weight);
3637 	mutex_exit(&mg->mg_lock);
3638 }
3639 
3640 static void
3641 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3642 {
3643 	uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3644 
3645 	/*
3646 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3647 	 * this metaslab again.  In that case, it had better be empty,
3648 	 * or we would be leaving space on the table.
3649 	 */
3650 	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3651 	    size >= SPA_MINBLOCKSIZE ||
3652 	    zfs_range_tree_space(msp->ms_allocatable) == 0);
3653 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
3654 
3655 	ASSERT(msp->ms_activation_weight != 0);
3656 	msp->ms_activation_weight = 0;
3657 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
3658 	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3659 }
3660 
3661 /*
3662  * Segment-based metaslabs are activated once and remain active until
3663  * we either fail an allocation attempt (similar to space-based metaslabs)
3664  * or have exhausted the free space in zfs_metaslab_switch_threshold
3665  * buckets since the metaslab was activated. This function checks to see
3666  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3667  * metaslab and passivates it proactively. This will allow us to select a
3668  * metaslab with a larger contiguous region, if any, remaining within this
3669  * metaslab group. If we're in sync pass > 1, then we continue using this
3670  * metaslab so that we don't dirty more blocks and cause more sync passes.
3671  */
3672 static void
3673 metaslab_segment_may_passivate(metaslab_t *msp)
3674 {
3675 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3676 
3677 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3678 		return;
3679 
3680 	/*
3681 	 * As long as the single largest free segment covers the majority of
3682 	 * free space, don't consider the metaslab fragmented.  This allows
3683 	 * us to fill new unfragmented metaslabs completely before switching.
3684 	 */
3685 	if (metaslab_largest_allocatable(msp) >
3686 	    zfs_range_tree_space(msp->ms_allocatable) * 15 / 16)
3687 		return;
3688 
3689 	/*
3690 	 * Since we are in the middle of a sync pass, the most accurate
3691 	 * information that is accessible to us is the in-core range tree
3692 	 * histogram; calculate the new weight based on that information.
3693 	 */
3694 	uint64_t weight = metaslab_weight_from_range_tree(msp);
3695 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3696 	int current_idx = WEIGHT_GET_INDEX(weight);
3697 
3698 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
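	/*
	 * Illustrative example: with a switch threshold of 2, a metaslab
	 * activated at index 20 (1M segments) is passivated here once its
	 * largest remaining free segments fall to the 256K bucket (index 18)
	 * or below.
	 */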
3699 		metaslab_passivate(msp, weight);
3700 }
3701 
3702 static void
3703 metaslab_preload(void *arg)
3704 {
3705 	metaslab_t *msp = arg;
3706 	metaslab_class_t *mc = msp->ms_group->mg_class;
3707 	spa_t *spa = mc->mc_spa;
3708 	fstrans_cookie_t cookie = spl_fstrans_mark();
3709 
3710 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3711 
3712 	mutex_enter(&msp->ms_lock);
3713 	(void) metaslab_load(msp);
3714 	metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3715 	mutex_exit(&msp->ms_lock);
3716 	spl_fstrans_unmark(cookie);
3717 }
3718 
3719 static void
3720 metaslab_group_preload(metaslab_group_t *mg)
3721 {
3722 	spa_t *spa = mg->mg_vd->vdev_spa;
3723 	metaslab_t *msp;
3724 	avl_tree_t *t = &mg->mg_metaslab_tree;
3725 	int m = 0;
3726 
3727 	if (spa_shutting_down(spa) || !metaslab_preload_enabled)
3728 		return;
3729 
3730 	mutex_enter(&mg->mg_lock);
3731 
3732 	/*
3733 	 * Load the next potential metaslabs
3734 	 */
3735 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3736 		ASSERT3P(msp->ms_group, ==, mg);
3737 
3738 		/*
3739 		 * We preload only the maximum number of metaslabs specified
3740 		 * by metaslab_preload_limit. If a metaslab is being forced
3741 		 * to condense then we preload it too. This will ensure
3742 		 * that force condensing happens in the next txg.
3743 		 */
3744 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3745 			continue;
3746 		}
3747 
3748 		VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
3749 		    msp, TQ_SLEEP | (m <= spa->spa_alloc_count ? TQ_FRONT : 0))
3750 		    != TASKQID_INVALID);
3751 	}
3752 	mutex_exit(&mg->mg_lock);
3753 }
3754 
3755 /*
3756  * Determine if the space map's on-disk footprint is past our tolerance for
3757  * inefficiency. We would like to use the following criteria to make our
3758  * decision:
3759  *
3760  * 1. Do not condense if the size of the space map object would dramatically
3761  *    increase as a result of writing out the free space range tree.
3762  *
3763  * 2. Condense if the on-disk space map representation is at least
3764  *    zfs_condense_pct/100 times the size of the optimal representation
3765  *    (e.g. zfs_condense_pct = 110: condense once on-disk >= 1.1x optimal).
3766  *
3767  * 3. Do not condense if the on-disk size of the space map does not actually
3768  *    decrease.
3769  *
3770  * Unfortunately, we cannot compute the on-disk size of the space map in this
3771  * context because we cannot accurately compute the effects of compression, etc.
3772  * Instead, we apply the heuristic described in the block comment for
3773  * zfs_metaslab_condense_block_threshold - we only condense if the space used
3774  * is greater than a threshold number of blocks.
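 *
 * As an illustrative example, with zfs_condense_pct = 200, a condense block
 * threshold of 4, and a 16K space map block size, a metaslab would only be
 * condensed if its space map were both at least twice its estimated optimal
 * size and larger than 64K on disk.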
3775  */
3776 static boolean_t
3777 metaslab_should_condense(metaslab_t *msp)
3778 {
3779 	space_map_t *sm = msp->ms_sm;
3780 	vdev_t *vd = msp->ms_group->mg_vd;
3781 	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3782 
3783 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3784 	ASSERT(msp->ms_loaded);
3785 	ASSERT(sm != NULL);
3786 	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3787 
3788 	/*
3789 	 * We always condense metaslabs that are empty and metaslabs for
3790 	 * which a condense request has been made.
3791 	 */
3792 	if (zfs_range_tree_numsegs(msp->ms_allocatable) == 0 ||
3793 	    msp->ms_condense_wanted)
3794 		return (B_TRUE);
3795 
3796 	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3797 	uint64_t object_size = space_map_length(sm);
3798 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3799 	    msp->ms_allocatable, SM_NO_VDEVID);
3800 
3801 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3802 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
3803 }
3804 
3805 /*
3806  * Condense the on-disk space map representation to its minimized form.
3807  * The minimized form consists of a small number of allocations followed
3808  * by the entries of the free range tree (ms_allocatable). The condensed
3809  * spacemap contains all the entries of previous TXGs (including those in
3810  * the pool-wide log spacemaps; thus this is effectively a superset of
3811  * metaslab_flush()), but this TXG's entries still need to be written.
3812  */
3813 static void
3814 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3815 {
3816 	zfs_range_tree_t *condense_tree;
3817 	space_map_t *sm = msp->ms_sm;
3818 	uint64_t txg = dmu_tx_get_txg(tx);
3819 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3820 
3821 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3822 	ASSERT(msp->ms_loaded);
3823 	ASSERT(msp->ms_sm != NULL);
3824 
3825 	/*
3826 	 * In order to condense the space map, we need to change it so it
3827 	 * only describes which segments are currently allocated and free.
3828 	 *
3829 	 * All the current free space resides in the ms_allocatable, all
3830 	 * the ms_defer trees, and all the ms_allocating trees. We ignore
3831 	 * ms_freed because it is empty because we're in sync pass 1. We
3832 	 * ms_freed because it is empty, since we're in sync pass 1. We
3833 	 * in the spacemap (they will be written later this txg).
3834 	 *
3835 	 * So to truncate the space map to represent all the entries of
3836 	 * previous TXGs we do the following:
3837 	 *
3838 	 * 1] We create a range tree (condense tree) that is 100% empty.
3839 	 * 2] We add to it all segments found in the ms_defer trees
3840 	 *    as those segments are marked as free in the original space
3841 	 *    map. We do the same with the ms_allocating trees for the same
3842 	 *    reason. Adding these segments should be a relatively
3843 	 *    inexpensive operation since we expect these trees to have a
3844 	 *    small number of nodes.
3845 	 * 3] We vacate any unflushed allocs, since they are not frees we
3846 	 *    need to add to the condense tree. Then we vacate any
3847 	 *    unflushed frees as they should already be part of ms_allocatable.
3848 	 * 4] At this point, we would ideally like to add all the segments
3849 	 *    from the ms_allocatable tree to the condense tree. This way
3850 	 *    we would write all the entries of the condense tree as the
3851 	 *    condensed space map, which would only contain freed
3852 	 *    segments with everything else assumed to be allocated.
3853 	 *
3854 	 *    Doing so can be prohibitively expensive as ms_allocatable can
3855 	 *    be large, and therefore computationally expensive to add to
3856 	 *    the condense_tree. Instead we first sync out an entry marking
3857 	 *    everything as allocated, then the condense_tree and then the
3858 	 *    ms_allocatable, in the condensed space map. While this is not
3859 	 *    optimal, it is typically close to optimal and more importantly
3860 	 *    much cheaper to compute.
3861 	 *
3862 	 * 5] Finally, as both of the unflushed trees were written to our
3863 	 *    new and condensed metaslab space map, we basically flushed
3864 	 *    all the unflushed changes to disk, thus we call
3865 	 *    metaslab_flush_update().
3866 	 */
3867 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3868 	ASSERT(zfs_range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3869 
3870 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3871 	    "spa %s, smp size %llu, segments %llu, forcing condense=%s",
3872 	    (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3873 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3874 	    spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
3875 	    (u_longlong_t)zfs_range_tree_numsegs(msp->ms_allocatable),
3876 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
3877 
3878 	msp->ms_condense_wanted = B_FALSE;
3879 
3880 	zfs_range_seg_type_t type;
3881 	uint64_t shift, start;
3882 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3883 	    &start, &shift);
3884 
3885 	condense_tree = zfs_range_tree_create(NULL, type, NULL, start, shift);
3886 
3887 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3888 		zfs_range_tree_walk(msp->ms_defer[t],
3889 		    zfs_range_tree_add, condense_tree);
3890 	}
3891 
3892 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3893 		zfs_range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3894 		    zfs_range_tree_add, condense_tree);
3895 	}
3896 
3897 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3898 	    metaslab_unflushed_changes_memused(msp));
3899 	spa->spa_unflushed_stats.sus_memused -=
3900 	    metaslab_unflushed_changes_memused(msp);
3901 	zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3902 	zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3903 
3904 	/*
3905 	 * We're about to drop the metaslab's lock thus allowing other
3906 	 * consumers to change its content. Set the metaslab's ms_condensing
3907 	 * flag to ensure that allocations on this metaslab do not occur
3908 	 * while we're in the middle of committing it to disk. This is only
3909 	 * critical for ms_allocatable as all other range trees use per TXG
3910 	 * views of their content.
3911 	 */
3912 	msp->ms_condensing = B_TRUE;
3913 
3914 	mutex_exit(&msp->ms_lock);
3915 	uint64_t object = space_map_object(msp->ms_sm);
3916 	space_map_truncate(sm,
3917 	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3918 	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3919 
3920 	/*
3921 	 * space_map_truncate() may have reallocated the spacemap object.
3922 	 * If so, update the vdev_ms_array.
3923 	 */
3924 	if (space_map_object(msp->ms_sm) != object) {
3925 		object = space_map_object(msp->ms_sm);
3926 		dmu_write(spa->spa_meta_objset,
3927 		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3928 		    msp->ms_id, sizeof (uint64_t), &object, tx);
3929 	}
3930 
3931 	/*
3932 	 * Note:
3933 	 * When the log space map feature is enabled, each space map will
3934 	 * always have ALLOCS followed by FREES for each sync pass. This is
3935 	 * typically true even when the log space map feature is disabled,
3936 	 * except for the case where a metaslab goes through metaslab_sync()
3937 	 * and gets condensed. In that case the metaslab's space map will have
3938 	 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3939 	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3940 	 * sync pass 1.
3941 	 */
3942 	zfs_range_tree_t *tmp_tree = zfs_range_tree_create(NULL, type, NULL,
3943 	    start, shift);
3944 	zfs_range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3945 	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3946 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3947 	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3948 
3949 	zfs_range_tree_vacate(condense_tree, NULL, NULL);
3950 	zfs_range_tree_destroy(condense_tree);
3951 	zfs_range_tree_vacate(tmp_tree, NULL, NULL);
3952 	zfs_range_tree_destroy(tmp_tree);
3953 	mutex_enter(&msp->ms_lock);
3954 
3955 	msp->ms_condensing = B_FALSE;
3956 	metaslab_flush_update(msp, tx);
3957 }
3958 
3959 static void
3960 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3961 {
3962 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3963 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3964 	ASSERT(msp->ms_sm != NULL);
3965 	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
3966 	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
3967 
3968 	mutex_enter(&spa->spa_flushed_ms_lock);
3969 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3970 	metaslab_set_unflushed_dirty(msp, B_TRUE);
3971 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3972 	mutex_exit(&spa->spa_flushed_ms_lock);
3973 
3974 	spa_log_sm_increment_current_mscount(spa);
3975 	spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3976 }
3977 
3978 void
3979 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3980 {
3981 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3982 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3983 	ASSERT(msp->ms_sm != NULL);
3984 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3985 	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3986 	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
3987 	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
3988 
3989 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3990 
3991 	/* update metaslab's position in our flushing tree */
3992 	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3993 	boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3994 	mutex_enter(&spa->spa_flushed_ms_lock);
3995 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3996 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3997 	metaslab_set_unflushed_dirty(msp, dirty);
3998 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3999 	mutex_exit(&spa->spa_flushed_ms_lock);
4000 
4001 	/* update metaslab counts of spa_log_sm_t nodes */
4002 	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
4003 	spa_log_sm_increment_current_mscount(spa);
4004 
4005 	/* update log space map summary */
4006 	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
4007 	    ms_prev_flushed_dirty);
4008 	spa_log_summary_add_flushed_metaslab(spa, dirty);
4009 
4010 	/* cleanup obsolete logs if any */
4011 	spa_cleanup_old_sm_logs(spa, tx);
4012 }
4013 
4014 /*
4015  * Called when the metaslab has been flushed (its own spacemap now reflects
4016  * all the contents of the pool-wide spacemap log). Updates the metaslab's
4017  * metadata and any pool-wide related log space map data (e.g. summary,
4018  * obsolete logs, etc..) to reflect that.
4019  */
4020 static void
4021 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
4022 {
4023 	metaslab_group_t *mg = msp->ms_group;
4024 	spa_t *spa = mg->mg_vd->vdev_spa;
4025 
4026 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4027 
4028 	ASSERT3U(spa_sync_pass(spa), ==, 1);
4029 
4030 	/*
4031 	 * Just because a metaslab got flushed doesn't mean that
4032 	 * it will pass through metaslab_sync_done(). Thus, make sure to
4033 	 * update ms_synced_length here in case it doesn't.
4034 	 */
4035 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4036 
4037 	/*
4038 	 * We may end up here from metaslab_condense() without the
4039 	 * feature being active. In that case this is a no-op.
4040 	 */
4041 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
4042 	    metaslab_unflushed_txg(msp) == 0)
4043 		return;
4044 
4045 	metaslab_unflushed_bump(msp, tx, B_FALSE);
4046 }
4047 
4048 boolean_t
4049 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
4050 {
4051 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
4052 
4053 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4054 	ASSERT3U(spa_sync_pass(spa), ==, 1);
4055 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4056 
4057 	ASSERT(msp->ms_sm != NULL);
4058 	ASSERT(metaslab_unflushed_txg(msp) != 0);
4059 	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
4060 
4061 	/*
4062 	 * There is nothing wrong with flushing the same metaslab twice, as
4063 	 * this codepath should work in that case. However, the current
4064 	 * flushing scheme makes sure to avoid this situation as we would be
4065 	 * making all these calls without having anything meaningful to write
4066 	 * to disk. We assert this behavior here.
4067 	 */
4068 	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
4069 
4070 	/*
4071 	 * We cannot flush while loading, because then we would
4072 	 * not load the ms_unflushed_{allocs,frees}.
4073 	 */
4074 	if (msp->ms_loading)
4075 		return (B_FALSE);
4076 
4077 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4078 	metaslab_verify_weight_and_frag(msp);
4079 
4080 	/*
4081 	 * Metaslab condensing is effectively flushing. Therefore if the
4082 	 * metaslab can be condensed we can just condense it instead of
4083 	 * flushing it.
4084 	 *
4085 	 * Note that metaslab_condense() does call metaslab_flush_update()
4086 	 * so we can just return immediately after condensing. We also
4087 	 * don't need to care about setting ms_flushing or broadcasting
4088 	 * ms_flush_cv, even if we temporarily drop the ms_lock in
4089 	 * metaslab_condense(), as the metaslab is already loaded.
4090 	 */
4091 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
4092 		metaslab_group_t *mg = msp->ms_group;
4093 
4094 		/*
4095 		 * For all histogram operations below refer to the
4096 		 * comments of metaslab_sync() where we follow a
4097 		 * similar procedure.
4098 		 */
4099 		metaslab_group_histogram_verify(mg);
4100 		metaslab_class_histogram_verify(mg->mg_class);
4101 		metaslab_group_histogram_remove(mg, msp);
4102 
4103 		metaslab_condense(msp, tx);
4104 
4105 		space_map_histogram_clear(msp->ms_sm);
4106 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4107 		ASSERT(zfs_range_tree_is_empty(msp->ms_freed));
4108 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4109 			space_map_histogram_add(msp->ms_sm,
4110 			    msp->ms_defer[t], tx);
4111 		}
4112 		metaslab_aux_histograms_update(msp);
4113 
4114 		metaslab_group_histogram_add(mg, msp);
4115 		metaslab_group_histogram_verify(mg);
4116 		metaslab_class_histogram_verify(mg->mg_class);
4117 
4118 		metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4119 
4120 		/*
4121 		 * Since we recreated the histogram (and potentially
4122 		 * the ms_sm too while condensing) ensure that the
4123 		 * weight is updated too because we are not guaranteed
4124 		 * that this metaslab is dirty and will go through
4125 		 * metaslab_sync_done().
4126 		 */
4127 		metaslab_recalculate_weight_and_sort(msp);
4128 		return (B_TRUE);
4129 	}
4130 
4131 	msp->ms_flushing = B_TRUE;
4132 	uint64_t sm_len_before = space_map_length(msp->ms_sm);
4133 
4134 	mutex_exit(&msp->ms_lock);
4135 	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
4136 	    SM_NO_VDEVID, tx);
4137 	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
4138 	    SM_NO_VDEVID, tx);
4139 	mutex_enter(&msp->ms_lock);
4140 
4141 	uint64_t sm_len_after = space_map_length(msp->ms_sm);
4142 	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
4143 		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
4144 		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
4145 		    "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
4146 		    spa_name(spa),
4147 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
4148 		    (u_longlong_t)msp->ms_id,
4149 		    (u_longlong_t)zfs_range_tree_space(
4150 		    msp->ms_unflushed_allocs),
4151 		    (u_longlong_t)zfs_range_tree_space(
4152 		    msp->ms_unflushed_frees),
4153 		    (u_longlong_t)(sm_len_after - sm_len_before));
4154 	}
4155 
4156 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4157 	    metaslab_unflushed_changes_memused(msp));
4158 	spa->spa_unflushed_stats.sus_memused -=
4159 	    metaslab_unflushed_changes_memused(msp);
4160 	zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
4161 	zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
4162 
4163 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4164 	metaslab_verify_weight_and_frag(msp);
4165 
4166 	metaslab_flush_update(msp, tx);
4167 
4168 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4169 	metaslab_verify_weight_and_frag(msp);
4170 
4171 	msp->ms_flushing = B_FALSE;
4172 	cv_broadcast(&msp->ms_flush_cv);
4173 	return (B_TRUE);
4174 }
4175 
4176 /*
4177  * Write a metaslab to disk in the context of the specified transaction group.
4178  */
4179 void
4180 metaslab_sync(metaslab_t *msp, uint64_t txg)
4181 {
4182 	metaslab_group_t *mg = msp->ms_group;
4183 	vdev_t *vd = mg->mg_vd;
4184 	spa_t *spa = vd->vdev_spa;
4185 	objset_t *mos = spa_meta_objset(spa);
4186 	zfs_range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
4187 	dmu_tx_t *tx;
4188 
4189 	ASSERT(!vd->vdev_ishole);
4190 
4191 	/*
4192 	 * This metaslab has just been added so there's no work to do now.
4193 	 */
4194 	if (msp->ms_new) {
4195 		ASSERT0(zfs_range_tree_space(alloctree));
4196 		ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4197 		ASSERT0(zfs_range_tree_space(msp->ms_freed));
4198 		ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4199 		ASSERT0(zfs_range_tree_space(msp->ms_trim));
4200 		return;
4201 	}
4202 
4203 	/*
4204 	 * Normally, we don't want to process a metaslab if there are no
4205 	 * allocations or frees to perform. However, if the metaslab is being
4206 	 * forced to condense, it's loaded and we're not beyond the final
4207 	 * dirty txg, we need to let it through. Not condensing beyond the
4208 	 * final dirty txg prevents an issue where metaslabs that need to be
4209 	 * condensed but were loaded for other reasons could cause a panic
4210 	 * here. By only checking the txg in that branch of the conditional,
4211 	 * we preserve the utility of the VERIFY statements in all other
4212 	 * cases.
4213 	 */
4214 	if (zfs_range_tree_is_empty(alloctree) &&
4215 	    zfs_range_tree_is_empty(msp->ms_freeing) &&
4216 	    zfs_range_tree_is_empty(msp->ms_checkpointing) &&
4217 	    !(msp->ms_loaded && msp->ms_condense_wanted &&
4218 	    txg <= spa_final_dirty_txg(spa)))
4219 		return;
4220 
4222 	VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4223 
4224 	/*
4225 	 * The only state that can actually be changing concurrently
4226 	 * with metaslab_sync() is the metaslab's ms_allocatable. No
4227 	 * other thread can be modifying this txg's alloc, freeing,
4228 	 * freed, or space_map_phys_t.  We drop ms_lock whenever we
4229 	 * could call into the DMU, because the DMU can call down to
4230 	 * us (e.g. via zio_free()) at any time.
4231 	 *
4232 	 * The spa_vdev_remove_thread() can be reading metaslab state
4233 	 * concurrently, and it is locked out by the ms_sync_lock.
4234 	 * Note that the ms_lock is insufficient for this, because it
4235 	 * is dropped by space_map_write().
4236 	 */
4237 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4238 
4239 	/*
4240 	 * Generate a log space map if one doesn't exist already.
4241 	 */
4242 	spa_generate_syncing_log_sm(spa, tx);
4243 
4244 	if (msp->ms_sm == NULL) {
4245 		uint64_t new_object = space_map_alloc(mos,
4246 		    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4247 		    zfs_metaslab_sm_blksz_with_log :
4248 		    zfs_metaslab_sm_blksz_no_log, tx);
4249 		VERIFY3U(new_object, !=, 0);
4250 
4251 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4252 		    msp->ms_id, sizeof (uint64_t), &new_object, tx);
4253 
4254 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4255 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
4256 		ASSERT(msp->ms_sm != NULL);
4257 
4258 		ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
4259 		ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
4260 		ASSERT0(metaslab_allocated_space(msp));
4261 	}
4262 
4263 	if (!zfs_range_tree_is_empty(msp->ms_checkpointing) &&
4264 	    vd->vdev_checkpoint_sm == NULL) {
4265 		ASSERT(spa_has_checkpoint(spa));
4266 
4267 		uint64_t new_object = space_map_alloc(mos,
4268 		    zfs_vdev_standard_sm_blksz, tx);
4269 		VERIFY3U(new_object, !=, 0);
4270 
4271 		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4272 		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4273 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4274 
4275 		/*
4276 		 * We save the space map object as an entry in vdev_top_zap
4277 		 * so it can be retrieved when the pool is reopened after an
4278 		 * export or through zdb.
4279 		 */
4280 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4281 		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4282 		    sizeof (new_object), 1, &new_object, tx));
4283 	}
4284 
4285 	mutex_enter(&msp->ms_sync_lock);
4286 	mutex_enter(&msp->ms_lock);
4287 
4288 	/*
4289 	 * Note: metaslab_condense() clears the space map's histogram.
4290 	 * Therefore we must verify and remove this histogram before
4291 	 * condensing.
4292 	 */
4293 	metaslab_group_histogram_verify(mg);
4294 	metaslab_class_histogram_verify(mg->mg_class);
4295 	metaslab_group_histogram_remove(mg, msp);
4296 
4297 	if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4298 	    metaslab_should_condense(msp))
4299 		metaslab_condense(msp, tx);
4300 
4301 	/*
4302 	 * We'll be going to disk to sync our space accounting, thus we
4303 	 * drop the ms_lock during that time so allocations coming from
4304 	 * open-context (ZIL) for future TXGs do not block.
4305 	 */
4306 	mutex_exit(&msp->ms_lock);
4307 	space_map_t *log_sm = spa_syncing_log_sm(spa);
4308 	if (log_sm != NULL) {
4309 		ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4310 		if (metaslab_unflushed_txg(msp) == 0)
4311 			metaslab_unflushed_add(msp, tx);
4312 		else if (!metaslab_unflushed_dirty(msp))
4313 			metaslab_unflushed_bump(msp, tx, B_TRUE);
4314 
4315 		space_map_write(log_sm, alloctree, SM_ALLOC,
4316 		    vd->vdev_id, tx);
4317 		space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4318 		    vd->vdev_id, tx);
4319 		mutex_enter(&msp->ms_lock);
4320 
4321 		ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4322 		    metaslab_unflushed_changes_memused(msp));
4323 		spa->spa_unflushed_stats.sus_memused -=
4324 		    metaslab_unflushed_changes_memused(msp);
4325 		zfs_range_tree_remove_xor_add(alloctree,
4326 		    msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4327 		zfs_range_tree_remove_xor_add(msp->ms_freeing,
4328 		    msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4329 		spa->spa_unflushed_stats.sus_memused +=
4330 		    metaslab_unflushed_changes_memused(msp);
4331 	} else {
4332 		ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4333 
4334 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4335 		    SM_NO_VDEVID, tx);
4336 		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4337 		    SM_NO_VDEVID, tx);
4338 		mutex_enter(&msp->ms_lock);
4339 	}
4340 
4341 	msp->ms_allocated_space += zfs_range_tree_space(alloctree);
4342 	ASSERT3U(msp->ms_allocated_space, >=,
4343 	    zfs_range_tree_space(msp->ms_freeing));
4344 	msp->ms_allocated_space -= zfs_range_tree_space(msp->ms_freeing);
4345 
4346 	if (!zfs_range_tree_is_empty(msp->ms_checkpointing)) {
4347 		ASSERT(spa_has_checkpoint(spa));
4348 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4349 
4350 		/*
4351 		 * Since we are doing writes to disk and the ms_checkpointing
4352 		 * tree won't be changing during that time, we drop the
4353 		 * ms_lock while writing to the checkpoint space map, for the
4354 		 * same reason mentioned above.
4355 		 */
4356 		mutex_exit(&msp->ms_lock);
4357 		space_map_write(vd->vdev_checkpoint_sm,
4358 		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4359 		mutex_enter(&msp->ms_lock);
4360 
4361 		spa->spa_checkpoint_info.sci_dspace +=
4362 		    zfs_range_tree_space(msp->ms_checkpointing);
4363 		vd->vdev_stat.vs_checkpoint_space +=
4364 		    zfs_range_tree_space(msp->ms_checkpointing);
4365 		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4366 		    -space_map_allocated(vd->vdev_checkpoint_sm));
4367 
4368 		zfs_range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4369 	}
4370 
4371 	if (msp->ms_loaded) {
4372 		/*
4373 		 * When the space map is loaded, we have an accurate
4374 		 * histogram in the range tree. This gives us an opportunity
4375 		 * to bring the space map's histogram up-to-date so we clear
4376 		 * it first before updating it.
4377 		 */
4378 		space_map_histogram_clear(msp->ms_sm);
4379 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4380 
4381 		/*
4382 		 * Since we've cleared the histogram we need to add back
4383 		 * any free space that has already been processed, plus
4384 		 * any deferred space. This allows the on-disk histogram
4385 		 * to accurately reflect all free space even if some space
4386 		 * is not yet available for allocation (i.e. deferred).
4387 		 */
4388 		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4389 
4390 		/*
4391 		 * Add back any deferred free space that has not been
4392 		 * added back into the in-core free tree yet. This will
4393 		 * ensure that we don't end up with a space map histogram
4394 		 * that is completely empty unless the metaslab is fully
4395 		 * allocated.
4396 		 */
4397 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4398 			space_map_histogram_add(msp->ms_sm,
4399 			    msp->ms_defer[t], tx);
4400 		}
4401 	}
4402 
4403 	/*
4404 	 * Always add the free space from this sync pass to the space
4405 	 * map histogram. We want to make sure that the on-disk histogram
4406 	 * accounts for all free space. If the space map is not loaded,
4407 	 * then we will lose some accuracy but will correct it the next
4408 	 * time we load the space map.
4409 	 */
4410 	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4411 	metaslab_aux_histograms_update(msp);
4412 
4413 	metaslab_group_histogram_add(mg, msp);
4414 	metaslab_group_histogram_verify(mg);
4415 	metaslab_class_histogram_verify(mg->mg_class);
4416 
4417 	/*
4418 	 * For sync pass 1, we avoid traversing this txg's free range tree
4419 	 * and instead will just swap the pointers for freeing and freed.
4420 	 * We can safely do this since the freed_tree is guaranteed to be
4421 	 * empty on the initial pass.
4422 	 *
4423 	 * Keep in mind that even if we are currently using a log spacemap
4424 	 * we want current frees to end up in the ms_allocatable (but not
4425 	 * get appended to the ms_sm) so their ranges can be reused as usual.
4426 	 */
4427 	if (spa_sync_pass(spa) == 1) {
4428 		zfs_range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4429 		ASSERT0(msp->ms_allocated_this_txg);
4430 	} else {
4431 		zfs_range_tree_vacate(msp->ms_freeing,
4432 		    zfs_range_tree_add, msp->ms_freed);
4433 	}
4434 	msp->ms_allocated_this_txg += zfs_range_tree_space(alloctree);
4435 	zfs_range_tree_vacate(alloctree, NULL, NULL);
4436 
4437 	ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4438 	ASSERT0(zfs_range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4439 	    & TXG_MASK]));
4440 	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4441 	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4442 
4443 	mutex_exit(&msp->ms_lock);
4444 
4445 	/*
4446 	 * Verify that the space map object ID has been recorded in the
4447 	 * vdev_ms_array.
4448 	 */
4449 	uint64_t object;
4450 	VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4451 	    msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4452 	VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4453 
4454 	mutex_exit(&msp->ms_sync_lock);
4455 	dmu_tx_commit(tx);
4456 }
4457 
4458 static void
4459 metaslab_evict(metaslab_t *msp, uint64_t txg)
4460 {
4461 	if (!msp->ms_loaded || msp->ms_disabled != 0)
4462 		return;
4463 
4464 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4465 		VERIFY0(zfs_range_tree_space(
4466 		    msp->ms_allocating[(txg + t) & TXG_MASK]));
4467 	}
4468 	if (msp->ms_allocator != -1)
4469 		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4470 
4471 	if (!metaslab_debug_unload)
4472 		metaslab_unload(msp);
4473 }
4474 
4475 /*
4476  * Called after a transaction group has completely synced to mark
4477  * all of the metaslab's free space as usable.
4478  */
4479 void
4480 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4481 {
4482 	metaslab_group_t *mg = msp->ms_group;
4483 	vdev_t *vd = mg->mg_vd;
4484 	spa_t *spa = vd->vdev_spa;
4485 	zfs_range_tree_t **defer_tree;
4486 	int64_t alloc_delta, defer_delta;
4487 	boolean_t defer_allowed = B_TRUE;
4488 
4489 	ASSERT(!vd->vdev_ishole);
4490 
4491 	mutex_enter(&msp->ms_lock);
4492 
4493 	if (msp->ms_new) {
4494 		/* this is a new metaslab, add its capacity to the vdev */
4495 		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4496 
4497 		/* there should be no allocations nor frees at this point */
4498 		VERIFY0(msp->ms_allocated_this_txg);
4499 		VERIFY0(zfs_range_tree_space(msp->ms_freed));
4500 	}
4501 
4502 	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4503 	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4504 
4505 	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4506 
4507 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4508 	    metaslab_class_get_alloc(spa_normal_class(spa));
4509 	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
4510 	    vd->vdev_rz_expanding) {
4511 		defer_allowed = B_FALSE;
4512 	}
4513 
4514 	defer_delta = 0;
4515 	alloc_delta = msp->ms_allocated_this_txg -
4516 	    zfs_range_tree_space(msp->ms_freed);
4517 
4518 	if (defer_allowed) {
4519 		defer_delta = zfs_range_tree_space(msp->ms_freed) -
4520 		    zfs_range_tree_space(*defer_tree);
4521 	} else {
4522 		defer_delta -= zfs_range_tree_space(*defer_tree);
4523 	}
4524 	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4525 	    defer_delta, 0);
4526 
4527 	if (spa_syncing_log_sm(spa) == NULL) {
4528 		/*
4529 		 * If there's a metaslab_load() in progress and we don't have
4530 		 * a log space map, it means that we probably wrote to the
4531 		 * metaslab's space map. If this is the case, we need to
4532 		 * make sure that we wait for the load to complete so that we
4533 	 * have a consistent view of the in-core side of the metaslab.
4534 		 */
4535 		metaslab_load_wait(msp);
4536 	} else {
4537 		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4538 	}
4539 
4540 	/*
4541 	 * When auto-trimming is enabled, free ranges which are added to
4542 	 * ms_allocatable are also added to ms_trim.  The ms_trim tree is
4543 	 * periodically consumed by the vdev_autotrim_thread() which issues
4544 	 * trims for all ranges and then vacates the tree.  The ms_trim tree
4545 	 * can be discarded at any time with the sole consequence of recent
4546 	 * frees not being trimmed.
4547 	 */
4548 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4549 		zfs_range_tree_walk(*defer_tree, zfs_range_tree_add,
4550 		    msp->ms_trim);
4551 		if (!defer_allowed) {
4552 			zfs_range_tree_walk(msp->ms_freed, zfs_range_tree_add,
4553 			    msp->ms_trim);
4554 		}
4555 	} else {
4556 		zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
4557 	}
4558 
4559 	/*
4560 	 * Move the frees from the defer_tree back to the free
4561 	 * range tree (if it's loaded). Swap the freed_tree and
4562 	 * the defer_tree -- this is safe to do because we've
4563 	 * just emptied out the defer_tree.
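	 *
	 * With a TXG_DEFER_SIZE of 2, this means that space freed in txg N
	 * generally becomes allocatable again only after txg N + 2 has
	 * synced.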
4564 	 */
4565 	zfs_range_tree_vacate(*defer_tree,
4566 	    msp->ms_loaded ? zfs_range_tree_add : NULL, msp->ms_allocatable);
4567 	if (defer_allowed) {
4568 		zfs_range_tree_swap(&msp->ms_freed, defer_tree);
4569 	} else {
4570 		zfs_range_tree_vacate(msp->ms_freed,
4571 		    msp->ms_loaded ? zfs_range_tree_add : NULL,
4572 		    msp->ms_allocatable);
4573 	}
4574 
4575 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4576 
4577 	msp->ms_deferspace += defer_delta;
4578 	ASSERT3S(msp->ms_deferspace, >=, 0);
4579 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4580 	if (msp->ms_deferspace != 0) {
4581 		/*
4582 		 * Keep syncing this metaslab until all deferred frees
4583 		 * are back in circulation.
4584 		 */
4585 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4586 	}
4587 	metaslab_aux_histograms_update_done(msp, defer_allowed);
4588 
4589 	if (msp->ms_new) {
4590 		msp->ms_new = B_FALSE;
4591 		mutex_enter(&mg->mg_lock);
4592 		mg->mg_ms_ready++;
4593 		mutex_exit(&mg->mg_lock);
4594 	}
4595 
4596 	/*
4597 	 * Re-sort metaslab within its group now that we've adjusted
4598 	 * its allocatable space.
4599 	 */
4600 	metaslab_recalculate_weight_and_sort(msp);
4601 
4602 	ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4603 	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4604 	ASSERT0(zfs_range_tree_space(msp->ms_freed));
4605 	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4606 	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4607 	msp->ms_allocated_this_txg = 0;
4608 	mutex_exit(&msp->ms_lock);
4609 }
4610 
4611 void
4612 metaslab_sync_reassess(metaslab_group_t *mg)
4613 {
4614 	spa_t *spa = mg->mg_class->mc_spa;
4615 
4616 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4617 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4618 	metaslab_group_alloc_update(mg);
4619 
4620 	/*
4621 	 * Preload the next potential metaslabs but only on active
4622 	 * metaslab groups. We can get into a state where the metaslab
4623 	 * is no longer active since we dirty metaslabs as we remove a
4624 	 * device, thus potentially making the metaslab group eligible
4625 	 * for preloading.
4626 	 */
4627 	if (mg->mg_activation_count > 0) {
4628 		metaslab_group_preload(mg);
4629 	}
4630 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4631 }
4632 
4633 /*
4634  * When writing a ditto block (i.e. more than one DVA for a given BP) on
4635  * the same vdev as an existing DVA of this BP, then try to allocate it
4636  * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4637  */
4638 static boolean_t
4639 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4640 {
4641 	uint64_t dva_ms_id;
4642 
4643 	if (DVA_GET_ASIZE(dva) == 0)
4644 		return (B_TRUE);
4645 
4646 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4647 		return (B_TRUE);
4648 
4649 	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4650 
4651 	return (msp->ms_id != dva_ms_id);
4652 }
4653 
4654 /*
4655  * ==========================================================================
4656  * Metaslab allocation tracing facility
4657  * ==========================================================================
4658  */
4659 
4660 /*
4661  * Add an allocation trace element to the allocation tracing list.
4662  */
4663 static void
4664 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4665     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4666     int allocator)
4667 {
4668 	metaslab_alloc_trace_t *mat;
4669 
4670 	if (!metaslab_trace_enabled)
4671 		return;
4672 
4673 	/*
4674 	 * When the tracing list reaches its maximum we remove
4675 	 * the second element in the list before adding a new one.
4676 	 * By removing the second element we preserve the original
4677 		 * entry as a clue to what allocation steps have already been
4678 	 * performed.
4679 	 */
4680 	if (zal->zal_size == metaslab_trace_max_entries) {
4681 		metaslab_alloc_trace_t *mat_next;
4682 #ifdef ZFS_DEBUG
4683 		panic("too many entries in allocation list");
4684 #endif
4685 		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4686 		zal->zal_size--;
4687 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4688 		list_remove(&zal->zal_list, mat_next);
4689 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4690 	}
4691 
4692 	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4693 	list_link_init(&mat->mat_list_node);
4694 	mat->mat_mg = mg;
4695 	mat->mat_msp = msp;
4696 	mat->mat_size = psize;
4697 	mat->mat_dva_id = dva_id;
4698 	mat->mat_offset = offset;
4699 	mat->mat_weight = 0;
4700 	mat->mat_allocator = allocator;
4701 
4702 	if (msp != NULL)
4703 		mat->mat_weight = msp->ms_weight;
4704 
4705 	/*
4706 	 * The list is part of the zio so locking is not required. Only
4707 	 * a single thread will perform allocations for a given zio.
4708 	 */
4709 	list_insert_tail(&zal->zal_list, mat);
4710 	zal->zal_size++;
4711 
4712 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4713 }
4714 
4715 void
4716 metaslab_trace_move(zio_alloc_list_t *old, zio_alloc_list_t *new)
4717 {
4718 	ASSERT0(new->zal_size);
4719 	list_move_tail(&new->zal_list, &old->zal_list);
4720 	new->zal_size = old->zal_size;
4721 	list_destroy(&old->zal_list);
4722 }
4723 
4724 void
4725 metaslab_trace_init(zio_alloc_list_t *zal)
4726 {
4727 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4728 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
4729 	zal->zal_size = 0;
4730 }
4731 
4732 void
4733 metaslab_trace_fini(zio_alloc_list_t *zal)
4734 {
4735 	metaslab_alloc_trace_t *mat;
4736 
4737 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4738 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
4739 	list_destroy(&zal->zal_list);
4740 	zal->zal_size = 0;
4741 }
4742 
4743 /*
4744  * ==========================================================================
4745  * Metaslab block operations
4746  * ==========================================================================
4747  */
4748 
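/*
 * Account an asynchronous allocation's size against the group allocator's
 * queue depth so the allocation throttle can track outstanding writes.
 */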
4749 static void
4750 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, int allocator,
4751     int flags, uint64_t psize, const void *tag)
4752 {
4753 	if (!(flags & METASLAB_ASYNC_ALLOC) || tag == NULL)
4754 		return;
4755 
4756 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4757 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4758 		return;
4759 
4760 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4761 	(void) zfs_refcount_add_many(&mga->mga_queue_depth, psize, tag);
4762 }
4763 
4764 void
4765 metaslab_group_alloc_increment_all(spa_t *spa, blkptr_t *bp, int allocator,
4766     int flags, uint64_t psize, const void *tag)
4767 {
4768 	for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
4769 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[d]);
4770 		metaslab_group_alloc_increment(spa, vdev, allocator, flags,
4771 		    psize, tag);
4772 	}
4773 }
4774 
4775 void
4776 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, int allocator,
4777     int flags, uint64_t psize, const void *tag)
4778 {
4779 	if (!(flags & METASLAB_ASYNC_ALLOC) || tag == NULL)
4780 		return;
4781 
4782 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4783 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4784 		return;
4785 
4786 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4787 	(void) zfs_refcount_remove_many(&mga->mga_queue_depth, psize, tag);
4788 }
4789 
4790 static uint64_t
4791 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t max_size,
4792     uint64_t txg, uint64_t *actual_size)
4793 {
4794 	uint64_t start;
4795 	zfs_range_tree_t *rt = msp->ms_allocatable;
4796 	metaslab_class_t *mc = msp->ms_group->mg_class;
4797 
4798 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4799 	VERIFY(!msp->ms_condensing);
4800 	VERIFY0(msp->ms_disabled);
4801 	VERIFY0(msp->ms_new);
4802 
4803 	start = mc->mc_ops->msop_alloc(msp, size, max_size, actual_size);
4804 	if (start != -1ULL) {
4805 		size = *actual_size;
4806 		metaslab_group_t *mg = msp->ms_group;
4807 		vdev_t *vd = mg->mg_vd;
4808 
4809 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4810 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4811 		VERIFY3U(zfs_range_tree_space(rt) - size, <=, msp->ms_size);
4812 		zfs_range_tree_remove(rt, start, size);
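		/* A range handed out for allocation must not also be trimmed. */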
4813 		zfs_range_tree_clear(msp->ms_trim, start, size);
4814 
4815 		if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4816 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4817 
4818 		zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], start,
4819 		    size);
4820 		msp->ms_allocating_total += size;
4821 
4822 		/* Track the last successful allocation */
4823 		msp->ms_alloc_txg = txg;
4824 		metaslab_verify_space(msp, txg);
4825 	}
4826 
4827 	/*
4828 	 * Now that we've attempted the allocation we need to update the
4829 	 * metaslab's maximum block size since it may have changed.
4830 	 */
4831 	msp->ms_max_size = metaslab_largest_allocatable(msp);
4832 	return (start);
4833 }
4834 
4835 /*
4836  * Find the metaslab with the highest weight that is less than what we've
4837  * already tried.  In the common case, this means that we will examine each
4838  * metaslab at most once. Note that concurrent callers could reorder metaslabs
4839  * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4840  * activated by another thread, and we fail to allocate from the metaslab we
4841  * have selected, we may not try the newly-activated metaslab, and instead
4842  * activate another metaslab.  This is not optimal, but generally does not cause
4843  * any problems (a possible exception being if every metaslab is completely full
4844  * except for the newly-activated metaslab which we fail to examine).
4845  */
4846 static metaslab_t *
4847 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4848     dva_t *dva, int d, uint64_t asize, int allocator,
4849     boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4850     boolean_t *was_active)
4851 {
4852 	avl_index_t idx;
4853 	avl_tree_t *t = &mg->mg_metaslab_tree;
4854 	metaslab_t *msp = avl_find(t, search, &idx);
4855 	if (msp == NULL)
4856 		msp = avl_nearest(t, idx, AVL_AFTER);
4857 
4858 	uint_t tries = 0;
4859 	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4860 		int i;
4861 
4862 		if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4863 			METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4864 			return (NULL);
4865 		}
4866 		tries++;
4867 
4868 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4869 			metaslab_trace_add(zal, mg, msp, asize, d,
4870 			    TRACE_TOO_SMALL, allocator);
4871 			continue;
4872 		}
4873 
4874 		/*
4875 		 * If the selected metaslab is condensing or disabled, or
4876 		 * hasn't gone through a metaslab_sync_done(), then skip it.
4877 		 */
4878 		if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
4879 			continue;
4880 
4881 		*was_active = msp->ms_allocator != -1;
4882 		/*
4883 		 * If we're activating as primary, this is our first allocation
4884 		 * from this disk, so we don't need to check how close we are.
4885 		 * If the metaslab under consideration was already active,
4886 		 * we're getting desperate enough to steal another allocator's
4887 		 * metaslab, so we still don't care about distances.
4888 		 */
4889 		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4890 			break;
4891 
4892 		if (!try_hard) {
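			/*
			 * Unless we are trying hard, skip metaslabs that
			 * already hold another DVA of this BP.
			 */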
4893 			for (i = 0; i < d; i++) {
4894 				if (!metaslab_is_unique(msp, &dva[i]))
4895 					break;  /* try another metaslab */
4896 			}
4897 			if (i == d)
4898 				break;
4899 		}
4900 	}
4901 
4902 	if (msp != NULL) {
4903 		search->ms_weight = msp->ms_weight;
4904 		search->ms_start = msp->ms_start + 1;
4905 		search->ms_allocator = msp->ms_allocator;
4906 		search->ms_primary = msp->ms_primary;
4907 	}
4908 	return (msp);
4909 }
4910 
4911 static void
4912 metaslab_active_mask_verify(metaslab_t *msp)
4913 {
4914 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4915 
4916 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4917 		return;
4918 
4919 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4920 		return;
4921 
4922 	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4923 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4924 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4925 		VERIFY3S(msp->ms_allocator, !=, -1);
4926 		VERIFY(msp->ms_primary);
4927 		return;
4928 	}
4929 
4930 	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4931 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4932 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4933 		VERIFY3S(msp->ms_allocator, !=, -1);
4934 		VERIFY(!msp->ms_primary);
4935 		return;
4936 	}
4937 
4938 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4939 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4940 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4941 		VERIFY3S(msp->ms_allocator, ==, -1);
4942 		return;
4943 	}
4944 }
4945 
4946 static uint64_t
4947 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
4948     uint64_t asize, uint64_t max_asize, uint64_t txg,
4949     dva_t *dva, int d, int allocator, boolean_t try_hard,
4950     uint64_t *actual_asize)
4951 {
4952 	metaslab_t *msp = NULL;
4953 	uint64_t offset = -1ULL;
4954 
4955 	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
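	/*
	 * If an earlier DVA of this BP already resides on this vdev, use the
	 * secondary (or, on a further repeat, the claim) activation weight so
	 * that ditto copies are spread across different metaslabs.
	 */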
4956 	for (int i = 0; i < d; i++) {
4957 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4958 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4959 			activation_weight = METASLAB_WEIGHT_SECONDARY;
4960 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4961 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4962 			activation_weight = METASLAB_WEIGHT_CLAIM;
4963 			break;
4964 		}
4965 	}
4966 
4967 	/*
4968 	 * If there aren't enough ready metaslabs for every allocator, just
	 * use the 0th slot.
4969 	 */
4970 	if (allocator >= mg->mg_ms_ready / 3)
4971 		allocator = 0;
4972 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4973 
4974 	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4975 
4976 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4977 	search->ms_weight = UINT64_MAX;
4978 	search->ms_start = 0;
4979 	/*
4980 	 * At the end of the metaslab tree are the already-active metaslabs,
4981 	 * first the primaries, then the secondaries. When we resume searching
4982 	 * through the tree, we need to consider ms_allocator and ms_primary so
4983 	 * we start in the location right after where we left off, and don't
4984 	 * accidentally loop forever considering the same metaslabs.
4985 	 */
4986 	search->ms_allocator = -1;
4987 	search->ms_primary = B_TRUE;
4988 	for (;;) {
4989 		boolean_t was_active = B_FALSE;
4990 
4991 		mutex_enter(&mg->mg_lock);
4992 
4993 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4994 		    mga->mga_primary != NULL) {
4995 			msp = mga->mga_primary;
4996 
4997 			/*
4998 			 * Even though we don't hold the ms_lock for the
4999 			 * primary metaslab, those fields should not
5000 			 * change while we hold the mg_lock. Thus it is
5001 			 * safe to make assertions on them.
5002 			 */
5003 			ASSERT(msp->ms_primary);
5004 			ASSERT3S(msp->ms_allocator, ==, allocator);
5005 			ASSERT(msp->ms_loaded);
5006 
5007 			was_active = B_TRUE;
5008 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5009 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
5010 		    mga->mga_secondary != NULL) {
5011 			msp = mga->mga_secondary;
5012 
5013 			/*
5014 			 * See comment above about the similar assertions
5015 			 * for the primary metaslab.
5016 			 */
5017 			ASSERT(!msp->ms_primary);
5018 			ASSERT3S(msp->ms_allocator, ==, allocator);
5019 			ASSERT(msp->ms_loaded);
5020 
5021 			was_active = B_TRUE;
5022 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5023 		} else {
5024 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
5025 			    asize, allocator, try_hard, zal, search,
5026 			    &was_active);
5027 		}
5028 
5029 		mutex_exit(&mg->mg_lock);
5030 		if (msp == NULL)
5031 			break;
5032 		mutex_enter(&msp->ms_lock);
5033 
5034 		metaslab_active_mask_verify(msp);
5035 
5036 		/*
5037 		 * This code is disabled because of issues with
5038 		 * tracepoints in non-GPL kernel modules.
5039 		 */
5040 #if 0
5041 		DTRACE_PROBE3(ms__activation__attempt,
5042 		    metaslab_t *, msp, uint64_t, activation_weight,
5043 		    boolean_t, was_active);
5044 #endif
5045 
5046 		/*
5047 		 * Ensure that the metaslab we have selected is still
5048 		 * capable of handling our request. It's possible that
5049 		 * another thread may have changed the weight while we
5050 		 * were blocked on the metaslab lock. We check the
5051 		 * active status first to see if we need to select a
5052 		 * new metaslab.
5053 		 */
5054 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
5055 			ASSERT3S(msp->ms_allocator, ==, -1);
5056 			mutex_exit(&msp->ms_lock);
5057 			continue;
5058 		}
5059 
5060 		/*
5061 		 * If the metaslab was activated for another allocator
5062 		 * while we were waiting on the ms_lock above, or it's
5063 		 * a primary and we're seeking a secondary (or vice versa),
5064 		 * we go back and select a new metaslab.
5065 		 */
5066 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
5067 		    (msp->ms_allocator != -1) &&
5068 		    (msp->ms_allocator != allocator || ((activation_weight ==
5069 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
5070 			ASSERT(msp->ms_loaded);
5071 			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
5072 			    msp->ms_allocator != -1);
5073 			mutex_exit(&msp->ms_lock);
5074 			continue;
5075 		}
5076 
5077 		/*
5078 		 * This metaslab was used for claiming regions allocated
5079 		 * by the ZIL during pool import. Once these regions are
5080 		 * claimed we don't need to keep the CLAIM bit set
5081 		 * anymore. Passivate this metaslab to zero its activation
5082 		 * mask.
5083 		 */
5084 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
5085 		    activation_weight != METASLAB_WEIGHT_CLAIM) {
5086 			ASSERT(msp->ms_loaded);
5087 			ASSERT3S(msp->ms_allocator, ==, -1);
5088 			metaslab_passivate(msp, msp->ms_weight &
5089 			    ~METASLAB_WEIGHT_CLAIM);
5090 			mutex_exit(&msp->ms_lock);
5091 			continue;
5092 		}
5093 
5094 		metaslab_set_selected_txg(msp, txg);
5095 
5096 		int activation_error =
5097 		    metaslab_activate(msp, allocator, activation_weight);
5098 		metaslab_active_mask_verify(msp);
5099 
5100 		/*
5101 		 * If the metaslab was activated by another thread for
5102 		 * another allocator or activation_weight (EBUSY), or it
5103 		 * failed because another metaslab was assigned as primary
5104 		 * for this allocator (EEXIST), we continue using this
5105 		 * metaslab for our allocation, rather than going on to a
5106 		 * worse metaslab (we waited for that metaslab to be loaded
5107 		 * after all).
5108 		 *
5109 		 * If the activation failed due to an I/O error or ENOSPC we
5110 		 * skip to the next metaslab.
5111 		 */
5112 		boolean_t activated;
5113 		if (activation_error == 0) {
5114 			activated = B_TRUE;
5115 		} else if (activation_error == EBUSY ||
5116 		    activation_error == EEXIST) {
5117 			activated = B_FALSE;
5118 		} else {
5119 			mutex_exit(&msp->ms_lock);
5120 			continue;
5121 		}
5122 		ASSERT(msp->ms_loaded);
5123 
5124 		/*
5125 		 * Now that we have the lock, recheck to see if we should
5126 		 * continue to use this metaslab for this allocation. The
5127 		 * metaslab is now loaded so metaslab_should_allocate()
5128 		 * can accurately determine if the allocation attempt should
5129 		 * proceed.
5130 		 */
5131 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
5132 			/* Passivate this metaslab and select a new one. */
5133 			metaslab_trace_add(zal, mg, msp, asize, d,
5134 			    TRACE_TOO_SMALL, allocator);
5135 			goto next;
5136 		}
5137 
5138 		/*
5139 		 * If this metaslab is currently condensing then pick again
5140 		 * as we can't manipulate this metaslab until it's committed
5141 		 * to disk. If this metaslab is being initialized, we shouldn't
5142 		 * allocate from it since the allocated region might be
5143 		 * overwritten after allocation.
5144 		 */
5145 		if (msp->ms_condensing) {
5146 			metaslab_trace_add(zal, mg, msp, asize, d,
5147 			    TRACE_CONDENSING, allocator);
5148 			if (activated) {
5149 				metaslab_passivate(msp, msp->ms_weight &
5150 				    ~METASLAB_ACTIVE_MASK);
5151 			}
5152 			mutex_exit(&msp->ms_lock);
5153 			continue;
5154 		} else if (msp->ms_disabled > 0) {
5155 			metaslab_trace_add(zal, mg, msp, asize, d,
5156 			    TRACE_DISABLED, allocator);
5157 			if (activated) {
5158 				metaslab_passivate(msp, msp->ms_weight &
5159 				    ~METASLAB_ACTIVE_MASK);
5160 			}
5161 			mutex_exit(&msp->ms_lock);
5162 			continue;
5163 		}
5164 
5165 		offset = metaslab_block_alloc(msp, asize, max_asize, txg,
5166 		    actual_asize);
5167 
5168 		if (offset != -1ULL) {
5169 			metaslab_trace_add(zal, mg, msp, *actual_asize, d,
5170 			    offset, allocator);
5171 			/* Proactively passivate the metaslab, if needed */
5172 			if (activated)
5173 				metaslab_segment_may_passivate(msp);
5174 			mutex_exit(&msp->ms_lock);
5175 			break;
5176 		}
5177 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
5178 next:
5179 		ASSERT(msp->ms_loaded);
5180 
5181 		/*
5182 		 * This code is disabled because of issues with
5183 		 * tracepoints in non-GPL kernel modules.
5184 		 */
5185 #if 0
5186 		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
5187 		    uint64_t, asize);
5188 #endif
5189 
5190 		/*
5191 		 * We were unable to allocate from this metaslab so determine
5192 		 * a new weight for this metaslab. Now that we have loaded
5193 		 * the metaslab we can provide a better hint to the metaslab
5194 		 * selector.
5195 		 *
5196 		 * For space-based metaslabs, we use the maximum block size.
5197 		 * This information is only available when the metaslab
5198 		 * is loaded and is more accurate than the generic free
5199 		 * space weight that was calculated by metaslab_weight().
5200 		 * This information allows us to quickly compare the maximum
5201 		 * available allocation in the metaslab to the allocation
5202 		 * size being requested.
5203 		 *
5204 		 * For segment-based metaslabs, determine the new weight
5205 		 * based on the highest bucket in the range tree. We
5206 		 * explicitly use the loaded segment weight (i.e. the range
5207 		 * tree histogram) since it contains the space that is
5208 		 * currently available for allocation and is accurate
5209 		 * even within a sync pass.
5210 		 */
5211 		uint64_t weight;
5212 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
5213 			weight = metaslab_largest_allocatable(msp);
5214 			WEIGHT_SET_SPACEBASED(weight);
5215 		} else {
5216 			weight = metaslab_weight_from_range_tree(msp);
5217 		}
5218 
5219 		if (activated) {
5220 			metaslab_passivate(msp, weight);
5221 		} else {
5222 			/*
5223 			 * For the case where we use the metaslab that is
5224 			 * active for another allocator we want to make
5225 			 * sure that we retain the activation mask.
5226 			 *
5227 			 * Note that we could attempt to use something like
5228 			 * metaslab_recalculate_weight_and_sort() that
5229 			 * retains the activation mask here. That function
5230 			 * uses metaslab_weight() to set the weight though
5231 			 * which is not as accurate as the calculations
5232 			 * above.
5233 			 */
5234 			weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5235 			metaslab_group_sort(mg, msp, weight);
5236 		}
5237 		metaslab_active_mask_verify(msp);
5238 
5239 		/*
5240 		 * We have just failed an allocation attempt, check
5241 		 * that metaslab_should_allocate() agrees. Otherwise,
5242 		 * we may end up in an infinite loop retrying the same
5243 		 * metaslab.
5244 		 */
5245 		ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5246 
5247 		mutex_exit(&msp->ms_lock);
5248 	}
5249 	kmem_free(search, sizeof (*search));
5250 
5251 	if (offset == -1ULL) {
5252 		metaslab_trace_add(zal, mg, NULL, asize, d,
5253 		    TRACE_GROUP_FAILURE, allocator);
5254 		if (asize <= vdev_get_min_alloc(mg->mg_vd)) {
5255 			/*
5256 			 * This metaslab group was unable to allocate
5257 			 * the minimum block size so it must be out of
5258 			 * space.  Notify the allocation throttle to
5259 			 * skip allocation attempts to this group until
5260 			 * more space becomes available.
5261 			 */
5262 			mg->mg_no_free_space = B_TRUE;
5263 		}
5264 	}
5265 	return (offset);
5266 }
5267 
5268 static boolean_t
5269 metaslab_group_allocatable(spa_t *spa, metaslab_group_t *mg, uint64_t psize,
5270     int d, int flags, boolean_t try_hard, zio_alloc_list_t *zal, int allocator)
5271 {
5272 	metaslab_class_t *mc = mg->mg_class;
5273 	vdev_t *vd = mg->mg_vd;
5274 	boolean_t allocatable;
5275 
5276 	/*
5277 	 * Don't allocate from faulted devices.
5278 	 */
5279 	if (try_hard)
5280 		spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5281 	allocatable = vdev_allocatable(vd);
5282 	if (try_hard)
5283 		spa_config_exit(spa, SCL_ZIO, FTAG);
5284 	if (!allocatable) {
5285 		metaslab_trace_add(zal, mg, NULL, psize, d,
5286 		    TRACE_NOT_ALLOCATABLE, allocator);
5287 		return (B_FALSE);
5288 	}
5289 
5290 	if (!try_hard) {
5291 		/*
5292 		 * Avoid vdevs that have too little free space or are too
		 * fragmented.
5293 		 */
5294 		if (!GANG_ALLOCATION(flags) && (mg->mg_no_free_space ||
5295 		    (!mg->mg_allocatable && mc->mc_alloc_groups > 0))) {
5296 			metaslab_trace_add(zal, mg, NULL, psize, d,
5297 			    TRACE_NOT_ALLOCATABLE, allocator);
5298 			return (B_FALSE);
5299 		}
5300 
5301 		/*
5302 		 * Avoid writing single-copy data to an unhealthy,
5303 		 * non-redundant vdev.
5304 		 */
5305 		if (d == 0 && vd->vdev_state < VDEV_STATE_HEALTHY &&
5306 		    vd->vdev_children == 0) {
5307 			metaslab_trace_add(zal, mg, NULL, psize, d,
5308 			    TRACE_VDEV_ERROR, allocator);
5309 			return (B_FALSE);
5310 		}
5311 	}
5312 
5313 	return (B_TRUE);
5314 }
5315 
5316 static int
5317 metaslab_alloc_dva_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5318     uint64_t max_psize, dva_t *dva, int d, const dva_t *hintdva, uint64_t txg,
5319     int flags, zio_alloc_list_t *zal, int allocator, uint64_t *actual_psize)
5320 {
5321 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5322 	metaslab_group_t *mg = NULL, *rotor;
5323 	vdev_t *vd;
5324 	boolean_t try_hard = B_FALSE;
5325 
5326 	ASSERT(!DVA_IS_VALID(&dva[d]));
5327 
5328 	/*
5329 	 * For testing, make some blocks above a certain size be gang blocks.
5330 	 * This will result in more split blocks when using device removal,
5331 	 * and a large number of split blocks coupled with ztest-induced
5332 	 * damage can result in extremely long reconstruction times.  This
5333 	 * will also test spilling from special to normal.
5334 	 */
5335 	if (psize >= metaslab_force_ganging &&
5336 	    metaslab_force_ganging_pct > 0 &&
5337 	    (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5338 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5339 		    allocator);
5340 		return (SET_ERROR(ENOSPC));
5341 	}
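	/*
	 * Similarly, for testing, occasionally reduce max_psize so the
	 * variable-size allocation path is exercised with smaller maximums.
	 */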
5342 	if (max_psize > psize && max_psize >= metaslab_force_ganging &&
5343 	    metaslab_force_ganging_pct > 0 &&
5344 	    (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5345 		max_psize = MAX((psize + max_psize) / 2,
5346 		    metaslab_force_ganging);
5347 	}
5348 	ASSERT3U(psize, <=, max_psize);
5349 
5350 	/*
5351 	 * Start at the rotor and loop through all mgs until we find something.
5352 	 * Note that there's no locking on mca_rotor or mca_aliquot because
5353 	 * nothing actually breaks if we miss a few updates -- we just won't
5354 	 * allocate quite as evenly.  It all balances out over time.
5355 	 *
5356 	 * If we are doing ditto or log blocks, try to spread them across
5357 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
5358 	 * allocated all of our ditto blocks, then try and spread them out on
5359 	 * that vdev as much as possible.  If it turns out to not be possible,
5360 	 * gradually lower our standards until anything becomes acceptable.
5361 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5362 	 * gives us hope of containing our fault domains to something we're
5363 	 * able to reason about.  Otherwise, any two top-level vdev failures
5364 	 * will guarantee the loss of data.  With consecutive allocation,
5365 	 * only two adjacent top-level vdev failures will result in data loss.
5366 	 *
5367 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5368 	 * ourselves on the same vdev as our gang block header.  It makes our
5369 	 * fault domains something tractable.
5370 	 */
5371 	if (hintdva && DVA_IS_VALID(&hintdva[d])) {
5372 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5373 		mg = vdev_get_mg(vd, mc);
5374 	}
5375 	if (mg == NULL && d != 0) {
5376 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5377 		mg = vdev_get_mg(vd, mc)->mg_next;
5378 	}
5379 	if (mg == NULL || mg->mg_class != mc || mg->mg_activation_count <= 0) {
5380 		ASSERT(mca->mca_rotor != NULL);
5381 		mg = mca->mca_rotor;
5382 	}
5383 
5384 	rotor = mg;
5385 top:
5386 	do {
5387 		ASSERT(mg->mg_activation_count == 1);
5388 		ASSERT(mg->mg_class == mc);
5389 
5390 		if (!metaslab_group_allocatable(spa, mg, psize, d, flags,
5391 		    try_hard, zal, allocator))
5392 			goto next;
5393 
5394 		vd = mg->mg_vd;
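		/* Convert the physical sizes into on-vdev allocation sizes. */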
5395 		uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
5396 		ASSERT0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5397 		uint64_t max_asize = vdev_psize_to_asize_txg(vd, max_psize,
5398 		    txg);
5399 		ASSERT0(P2PHASE(max_asize, 1ULL << vd->vdev_ashift));
5400 		uint64_t offset = metaslab_group_alloc(mg, zal, asize,
5401 		    max_asize, txg, dva, d, allocator, try_hard,
5402 		    &asize);
5403 
5404 		if (offset != -1ULL) {
5405 			if (actual_psize)
5406 				*actual_psize = vdev_asize_to_psize_txg(vd,
5407 				    asize, txg);
5408 			metaslab_class_rotate(mg, allocator, psize, B_TRUE);
5409 
5410 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
5411 			DVA_SET_OFFSET(&dva[d], offset);
5412 			DVA_SET_GANG(&dva[d],
5413 			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5414 			DVA_SET_ASIZE(&dva[d], asize);
5415 			return (0);
5416 		}
5417 next:
5418 		metaslab_class_rotate(mg, allocator, psize, B_FALSE);
5419 	} while ((mg = mg->mg_next) != rotor);
5420 
5421 	/*
5422 	 * If we haven't tried hard, perhaps do so now.
5423 	 */
5424 	if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5425 	    GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5426 	    psize <= spa->spa_min_alloc)) {
5427 		METASLABSTAT_BUMP(metaslabstat_try_hard);
5428 		try_hard = B_TRUE;
5429 		goto top;
5430 	}
5431 
5432 	memset(&dva[d], 0, sizeof (dva_t));
5433 
5434 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5435 	return (SET_ERROR(ENOSPC));
5436 }
5437 
5438 /*
5439  * Allocate a block for the specified i/o.
5440  */
5441 int
5442 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5443     dva_t *dva, int d, const dva_t *hintdva, uint64_t txg, int flags,
5444     zio_alloc_list_t *zal, int allocator)
5445 {
5446 	return (metaslab_alloc_dva_range(spa, mc, psize, psize, dva, d, hintdva,
5447 	    txg, flags, zal, allocator, NULL));
5448 }
5449 
5450 void
5451 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5452     boolean_t checkpoint)
5453 {
5454 	metaslab_t *msp;
5455 	spa_t *spa = vd->vdev_spa;
5456 	int m = offset >> vd->vdev_ms_shift;
5457 
5458 	ASSERT(vdev_is_concrete(vd));
5459 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5460 	VERIFY3U(m, <, vd->vdev_ms_count);
5461 
5462 	msp = vd->vdev_ms[m];
5463 
5464 	VERIFY(!msp->ms_condensing);
5465 	VERIFY3U(offset, >=, msp->ms_start);
5466 	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5467 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5468 	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5469 
5470 	metaslab_check_free_impl(vd, offset, asize);
5471 
5472 	mutex_enter(&msp->ms_lock);
5473 	if (zfs_range_tree_is_empty(msp->ms_freeing) &&
5474 	    zfs_range_tree_is_empty(msp->ms_checkpointing)) {
5475 		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5476 	}
5477 
5478 	if (checkpoint) {
5479 		ASSERT(spa_has_checkpoint(spa));
5480 		zfs_range_tree_add(msp->ms_checkpointing, offset, asize);
5481 	} else {
5482 		zfs_range_tree_add(msp->ms_freeing, offset, asize);
5483 	}
5484 	mutex_exit(&msp->ms_lock);
5485 }
5486 
5487 void
5488 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5489     uint64_t size, void *arg)
5490 {
5491 	(void) inner_offset;
5492 	boolean_t *checkpoint = arg;
5493 
5494 	ASSERT3P(checkpoint, !=, NULL);
5495 
5496 	if (vd->vdev_ops->vdev_op_remap != NULL)
5497 		vdev_indirect_mark_obsolete(vd, offset, size);
5498 	else
5499 		metaslab_free_impl(vd, offset, size, *checkpoint);
5500 }
5501 
5502 static void
5503 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5504     boolean_t checkpoint)
5505 {
5506 	spa_t *spa = vd->vdev_spa;
5507 
5508 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5509 
5510 	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5511 		return;
5512 
5513 	if (spa->spa_vdev_removal != NULL &&
5514 	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5515 	    vdev_is_concrete(vd)) {
5516 		/*
5517 		 * Note: we check if the vdev is concrete because when
5518 		 * we complete the removal, we first change the vdev to be
5519 		 * an indirect vdev (in open context), and then (in syncing
5520 		 * context) clear spa_vdev_removal.
5521 		 */
5522 		free_from_removing_vdev(vd, offset, size);
5523 	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
5524 		vdev_indirect_mark_obsolete(vd, offset, size);
5525 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5526 		    metaslab_free_impl_cb, &checkpoint);
5527 	} else {
5528 		metaslab_free_concrete(vd, offset, size, checkpoint);
5529 	}
5530 }
5531 
5532 typedef struct remap_blkptr_cb_arg {
5533 	blkptr_t *rbca_bp;
5534 	spa_remap_cb_t rbca_cb;
5535 	vdev_t *rbca_remap_vd;
5536 	uint64_t rbca_remap_offset;
5537 	void *rbca_cb_arg;
5538 } remap_blkptr_cb_arg_t;
5539 
5540 static void
5541 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5542     uint64_t size, void *arg)
5543 {
5544 	remap_blkptr_cb_arg_t *rbca = arg;
5545 	blkptr_t *bp = rbca->rbca_bp;
5546 
5547 	/* We can not remap split blocks. */
5548 	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5549 		return;
5550 	ASSERT0(inner_offset);
5551 
5552 	if (rbca->rbca_cb != NULL) {
5553 		/*
5554 		 * At this point we know that we are not handling split
5555 		 * blocks and we invoke the callback on the previous
5556 		 * vdev which must be indirect.
5557 		 */
5558 		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5559 
5560 		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5561 		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5562 
5563 		/* set up remap_blkptr_cb_arg for the next call */
5564 		rbca->rbca_remap_vd = vd;
5565 		rbca->rbca_remap_offset = offset;
5566 	}
5567 
5568 	/*
5569 	 * The phys birth time is that of dva[0].  This ensures that we know
5570 	 * when each dva was written, so that resilver can determine which
5571 	 * blocks need to be scrubbed (i.e. those written during the time
5572 	 * the vdev was offline).  It also ensures that the key used in
5573 	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
5574 	 * we didn't change the phys_birth, a lookup in the ARC for a
5575 	 * remapped BP could find the data that was previously stored at
5576 	 * this vdev + offset.
5577 	 */
5578 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5579 	    DVA_GET_VDEV(&bp->blk_dva[0]));
5580 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5581 	uint64_t physical_birth = vdev_indirect_births_physbirth(vib,
5582 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5583 	BP_SET_PHYSICAL_BIRTH(bp, physical_birth);
5584 
5585 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5586 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5587 }
5588 
5589 /*
5590  * If the block pointer contains any indirect DVAs, modify them to refer to
5591  * concrete DVAs.  Note that this will sometimes not be possible, leaving
5592  * the indirect DVA in place.  This happens if the indirect DVA spans multiple
5593  * segments in the mapping (i.e. it is a "split block").
5594  *
5595  * If the BP was remapped, calls the callback on the original dva (note the
5596  * callback can be called multiple times if the original indirect DVA refers
5597  * to another indirect DVA, etc).
5598  *
5599  * Returns TRUE if the BP was remapped.
5600  */
5601 boolean_t
5602 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5603 {
5604 	remap_blkptr_cb_arg_t rbca;
5605 
5606 	if (!zfs_remap_blkptr_enable)
5607 		return (B_FALSE);
5608 
5609 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5610 		return (B_FALSE);
5611 
5612 	/*
5613 	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5614 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5615 	 */
5616 	if (BP_GET_DEDUP(bp))
5617 		return (B_FALSE);
5618 
5619 	/*
5620 	 * Gang blocks can not be remapped, because
5621 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5622 	 * the BP used to read the gang block header (GBH) being the same
5623 	 * as the DVA[0] that we allocated for the GBH.
5624 	 */
5625 	if (BP_IS_GANG(bp))
5626 		return (B_FALSE);
5627 
5628 	/*
5629 	 * Embedded BP's have no DVA to remap.
5630 	 */
5631 	if (BP_GET_NDVAS(bp) < 1)
5632 		return (B_FALSE);
5633 
5634 	/*
5635 	 * Cloned blocks can not be remapped since BRT depends on specific
5636 	 * vdev id and offset in the DVA[0] for its reference counting.
5637 	 */
5638 	if (!BP_IS_METADATA(bp) && brt_maybe_exists(spa, bp))
5639 		return (B_FALSE);
5640 
5641 	/*
5642 	 * Note: we only remap dva[0].  If we remapped other dvas, we
5643 	 * would no longer know what their phys birth txg is.
5644 	 */
5645 	dva_t *dva = &bp->blk_dva[0];
5646 
5647 	uint64_t offset = DVA_GET_OFFSET(dva);
5648 	uint64_t size = DVA_GET_ASIZE(dva);
5649 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5650 
5651 	if (vd->vdev_ops->vdev_op_remap == NULL)
5652 		return (B_FALSE);
5653 
5654 	rbca.rbca_bp = bp;
5655 	rbca.rbca_cb = callback;
5656 	rbca.rbca_remap_vd = vd;
5657 	rbca.rbca_remap_offset = offset;
5658 	rbca.rbca_cb_arg = arg;
5659 
5660 	/*
5661 	 * remap_blkptr_cb() will be called in order for each level of
5662 	 * indirection, until a concrete vdev is reached or a split block is
5663 	 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
5664 	 * the callback as we go from one indirect vdev to the next (either
5665 	 * concrete or indirect again) in that order.
5666 	 */
5667 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5668 
5669 	/* Check if the DVA wasn't remapped because it is a split block */
5670 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5671 		return (B_FALSE);
5672 
5673 	return (B_TRUE);
5674 }
5675 
5676 /*
5677  * Undo the allocation of a DVA which happened in the given transaction group.
5678  */
5679 void
5680 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5681 {
5682 	metaslab_t *msp;
5683 	vdev_t *vd;
5684 	uint64_t vdev = DVA_GET_VDEV(dva);
5685 	uint64_t offset = DVA_GET_OFFSET(dva);
5686 	uint64_t size = DVA_GET_ASIZE(dva);
5687 
5688 	ASSERT(DVA_IS_VALID(dva));
5689 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5690 
5691 	if (txg > spa_freeze_txg(spa))
5692 		return;
5693 
5694 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5695 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5696 		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5697 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5698 		    (u_longlong_t)size);
5699 		return;
5700 	}
5701 
5702 	ASSERT(!vd->vdev_removing);
5703 	ASSERT(vdev_is_concrete(vd));
5704 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5705 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5706 
5707 	if (DVA_GET_GANG(dva))
5708 		size = vdev_gang_header_asize(vd);
5709 
5710 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5711 
5712 	mutex_enter(&msp->ms_lock);
5713 	zfs_range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5714 	    offset, size);
5715 	msp->ms_allocating_total -= size;
5716 
5717 	VERIFY(!msp->ms_condensing);
5718 	VERIFY3U(offset, >=, msp->ms_start);
5719 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5720 	VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) + size, <=,
5721 	    msp->ms_size);
5722 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5723 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5724 	zfs_range_tree_add(msp->ms_allocatable, offset, size);
5725 	mutex_exit(&msp->ms_lock);
5726 }
5727 
5728 /*
5729  * Free the block represented by the given DVA.
5730  */
5731 void
5732 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5733 {
5734 	uint64_t vdev = DVA_GET_VDEV(dva);
5735 	uint64_t offset = DVA_GET_OFFSET(dva);
5736 	uint64_t size = DVA_GET_ASIZE(dva);
5737 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5738 
5739 	ASSERT(DVA_IS_VALID(dva));
5740 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5741 
5742 	if (DVA_GET_GANG(dva)) {
5743 		size = vdev_gang_header_asize(vd);
5744 	}
5745 
5746 	metaslab_free_impl(vd, offset, size, checkpoint);
5747 }
5748 
5749 /*
5750  * Reserve some allocation slots. The reservation system must be called
5751  * before we call into the allocator. If there aren't any available slots
5752  * then the I/O will be throttled until an I/O completes and its slots are
5753  * freed up. The function returns true if it was successful in placing
5754  * the reservation.
5755  */
5756 boolean_t
5757 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
5758     boolean_t must, boolean_t *more)
5759 {
5760 	metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
5761 
5762 	ASSERT(mc->mc_alloc_throttle_enabled);
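	/*
	 * If this I/O is larger than any seen before, record the new maximum
	 * and rebalance the class accordingly.
	 */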
5763 	if (mc->mc_alloc_io_size < zio->io_size) {
5764 		mc->mc_alloc_io_size = zio->io_size;
5765 		metaslab_class_balance(mc, B_FALSE);
5766 	}
5767 	if (must || mca->mca_reserved <= mc->mc_alloc_max) {
5768 		/*
5769 		 * The potential race between the compare and the add is covered
5770 		 * by the allocator lock in most cases, or is irrelevant because
5771 		 * must is set. Even in some other, hypothetical scenario, the
5772 		 * worst that can happen is that a few more I/Os reach allocation
5773 		 * slightly earlier, which is not a problem.
5774 		 */
5775 		int64_t delta = slots * zio->io_size;
5776 		*more = (atomic_add_64_nv(&mca->mca_reserved, delta) <=
5777 		    mc->mc_alloc_max);
5778 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5779 		return (B_TRUE);
5780 	}
5781 	*more = B_FALSE;
5782 	return (B_FALSE);
5783 }
5784 
5785 boolean_t
5786 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5787     zio_t *zio)
5788 {
5789 	metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
5790 
5791 	ASSERT(mc->mc_alloc_throttle_enabled);
5792 	int64_t delta = slots * zio->io_size;
5793 	return (atomic_add_64_nv(&mca->mca_reserved, -delta) <=
5794 	    mc->mc_alloc_max);
5795 }
5796 
5797 static int
5798 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5799     uint64_t txg)
5800 {
5801 	metaslab_t *msp;
5802 	spa_t *spa = vd->vdev_spa;
5803 	int error = 0;
5804 
5805 	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5806 		return (SET_ERROR(ENXIO));
5807 
5808 	ASSERT3P(vd->vdev_ms, !=, NULL);
5809 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5810 
5811 	mutex_enter(&msp->ms_lock);
5812 
5813 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5814 		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
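		/*
		 * EBUSY indicates the metaslab is already active, which is
		 * sufficient for claiming this range.
		 */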
5815 		if (error == EBUSY) {
5816 			ASSERT(msp->ms_loaded);
5817 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5818 			error = 0;
5819 		}
5820 	}
5821 
5822 	if (error == 0 &&
5823 	    !zfs_range_tree_contains(msp->ms_allocatable, offset, size))
5824 		error = SET_ERROR(ENOENT);
5825 
5826 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
5827 		mutex_exit(&msp->ms_lock);
5828 		return (error);
5829 	}
5830 
5831 	VERIFY(!msp->ms_condensing);
5832 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5833 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5834 	VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) - size, <=,
5835 	    msp->ms_size);
5836 	zfs_range_tree_remove(msp->ms_allocatable, offset, size);
5837 	zfs_range_tree_clear(msp->ms_trim, offset, size);
5838 
5839 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(8) */
5840 		metaslab_class_t *mc = msp->ms_group->mg_class;
5841 		multilist_sublist_t *mls =
5842 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5843 		if (!multilist_link_active(&msp->ms_class_txg_node)) {
5844 			msp->ms_selected_txg = txg;
5845 			multilist_sublist_insert_head(mls, msp);
5846 		}
5847 		multilist_sublist_unlock(mls);
5848 
5849 		if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5850 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
5851 		zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5852 		    offset, size);
5853 		msp->ms_allocating_total += size;
5854 	}
5855 
5856 	mutex_exit(&msp->ms_lock);
5857 
5858 	return (0);
5859 }
5860 
5861 typedef struct metaslab_claim_cb_arg_t {
5862 	uint64_t	mcca_txg;
5863 	int		mcca_error;
5864 } metaslab_claim_cb_arg_t;
5865 
5866 static void
5867 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5868     uint64_t size, void *arg)
5869 {
5870 	(void) inner_offset;
5871 	metaslab_claim_cb_arg_t *mcca_arg = arg;
5872 
5873 	if (mcca_arg->mcca_error == 0) {
5874 		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5875 		    size, mcca_arg->mcca_txg);
5876 	}
5877 }
5878 
5879 int
5880 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5881 {
5882 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5883 		metaslab_claim_cb_arg_t arg;
5884 
5885 		/*
5886 		 * Only zdb(8) can claim on indirect vdevs.  This is used
5887 		 * to detect leaks of mapped space (that are not accounted
5888 		 * for in the obsolete counts, spacemap, or bpobj).
5889 		 */
5890 		ASSERT(!spa_writeable(vd->vdev_spa));
5891 		arg.mcca_error = 0;
5892 		arg.mcca_txg = txg;
5893 
5894 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5895 		    metaslab_claim_impl_cb, &arg);
5896 
5897 		if (arg.mcca_error == 0) {
5898 			arg.mcca_error = metaslab_claim_concrete(vd,
5899 			    offset, size, txg);
5900 		}
5901 		return (arg.mcca_error);
5902 	} else {
5903 		return (metaslab_claim_concrete(vd, offset, size, txg));
5904 	}
5905 }
5906 
5907 /*
5908  * Intent log support: upon opening the pool after a crash, notify the SPA
5909  * of blocks that the intent log has allocated for immediate write, but
5910  * which are still considered free by the SPA because the last transaction
5911  * group didn't commit yet.
5912  */
5913 static int
5914 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5915 {
5916 	uint64_t vdev = DVA_GET_VDEV(dva);
5917 	uint64_t offset = DVA_GET_OFFSET(dva);
5918 	uint64_t size = DVA_GET_ASIZE(dva);
5919 	vdev_t *vd;
5920 
5921 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5922 		return (SET_ERROR(ENXIO));
5923 	}
5924 
5925 	ASSERT(DVA_IS_VALID(dva));
5926 
5927 	if (DVA_GET_GANG(dva))
5928 		size = vdev_gang_header_asize(vd);
5929 
5930 	return (metaslab_claim_impl(vd, offset, size, txg));
5931 }
5932 
5933 int
5934 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5935     int ndvas, uint64_t txg, const blkptr_t *hintbp, int flags,
5936     zio_alloc_list_t *zal, int allocator, const void *tag)
5937 {
5938 	return (metaslab_alloc_range(spa, mc, psize, psize, bp, ndvas, txg,
5939 	    hintbp, flags, zal, allocator, tag, NULL));
5940 }
5941 
5942 int
5943 metaslab_alloc_range(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5944     uint64_t max_psize, blkptr_t *bp, int ndvas, uint64_t txg,
5945     const blkptr_t *hintbp, int flags, zio_alloc_list_t *zal, int allocator,
5946     const void *tag, uint64_t *actual_psize)
5947 {
5948 	dva_t *dva = bp->blk_dva;
5949 	const dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5950 	int error = 0;
5951 
5952 	ASSERT0(BP_GET_LOGICAL_BIRTH(bp));
5953 	ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
5954 
5955 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5956 
5957 	if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5958 		/* no vdevs in this class */
5959 		spa_config_exit(spa, SCL_ALLOC, FTAG);
5960 		return (SET_ERROR(ENOSPC));
5961 	}
5962 
5963 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5964 	ASSERT(BP_GET_NDVAS(bp) == 0);
5965 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5966 	ASSERT3P(zal, !=, NULL);
5967 
5968 	uint64_t cur_psize = 0;
5969 
5970 	for (int d = 0; d < ndvas; d++) {
5971 		error = metaslab_alloc_dva_range(spa, mc, psize, max_psize,
5972 		    dva, d, hintdva, txg, flags, zal, allocator,
5973 		    actual_psize ? &cur_psize : NULL);
5974 		if (error != 0) {
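			/*
			 * Undo any DVAs already allocated for this BP and
			 * release their throttle accounting before failing.
			 */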
5975 			for (d--; d >= 0; d--) {
5976 				metaslab_unalloc_dva(spa, &dva[d], txg);
5977 				metaslab_group_alloc_decrement(spa,
5978 				    DVA_GET_VDEV(&dva[d]), allocator, flags,
5979 				    psize, tag);
5980 				memset(&dva[d], 0, sizeof (dva_t));
5981 			}
5982 			spa_config_exit(spa, SCL_ALLOC, FTAG);
5983 			return (error);
5984 		} else {
5985 			/*
5986 			 * Update the metaslab group's queue depth
5987 			 * based on the newly allocated dva.
5988 			 */
5989 			metaslab_group_alloc_increment(spa,
5990 			    DVA_GET_VDEV(&dva[d]), allocator, flags, psize,
5991 			    tag);
5992 			if (actual_psize)
5993 				max_psize = MIN(cur_psize, max_psize);
5994 		}
5995 	}
5996 	ASSERT(error == 0);
5997 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5998 	if (actual_psize)
5999 		*actual_psize = max_psize;
6000 
6001 	spa_config_exit(spa, SCL_ALLOC, FTAG);
6002 
6003 	BP_SET_BIRTH(bp, txg, 0);
6004 
6005 	return (0);
6006 }
6007 
6008 void
6009 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
6010 {
6011 	const dva_t *dva = bp->blk_dva;
6012 	int ndvas = BP_GET_NDVAS(bp);
6013 
6014 	ASSERT(!BP_IS_HOLE(bp));
6015 	ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa));
6016 
6017 	/*
6018 	 * If we have a checkpoint for the pool we need to make sure that
6019 	 * the blocks that we free that are part of the checkpoint won't be
6020 	 * reused until the checkpoint is discarded or we revert to it.
6021 	 *
6022 	 * The checkpoint flag is passed down the metaslab_free code path
6023 	 * and is set whenever we want to add a block to the checkpoint's
6024 	 * accounting. That is, we "checkpoint" blocks that existed at the
6025 	 * time the checkpoint was created and are therefore referenced by
6026 	 * the checkpointed uberblock.
6027 	 *
6028 	 * Note that we don't checkpoint any blocks if the current
6029 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
6030 	 * normally as they will be referenced by the checkpointed uberblock.
6031 	 */
6032 	boolean_t checkpoint = B_FALSE;
6033 	if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg &&
6034 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
6035 		/*
6036 		 * At this point, if the block is part of the checkpoint
6037 		 * there is no way it was created in the current txg.
6038 		 */
6039 		ASSERT(!now);
6040 		ASSERT3U(spa_syncing_txg(spa), ==, txg);
6041 		checkpoint = B_TRUE;
6042 	}
6043 
6044 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
6045 
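	/*
	 * Blocks freed with "now" set were allocated in this txg and are
	 * returned directly to ms_allocatable; otherwise the free goes
	 * through the normal freeing pipeline.
	 */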
6046 	for (int d = 0; d < ndvas; d++) {
6047 		if (now) {
6048 			metaslab_unalloc_dva(spa, &dva[d], txg);
6049 		} else {
6050 			ASSERT3U(txg, ==, spa_syncing_txg(spa));
6051 			metaslab_free_dva(spa, &dva[d], checkpoint);
6052 		}
6053 	}
6054 
6055 	spa_config_exit(spa, SCL_FREE, FTAG);
6056 }
6057 
6058 int
6059 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
6060 {
6061 	const dva_t *dva = bp->blk_dva;
6062 	int ndvas = BP_GET_NDVAS(bp);
6063 	int error = 0;
6064 
6065 	ASSERT(!BP_IS_HOLE(bp));
6066 
6067 	if (txg != 0) {
6068 		/*
6069 		 * First do a dry run to make sure all DVAs are claimable,
6070 		 * so we don't have to unwind from partial failures below.
6071 		 */
6072 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
6073 			return (error);
6074 	}
6075 
6076 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
6077 
6078 	for (int d = 0; d < ndvas; d++) {
6079 		error = metaslab_claim_dva(spa, &dva[d], txg);
6080 		if (error != 0)
6081 			break;
6082 	}
6083 
6084 	spa_config_exit(spa, SCL_ALLOC, FTAG);
6085 
6086 	ASSERT(error == 0 || txg == 0);
6087 
6088 	return (error);
6089 }
6090 
6091 static void
6092 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
6093     uint64_t size, void *arg)
6094 {
6095 	(void) inner, (void) arg;
6096 
6097 	if (vd->vdev_ops == &vdev_indirect_ops)
6098 		return;
6099 
6100 	metaslab_check_free_impl(vd, offset, size);
6101 }
6102 
6103 static void
6104 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
6105 {
6106 	metaslab_t *msp;
6107 	spa_t *spa __maybe_unused = vd->vdev_spa;
6108 
6109 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6110 		return;
6111 
6112 	if (vd->vdev_ops->vdev_op_remap != NULL) {
6113 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
6114 		    metaslab_check_free_impl_cb, NULL);
6115 		return;
6116 	}
6117 
6118 	ASSERT(vdev_is_concrete(vd));
6119 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6120 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6121 
6122 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6123 
6124 	mutex_enter(&msp->ms_lock);
6125 	if (msp->ms_loaded) {
6126 		zfs_range_tree_verify_not_present(msp->ms_allocatable,
6127 		    offset, size);
6128 	}
6129 
6130 	/*
6131 	 * Check all segments that currently exist in the freeing pipeline.
6132 	 *
6133 	 * It would intuitively make sense to also check the current allocating
6134 	 * tree since metaslab_unalloc_dva() exists for extents that are
6135 	 * allocated and freed in the same sync pass within the same txg.
6136 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6137 	 * segment but then we free part of it within the same txg
6138 	 * [see zil_sync()]. Thus, we don't call
6139 	 * zfs_range_tree_verify_not_present() on the current allocating tree.
6140 	 */
6141 	zfs_range_tree_verify_not_present(msp->ms_freeing, offset, size);
6142 	zfs_range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6143 	zfs_range_tree_verify_not_present(msp->ms_freed, offset, size);
6144 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
6145 		zfs_range_tree_verify_not_present(msp->ms_defer[j], offset,
6146 		    size);
6147 	zfs_range_tree_verify_not_present(msp->ms_trim, offset, size);
6148 	mutex_exit(&msp->ms_lock);
6149 }
6150 
6151 void
6152 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6153 {
6154 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6155 		return;
6156 
6157 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6158 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6159 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6160 		vdev_t *vd = vdev_lookup_top(spa, vdev);
6161 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6162 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6163 
6164 		if (DVA_GET_GANG(&bp->blk_dva[i]))
6165 			size = vdev_gang_header_asize(vd);
6166 
6167 		ASSERT3P(vd, !=, NULL);
6168 
6169 		metaslab_check_free_impl(vd, offset, size);
6170 	}
6171 	spa_config_exit(spa, SCL_VDEV, FTAG);
6172 }
6173 
6174 static void
6175 metaslab_group_disable_wait(metaslab_group_t *mg)
6176 {
6177 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6178 	while (mg->mg_disabled_updating) {
6179 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6180 	}
6181 }
6182 
6183 static void
6184 metaslab_group_disabled_increment(metaslab_group_t *mg)
6185 {
6186 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6187 	ASSERT(mg->mg_disabled_updating);
6188 
6189 	while (mg->mg_ms_disabled >= max_disabled_ms) {
6190 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6191 	}
6192 	mg->mg_ms_disabled++;
6193 	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6194 }
6195 
6196 /*
6197  * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6198  * We must also track how many metaslabs are currently disabled within a
6199  * metaslab group and limit them to prevent allocation failures from
6200  * occurring because all metaslabs are disabled.
6201  */
6202 void
6203 metaslab_disable(metaslab_t *msp)
6204 {
6205 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6206 	metaslab_group_t *mg = msp->ms_group;
6207 
6208 	mutex_enter(&mg->mg_ms_disabled_lock);
6209 
6210 	/*
6211 	 * To keep an accurate count of how many threads have disabled
6212 	 * a specific metaslab group, we only allow one thread to mark
6213 	 * the metaslab group at a time. This ensures that the value of
6214 	 * ms_disabled will be accurate when we decide to mark a metaslab
6215 	 * group as disabled. To do this we force all other threads
6216 	 * to wait until the metaslab group's mg_disabled_updating flag
6217 	 * is no longer set.
6218 	 */
6219 	metaslab_group_disable_wait(mg);
6220 	mg->mg_disabled_updating = B_TRUE;
6221 	if (msp->ms_disabled == 0) {
6222 		metaslab_group_disabled_increment(mg);
6223 	}
6224 	mutex_enter(&msp->ms_lock);
6225 	msp->ms_disabled++;
6226 	mutex_exit(&msp->ms_lock);
6227 
6228 	mg->mg_disabled_updating = B_FALSE;
6229 	cv_broadcast(&mg->mg_ms_disabled_cv);
6230 	mutex_exit(&mg->mg_ms_disabled_lock);
6231 }
6232 
6233 void
6234 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6235 {
6236 	metaslab_group_t *mg = msp->ms_group;
6237 	spa_t *spa = mg->mg_vd->vdev_spa;
6238 
6239 	/*
6240 	 * Wait for the outstanding IO to be synced to prevent newly
6241 	 * allocated blocks from being overwritten.  This is used by
6242 	 * initialize and TRIM, which modify unallocated space.
6243 	 */
6244 	if (sync)
6245 		txg_wait_synced(spa_get_dsl(spa), 0);
6246 
6247 	mutex_enter(&mg->mg_ms_disabled_lock);
6248 	mutex_enter(&msp->ms_lock);
6249 	if (--msp->ms_disabled == 0) {
6250 		mg->mg_ms_disabled--;
6251 		cv_broadcast(&mg->mg_ms_disabled_cv);
6252 		if (unload)
6253 			metaslab_unload(msp);
6254 	}
6255 	mutex_exit(&msp->ms_lock);
6256 	mutex_exit(&mg->mg_ms_disabled_lock);
6257 }
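/*
 * Illustrative usage sketch (the actual callers, such as the initialize
 * and TRIM code noted above, live outside this file):
 *
 *	metaslab_disable(msp);
 *	... operate on unallocated space in this metaslab ...
 *	metaslab_enable(msp, B_TRUE, B_FALSE);
 *
 * Callers must not hold ms_lock when calling metaslab_disable().  With
 * sync == B_TRUE, metaslab_enable() waits for outstanding I/O to be
 * synced before allocations may resume; with unload == B_TRUE it also
 * unloads the metaslab once its last disable reference is dropped.
 */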
6258 
6259 void
6260 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6261 {
6262 	ms->ms_unflushed_dirty = dirty;
6263 }
6264 
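/*
 * Persist this metaslab's unflushed txg for the log spacemap feature.
 * Each top-level vdev has a single DMU object, referenced from its
 * top-level ZAP under VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS and created
 * here on first use, that holds one metaslab_unflushed_phys_t entry
 * per metaslab, indexed by ms_id.
 */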
6265 static void
6266 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6267 {
6268 	vdev_t *vd = ms->ms_group->mg_vd;
6269 	spa_t *spa = vd->vdev_spa;
6270 	objset_t *mos = spa_meta_objset(spa);
6271 
6272 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6273 
6274 	metaslab_unflushed_phys_t entry = {
6275 		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
6276 	};
6277 	uint64_t entry_size = sizeof (entry);
6278 	uint64_t entry_offset = ms->ms_id * entry_size;
6279 
6280 	uint64_t object = 0;
6281 	int err = zap_lookup(mos, vd->vdev_top_zap,
6282 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6283 	    &object);
6284 	if (err == ENOENT) {
6285 		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6286 		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6287 		VERIFY0(zap_add(mos, vd->vdev_top_zap,
6288 		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6289 		    &object, tx));
6290 	} else {
6291 		VERIFY0(err);
6292 	}
6293 
6294 	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6295 	    &entry, tx);
6296 }
6297 
6298 void
6299 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6300 {
6301 	ms->ms_unflushed_txg = txg;
6302 	metaslab_update_ondisk_flush_data(ms, tx);
6303 }
6304 
6305 boolean_t
6306 metaslab_unflushed_dirty(metaslab_t *ms)
6307 {
6308 	return (ms->ms_unflushed_dirty);
6309 }
6310 
6311 uint64_t
6312 metaslab_unflushed_txg(metaslab_t *ms)
6313 {
6314 	return (ms->ms_unflushed_txg);
6315 }
6316 
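/*
 * The tunables below are exported as module parameters.  On Linux they
 * show up under /sys/module/zfs/parameters/ (e.g. metaslab_aliquot); on
 * FreeBSD they are exposed as vfs.zfs.* sysctls.  The exact name mapping
 * is handled by the ZFS_MODULE_PARAM()/ZFS_MODULE_PARAM_CALL() macros.
 */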
6317 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
6318 	"Allocation granularity (a.k.a. stripe size)");
6319 
6320 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6321 	"Load all metaslabs when pool is first opened");
6322 
6323 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6324 	"Prevent metaslabs from being unloaded");
6325 
6326 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6327 	"Preload potential metaslabs during reassessment");
6328 
6329 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW,
6330 	"Max number of metaslabs per group to preload");
6331 
6332 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6333 	"Delay in txgs after metaslab was last used before unloading");
6334 
6335 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6336 	"Delay in milliseconds after metaslab was last used before unloading");
6337 
6338 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6339 	"Percentage of metaslab group size that should be free to make it "
6340 	"eligible for allocation");
6341 
6342 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6343 	"Percentage of metaslab group size that should be considered eligible "
6344 	"for allocations unless all metaslab groups within the metaslab class "
6345 	"have also crossed this threshold");
6346 
6347 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6348 	ZMOD_RW,
6349 	"Use the fragmentation metric to prefer less fragmented metaslabs");
6350 
6351 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6352 	ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6353 
6354 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6355 	"Prefer metaslabs with lower LBAs");
6356 
6357 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6358 	"Enable space-based metaslab group biasing");
6359 
6360 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, perf_bias, INT, ZMOD_RW,
6361 	"Enable performance-based metaslab group biasing");
6362 
6363 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6364 	ZMOD_RW, "Enable segment-based metaslab selection");
6365 
6366 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6367 	"Segment-based metaslab selection maximum buckets before switching");
6368 
6369 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
6370 	"Blocks larger than this size are sometimes forced to be gang blocks");
6371 
6372 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
6373 	"Percentage of large blocks that will be forced to be gang blocks");
6374 
6375 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6376 	"Max distance (bytes) to search forward before using size tree");
6377 
6378 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6379 	"When looking in size tree, use largest segment instead of exact fit");
6380 
6381 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
6382 	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6383 
6384 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6385 	"Percentage of memory that can be used to store metaslab range trees");
6386 
6387 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6388 	ZMOD_RW, "Try hard to allocate before ganging");
6389 
6390 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
6391 	"Normally only consider this many of the best metaslabs in each vdev");
6392 
6393 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
6394 	param_set_active_allocator, param_get_charp, ZMOD_RW,
6395 	"SPA active allocator");
6396