xref: /illumos-gate/usr/src/uts/common/fs/zfs/metaslab.c (revision 985cc36c07a787e0cb720fcf2fab565aa2a77590)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25  * Copyright (c) 2014 Integros [integros.com]
26  */
27 
28 #include <sys/zfs_context.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/space_map.h>
32 #include <sys/metaslab_impl.h>
33 #include <sys/vdev_impl.h>
34 #include <sys/zio.h>
35 #include <sys/spa_impl.h>
36 #include <sys/zfeature.h>
37 
38 #define	GANG_ALLOCATION(flags) \
39 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
40 
41 uint64_t metaslab_aliquot = 512ULL << 10;
42 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */
43 
44 /*
45  * The in-core space map representation is more compact than its on-disk form.
46  * The zfs_condense_pct determines how much more compact the in-core
47  * space map representation must be before we compact it on-disk.
48  * Values should be greater than or equal to 100.
49  */
50 int zfs_condense_pct = 200;
51 
52 /*
53  * Condensing a metaslab is not guaranteed to actually reduce the amount of
54  * space used on disk. In particular, a space map uses data in increments of
55  * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
56  * same number of blocks after condensing. Since the goal of condensing is to
57  * reduce the number of IOPs required to read the space map, we only want to
58  * condense when we can be sure we will reduce the number of blocks used by the
59  * space map. Unfortunately, we cannot precisely compute whether or not this is
60  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
61  * we apply the following heuristic: do not condense a space map unless the
62  * uncondensed size consumes more than zfs_metaslab_condense_block_threshold
63  * blocks.
64  */
65 int zfs_metaslab_condense_block_threshold = 4;
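
/*
 * Illustrative arithmetic for the heuristic above (values are assumed, not
 * taken from any particular pool): with ashift = 12 and a 4K space map block
 * size, the space map grows in MAX(1 << 12, 4096) = 4K increments, so with
 * zfs_metaslab_condense_block_threshold = 4 a space map is only considered
 * for condensing once its uncondensed size exceeds 4 * 4K = 16K on disk.
 * Separately, zfs_condense_pct = 200 means the on-disk representation must
 * be roughly twice the size of its optimal (condensed) form before we
 * condense.
 */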
66 
67 /*
68  * The zfs_mg_noalloc_threshold defines which metaslab groups should
69  * be eligible for allocation. The value is defined as a percentage of
70  * free space. Metaslab groups that have more free space than
71  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
72  * a metaslab group's free space is less than or equal to the
73  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
74  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
75  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
76  * groups are allowed to accept allocations. Gang blocks are always
77  * eligible to allocate on any metaslab group. The default value of 0 means
78  * no metaslab group will be excluded based on this criterion.
79  */
80 int zfs_mg_noalloc_threshold = 0;
81 
82 /*
83  * Metaslab groups are considered eligible for allocations if their
84  * fragmentation metric (measured as a percentage) is less than or equal to
85  * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
86  * then it will be skipped unless all metaslab groups within the metaslab
87  * class have also crossed this threshold.
88  */
89 int zfs_mg_fragmentation_threshold = 85;
90 
91 /*
92  * Allow metaslabs to keep their active state as long as their fragmentation
93  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
94  * active metaslab that exceeds this threshold will no longer keep its active
95  * status, allowing better metaslabs to be selected.
96  */
97 int zfs_metaslab_fragmentation_threshold = 70;
98 
99 /*
100  * When set will load all metaslabs when pool is first opened.
101  * When set, load all metaslabs when the pool is first opened.
102 int metaslab_debug_load = 0;
103 
104 /*
105  * When set will prevent metaslabs from being unloaded.
106  */
107  * When set, prevent metaslabs from being unloaded.
108 
109 /*
110  * Minimum size which forces the dynamic allocator to change
111  * its allocation strategy.  Once the space map cannot satisfy
112  * an allocation of this size then it switches to using a more
113  * aggressive strategy (i.e. search by size rather than offset).
114  */
115 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
116 
117 /*
118  * The minimum free space, in percent, which must be available
119  * in a space map to continue allocations in a first-fit fashion.
120  * Once the space map's free space drops below this level we dynamically
121  * switch to using best-fit allocations.
122  */
123 int metaslab_df_free_pct = 4;
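
/*
 * Worked example of the switch (numbers are hypothetical): for a 10GB
 * metaslab, metaslab_df_free_pct = 4 corresponds to 400MB of free space and
 * metaslab_df_alloc_threshold is SPA_OLD_MAXBLOCKSIZE (128K). The dynamic
 * allocator therefore stays with first-fit until either the largest free
 * segment drops below 128K or total free space falls under 400MB, at which
 * point metaslab_df_alloc() switches to the size-sorted (best-fit) tree.
 */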
124 
125 /*
126  * A metaslab is considered "free" if it contains a contiguous
127  * segment which is greater than metaslab_min_alloc_size.
128  */
129 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
130 
131 /*
132  * Percentage of all cpus that can be used by the metaslab taskq.
133  */
134 int metaslab_load_pct = 50;
135 
136 /*
137  * Determines how many txgs a metaslab may remain loaded without having any
138  * allocations from it. As long as a metaslab continues to be used we will
139  * keep it loaded.
140  */
141 int metaslab_unload_delay = TXG_SIZE * 2;
142 
143 /*
144  * Max number of metaslabs per group to preload.
145  */
146 int metaslab_preload_limit = SPA_DVAS_PER_BP;
147 
148 /*
149  * Enable/disable preloading of metaslab.
150  * Enable/disable preloading of metaslabs.
151 boolean_t metaslab_preload_enabled = B_TRUE;
152 
153 /*
154  * Enable/disable fragmentation weighting on metaslabs.
155  */
156 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
157 
158 /*
159  * Enable/disable lba weighting (i.e. outer tracks are given preference).
160  */
161 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
162 
163 /*
164  * Enable/disable metaslab group biasing.
165  */
166 boolean_t metaslab_bias_enabled = B_TRUE;
167 
168 /*
169  * Enable/disable segment-based metaslab selection.
170  */
171 boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;
172 
173 /*
174  * When using segment-based metaslab selection, we will continue
175  * allocating from the active metaslab until we have exhausted
176  * zfs_metaslab_switch_threshold of its buckets.
177  */
178 int zfs_metaslab_switch_threshold = 2;
179 
180 /*
181  * Internal switch to enable/disable the metaslab allocation tracing
182  * facility.
183  */
184 boolean_t metaslab_trace_enabled = B_TRUE;
185 
186 /*
187  * Maximum entries that the metaslab allocation tracing facility will keep
188  * in a given list when running in non-debug mode. We limit the number
189  * of entries in non-debug mode to prevent us from using up too much memory.
190  * The limit should be sufficiently large that we don't expect any allocation
191  * to ever exceed this value. In debug mode, the system will panic if this
192  * limit is ever reached, allowing for further investigation.
193  */
194 uint64_t metaslab_trace_max_entries = 5000;
195 
196 static uint64_t metaslab_weight(metaslab_t *);
197 static void metaslab_set_fragmentation(metaslab_t *);
198 
199 kmem_cache_t *metaslab_alloc_trace_cache;
200 
201 /*
202  * ==========================================================================
203  * Metaslab classes
204  * ==========================================================================
205  */
206 metaslab_class_t *
207 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
208 {
209 	metaslab_class_t *mc;
210 
211 	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
212 
213 	mc->mc_spa = spa;
214 	mc->mc_rotor = NULL;
215 	mc->mc_ops = ops;
216 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
217 	refcount_create_tracked(&mc->mc_alloc_slots);
218 
219 	return (mc);
220 }
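
/*
 * A minimal usage sketch (mirroring how spa.c sets up its allocation classes;
 * the exact call sites may differ):
 *
 *	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
 *	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
 *
 * with matching metaslab_class_destroy() calls when the pool is deactivated.
 */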
221 
222 void
223 metaslab_class_destroy(metaslab_class_t *mc)
224 {
225 	ASSERT(mc->mc_rotor == NULL);
226 	ASSERT(mc->mc_alloc == 0);
227 	ASSERT(mc->mc_deferred == 0);
228 	ASSERT(mc->mc_space == 0);
229 	ASSERT(mc->mc_dspace == 0);
230 
231 	refcount_destroy(&mc->mc_alloc_slots);
232 	mutex_destroy(&mc->mc_lock);
233 	kmem_free(mc, sizeof (metaslab_class_t));
234 }
235 
236 int
237 metaslab_class_validate(metaslab_class_t *mc)
238 {
239 	metaslab_group_t *mg;
240 	vdev_t *vd;
241 
242 	/*
243 	 * Must hold one of the spa_config locks.
244 	 */
245 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
246 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
247 
248 	if ((mg = mc->mc_rotor) == NULL)
249 		return (0);
250 
251 	do {
252 		vd = mg->mg_vd;
253 		ASSERT(vd->vdev_mg != NULL);
254 		ASSERT3P(vd->vdev_top, ==, vd);
255 		ASSERT3P(mg->mg_class, ==, mc);
256 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
257 	} while ((mg = mg->mg_next) != mc->mc_rotor);
258 
259 	return (0);
260 }
261 
262 void
263 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
264     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
265 {
266 	atomic_add_64(&mc->mc_alloc, alloc_delta);
267 	atomic_add_64(&mc->mc_deferred, defer_delta);
268 	atomic_add_64(&mc->mc_space, space_delta);
269 	atomic_add_64(&mc->mc_dspace, dspace_delta);
270 }
271 
272 uint64_t
273 metaslab_class_get_alloc(metaslab_class_t *mc)
274 {
275 	return (mc->mc_alloc);
276 }
277 
278 uint64_t
279 metaslab_class_get_deferred(metaslab_class_t *mc)
280 {
281 	return (mc->mc_deferred);
282 }
283 
284 uint64_t
285 metaslab_class_get_space(metaslab_class_t *mc)
286 {
287 	return (mc->mc_space);
288 }
289 
290 uint64_t
291 metaslab_class_get_dspace(metaslab_class_t *mc)
292 {
293 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
294 }
295 
296 void
297 metaslab_class_histogram_verify(metaslab_class_t *mc)
298 {
299 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
300 	uint64_t *mc_hist;
301 	int i;
302 
303 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
304 		return;
305 
306 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
307 	    KM_SLEEP);
308 
309 	for (int c = 0; c < rvd->vdev_children; c++) {
310 		vdev_t *tvd = rvd->vdev_child[c];
311 		metaslab_group_t *mg = tvd->vdev_mg;
312 
313 		/*
314 		 * Skip any holes, uninitialized top-levels, or
315 		 * vdevs that are not in this metaslab class.
316 		 */
317 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
318 		    mg->mg_class != mc) {
319 			continue;
320 		}
321 
322 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
323 			mc_hist[i] += mg->mg_histogram[i];
324 	}
325 
326 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
327 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
328 
329 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
330 }
331 
332 /*
333  * Calculate the metaslab class's fragmentation metric. The metric
334  * is weighted based on the space contribution of each metaslab group.
335  * The return value will be a number between 0 and 100 (inclusive), or
336  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
337  * zfs_frag_table for more information about the metric.
338  */
339 uint64_t
340 metaslab_class_fragmentation(metaslab_class_t *mc)
341 {
342 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
343 	uint64_t fragmentation = 0;
344 
345 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
346 
347 	for (int c = 0; c < rvd->vdev_children; c++) {
348 		vdev_t *tvd = rvd->vdev_child[c];
349 		metaslab_group_t *mg = tvd->vdev_mg;
350 
351 		/*
352 		 * Skip any holes, uninitialized top-levels, or
353 		 * vdevs that are not in this metaslab class.
354 		 */
355 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
356 		    mg->mg_class != mc) {
357 			continue;
358 		}
359 
360 		/*
361 		 * If a metaslab group does not contain a fragmentation
362 		 * metric then just bail out.
363 		 */
364 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
365 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
366 			return (ZFS_FRAG_INVALID);
367 		}
368 
369 		/*
370 		 * Determine how much this metaslab_group is contributing
371 		 * to the overall pool fragmentation metric.
372 		 */
373 		fragmentation += mg->mg_fragmentation *
374 		    metaslab_group_get_space(mg);
375 	}
376 	fragmentation /= metaslab_class_get_space(mc);
377 
378 	ASSERT3U(fragmentation, <=, 100);
379 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
380 	return (fragmentation);
381 }
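
/*
 * Worked example of the weighted average above (hypothetical numbers): a
 * class with two groups, one contributing 600G of metaslab space at
 * fragmentation 20 and the other 400G at fragmentation 50, reports
 * (600G * 20 + 400G * 50) / 1000G = 32.
 */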
382 
383 /*
384  * Calculate the amount of expandable space that is available in
385  * this metaslab class. If a device is expanded then its expandable
386  * space will be the amount of allocatable space that is currently not
387  * part of this metaslab class.
388  */
389 uint64_t
390 metaslab_class_expandable_space(metaslab_class_t *mc)
391 {
392 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
393 	uint64_t space = 0;
394 
395 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
396 	for (int c = 0; c < rvd->vdev_children; c++) {
397 		vdev_t *tvd = rvd->vdev_child[c];
398 		metaslab_group_t *mg = tvd->vdev_mg;
399 
400 		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
401 		    mg->mg_class != mc) {
402 			continue;
403 		}
404 
405 		/*
406 		 * Calculate if we have enough space to add additional
407 		 * metaslabs. We report the expandable space in terms
408 		 * of the metaslab size since that's the unit of expansion.
409 		 */
410 		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
411 		    1ULL << tvd->vdev_ms_shift);
412 	}
413 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
414 	return (space);
415 }
416 
417 static int
418 metaslab_compare(const void *x1, const void *x2)
419 {
420 	const metaslab_t *m1 = x1;
421 	const metaslab_t *m2 = x2;
422 
423 	if (m1->ms_weight < m2->ms_weight)
424 		return (1);
425 	if (m1->ms_weight > m2->ms_weight)
426 		return (-1);
427 
428 	/*
429 	 * If the weights are identical, use the offset to force uniqueness.
430 	 */
431 	if (m1->ms_start < m2->ms_start)
432 		return (-1);
433 	if (m1->ms_start > m2->ms_start)
434 		return (1);
435 
436 	ASSERT3P(m1, ==, m2);
437 
438 	return (0);
439 }
440 
441 /*
442  * Verify that the space accounting on disk matches the in-core range_trees.
443  */
444 void
445 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
446 {
447 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
448 	uint64_t allocated = 0;
449 	uint64_t sm_free_space, msp_free_space;
450 
451 	ASSERT(MUTEX_HELD(&msp->ms_lock));
452 
453 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
454 		return;
455 
456 	/*
457 	 * We can only verify the metaslab space when we're called
458 	 * from syncing context with a loaded metaslab that has an allocated
459 	 * space map. Calling this in non-syncing context does not
460 	 * provide a consistent view of the metaslab since we're performing
461 	 * allocations in the future.
462 	 */
463 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
464 	    !msp->ms_loaded)
465 		return;
466 
467 	sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
468 	    space_map_alloc_delta(msp->ms_sm);
469 
470 	/*
471 	 * Account for future allocations since we would have already
472 	 * deducted that space from ms_tree.
473 	 */
474 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
475 		allocated +=
476 		    range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
477 	}
478 
479 	msp_free_space = range_tree_space(msp->ms_tree) + allocated +
480 	    msp->ms_deferspace + range_tree_space(msp->ms_freedtree);
481 
482 	VERIFY3U(sm_free_space, ==, msp_free_space);
483 }
484 
485 /*
486  * ==========================================================================
487  * Metaslab groups
488  * ==========================================================================
489  */
490 /*
491  * Update the allocatable flag and the metaslab group's capacity.
492  * The allocatable flag is set to true if the capacity is below
493  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
494  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
495  * transitions from allocatable to non-allocatable or vice versa then the
496  * metaslab group's class is updated to reflect the transition.
497  */
498 static void
499 metaslab_group_alloc_update(metaslab_group_t *mg)
500 {
501 	vdev_t *vd = mg->mg_vd;
502 	metaslab_class_t *mc = mg->mg_class;
503 	vdev_stat_t *vs = &vd->vdev_stat;
504 	boolean_t was_allocatable;
505 	boolean_t was_initialized;
506 
507 	ASSERT(vd == vd->vdev_top);
508 
509 	mutex_enter(&mg->mg_lock);
510 	was_allocatable = mg->mg_allocatable;
511 	was_initialized = mg->mg_initialized;
512 
513 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
514 	    (vs->vs_space + 1);
515 
516 	mutex_enter(&mc->mc_lock);
517 
518 	/*
519 	 * If the metaslab group was just added then it won't
520 	 * have any space until we finish syncing out this txg.
521 	 * At that point we will consider it initialized and available
522 	 * for allocations.  We also don't consider non-activated
523 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
524 	 * to be initialized, because they can't be used for allocation.
525 	 */
526 	mg->mg_initialized = metaslab_group_initialized(mg);
527 	if (!was_initialized && mg->mg_initialized) {
528 		mc->mc_groups++;
529 	} else if (was_initialized && !mg->mg_initialized) {
530 		ASSERT3U(mc->mc_groups, >, 0);
531 		mc->mc_groups--;
532 	}
533 	if (mg->mg_initialized)
534 		mg->mg_no_free_space = B_FALSE;
535 
536 	/*
537 	 * A metaslab group is considered allocatable if it has plenty
538 	 * of free space or is not heavily fragmented. We only take
539 	 * fragmentation into account if the metaslab group has a valid
540 	 * fragmentation metric (i.e. a value between 0 and 100).
541 	 */
542 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
543 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
544 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
545 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
546 
547 	/*
548 	 * The mc_alloc_groups maintains a count of the number of
549 	 * groups in this metaslab class that are still above the
550 	 * zfs_mg_noalloc_threshold. This is used by the allocating
551 	 * threads to determine if they should avoid allocations to
552 	 * a given group. The allocator will avoid allocations to a group
553 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
554 	 * and there are still other groups that are above the threshold.
555 	 * When a group transitions from allocatable to non-allocatable or
556 	 * vice versa we update the metaslab class to reflect that change.
557 	 * When the mc_alloc_groups value drops to 0 that means that all
558 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
559 	 * eligible for allocations. This effectively means that all devices
560 	 * are balanced again.
561 	 */
562 	if (was_allocatable && !mg->mg_allocatable)
563 		mc->mc_alloc_groups--;
564 	else if (!was_allocatable && mg->mg_allocatable)
565 		mc->mc_alloc_groups++;
566 	mutex_exit(&mc->mc_lock);
567 
568 	mutex_exit(&mg->mg_lock);
569 }
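
/*
 * Illustrative numbers for the capacity calculation above (hypothetical): a
 * top-level vdev with vs_space = 1000G and vs_alloc = 50G yields a
 * mg_free_capacity of about 95 (the +1 in the divisor guards against
 * division by zero). With a tuned zfs_mg_noalloc_threshold of, say, 30, such
 * a group remains allocatable until its free capacity drops to 30 or below,
 * assuming its fragmentation also stays within
 * zfs_mg_fragmentation_threshold.
 */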
570 
571 metaslab_group_t *
572 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
573 {
574 	metaslab_group_t *mg;
575 
576 	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
577 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
578 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
579 	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
580 	mg->mg_vd = vd;
581 	mg->mg_class = mc;
582 	mg->mg_activation_count = 0;
583 	mg->mg_initialized = B_FALSE;
584 	mg->mg_no_free_space = B_TRUE;
585 	refcount_create_tracked(&mg->mg_alloc_queue_depth);
586 
587 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
588 	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
589 
590 	return (mg);
591 }
592 
593 void
594 metaslab_group_destroy(metaslab_group_t *mg)
595 {
596 	ASSERT(mg->mg_prev == NULL);
597 	ASSERT(mg->mg_next == NULL);
598 	/*
599 	 * We may have gone below zero with the activation count
600 	 * either because we never activated in the first place or
601 	 * because we're done, and possibly removing the vdev.
602 	 */
603 	ASSERT(mg->mg_activation_count <= 0);
604 
605 	taskq_destroy(mg->mg_taskq);
606 	avl_destroy(&mg->mg_metaslab_tree);
607 	mutex_destroy(&mg->mg_lock);
608 	refcount_destroy(&mg->mg_alloc_queue_depth);
609 	kmem_free(mg, sizeof (metaslab_group_t));
610 }
611 
612 void
613 metaslab_group_activate(metaslab_group_t *mg)
614 {
615 	metaslab_class_t *mc = mg->mg_class;
616 	metaslab_group_t *mgprev, *mgnext;
617 
618 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
619 
620 	ASSERT(mc->mc_rotor != mg);
621 	ASSERT(mg->mg_prev == NULL);
622 	ASSERT(mg->mg_next == NULL);
623 	ASSERT(mg->mg_activation_count <= 0);
624 
625 	if (++mg->mg_activation_count <= 0)
626 		return;
627 
628 	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
629 	metaslab_group_alloc_update(mg);
630 
631 	if ((mgprev = mc->mc_rotor) == NULL) {
632 		mg->mg_prev = mg;
633 		mg->mg_next = mg;
634 	} else {
635 		mgnext = mgprev->mg_next;
636 		mg->mg_prev = mgprev;
637 		mg->mg_next = mgnext;
638 		mgprev->mg_next = mg;
639 		mgnext->mg_prev = mg;
640 	}
641 	mc->mc_rotor = mg;
642 }
643 
644 void
645 metaslab_group_passivate(metaslab_group_t *mg)
646 {
647 	metaslab_class_t *mc = mg->mg_class;
648 	metaslab_group_t *mgprev, *mgnext;
649 
650 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
651 
652 	if (--mg->mg_activation_count != 0) {
653 		ASSERT(mc->mc_rotor != mg);
654 		ASSERT(mg->mg_prev == NULL);
655 		ASSERT(mg->mg_next == NULL);
656 		ASSERT(mg->mg_activation_count < 0);
657 		return;
658 	}
659 
660 	taskq_wait(mg->mg_taskq);
661 	metaslab_group_alloc_update(mg);
662 
663 	mgprev = mg->mg_prev;
664 	mgnext = mg->mg_next;
665 
666 	if (mg == mgnext) {
667 		mc->mc_rotor = NULL;
668 	} else {
669 		mc->mc_rotor = mgnext;
670 		mgprev->mg_next = mgnext;
671 		mgnext->mg_prev = mgprev;
672 	}
673 
674 	mg->mg_prev = NULL;
675 	mg->mg_next = NULL;
676 }
677 
678 boolean_t
679 metaslab_group_initialized(metaslab_group_t *mg)
680 {
681 	vdev_t *vd = mg->mg_vd;
682 	vdev_stat_t *vs = &vd->vdev_stat;
683 
684 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
685 }
686 
687 uint64_t
688 metaslab_group_get_space(metaslab_group_t *mg)
689 {
690 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
691 }
692 
693 void
694 metaslab_group_histogram_verify(metaslab_group_t *mg)
695 {
696 	uint64_t *mg_hist;
697 	vdev_t *vd = mg->mg_vd;
698 	uint64_t ashift = vd->vdev_ashift;
699 	int i;
700 
701 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
702 		return;
703 
704 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
705 	    KM_SLEEP);
706 
707 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
708 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
709 
710 	for (int m = 0; m < vd->vdev_ms_count; m++) {
711 		metaslab_t *msp = vd->vdev_ms[m];
712 
713 		if (msp->ms_sm == NULL)
714 			continue;
715 
716 		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
717 			mg_hist[i + ashift] +=
718 			    msp->ms_sm->sm_phys->smp_histogram[i];
719 	}
720 
721 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
722 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
723 
724 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
725 }
726 
727 static void
728 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
729 {
730 	metaslab_class_t *mc = mg->mg_class;
731 	uint64_t ashift = mg->mg_vd->vdev_ashift;
732 
733 	ASSERT(MUTEX_HELD(&msp->ms_lock));
734 	if (msp->ms_sm == NULL)
735 		return;
736 
737 	mutex_enter(&mg->mg_lock);
738 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
739 		mg->mg_histogram[i + ashift] +=
740 		    msp->ms_sm->sm_phys->smp_histogram[i];
741 		mc->mc_histogram[i + ashift] +=
742 		    msp->ms_sm->sm_phys->smp_histogram[i];
743 	}
744 	mutex_exit(&mg->mg_lock);
745 }
746 
747 void
748 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
749 {
750 	metaslab_class_t *mc = mg->mg_class;
751 	uint64_t ashift = mg->mg_vd->vdev_ashift;
752 
753 	ASSERT(MUTEX_HELD(&msp->ms_lock));
754 	if (msp->ms_sm == NULL)
755 		return;
756 
757 	mutex_enter(&mg->mg_lock);
758 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
759 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
760 		    msp->ms_sm->sm_phys->smp_histogram[i]);
761 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
762 		    msp->ms_sm->sm_phys->smp_histogram[i]);
763 
764 		mg->mg_histogram[i + ashift] -=
765 		    msp->ms_sm->sm_phys->smp_histogram[i];
766 		mc->mc_histogram[i + ashift] -=
767 		    msp->ms_sm->sm_phys->smp_histogram[i];
768 	}
769 	mutex_exit(&mg->mg_lock);
770 }
771 
772 static void
773 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
774 {
775 	ASSERT(msp->ms_group == NULL);
776 	mutex_enter(&mg->mg_lock);
777 	msp->ms_group = mg;
778 	msp->ms_weight = 0;
779 	avl_add(&mg->mg_metaslab_tree, msp);
780 	mutex_exit(&mg->mg_lock);
781 
782 	mutex_enter(&msp->ms_lock);
783 	metaslab_group_histogram_add(mg, msp);
784 	mutex_exit(&msp->ms_lock);
785 }
786 
787 static void
788 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
789 {
790 	mutex_enter(&msp->ms_lock);
791 	metaslab_group_histogram_remove(mg, msp);
792 	mutex_exit(&msp->ms_lock);
793 
794 	mutex_enter(&mg->mg_lock);
795 	ASSERT(msp->ms_group == mg);
796 	avl_remove(&mg->mg_metaslab_tree, msp);
797 	msp->ms_group = NULL;
798 	mutex_exit(&mg->mg_lock);
799 }
800 
801 static void
802 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
803 {
804 	/*
805 	 * Although in principle the weight can be any value, in
806 	 * practice we do not use values in the range [1, 511].
807 	 */
808 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
809 	ASSERT(MUTEX_HELD(&msp->ms_lock));
810 
811 	mutex_enter(&mg->mg_lock);
812 	ASSERT(msp->ms_group == mg);
813 	avl_remove(&mg->mg_metaslab_tree, msp);
814 	msp->ms_weight = weight;
815 	avl_add(&mg->mg_metaslab_tree, msp);
816 	mutex_exit(&mg->mg_lock);
817 }
818 
819 /*
820  * Calculate the fragmentation for a given metaslab group. We can use
821  * a simple average here since all metaslabs within the group must have
822  * the same size. The return value will be a value between 0 and 100
823  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
824  * group have a fragmentation metric.
825  */
826 uint64_t
827 metaslab_group_fragmentation(metaslab_group_t *mg)
828 {
829 	vdev_t *vd = mg->mg_vd;
830 	uint64_t fragmentation = 0;
831 	uint64_t valid_ms = 0;
832 
833 	for (int m = 0; m < vd->vdev_ms_count; m++) {
834 		metaslab_t *msp = vd->vdev_ms[m];
835 
836 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
837 			continue;
838 
839 		valid_ms++;
840 		fragmentation += msp->ms_fragmentation;
841 	}
842 
843 	if (valid_ms <= vd->vdev_ms_count / 2)
844 		return (ZFS_FRAG_INVALID);
845 
846 	fragmentation /= valid_ms;
847 	ASSERT3U(fragmentation, <=, 100);
848 	return (fragmentation);
849 }
850 
851 /*
852  * Determine if a given metaslab group should skip allocations. A metaslab
853  * group should avoid allocations if its free capacity is less than the
854  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
855  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
856  * that can still handle allocations. If the allocation throttle is enabled
857  * then we skip allocations to devices that have reached their maximum
858  * allocation queue depth unless the selected metaslab group is the only
859  * eligible group remaining.
860  */
861 static boolean_t
862 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
863     uint64_t psize)
864 {
865 	spa_t *spa = mg->mg_vd->vdev_spa;
866 	metaslab_class_t *mc = mg->mg_class;
867 
868 	/*
869 	 * We can only consider skipping this metaslab group if it's
870 	 * in the normal metaslab class and there are other metaslab
871 	 * groups to select from. Otherwise, we always consider it eligible
872 	 * for allocations.
873 	 */
874 	if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
875 		return (B_TRUE);
876 
877 	/*
878 	 * If the metaslab group's mg_allocatable flag is set (see comments
879 	 * in metaslab_group_alloc_update() for more information) and
880 	 * the allocation throttle is disabled then allow allocations to this
881 	 * device. However, if the allocation throttle is enabled then
882 	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
883 	 * to determine if we should allow allocations to this metaslab group.
884 	 * If all metaslab groups are no longer considered allocatable
885 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
886 	 * gang block size then we allow allocations on this metaslab group
887 	 * regardless of the mg_allocatable or throttle settings.
888 	 */
889 	if (mg->mg_allocatable) {
890 		metaslab_group_t *mgp;
891 		int64_t qdepth;
892 		uint64_t qmax = mg->mg_max_alloc_queue_depth;
893 
894 		if (!mc->mc_alloc_throttle_enabled)
895 			return (B_TRUE);
896 
897 		/*
898 		 * If this metaslab group does not have any free space, then
899 		 * there is no point in looking further.
900 		 */
901 		if (mg->mg_no_free_space)
902 			return (B_FALSE);
903 
904 		qdepth = refcount_count(&mg->mg_alloc_queue_depth);
905 
906 		/*
907 		 * If this metaslab group is below its qmax or it's
908 		 * the only allocatable metaslab group, then attempt
909 		 * to allocate from it.
910 		 */
911 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
912 			return (B_TRUE);
913 		ASSERT3U(mc->mc_alloc_groups, >, 1);
914 
915 		/*
916 		 * Since this metaslab group is at or over its qmax, we
917 		 * need to determine if there are metaslab groups after this
918 		 * one that might be able to handle this allocation. This is
919 		 * racy since we can't hold the locks for all metaslab
920 		 * groups at the same time when we make this check.
921 		 */
922 		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
923 			qmax = mgp->mg_max_alloc_queue_depth;
924 
925 			qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
926 
927 			/*
928 			 * If there is another metaslab group that
929 			 * might be able to handle the allocation, then
930 			 * we return false so that we skip this group.
931 			 */
932 			if (qdepth < qmax && !mgp->mg_no_free_space)
933 				return (B_FALSE);
934 		}
935 
936 		/*
937 		 * We didn't find another group to handle the allocation
938 		 * so we can't skip this metaslab group even though
939 		 * we are at or over our qmax.
940 		 */
941 		return (B_TRUE);
942 
943 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
944 		return (B_TRUE);
945 	}
946 	return (B_FALSE);
947 }
948 
949 /*
950  * ==========================================================================
951  * Range tree callbacks
952  * ==========================================================================
953  */
954 
955 /*
956  * Comparison function for the private size-ordered tree. Tree is sorted
957  * by size, larger sizes at the end of the tree.
958  */
959 static int
960 metaslab_rangesize_compare(const void *x1, const void *x2)
961 {
962 	const range_seg_t *r1 = x1;
963 	const range_seg_t *r2 = x2;
964 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
965 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
966 
967 	if (rs_size1 < rs_size2)
968 		return (-1);
969 	if (rs_size1 > rs_size2)
970 		return (1);
971 
972 	if (r1->rs_start < r2->rs_start)
973 		return (-1);
974 
975 	if (r1->rs_start > r2->rs_start)
976 		return (1);
977 
978 	return (0);
979 }
980 
981 /*
982  * Create any block allocator specific components. The current allocators
983  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
984  */
985 static void
986 metaslab_rt_create(range_tree_t *rt, void *arg)
987 {
988 	metaslab_t *msp = arg;
989 
990 	ASSERT3P(rt->rt_arg, ==, msp);
991 	ASSERT(msp->ms_tree == NULL);
992 
993 	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
994 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
995 }
996 
997 /*
998  * Destroy the block allocator specific components.
999  */
1000 static void
1001 metaslab_rt_destroy(range_tree_t *rt, void *arg)
1002 {
1003 	metaslab_t *msp = arg;
1004 
1005 	ASSERT3P(rt->rt_arg, ==, msp);
1006 	ASSERT3P(msp->ms_tree, ==, rt);
1007 	ASSERT0(avl_numnodes(&msp->ms_size_tree));
1008 
1009 	avl_destroy(&msp->ms_size_tree);
1010 }
1011 
1012 static void
1013 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1014 {
1015 	metaslab_t *msp = arg;
1016 
1017 	ASSERT3P(rt->rt_arg, ==, msp);
1018 	ASSERT3P(msp->ms_tree, ==, rt);
1019 	VERIFY(!msp->ms_condensing);
1020 	avl_add(&msp->ms_size_tree, rs);
1021 }
1022 
1023 static void
1024 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1025 {
1026 	metaslab_t *msp = arg;
1027 
1028 	ASSERT3P(rt->rt_arg, ==, msp);
1029 	ASSERT3P(msp->ms_tree, ==, rt);
1030 	VERIFY(!msp->ms_condensing);
1031 	avl_remove(&msp->ms_size_tree, rs);
1032 }
1033 
1034 static void
1035 metaslab_rt_vacate(range_tree_t *rt, void *arg)
1036 {
1037 	metaslab_t *msp = arg;
1038 
1039 	ASSERT3P(rt->rt_arg, ==, msp);
1040 	ASSERT3P(msp->ms_tree, ==, rt);
1041 
1042 	/*
1043 	 * Normally one would walk the tree freeing nodes along the way.
1044 	 * Since the nodes are shared with the range trees we can avoid
1045 	 * walking all nodes and just reinitialize the avl tree. The nodes
1046 	 * will be freed by the range tree, so we don't want to free them here.
1047 	 */
1048 	avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
1049 	    sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
1050 }
1051 
1052 static range_tree_ops_t metaslab_rt_ops = {
1053 	metaslab_rt_create,
1054 	metaslab_rt_destroy,
1055 	metaslab_rt_add,
1056 	metaslab_rt_remove,
1057 	metaslab_rt_vacate
1058 };
1059 
1060 /*
1061  * ==========================================================================
1062  * Common allocator routines
1063  * ==========================================================================
1064  */
1065 
1066 /*
1067  * Return the maximum contiguous segment within the metaslab.
1068  */
1069 uint64_t
1070 metaslab_block_maxsize(metaslab_t *msp)
1071 {
1072 	avl_tree_t *t = &msp->ms_size_tree;
1073 	range_seg_t *rs;
1074 
1075 	if (t == NULL || (rs = avl_last(t)) == NULL)
1076 		return (0ULL);
1077 
1078 	return (rs->rs_end - rs->rs_start);
1079 }
1080 
1081 static range_seg_t *
1082 metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
1083 {
1084 	range_seg_t *rs, rsearch;
1085 	avl_index_t where;
1086 
1087 	rsearch.rs_start = start;
1088 	rsearch.rs_end = start + size;
1089 
1090 	rs = avl_find(t, &rsearch, &where);
1091 	if (rs == NULL) {
1092 		rs = avl_nearest(t, where, AVL_AFTER);
1093 	}
1094 
1095 	return (rs);
1096 }
1097 
1098 /*
1099  * This is a helper function that can be used by the allocator to find
1100  * a suitable block to allocate. This will search the specified AVL
1101  * tree looking for a block that matches the specified criteria.
1102  */
1103 static uint64_t
1104 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
1105     uint64_t align)
1106 {
1107 	range_seg_t *rs = metaslab_block_find(t, *cursor, size);
1108 
1109 	while (rs != NULL) {
1110 		uint64_t offset = P2ROUNDUP(rs->rs_start, align);
1111 
1112 		if (offset + size <= rs->rs_end) {
1113 			*cursor = offset + size;
1114 			return (offset);
1115 		}
1116 		rs = AVL_NEXT(t, rs);
1117 	}
1118 
1119 	/*
1120 	 * If we know we've searched the whole map (*cursor == 0), give up.
1121 	 * Otherwise, reset the cursor to the beginning and try again.
1122 	 */
1123 	if (*cursor == 0)
1124 		return (-1ULL);
1125 
1126 	*cursor = 0;
1127 	return (metaslab_block_picker(t, cursor, size, align));
1128 }
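
/*
 * Example walk (offsets are hypothetical): with *cursor = 0x10000, size =
 * 0x2000 and align = 0x2000, suppose the first candidate segment returned by
 * metaslab_block_find() is [0x13000, 0x20000). P2ROUNDUP(0x13000, 0x2000) =
 * 0x14000 and 0x14000 + 0x2000 <= 0x20000, so 0x14000 is returned and the
 * cursor advances to 0x16000 for the next allocation of this alignment.
 */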
1129 
1130 /*
1131  * ==========================================================================
1132  * The first-fit block allocator
1133  * ==========================================================================
1134  */
1135 static uint64_t
1136 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
1137 {
1138 	/*
1139 	 * Find the largest power of 2 block size that evenly divides the
1140 	 * requested size. This is used to try to allocate blocks with similar
1141 	 * alignment from the same area of the metaslab (i.e. same cursor
1142 	 * bucket), but it does not guarantee that other allocation sizes
1143 	 * will not also be placed in the same region.
1144 	 */
1145 	uint64_t align = size & -size;
1146 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1147 	avl_tree_t *t = &msp->ms_tree->rt_root;
1148 
1149 	return (metaslab_block_picker(t, cursor, size, align));
1150 }
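
/*
 * Example of the alignment computation above (size is hypothetical): for a
 * 24K (0x6000) request, size & -size = 8K (0x2000), so the allocation is
 * steered by the cursor in ms_lbas[highbit64(0x2000) - 1] = ms_lbas[13],
 * shared with every other request whose largest power-of-2 divisor is 8K.
 */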
1151 
1152 static metaslab_ops_t metaslab_ff_ops = {
1153 	metaslab_ff_alloc
1154 };
1155 
1156 /*
1157  * ==========================================================================
1158  * Dynamic block allocator -
1159  * Uses the first fit allocation scheme until space gets low and then
1160  * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1161  * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1162  * ==========================================================================
1163  */
1164 static uint64_t
1165 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1166 {
1167 	/*
1168 	 * Find the largest power of 2 block size that evenly divides the
1169 	 * requested size. This is used to try to allocate blocks with similar
1170 	 * alignment from the same area of the metaslab (i.e. same cursor
1171 	 * bucket), but it does not guarantee that other allocation sizes
1172 	 * will not also be placed in the same region.
1173 	 */
1174 	uint64_t align = size & -size;
1175 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1176 	range_tree_t *rt = msp->ms_tree;
1177 	avl_tree_t *t = &rt->rt_root;
1178 	uint64_t max_size = metaslab_block_maxsize(msp);
1179 	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1180 
1181 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1182 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1183 
1184 	if (max_size < size)
1185 		return (-1ULL);
1186 
1187 	/*
1188 	 * If we're running low on space switch to using the size
1189 	 * sorted AVL tree (best-fit).
1190 	 */
1191 	if (max_size < metaslab_df_alloc_threshold ||
1192 	    free_pct < metaslab_df_free_pct) {
1193 		t = &msp->ms_size_tree;
1194 		*cursor = 0;
1195 	}
1196 
1197 	return (metaslab_block_picker(t, cursor, size, 1ULL));
1198 }
1199 
1200 static metaslab_ops_t metaslab_df_ops = {
1201 	metaslab_df_alloc
1202 };
1203 
1204 /*
1205  * ==========================================================================
1206  * Cursor fit block allocator -
1207  * Select the largest region in the metaslab, set the cursor to the beginning
1208  * of the range and the cursor_end to the end of the range. As allocations
1209  * are made advance the cursor. Continue allocating from the cursor until
1210  * the range is exhausted and then find a new range.
1211  * ==========================================================================
1212  */
1213 static uint64_t
1214 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1215 {
1216 	range_tree_t *rt = msp->ms_tree;
1217 	avl_tree_t *t = &msp->ms_size_tree;
1218 	uint64_t *cursor = &msp->ms_lbas[0];
1219 	uint64_t *cursor_end = &msp->ms_lbas[1];
1220 	uint64_t offset = 0;
1221 
1222 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1223 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1224 
1225 	ASSERT3U(*cursor_end, >=, *cursor);
1226 
1227 	if ((*cursor + size) > *cursor_end) {
1228 		range_seg_t *rs;
1229 
1230 		rs = avl_last(&msp->ms_size_tree);
1231 		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1232 			return (-1ULL);
1233 
1234 		*cursor = rs->rs_start;
1235 		*cursor_end = rs->rs_end;
1236 	}
1237 
1238 	offset = *cursor;
1239 	*cursor += size;
1240 
1241 	return (offset);
1242 }
1243 
1244 static metaslab_ops_t metaslab_cf_ops = {
1245 	metaslab_cf_alloc
1246 };
1247 
1248 /*
1249  * ==========================================================================
1250  * New dynamic fit allocator -
1251  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1252  * contiguous blocks. If no region is found then just use the largest segment
1253  * that remains.
1254  * ==========================================================================
1255  */
1256 
1257 /*
1258  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1259  * to request from the allocator.
1260  */
1261 uint64_t metaslab_ndf_clump_shift = 4;
1262 
1263 static uint64_t
1264 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1265 {
1266 	avl_tree_t *t = &msp->ms_tree->rt_root;
1267 	avl_index_t where;
1268 	range_seg_t *rs, rsearch;
1269 	uint64_t hbit = highbit64(size);
1270 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1271 	uint64_t max_size = metaslab_block_maxsize(msp);
1272 
1273 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1274 	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1275 
1276 	if (max_size < size)
1277 		return (-1ULL);
1278 
1279 	rsearch.rs_start = *cursor;
1280 	rsearch.rs_end = *cursor + size;
1281 
1282 	rs = avl_find(t, &rsearch, &where);
1283 	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1284 		t = &msp->ms_size_tree;
1285 
1286 		rsearch.rs_start = 0;
1287 		rsearch.rs_end = MIN(max_size,
1288 		    1ULL << (hbit + metaslab_ndf_clump_shift));
1289 		rs = avl_find(t, &rsearch, &where);
1290 		if (rs == NULL)
1291 			rs = avl_nearest(t, where, AVL_AFTER);
1292 		ASSERT(rs != NULL);
1293 	}
1294 
1295 	if ((rs->rs_end - rs->rs_start) >= size) {
1296 		*cursor = rs->rs_start + size;
1297 		return (rs->rs_start);
1298 	}
1299 	return (-1ULL);
1300 }
1301 
1302 static metaslab_ops_t metaslab_ndf_ops = {
1303 	metaslab_ndf_alloc
1304 };
1305 
1306 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1307 
1308 /*
1309  * ==========================================================================
1310  * Metaslabs
1311  * ==========================================================================
1312  */
1313 
1314 /*
1315  * Wait for any in-progress metaslab loads to complete.
1316  */
1317 void
1318 metaslab_load_wait(metaslab_t *msp)
1319 {
1320 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1321 
1322 	while (msp->ms_loading) {
1323 		ASSERT(!msp->ms_loaded);
1324 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1325 	}
1326 }
1327 
1328 int
1329 metaslab_load(metaslab_t *msp)
1330 {
1331 	int error = 0;
1332 	boolean_t success = B_FALSE;
1333 
1334 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1335 	ASSERT(!msp->ms_loaded);
1336 	ASSERT(!msp->ms_loading);
1337 
1338 	msp->ms_loading = B_TRUE;
1339 
1340 	/*
1341 	 * If the space map has not been allocated yet, then treat
1342 	 * all the space in the metaslab as free and add it to the
1343 	 * ms_tree.
1344 	 */
1345 	if (msp->ms_sm != NULL)
1346 		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1347 	else
1348 		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1349 
1350 	success = (error == 0);
1351 	msp->ms_loading = B_FALSE;
1352 
1353 	if (success) {
1354 		ASSERT3P(msp->ms_group, !=, NULL);
1355 		msp->ms_loaded = B_TRUE;
1356 
1357 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1358 			range_tree_walk(msp->ms_defertree[t],
1359 			    range_tree_remove, msp->ms_tree);
1360 		}
1361 		msp->ms_max_size = metaslab_block_maxsize(msp);
1362 	}
1363 	cv_broadcast(&msp->ms_load_cv);
1364 	return (error);
1365 }
1366 
1367 void
1368 metaslab_unload(metaslab_t *msp)
1369 {
1370 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1371 	range_tree_vacate(msp->ms_tree, NULL, NULL);
1372 	msp->ms_loaded = B_FALSE;
1373 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1374 	msp->ms_max_size = 0;
1375 }
1376 
1377 int
1378 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1379     metaslab_t **msp)
1380 {
1381 	vdev_t *vd = mg->mg_vd;
1382 	objset_t *mos = vd->vdev_spa->spa_meta_objset;
1383 	metaslab_t *ms;
1384 	int error;
1385 
1386 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1387 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1388 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1389 	ms->ms_id = id;
1390 	ms->ms_start = id << vd->vdev_ms_shift;
1391 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
1392 
1393 	/*
1394 	 * We only open space map objects that already exist. All others
1395 	 * will be opened when we finally allocate an object for them.
1396 	 */
1397 	if (object != 0) {
1398 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1399 		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1400 
1401 		if (error != 0) {
1402 			kmem_free(ms, sizeof (metaslab_t));
1403 			return (error);
1404 		}
1405 
1406 		ASSERT(ms->ms_sm != NULL);
1407 	}
1408 
1409 	/*
1410 	 * We create the main range tree here, but we don't create the
1411 	 * other range trees until metaslab_sync_done().  This serves
1412 	 * two purposes: it allows metaslab_sync_done() to detect the
1413 	 * addition of new space; and for debugging, it ensures that we'd
1414 	 * data fault on any attempt to use this metaslab before it's ready.
1415 	 */
1416 	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1417 	metaslab_group_add(mg, ms);
1418 
1419 	metaslab_set_fragmentation(ms);
1420 
1421 	/*
1422 	 * If we're opening an existing pool (txg == 0) or creating
1423 	 * a new one (txg == TXG_INITIAL), all space is available now.
1424 	 * If we're adding space to an existing pool, the new space
1425 	 * does not become available until after this txg has synced.
1426 	 * The metaslab's weight will also be initialized when we sync
1427 	 * out this txg. This ensures that we don't attempt to allocate
1428 	 * from it before we have initialized it completely.
1429 	 */
1430 	if (txg <= TXG_INITIAL)
1431 		metaslab_sync_done(ms, 0);
1432 
1433 	/*
1434 	 * If metaslab_debug_load is set and we're initializing a metaslab
1435 	 * that has an allocated space map object then load its space
1436 	 * map so that we can verify frees.
1437 	 */
1438 	if (metaslab_debug_load && ms->ms_sm != NULL) {
1439 		mutex_enter(&ms->ms_lock);
1440 		VERIFY0(metaslab_load(ms));
1441 		mutex_exit(&ms->ms_lock);
1442 	}
1443 
1444 	if (txg != 0) {
1445 		vdev_dirty(vd, 0, NULL, txg);
1446 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
1447 	}
1448 
1449 	*msp = ms;
1450 
1451 	return (0);
1452 }
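
/*
 * A rough usage sketch (vdev.c is the expected caller; details may vary):
 * vdev_metaslab_init() walks each metaslab slot of a top-level vdev, reads
 * the space map object number from the vdev's ms_array, and calls
 *
 *	error = metaslab_init(vd->vdev_mg, m, object, txg, &vd->vdev_ms[m]);
 *
 * metaslab_fini() is the teardown counterpart.
 */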
1453 
1454 void
1455 metaslab_fini(metaslab_t *msp)
1456 {
1457 	metaslab_group_t *mg = msp->ms_group;
1458 
1459 	metaslab_group_remove(mg, msp);
1460 
1461 	mutex_enter(&msp->ms_lock);
1462 	VERIFY(msp->ms_group == NULL);
1463 	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1464 	    0, -msp->ms_size);
1465 	space_map_close(msp->ms_sm);
1466 
1467 	metaslab_unload(msp);
1468 	range_tree_destroy(msp->ms_tree);
1469 	range_tree_destroy(msp->ms_freeingtree);
1470 	range_tree_destroy(msp->ms_freedtree);
1471 
1472 	for (int t = 0; t < TXG_SIZE; t++) {
1473 		range_tree_destroy(msp->ms_alloctree[t]);
1474 	}
1475 
1476 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1477 		range_tree_destroy(msp->ms_defertree[t]);
1478 	}
1479 
1480 	ASSERT0(msp->ms_deferspace);
1481 
1482 	mutex_exit(&msp->ms_lock);
1483 	cv_destroy(&msp->ms_load_cv);
1484 	mutex_destroy(&msp->ms_lock);
1485 
1486 	kmem_free(msp, sizeof (metaslab_t));
1487 }
1488 
1489 #define	FRAGMENTATION_TABLE_SIZE	17
1490 
1491 /*
1492  * This table defines a segment size based fragmentation metric that will
1493  * allow each metaslab to derive its own fragmentation value. This is done
1494  * by calculating the space in each bucket of the spacemap histogram and
1495  * multiplying that by the fragmentation metric in this table. Doing
1496  * this for all buckets and dividing it by the total amount of free
1497  * space in this metaslab (i.e. the total free space in all buckets) gives
1498  * us the fragmentation metric. This means that a high fragmentation metric
1499  * equates to most of the free space being comprised of small segments.
1500  * Conversely, if the metric is low, then most of the free space is in
1501  * large segments. A 10% change in fragmentation equates to approximately
1502  * double the number of segments.
1503  *
1504  * This table defines 0% fragmented space using 16MB segments. Testing has
1505  * shown that segments that are greater than or equal to 16MB do not suffer
1506  * from drastic performance problems. Using this value, we derive the rest
1507  * of the table. Since the fragmentation value is never stored on disk, it
1508  * is possible to change these calculations in the future.
1509  */
1510 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1511 	100,	/* 512B	*/
1512 	100,	/* 1K	*/
1513 	98,	/* 2K	*/
1514 	95,	/* 4K	*/
1515 	90,	/* 8K	*/
1516 	80,	/* 16K	*/
1517 	70,	/* 32K	*/
1518 	60,	/* 64K	*/
1519 	50,	/* 128K	*/
1520 	40,	/* 256K	*/
1521 	30,	/* 512K	*/
1522 	20,	/* 1M	*/
1523 	15,	/* 2M	*/
1524 	10,	/* 4M	*/
1525 	5,	/* 8M	*/
1526 	0	/* 16M	*/
1527 };
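
/*
 * Worked example using the table above (distribution is hypothetical): a
 * metaslab whose free space is split evenly between ~64K segments (factor 60)
 * and segments of 16M or larger (factor 0) gets a fragmentation metric of
 * (60 + 0) / 2 = 30, i.e. moderately fragmented.
 */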
1528 
1529 /*
1530  * Calculate the metaslab's fragmentation metric and record it in
1531  * ms_fragmentation. A value of ZFS_FRAG_INVALID means that the metaslab has
1532  * not been upgraded and does not support this metric. Otherwise, the value
1533  * should be in the range [0, 100].
1534  */
1535 static void
1536 metaslab_set_fragmentation(metaslab_t *msp)
1537 {
1538 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1539 	uint64_t fragmentation = 0;
1540 	uint64_t total = 0;
1541 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
1542 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
1543 
1544 	if (!feature_enabled) {
1545 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1546 		return;
1547 	}
1548 
1549 	/*
1550 	 * A null space map means that the entire metaslab is free
1551 	 * and thus is not fragmented.
1552 	 */
1553 	if (msp->ms_sm == NULL) {
1554 		msp->ms_fragmentation = 0;
1555 		return;
1556 	}
1557 
1558 	/*
1559 	 * If this metaslab's space map has not been upgraded, flag it
1560 	 * so that we upgrade next time we encounter it.
1561 	 */
1562 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1563 		uint64_t txg = spa_syncing_txg(spa);
1564 		vdev_t *vd = msp->ms_group->mg_vd;
1565 
1566 		/*
1567 		 * If we've reached the final dirty txg, then we must
1568 		 * be shutting down the pool. We don't want to dirty
1569 		 * any data past this point so skip setting the condense
1570 		 * flag. We can retry this action the next time the pool
1571 		 * is imported.
1572 		 */
1573 		if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
1574 			msp->ms_condense_wanted = B_TRUE;
1575 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1576 			spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1577 			    "ms_id %llu, vdev_id %llu", txg, msp->ms_id,
1578 			    vd->vdev_id);
1579 		}
1580 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
1581 		return;
1582 	}
1583 
1584 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1585 		uint64_t space = 0;
1586 		uint8_t shift = msp->ms_sm->sm_shift;
1587 
1588 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1589 		    FRAGMENTATION_TABLE_SIZE - 1);
1590 
1591 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1592 			continue;
1593 
1594 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1595 		total += space;
1596 
1597 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1598 		fragmentation += space * zfs_frag_table[idx];
1599 	}
1600 
1601 	if (total > 0)
1602 		fragmentation /= total;
1603 	ASSERT3U(fragmentation, <=, 100);
1604 
1605 	msp->ms_fragmentation = fragmentation;
1606 }
1607 
1608 /*
1609  * Compute a weight -- a selection preference value -- for the given metaslab.
1610  * This is based on the amount of free space, the level of fragmentation,
1611  * the LBA range, and whether the metaslab is loaded.
1612  */
1613 static uint64_t
1614 metaslab_space_weight(metaslab_t *msp)
1615 {
1616 	metaslab_group_t *mg = msp->ms_group;
1617 	vdev_t *vd = mg->mg_vd;
1618 	uint64_t weight, space;
1619 
1620 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1621 	ASSERT(!vd->vdev_removing);
1622 
1623 	/*
1624 	 * The baseline weight is the metaslab's free space.
1625 	 */
1626 	space = msp->ms_size - space_map_allocated(msp->ms_sm);
1627 
1628 	if (metaslab_fragmentation_factor_enabled &&
1629 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1630 		/*
1631 		 * Use the fragmentation information to inversely scale
1632 		 * down the baseline weight. We need to ensure that we
1633 		 * don't exclude this metaslab completely when it's 100%
1634 		 * fragmented. To avoid this we reduce the fragmented value
1635 		 * by 1.
1636 		 */
1637 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1638 
1639 		/*
1640 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1641 		 * this metaslab again. The fragmentation metric may have
1642 		 * decreased the space to something smaller than
1643 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1644 		 * so that we can consume any remaining space.
1645 		 */
1646 		if (space > 0 && space < SPA_MINBLOCKSIZE)
1647 			space = SPA_MINBLOCKSIZE;
1648 	}
1649 	weight = space;
1650 
1651 	/*
1652 	 * Modern disks have uniform bit density and constant angular velocity.
1653 	 * Therefore, the outer recording zones are faster (higher bandwidth)
1654 	 * than the inner zones by the ratio of outer to inner track diameter,
1655 	 * which is typically around 2:1.  We account for this by assigning
1656 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1657 	 * In effect, this means that we'll select the metaslab with the most
1658 	 * free bandwidth rather than simply the one with the most free space.
1659 	 */
1660 	if (metaslab_lba_weighting_enabled) {
1661 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1662 		ASSERT(weight >= space && weight <= 2 * space);
1663 	}
1664 
1665 	/*
1666 	 * If this metaslab is one we're actively using, adjust its
1667 	 * weight to make it preferable to any inactive metaslab so
1668 	 * we'll polish it off. If the fragmentation on this metaslab
1669 	 * has exceeded our threshold, then don't mark it active.
1670 	 */
1671 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1672 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1673 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1674 	}
1675 
1676 	WEIGHT_SET_SPACEBASED(weight);
1677 	return (weight);
1678 }
1679 
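
/*
 * Example of the LBA weighting above (counts are hypothetical): on a vdev
 * with 200 metaslabs, metaslab 0 gets a 2.0x multiplier, metaslab 100 gets
 * 2 - 100/200 = 1.5x, and metaslab 199 gets roughly 1.005x, so outer
 * (lower-numbered) metaslabs win ties on otherwise equal free space.
 */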
1680 /*
1681  * Return the weight of the specified metaslab, according to the segment-based
1682  * weighting algorithm. The metaslab must be loaded. This function can
1683  * be called within a sync pass since it relies only on the metaslab's
1684  * range tree which is always accurate when the metaslab is loaded.
1685  */
1686 static uint64_t
1687 metaslab_weight_from_range_tree(metaslab_t *msp)
1688 {
1689 	uint64_t weight = 0;
1690 	uint32_t segments = 0;
1691 
1692 	ASSERT(msp->ms_loaded);
1693 
1694 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
1695 	    i--) {
1696 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
1697 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1698 
1699 		segments <<= 1;
1700 		segments += msp->ms_tree->rt_histogram[i];
1701 
1702 		/*
1703 		 * The range tree provides more precision than the space map
1704 		 * and must be downgraded so that all values fit within the
1705 		 * space map's histogram. This allows us to compare loaded
1706 		 * vs. unloaded metaslabs to determine which metaslab is
1707 		 * considered "best".
1708 		 */
1709 		if (i > max_idx)
1710 			continue;
1711 
1712 		if (segments != 0) {
1713 			WEIGHT_SET_COUNT(weight, segments);
1714 			WEIGHT_SET_INDEX(weight, i);
1715 			WEIGHT_SET_ACTIVE(weight, 0);
1716 			break;
1717 		}
1718 	}
1719 	return (weight);
1720 }
1721 
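
/*
 * Example of the encoding above (counts are hypothetical): if the largest
 * free segments of a loaded metaslab fall in the 128K (2^17) bucket of the
 * range tree histogram and there are 1000 of them, the weight encodes
 * index = 17 and count = 1000 (assuming index 17 fits within the space map
 * histogram range); buckets below the highest nonzero one do not influence
 * the weight.
 */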
1722 /*
1723  * Calculate the weight based on the on-disk histogram. This should only
1724  * be called after a sync pass has completely finished since the on-disk
1725  * information is updated in metaslab_sync().
1726  */
1727 static uint64_t
1728 metaslab_weight_from_spacemap(metaslab_t *msp)
1729 {
1730 	uint64_t weight = 0;
1731 
1732 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
1733 		if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
1734 			WEIGHT_SET_COUNT(weight,
1735 			    msp->ms_sm->sm_phys->smp_histogram[i]);
1736 			WEIGHT_SET_INDEX(weight, i +
1737 			    msp->ms_sm->sm_shift);
1738 			WEIGHT_SET_ACTIVE(weight, 0);
1739 			break;
1740 		}
1741 	}
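
	/*
	 * For example: if the highest non-empty on-disk bucket is
	 * smp_histogram[5] with 7 segments and sm_shift is 9, the weight
	 * encodes count = 7 at index 5 + 9 = 14, i.e. free segments in
	 * the [16K, 32K) range.
	 */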
1742 	return (weight);
1743 }
1744 
1745 /*
1746  * Compute a segment-based weight for the specified metaslab. The weight
1747  * is determined by the highest bucket in the histogram. The information
1748  * for the highest bucket is encoded into the weight value.
1749  */
1750 static uint64_t
1751 metaslab_segment_weight(metaslab_t *msp)
1752 {
1753 	metaslab_group_t *mg = msp->ms_group;
1754 	uint64_t weight = 0;
1755 	uint8_t shift = mg->mg_vd->vdev_ashift;
1756 
1757 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1758 
1759 	/*
1760 	 * The metaslab is completely free.
1761 	 */
1762 	if (space_map_allocated(msp->ms_sm) == 0) {
1763 		int idx = highbit64(msp->ms_size) - 1;
1764 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
1765 
1766 		if (idx < max_idx) {
1767 			WEIGHT_SET_COUNT(weight, 1ULL);
1768 			WEIGHT_SET_INDEX(weight, idx);
1769 		} else {
1770 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
1771 			WEIGHT_SET_INDEX(weight, max_idx);
1772 		}
1773 		WEIGHT_SET_ACTIVE(weight, 0);
1774 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
1775 
1776 		return (weight);
1777 	}
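
	/*
	 * As an example of the completely-free case above (assuming a
	 * 32-bucket space map histogram and ashift = 9, so max_idx = 40):
	 * a fully free 2GB metaslab gives idx = highbit64(2GB) - 1 = 31,
	 * which is below max_idx, so the weight encodes one segment at
	 * index 31.  A 2^41-byte metaslab (idx = 41) would instead be
	 * encoded as 1 << (41 - 40) = 2 segments at index 40.
	 */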
1778 
1779 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
1780 
1781 	/*
1782 	 * If the metaslab is fully allocated then just make the weight 0.
1783 	 */
1784 	if (space_map_allocated(msp->ms_sm) == msp->ms_size)
1785 		return (0);
1786 	/*
1787 	 * If the metaslab is already loaded, then use the range tree to
1788 	 * determine the weight. Otherwise, we rely on the space map information
1789 	 * to generate the weight.
1790 	 */
1791 	if (msp->ms_loaded) {
1792 		weight = metaslab_weight_from_range_tree(msp);
1793 	} else {
1794 		weight = metaslab_weight_from_spacemap(msp);
1795 	}
1796 
1797 	/*
1798 	 * If the metaslab was active the last time we calculated its weight
1799 	 * then keep it active. We want to consume the entire region that
1800 	 * is associated with this weight.
1801 	 */
1802 	if (msp->ms_activation_weight != 0 && weight != 0)
1803 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
1804 	return (weight);
1805 }
1806 
1807 /*
1808  * Determine if we should attempt to allocate from this metaslab. If the
1809  * metaslab has a maximum size then we can quickly determine if the desired
1810  * allocation size can be satisfied. Otherwise, if we're using segment-based
1811  * weighting then we can determine the maximum allocation that this metaslab
1812  * can accommodate based on the index encoded in the weight. If we're using
1813  * space-based weights then rely on the entire weight (excluding the weight
1814  * type bit).
1815  */
1816 boolean_t
1817 metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
1818 {
1819 	boolean_t should_allocate;
1820 
1821 	if (msp->ms_max_size != 0)
1822 		return (msp->ms_max_size >= asize);
1823 
1824 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
1825 		/*
1826 		 * The metaslab segment weight indicates segments in the
1827 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
1828 		 * Since the asize might be in the middle of the range, we
1829 		 * should attempt the allocation if asize < 2^(i+1).
1830 		 */
1831 		should_allocate = (asize <
1832 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
1833 	} else {
1834 		should_allocate = (asize <=
1835 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
1836 	}
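
	/*
	 * For example, a segment-based weight with index 17 advertises
	 * free segments in the [128K, 256K) range, so any asize up to but
	 * not including 256K is worth attempting here.
	 */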
1837 	return (should_allocate);
1838 }
1839 
1840 static uint64_t
1841 metaslab_weight(metaslab_t *msp)
1842 {
1843 	vdev_t *vd = msp->ms_group->mg_vd;
1844 	spa_t *spa = vd->vdev_spa;
1845 	uint64_t weight;
1846 
1847 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1848 
1849 	/*
1850 	 * This vdev is in the process of being removed so there is nothing
1851 	 * for us to do here.
1852 	 */
1853 	if (vd->vdev_removing) {
1854 		ASSERT0(space_map_allocated(msp->ms_sm));
1855 		ASSERT0(vd->vdev_ms_shift);
1856 		return (0);
1857 	}
1858 
1859 	metaslab_set_fragmentation(msp);
1860 
1861 	/*
1862 	 * Update the maximum size if the metaslab is loaded. This will
1863 	 * ensure that we get an accurate maximum size if newly freed space
1864 	 * has been added back into the free tree.
1865 	 */
1866 	if (msp->ms_loaded)
1867 		msp->ms_max_size = metaslab_block_maxsize(msp);
1868 
1869 	/*
1870 	 * Segment-based weighting requires space map histogram support.
1871 	 */
1872 	if (zfs_metaslab_segment_weight_enabled &&
1873 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
1874 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
1875 	    sizeof (space_map_phys_t))) {
1876 		weight = metaslab_segment_weight(msp);
1877 	} else {
1878 		weight = metaslab_space_weight(msp);
1879 	}
1880 	return (weight);
1881 }
1882 
1883 static int
1884 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1885 {
1886 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1887 
1888 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1889 		metaslab_load_wait(msp);
1890 		if (!msp->ms_loaded) {
1891 			int error = metaslab_load(msp);
1892 			if (error) {
1893 				metaslab_group_sort(msp->ms_group, msp, 0);
1894 				return (error);
1895 			}
1896 		}
1897 
1898 		msp->ms_activation_weight = msp->ms_weight;
1899 		metaslab_group_sort(msp->ms_group, msp,
1900 		    msp->ms_weight | activation_weight);
1901 	}
1902 	ASSERT(msp->ms_loaded);
1903 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1904 
1905 	return (0);
1906 }
1907 
1908 static void
1909 metaslab_passivate(metaslab_t *msp, uint64_t weight)
1910 {
1911 	uint64_t size = weight & ~METASLAB_WEIGHT_TYPE;
1912 
1913 	/*
1914 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1915 	 * this metaslab again.  In that case, it had better be empty,
1916 	 * or we would be leaving space on the table.
1917 	 */
1918 	ASSERT(size >= SPA_MINBLOCKSIZE ||
1919 	    range_tree_space(msp->ms_tree) == 0);
1920 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
1921 
1922 	msp->ms_activation_weight = 0;
1923 	metaslab_group_sort(msp->ms_group, msp, weight);
1924 	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1925 }
1926 
1927 /*
1928  * Segment-based metaslabs are activated once and remain active until
1929  * we either fail an allocation attempt (similar to space-based metaslabs)
1930  * or have exhausted the free space in zfs_metaslab_switch_threshold
1931  * buckets since the metaslab was activated. This function checks to see
1932  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
1933  * metaslab and passivates it proactively. This allows us to select a
1934  * metaslab with a larger contiguous region, if any remain within this
1935  * metaslab group. If we're in sync pass > 1, then we continue using this
1936  * metaslab so that we don't dirty more blocks and cause more sync passes.
1937  */
1938 void
1939 metaslab_segment_may_passivate(metaslab_t *msp)
1940 {
1941 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1942 
1943 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
1944 		return;
1945 
1946 	/*
1947 	 * Since we are in the middle of a sync pass, the most accurate
1948 	 * information that is accessible to us is the in-core range tree
1949 	 * histogram; calculate the new weight based on that information.
1950 	 */
1951 	uint64_t weight = metaslab_weight_from_range_tree(msp);
1952 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
1953 	int current_idx = WEIGHT_GET_INDEX(weight);
1954 
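	/*
	 * For example, with a switch threshold of 2, a metaslab activated
	 * at index 23 (the 8MB bucket) is passivated here once its largest
	 * remaining bucket drops to index 21 (the 2MB bucket) or below.
	 */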
1955 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
1956 		metaslab_passivate(msp, weight);
1957 }
1958 
1959 static void
1960 metaslab_preload(void *arg)
1961 {
1962 	metaslab_t *msp = arg;
1963 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1964 
1965 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1966 
1967 	mutex_enter(&msp->ms_lock);
1968 	metaslab_load_wait(msp);
1969 	if (!msp->ms_loaded)
1970 		(void) metaslab_load(msp);
1971 	msp->ms_selected_txg = spa_syncing_txg(spa);
1972 	mutex_exit(&msp->ms_lock);
1973 }
1974 
1975 static void
1976 metaslab_group_preload(metaslab_group_t *mg)
1977 {
1978 	spa_t *spa = mg->mg_vd->vdev_spa;
1979 	metaslab_t *msp;
1980 	avl_tree_t *t = &mg->mg_metaslab_tree;
1981 	int m = 0;
1982 
1983 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1984 		taskq_wait(mg->mg_taskq);
1985 		return;
1986 	}
1987 
1988 	mutex_enter(&mg->mg_lock);
1989 	/*
1990 	 * Load the next potential metaslabs
1991 	 */
1992 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
1993 		/*
1994 		 * We preload only the maximum number of metaslabs specified
1995 		 * by metaslab_preload_limit. If a metaslab is being forced
1996 		 * to condense then we preload it too. This will ensure
1997 		 * that force condensing happens in the next txg.
1998 		 */
1999 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2000 			continue;
2001 		}
2002 
2003 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2004 		    msp, TQ_SLEEP) != NULL);
2005 	}
2006 	mutex_exit(&mg->mg_lock);
2007 }
2008 
2009 /*
2010  * Determine if the space map's on-disk footprint is past our tolerance
2011  * for inefficiency. We would like to use the following criteria to make
2012  * our decision:
2013  *
2014  * 1. The size of the space map object should not dramatically increase as a
2015  * result of writing out the free space range tree.
2016  *
2017  * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2018  * times the size of the free space range tree representation
2019  * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2020  *
2021  * 3. The on-disk size of the space map should actually decrease.
2022  *
2023  * Checking the first condition is tricky since we don't want to walk
2024  * the entire AVL tree calculating the estimated on-disk size. Instead we
2025  * use the size-ordered range tree in the metaslab and calculate the
2026  * size required to write out the largest segment in our free tree. If the
2027  * size required to represent that segment on disk is larger than the space
2028  * map object then we avoid condensing this map.
2029  *
2030  * To determine the second criterion we use a best-case estimate and assume
2031  * each segment can be represented on-disk as a single 64-bit entry. We refer
2032  * to this best-case estimate as the space map's minimal form.
2033  *
2034  * Unfortunately, we cannot compute the on-disk size of the space map in this
2035  * context because we cannot accurately compute the effects of compression, etc.
2036  * Instead, we apply the heuristic described in the block comment for
2037  * zfs_metaslab_condense_block_threshold - we only condense if the space used
2038  * is greater than a threshold number of blocks.
2039  */
2040 static boolean_t
2041 metaslab_should_condense(metaslab_t *msp)
2042 {
2043 	space_map_t *sm = msp->ms_sm;
2044 	range_seg_t *rs;
2045 	uint64_t size, entries, segsz, object_size, optimal_size, record_size;
2046 	dmu_object_info_t doi;
2047 	uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
2048 
2049 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2050 	ASSERT(msp->ms_loaded);
2051 
2052 	/*
2053 	 * Use the ms_size_tree range tree, which is ordered by size, to
2054 	 * obtain the largest segment in the free tree. We always condense
2055 	 * metaslabs that are empty and metaslabs for which a condense
2056 	 * request has been made.
2057 	 */
2058 	rs = avl_last(&msp->ms_size_tree);
2059 	if (rs == NULL || msp->ms_condense_wanted)
2060 		return (B_TRUE);
2061 
2062 	/*
2063 	 * Calculate the number of 64-bit entries this segment would
2064 	 * require when written to disk. If this single segment would be
2065 	 * larger on-disk than the entire current on-disk structure, then
2066 	 * clearly condensing will increase the on-disk structure size.
2067 	 */
2068 	size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
2069 	entries = size / (MIN(size, SM_RUN_MAX));
2070 	segsz = entries * sizeof (uint64_t);
2071 
2072 	optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
2073 	object_size = space_map_length(msp->ms_sm);
2074 
2075 	dmu_object_info_from_db(sm->sm_dbuf, &doi);
2076 	record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2077 
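	/*
	 * Worked example (assuming zfs_condense_pct = 200 and
	 * zfs_metaslab_condense_block_threshold = 4): with ashift = 12 the
	 * record size is at least 4K, so we condense only if the space map
	 * consumes more than 16K on disk, is at least twice the size of
	 * its minimal form, and would not grow just to represent the
	 * largest free segment.
	 */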
2078 	return (segsz <= object_size &&
2079 	    object_size >= (optimal_size * zfs_condense_pct / 100) &&
2080 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
2081 }
2082 
2083 /*
2084  * Condense the on-disk space map representation to its minimized form.
2085  * The minimized form consists of a small number of allocations followed by
2086  * the entries of the free range tree.
2087  */
2088 static void
2089 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2090 {
2091 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2092 	range_tree_t *condense_tree;
2093 	space_map_t *sm = msp->ms_sm;
2094 
2095 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2096 	ASSERT3U(spa_sync_pass(spa), ==, 1);
2097 	ASSERT(msp->ms_loaded);
2098 
2099 
2100 	spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2101 	    "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2102 	    msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2103 	    msp->ms_group->mg_vd->vdev_spa->spa_name,
2104 	    space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
2105 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
2106 
2107 	msp->ms_condense_wanted = B_FALSE;
2108 
2109 	/*
2110 	 * Create a range tree that is 100% allocated. We remove segments
2111 	 * that have been freed in this txg, any deferred frees that exist,
2112 	 * and any allocations in the future. Removing segments should be
2113 	 * a relatively inexpensive operation since we expect these trees to
2114 	 * have a small number of nodes.
2115 	 */
2116 	condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
2117 	range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2118 
2119 	/*
2120 	 * Remove what's been freed in this txg from the condense_tree.
2121 	 * Since we're in sync_pass 1, we know that all the frees from
2122 	 * this txg are in the freeingtree.
2123 	 */
2124 	range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
2125 
2126 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2127 		range_tree_walk(msp->ms_defertree[t],
2128 		    range_tree_remove, condense_tree);
2129 	}
2130 
2131 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2132 		range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
2133 		    range_tree_remove, condense_tree);
2134 	}
2135 
2136 	/*
2137 	 * We're about to drop the metaslab's lock thus allowing
2138 	 * other consumers to change its content. Set the
2139 	 * metaslab's ms_condensing flag to ensure that
2140 	 * allocations on this metaslab do not occur while we're
2141 	 * in the middle of committing it to disk. This is only critical
2142 	 * for the ms_tree as all other range trees use per txg
2143 	 * views of their content.
2144 	 */
2145 	msp->ms_condensing = B_TRUE;
2146 
2147 	mutex_exit(&msp->ms_lock);
2148 	space_map_truncate(sm, tx);
2149 	mutex_enter(&msp->ms_lock);
2150 
2151 	/*
2152 	 * While we would ideally like to create a space map representation
2153 	 * that consists only of allocation records, doing so can be
2154 	 * prohibitively expensive because the in-core free tree can be
2155 	 * large, and therefore computationally expensive to subtract
2156 	 * from the condense_tree. Instead we sync out two trees, a cheap
2157 	 * allocation only tree followed by the in-core free tree. While not
2158 	 * optimal, this is typically close to optimal, and much cheaper to
2159 	 * compute.
2160 	 */
2161 	space_map_write(sm, condense_tree, SM_ALLOC, tx);
2162 	range_tree_vacate(condense_tree, NULL, NULL);
2163 	range_tree_destroy(condense_tree);
2164 
2165 	space_map_write(sm, msp->ms_tree, SM_FREE, tx);
2166 	msp->ms_condensing = B_FALSE;
2167 }
2168 
2169 /*
2170  * Write a metaslab to disk in the context of the specified transaction group.
2171  */
2172 void
2173 metaslab_sync(metaslab_t *msp, uint64_t txg)
2174 {
2175 	metaslab_group_t *mg = msp->ms_group;
2176 	vdev_t *vd = mg->mg_vd;
2177 	spa_t *spa = vd->vdev_spa;
2178 	objset_t *mos = spa_meta_objset(spa);
2179 	range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
2180 	dmu_tx_t *tx;
2181 	uint64_t object = space_map_object(msp->ms_sm);
2182 
2183 	ASSERT(!vd->vdev_ishole);
2184 
2185 	/*
2186 	 * This metaslab has just been added so there's no work to do now.
2187 	 */
2188 	if (msp->ms_freeingtree == NULL) {
2189 		ASSERT3P(alloctree, ==, NULL);
2190 		return;
2191 	}
2192 
2193 	ASSERT3P(alloctree, !=, NULL);
2194 	ASSERT3P(msp->ms_freeingtree, !=, NULL);
2195 	ASSERT3P(msp->ms_freedtree, !=, NULL);
2196 
2197 	/*
2198 	 * Normally, we don't want to process a metaslab if there
2199 	 * are no allocations or frees to perform. However, if the metaslab
2200 	 * is being forced to condense and it's loaded, we need to let it
2201 	 * through.
2202 	 */
2203 	if (range_tree_space(alloctree) == 0 &&
2204 	    range_tree_space(msp->ms_freeingtree) == 0 &&
2205 	    !(msp->ms_loaded && msp->ms_condense_wanted))
2206 		return;
2207 
2208 
2209 	VERIFY(txg <= spa_final_dirty_txg(spa));
2210 
2211 	/*
2212 	 * The only state that can actually be changing concurrently with
2213 	 * metaslab_sync() is the metaslab's ms_tree.  No other thread can
2214 	 * be modifying this txg's alloctree, freeingtree, freedtree, or
2215 	 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
2216 	 * space map ASSERTs. We drop it whenever we call into the DMU,
2217 	 * because the DMU can call down to us (e.g. via zio_free()) at
2218 	 * any time.
2219 	 */
2220 
2221 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
2222 
2223 	if (msp->ms_sm == NULL) {
2224 		uint64_t new_object;
2225 
2226 		new_object = space_map_alloc(mos, tx);
2227 		VERIFY3U(new_object, !=, 0);
2228 
2229 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
2230 		    msp->ms_start, msp->ms_size, vd->vdev_ashift,
2231 		    &msp->ms_lock));
2232 		ASSERT(msp->ms_sm != NULL);
2233 	}
2234 
2235 	mutex_enter(&msp->ms_lock);
2236 
2237 	/*
2238 	 * Note: metaslab_condense() clears the space map's histogram.
2239 	 * Therefore we must verify and remove this histogram before
2240 	 * condensing.
2241 	 */
2242 	metaslab_group_histogram_verify(mg);
2243 	metaslab_class_histogram_verify(mg->mg_class);
2244 	metaslab_group_histogram_remove(mg, msp);
2245 
2246 	if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
2247 	    metaslab_should_condense(msp)) {
2248 		metaslab_condense(msp, txg, tx);
2249 	} else {
2250 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
2251 		space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
2252 	}
2253 
2254 	if (msp->ms_loaded) {
2255 		/*
2256 		 * When the space map is loaded, we have an accurate
2257 		 * histogram in the range tree. This gives us an opportunity
2258 		 * to bring the space map's histogram up-to-date so we clear
2259 		 * it first before updating it.
2260 		 */
2261 		space_map_histogram_clear(msp->ms_sm);
2262 		space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
2263 
2264 		/*
2265 		 * Since we've cleared the histogram we need to add back
2266 		 * any free space that has already been processed, plus
2267 		 * any deferred space. This allows the on-disk histogram
2268 		 * to accurately reflect all free space even if some space
2269 		 * is not yet available for allocation (i.e. deferred).
2270 		 */
2271 		space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);
2272 
2273 		/*
2274 		 * Add back any deferred free space that has not been
2275 		 * added back into the in-core free tree yet. This will
2276 		 * ensure that we don't end up with a space map histogram
2277 		 * that is completely empty unless the metaslab is fully
2278 		 * allocated.
2279 		 */
2280 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2281 			space_map_histogram_add(msp->ms_sm,
2282 			    msp->ms_defertree[t], tx);
2283 		}
2284 	}
2285 
2286 	/*
2287 	 * Always add the free space from this sync pass to the space
2288 	 * map histogram. We want to make sure that the on-disk histogram
2289 	 * accounts for all free space. If the space map is not loaded,
2290 	 * then we will lose some accuracy but will correct it the next
2291 	 * time we load the space map.
2292 	 */
2293 	space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);
2294 
2295 	metaslab_group_histogram_add(mg, msp);
2296 	metaslab_group_histogram_verify(mg);
2297 	metaslab_class_histogram_verify(mg->mg_class);
2298 
2299 	/*
2300 	 * For sync pass 1, we avoid traversing this txg's free range tree
2301 	 * and instead will just swap the pointers for freeingtree and
2302 	 * freedtree. We can safely do this since the freedtree is
2303 	 * guaranteed to be empty on the initial pass.
2304 	 */
2305 	if (spa_sync_pass(spa) == 1) {
2306 		range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
2307 	} else {
2308 		range_tree_vacate(msp->ms_freeingtree,
2309 		    range_tree_add, msp->ms_freedtree);
2310 	}
2311 	range_tree_vacate(alloctree, NULL, NULL);
2312 
2313 	ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
2314 	ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
2315 	ASSERT0(range_tree_space(msp->ms_freeingtree));
2316 
2317 	mutex_exit(&msp->ms_lock);
2318 
2319 	if (object != space_map_object(msp->ms_sm)) {
2320 		object = space_map_object(msp->ms_sm);
2321 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
2322 		    msp->ms_id, sizeof (uint64_t), &object, tx);
2323 	}
2324 	dmu_tx_commit(tx);
2325 }
2326 
2327 /*
2328  * Called after a transaction group has completely synced to mark
2329  * all of the metaslab's free space as usable.
2330  */
2331 void
2332 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
2333 {
2334 	metaslab_group_t *mg = msp->ms_group;
2335 	vdev_t *vd = mg->mg_vd;
2336 	spa_t *spa = vd->vdev_spa;
2337 	range_tree_t **defer_tree;
2338 	int64_t alloc_delta, defer_delta;
2339 	boolean_t defer_allowed = B_TRUE;
2340 
2341 	ASSERT(!vd->vdev_ishole);
2342 
2343 	mutex_enter(&msp->ms_lock);
2344 
2345 	/*
2346 	 * If this metaslab is just becoming available, initialize its
2347 	 * range trees and add its capacity to the vdev.
2348 	 */
2349 	if (msp->ms_freedtree == NULL) {
2350 		for (int t = 0; t < TXG_SIZE; t++) {
2351 			ASSERT(msp->ms_alloctree[t] == NULL);
2352 
2353 			msp->ms_alloctree[t] = range_tree_create(NULL, msp,
2354 			    &msp->ms_lock);
2355 		}
2356 
2357 		ASSERT3P(msp->ms_freeingtree, ==, NULL);
2358 		msp->ms_freeingtree = range_tree_create(NULL, msp,
2359 		    &msp->ms_lock);
2360 
2361 		ASSERT3P(msp->ms_freedtree, ==, NULL);
2362 		msp->ms_freedtree = range_tree_create(NULL, msp,
2363 		    &msp->ms_lock);
2364 
2365 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2366 			ASSERT(msp->ms_defertree[t] == NULL);
2367 
2368 			msp->ms_defertree[t] = range_tree_create(NULL, msp,
2369 			    &msp->ms_lock);
2370 		}
2371 
2372 		vdev_space_update(vd, 0, 0, msp->ms_size);
2373 	}
2374 
2375 	defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
2376 
2377 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
2378 	    metaslab_class_get_alloc(spa_normal_class(spa));
2379 	if (free_space <= spa_get_slop_space(spa)) {
2380 		defer_allowed = B_FALSE;
2381 	}
2382 
2383 	defer_delta = 0;
2384 	alloc_delta = space_map_alloc_delta(msp->ms_sm);
2385 	if (defer_allowed) {
2386 		defer_delta = range_tree_space(msp->ms_freedtree) -
2387 		    range_tree_space(*defer_tree);
2388 	} else {
2389 		defer_delta -= range_tree_space(*defer_tree);
2390 	}
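
	/*
	 * For example, if 100MB was freed in this txg (ms_freedtree) and
	 * the defer bucket being recycled still holds 60MB, defer_delta is
	 * +40MB; with deferral disabled it is simply -60MB, since the old
	 * deferred space drains back into the free tree below.
	 */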
2391 
2392 	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
2393 
2394 	/*
2395 	 * If there's a metaslab_load() in progress, wait for it to complete
2396 	 * so that we have a consistent view of the in-core space map.
2397 	 */
2398 	metaslab_load_wait(msp);
2399 
2400 	/*
2401 	 * Move the frees from the defer_tree back to the free
2402 	 * range tree (if it's loaded). Swap the freedtree and the
2403 	 * defer_tree -- this is safe to do because we've just emptied out
2404 	 * the defer_tree.
2405 	 */
2406 	range_tree_vacate(*defer_tree,
2407 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2408 	if (defer_allowed) {
2409 		range_tree_swap(&msp->ms_freedtree, defer_tree);
2410 	} else {
2411 		range_tree_vacate(msp->ms_freedtree,
2412 		    msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
2413 	}
2414 
2415 	space_map_update(msp->ms_sm);
2416 
2417 	msp->ms_deferspace += defer_delta;
2418 	ASSERT3S(msp->ms_deferspace, >=, 0);
2419 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
2420 	if (msp->ms_deferspace != 0) {
2421 		/*
2422 		 * Keep syncing this metaslab until all deferred frees
2423 		 * are back in circulation.
2424 		 */
2425 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2426 	}
2427 
2428 	/*
2429 	 * Calculate the new weights before unloading any metaslabs.
2430 	 * This will give us the most accurate weighting.
2431 	 */
2432 	metaslab_group_sort(mg, msp, metaslab_weight(msp));
2433 
2434 	/*
2435 	 * If the metaslab is loaded and we've not tried to load or allocate
2436 	 * from it in 'metaslab_unload_delay' txgs, then unload it.
2437 	 */
2438 	if (msp->ms_loaded &&
2439 	    msp->ms_selected_txg + metaslab_unload_delay < txg) {
2440 		for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2441 			VERIFY0(range_tree_space(
2442 			    msp->ms_alloctree[(txg + t) & TXG_MASK]));
2443 		}
2444 
2445 		if (!metaslab_debug_unload)
2446 			metaslab_unload(msp);
2447 	}
2448 
2449 	mutex_exit(&msp->ms_lock);
2450 }
2451 
2452 void
2453 metaslab_sync_reassess(metaslab_group_t *mg)
2454 {
2455 	metaslab_group_alloc_update(mg);
2456 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2457 
2458 	/*
2459 	 * Preload the next potential metaslabs
2460 	 */
2461 	metaslab_group_preload(mg);
2462 }
2463 
2464 static uint64_t
2465 metaslab_distance(metaslab_t *msp, dva_t *dva)
2466 {
2467 	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2468 	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2469 	uint64_t start = msp->ms_id;
2470 
2471 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2472 		return (1ULL << 63);
2473 
2474 	if (offset < start)
2475 		return ((start - offset) << ms_shift);
2476 	if (offset > start)
2477 		return ((offset - start) << ms_shift);
2478 	return (0);
2479 }
2480 
2481 /*
2482  * ==========================================================================
2483  * Metaslab allocation tracing facility
2484  * ==========================================================================
2485  */
2486 kstat_t *metaslab_trace_ksp;
2487 kstat_named_t metaslab_trace_over_limit;
2488 
2489 void
2490 metaslab_alloc_trace_init(void)
2491 {
2492 	ASSERT(metaslab_alloc_trace_cache == NULL);
2493 	metaslab_alloc_trace_cache = kmem_cache_create(
2494 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
2495 	    0, NULL, NULL, NULL, NULL, NULL, 0);
2496 	metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
2497 	    "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
2498 	if (metaslab_trace_ksp != NULL) {
2499 		metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
2500 		kstat_named_init(&metaslab_trace_over_limit,
2501 		    "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
2502 		kstat_install(metaslab_trace_ksp);
2503 	}
2504 }
2505 
2506 void
2507 metaslab_alloc_trace_fini(void)
2508 {
2509 	if (metaslab_trace_ksp != NULL) {
2510 		kstat_delete(metaslab_trace_ksp);
2511 		metaslab_trace_ksp = NULL;
2512 	}
2513 	kmem_cache_destroy(metaslab_alloc_trace_cache);
2514 	metaslab_alloc_trace_cache = NULL;
2515 }
2516 
2517 /*
2518  * Add an allocation trace element to the allocation tracing list.
2519  */
2520 static void
2521 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
2522     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
2523 {
2524 	if (!metaslab_trace_enabled)
2525 		return;
2526 
2527 	/*
2528 	 * When the tracing list reaches its maximum we remove
2529 	 * the second element in the list before adding a new one.
2530 	 * By removing the second element we preserve the original
2531 	 * entry as a clue to what allocation steps have already been
2532 	 * performed.
2533 	 */
2534 	if (zal->zal_size == metaslab_trace_max_entries) {
2535 		metaslab_alloc_trace_t *mat_next;
2536 #ifdef DEBUG
2537 		panic("too many entries in allocation list");
2538 #endif
2539 		atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
2540 		zal->zal_size--;
2541 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
2542 		list_remove(&zal->zal_list, mat_next);
2543 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
2544 	}
2545 
2546 	metaslab_alloc_trace_t *mat =
2547 	    kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
2548 	list_link_init(&mat->mat_list_node);
2549 	mat->mat_mg = mg;
2550 	mat->mat_msp = msp;
2551 	mat->mat_size = psize;
2552 	mat->mat_dva_id = dva_id;
2553 	mat->mat_offset = offset;
2554 	mat->mat_weight = 0;
2555 
2556 	if (msp != NULL)
2557 		mat->mat_weight = msp->ms_weight;
2558 
2559 	/*
2560 	 * The list is part of the zio so locking is not required. Only
2561 	 * a single thread will perform allocations for a given zio.
2562 	 */
2563 	list_insert_tail(&zal->zal_list, mat);
2564 	zal->zal_size++;
2565 
2566 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
2567 }
2568 
2569 void
2570 metaslab_trace_init(zio_alloc_list_t *zal)
2571 {
2572 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
2573 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
2574 	zal->zal_size = 0;
2575 }
2576 
2577 void
2578 metaslab_trace_fini(zio_alloc_list_t *zal)
2579 {
2580 	metaslab_alloc_trace_t *mat;
2581 
2582 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
2583 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
2584 	list_destroy(&zal->zal_list);
2585 	zal->zal_size = 0;
2586 }
2587 
2588 /*
2589  * ==========================================================================
2590  * Metaslab block operations
2591  * ==========================================================================
2592  */
2593 
2594 static void
2595 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
2596 {
2597 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2598 	    flags & METASLAB_DONT_THROTTLE)
2599 		return;
2600 
2601 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2602 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2603 		return;
2604 
2605 	(void) refcount_add(&mg->mg_alloc_queue_depth, tag);
2606 }
2607 
2608 void
2609 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
2610 {
2611 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
2612 	    flags & METASLAB_DONT_THROTTLE)
2613 		return;
2614 
2615 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2616 	if (!mg->mg_class->mc_alloc_throttle_enabled)
2617 		return;
2618 
2619 	(void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
2620 }
2621 
2622 void
2623 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
2624 {
2625 #ifdef ZFS_DEBUG
2626 	const dva_t *dva = bp->blk_dva;
2627 	int ndvas = BP_GET_NDVAS(bp);
2628 
2629 	for (int d = 0; d < ndvas; d++) {
2630 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
2631 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
2632 		VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
2633 	}
2634 #endif
2635 }
2636 
2637 static uint64_t
2638 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
2639 {
2640 	uint64_t start;
2641 	range_tree_t *rt = msp->ms_tree;
2642 	metaslab_class_t *mc = msp->ms_group->mg_class;
2643 
2644 	VERIFY(!msp->ms_condensing);
2645 
2646 	start = mc->mc_ops->msop_alloc(msp, size);
2647 	if (start != -1ULL) {
2648 		metaslab_group_t *mg = msp->ms_group;
2649 		vdev_t *vd = mg->mg_vd;
2650 
2651 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
2652 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2653 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
2654 		range_tree_remove(rt, start, size);
2655 
2656 		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2657 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2658 
2659 		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
2660 
2661 		/* Track the last successful allocation */
2662 		msp->ms_alloc_txg = txg;
2663 		metaslab_verify_space(msp, txg);
2664 	}
2665 
2666 	/*
2667 	 * Now that we've attempted the allocation we need to update the
2668 	 * metaslab's maximum block size since it may have changed.
2669 	 */
2670 	msp->ms_max_size = metaslab_block_maxsize(msp);
2671 	return (start);
2672 }
2673 
2674 static uint64_t
2675 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
2676     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2677 {
2678 	metaslab_t *msp = NULL;
2679 	uint64_t offset = -1ULL;
2680 	uint64_t activation_weight;
2681 	uint64_t target_distance;
2682 	int i;
2683 
2684 	activation_weight = METASLAB_WEIGHT_PRIMARY;
2685 	for (i = 0; i < d; i++) {
2686 		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2687 			activation_weight = METASLAB_WEIGHT_SECONDARY;
2688 			break;
2689 		}
2690 	}
2691 
2692 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
2693 	search->ms_weight = UINT64_MAX;
2694 	search->ms_start = 0;
2695 	for (;;) {
2696 		boolean_t was_active;
2697 		avl_tree_t *t = &mg->mg_metaslab_tree;
2698 		avl_index_t idx;
2699 
2700 		mutex_enter(&mg->mg_lock);
2701 
2702 		/*
2703 		 * Find the metaslab with the highest weight that is less
2704 		 * than what we've already tried.  In the common case, this
2705 		 * means that we will examine each metaslab at most once.
2706 		 * Note that concurrent callers could reorder metaslabs
2707 		 * by activation/passivation once we have dropped the mg_lock.
2708 		 * If a metaslab is activated by another thread, and we fail
2709 		 * to allocate from the metaslab we have selected, we may
2710 		 * not try the newly-activated metaslab, and instead activate
2711 		 * another metaslab.  This is not optimal, but generally
2712 		 * does not cause any problems (a possible exception being
2713 		 * if every metaslab is completely full except for the
2714 		 * newly-activated metaslab which we fail to examine).
2715 		 */
2716 		msp = avl_find(t, search, &idx);
2717 		if (msp == NULL)
2718 			msp = avl_nearest(t, idx, AVL_AFTER);
2719 		for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
2720 
2721 			if (!metaslab_should_allocate(msp, asize)) {
2722 				metaslab_trace_add(zal, mg, msp, asize, d,
2723 				    TRACE_TOO_SMALL);
2724 				continue;
2725 			}
2726 
2727 			/*
2728 			 * If the selected metaslab is condensing, skip it.
2729 			 */
2730 			if (msp->ms_condensing)
2731 				continue;
2732 
2733 			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2734 			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2735 				break;
2736 
2737 			target_distance = min_distance +
2738 			    (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2739 			    min_distance >> 1);
2740 
2741 			for (i = 0; i < d; i++) {
2742 				if (metaslab_distance(msp, &dva[i]) <
2743 				    target_distance)
2744 					break;
2745 			}
2746 			if (i == d)
2747 				break;
2748 		}
2749 		mutex_exit(&mg->mg_lock);
2750 		if (msp == NULL) {
2751 			kmem_free(search, sizeof (*search));
2752 			return (-1ULL);
2753 		}
2754 		search->ms_weight = msp->ms_weight;
2755 		search->ms_start = msp->ms_start + 1;
2756 
2757 		mutex_enter(&msp->ms_lock);
2758 
2759 		/*
2760 		 * Ensure that the metaslab we have selected is still
2761 		 * capable of handling our request. It's possible that
2762 		 * another thread may have changed the weight while we
2763 		 * were blocked on the metaslab lock. We check the
2764 		 * active status first to see if we need to reselect
2765 		 * a new metaslab.
2766 		 */
2767 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
2768 			mutex_exit(&msp->ms_lock);
2769 			continue;
2770 		}
2771 
2772 		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2773 		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
2774 			metaslab_passivate(msp,
2775 			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2776 			mutex_exit(&msp->ms_lock);
2777 			continue;
2778 		}
2779 
2780 		if (metaslab_activate(msp, activation_weight) != 0) {
2781 			mutex_exit(&msp->ms_lock);
2782 			continue;
2783 		}
2784 		msp->ms_selected_txg = txg;
2785 
2786 		/*
2787 		 * Now that we have the lock, recheck to see if we should
2788 		 * continue to use this metaslab for this allocation. The
2789 		 * metaslab is now loaded so metaslab_should_allocate() can
2790 		 * accurately determine if the allocation attempt should
2791 		 * proceed.
2792 		 */
2793 		if (!metaslab_should_allocate(msp, asize)) {
2794 			/* Passivate this metaslab and select a new one. */
2795 			metaslab_trace_add(zal, mg, msp, asize, d,
2796 			    TRACE_TOO_SMALL);
2797 			goto next;
2798 		}
2799 
2800 		/*
2801 		 * If this metaslab is currently condensing then pick again as
2802 		 * we can't manipulate this metaslab until it's committed
2803 		 * to disk.
2804 		 */
2805 		if (msp->ms_condensing) {
2806 			metaslab_trace_add(zal, mg, msp, asize, d,
2807 			    TRACE_CONDENSING);
2808 			mutex_exit(&msp->ms_lock);
2809 			continue;
2810 		}
2811 
2812 		offset = metaslab_block_alloc(msp, asize, txg);
2813 		metaslab_trace_add(zal, mg, msp, asize, d, offset);
2814 
2815 		if (offset != -1ULL) {
2816 			/* Proactively passivate the metaslab, if needed */
2817 			metaslab_segment_may_passivate(msp);
2818 			break;
2819 		}
2820 next:
2821 		ASSERT(msp->ms_loaded);
2822 
2823 		/*
2824 		 * We were unable to allocate from this metaslab so determine
2825 		 * a new weight for this metaslab. Now that we have loaded
2826 		 * the metaslab we can provide a better hint to the metaslab
2827 		 * selector.
2828 		 *
2829 		 * For space-based metaslabs, we use the maximum block size.
2830 		 * This information is only available when the metaslab
2831 		 * is loaded and is more accurate than the generic free
2832 		 * space weight that was calculated by metaslab_weight().
2833 		 * This information allows us to quickly compare the maximum
2834 		 * available allocation in the metaslab to the allocation
2835 		 * size being requested.
2836 		 *
2837 		 * For segment-based metaslabs, determine the new weight
2838 		 * based on the highest bucket in the range tree. We
2839 		 * explicitly use the loaded segment weight (i.e. the range
2840 		 * tree histogram) since it contains the space that is
2841 		 * currently available for allocation and is accurate
2842 		 * even within a sync pass.
2843 		 */
2844 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
2845 			uint64_t weight = metaslab_block_maxsize(msp);
2846 			WEIGHT_SET_SPACEBASED(weight);
2847 			metaslab_passivate(msp, weight);
2848 		} else {
2849 			metaslab_passivate(msp,
2850 			    metaslab_weight_from_range_tree(msp));
2851 		}
2852 
2853 		/*
2854 		 * We have just failed an allocation attempt, check
2855 		 * that metaslab_should_allocate() agrees. Otherwise,
2856 		 * we may end up in an infinite loop retrying the same
2857 		 * metaslab.
2858 		 */
2859 		ASSERT(!metaslab_should_allocate(msp, asize));
2860 		mutex_exit(&msp->ms_lock);
2861 	}
2862 	mutex_exit(&msp->ms_lock);
2863 	kmem_free(search, sizeof (*search));
2864 	return (offset);
2865 }
2866 
2867 static uint64_t
2868 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
2869     uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2870 {
2871 	uint64_t offset;
2872 	ASSERT(mg->mg_initialized);
2873 
2874 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
2875 	    min_distance, dva, d);
2876 
2877 	mutex_enter(&mg->mg_lock);
2878 	if (offset == -1ULL) {
2879 		mg->mg_failed_allocations++;
2880 		metaslab_trace_add(zal, mg, NULL, asize, d,
2881 		    TRACE_GROUP_FAILURE);
2882 		if (asize == SPA_GANGBLOCKSIZE) {
2883 			/*
2884 			 * This metaslab group was unable to allocate
2885 			 * the minimum gang block size so it must be out of
2886 			 * space. We must notify the allocation throttle
2887 			 * to start skipping allocation attempts to this
2888 			 * metaslab group until more space becomes available.
2889 			 * Note: this failure cannot be caused by the
2890 			 * allocation throttle since the allocation throttle
2891 			 * is only responsible for skipping devices and
2892 			 * not failing block allocations.
2893 			 */
2894 			mg->mg_no_free_space = B_TRUE;
2895 		}
2896 	}
2897 	mg->mg_allocations++;
2898 	mutex_exit(&mg->mg_lock);
2899 	return (offset);
2900 }
2901 
2902 /*
2903  * If we have to write a ditto block (i.e. more than one DVA for a given BP)
2904  * on the same vdev as an existing DVA of this BP, then try to allocate it
2905  * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
2906  * existing DVAs.
2907  */
2908 int ditto_same_vdev_distance_shift = 3;
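
/*
 * For example, on a top-level vdev with a 1TB asize and the default shift
 * of 3, ditto copies of the same block are kept at least 1TB / 8 = 128GB
 * apart; metaslab_group_alloc_normal() further raises that target by 50%
 * when the candidate metaslab is still completely empty.
 */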
2909 
2910 /*
2911  * Allocate a block for the specified i/o.
2912  */
2913 static int
2914 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2915     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
2916     zio_alloc_list_t *zal)
2917 {
2918 	metaslab_group_t *mg, *rotor;
2919 	vdev_t *vd;
2920 	boolean_t try_hard = B_FALSE;
2921 
2922 	ASSERT(!DVA_IS_VALID(&dva[d]));
2923 
2924 	/*
2925 	 * For testing, make some blocks above a certain size be gang blocks.
2926 	 */
2927 	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
2928 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
2929 		return (SET_ERROR(ENOSPC));
2930 	}
2931 
2932 	/*
2933 	 * Start at the rotor and loop through all mgs until we find something.
2934 	 * Note that there's no locking on mc_rotor or mc_aliquot because
2935 	 * nothing actually breaks if we miss a few updates -- we just won't
2936 	 * allocate quite as evenly.  It all balances out over time.
2937 	 *
2938 	 * If we are doing ditto or log blocks, try to spread them across
2939 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
2940 	 * allocated all of our ditto blocks, then try and spread them out on
2941 	 * that vdev as much as possible.  If it turns out to not be possible,
2942 	 * gradually lower our standards until anything becomes acceptable.
2943 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2944 	 * gives us hope of containing our fault domains to something we're
2945 	 * able to reason about.  Otherwise, any two top-level vdev failures
2946 	 * will guarantee the loss of data.  With consecutive allocation,
2947 	 * only two adjacent top-level vdev failures will result in data loss.
2948 	 *
2949 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2950 	 * ourselves on the same vdev as our gang block header.  That
2951 	 * way, we can hope for locality in vdev_cache, plus it makes our
2952 	 * fault domains something tractable.
2953 	 */
2954 	if (hintdva) {
2955 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2956 
2957 		/*
2958 		 * It's possible the vdev we're using as the hint no
2959 		 * longer exists (i.e. removed). Consult the rotor when
2960 		 * all else fails.
2961 		 */
2962 		if (vd != NULL) {
2963 			mg = vd->vdev_mg;
2964 
2965 			if (flags & METASLAB_HINTBP_AVOID &&
2966 			    mg->mg_next != NULL)
2967 				mg = mg->mg_next;
2968 		} else {
2969 			mg = mc->mc_rotor;
2970 		}
2971 	} else if (d != 0) {
2972 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2973 		mg = vd->vdev_mg->mg_next;
2974 	} else {
2975 		mg = mc->mc_rotor;
2976 	}
2977 
2978 	/*
2979 	 * If the hint put us into the wrong metaslab class, or into a
2980 	 * metaslab group that has been passivated, just follow the rotor.
2981 	 */
2982 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2983 		mg = mc->mc_rotor;
2984 
2985 	rotor = mg;
2986 top:
2987 	do {
2988 		boolean_t allocatable;
2989 
2990 		ASSERT(mg->mg_activation_count == 1);
2991 		vd = mg->mg_vd;
2992 
2993 		/*
2994 		 * Don't allocate from faulted devices.
2995 		 */
2996 		if (try_hard) {
2997 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2998 			allocatable = vdev_allocatable(vd);
2999 			spa_config_exit(spa, SCL_ZIO, FTAG);
3000 		} else {
3001 			allocatable = vdev_allocatable(vd);
3002 		}
3003 
3004 		/*
3005 		 * Determine if the selected metaslab group is eligible
3006 		 * for allocations. If we're ganging then don't allow
3007 		 * this metaslab group to skip allocations since that would
3008 		 * inadvertently return ENOSPC and suspend the pool
3009 		 * even though space is still available.
3010 		 */
3011 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
3012 			allocatable = metaslab_group_allocatable(mg, rotor,
3013 			    psize);
3014 		}
3015 
3016 		if (!allocatable) {
3017 			metaslab_trace_add(zal, mg, NULL, psize, d,
3018 			    TRACE_NOT_ALLOCATABLE);
3019 			goto next;
3020 		}
3021 
3022 		ASSERT(mg->mg_initialized);
3023 
3024 		/*
3025 		 * Avoid writing single-copy data to a failing,
3026 		 * non-redundant vdev, unless we've already tried all
3027 		 * other vdevs.
3028 		 */
3029 		if ((vd->vdev_stat.vs_write_errors > 0 ||
3030 		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
3031 		    d == 0 && !try_hard && vd->vdev_children == 0) {
3032 			metaslab_trace_add(zal, mg, NULL, psize, d,
3033 			    TRACE_VDEV_ERROR);
3034 			goto next;
3035 		}
3036 
3037 		ASSERT(mg->mg_class == mc);
3038 
3039 		/*
3040 		 * If we don't need to try hard, then require that the
3041 		 * block be 1/8th of the device away from any other DVAs
3042 		 * in this BP.  If we are trying hard, allow any offset
3043 		 * to be used (distance=0).
3044 		 */
3045 		uint64_t distance = 0;
3046 		if (!try_hard) {
3047 			distance = vd->vdev_asize >>
3048 			    ditto_same_vdev_distance_shift;
3049 			if (distance <= (1ULL << vd->vdev_ms_shift))
3050 				distance = 0;
3051 		}
3052 
3053 		uint64_t asize = vdev_psize_to_asize(vd, psize);
3054 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
3055 
3056 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
3057 		    distance, dva, d);
3058 
3059 		if (offset != -1ULL) {
3060 			/*
3061 			 * If we've just selected this metaslab group,
3062 			 * figure out whether the corresponding vdev is
3063 			 * over- or under-used relative to the pool,
3064 			 * and set an allocation bias to even it out.
3065 			 */
3066 			if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
3067 				vdev_stat_t *vs = &vd->vdev_stat;
3068 				int64_t vu, cu;
3069 
3070 				vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
3071 				cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
3072 
3073 				/*
3074 				 * Calculate how much more or less we should
3075 				 * try to allocate from this device during
3076 				 * this iteration around the rotor.
3077 				 * For example, if a device is 80% full
3078 				 * and the pool is 20% full then we should
3079 				 * reduce allocations by 60% on this device.
3080 				 *
3081 				 * mg_bias = (20 - 80) * 512K / 100 = -307K
3082 				 *
3083 				 * This reduces allocations by 307K for this
3084 				 * iteration.
3085 				 */
3086 				mg->mg_bias = ((cu - vu) *
3087 				    (int64_t)mg->mg_aliquot) / 100;
3088 			} else if (!metaslab_bias_enabled) {
3089 				mg->mg_bias = 0;
3090 			}
3091 
3092 			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
3093 			    mg->mg_aliquot + mg->mg_bias) {
3094 				mc->mc_rotor = mg->mg_next;
3095 				mc->mc_aliquot = 0;
3096 			}
3097 
3098 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
3099 			DVA_SET_OFFSET(&dva[d], offset);
3100 			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
3101 			DVA_SET_ASIZE(&dva[d], asize);
3102 
3103 			return (0);
3104 		}
3105 next:
3106 		mc->mc_rotor = mg->mg_next;
3107 		mc->mc_aliquot = 0;
3108 	} while ((mg = mg->mg_next) != rotor);
3109 
3110 	/*
3111 	 * If we haven't tried hard, do so now.
3112 	 */
3113 	if (!try_hard) {
3114 		try_hard = B_TRUE;
3115 		goto top;
3116 	}
3117 
3118 	bzero(&dva[d], sizeof (dva_t));
3119 
3120 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
3121 	return (SET_ERROR(ENOSPC));
3122 }
3123 
3124 /*
3125  * Free the block represented by DVA in the context of the specified
3126  * transaction group.
3127  */
3128 static void
3129 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
3130 {
3131 	uint64_t vdev = DVA_GET_VDEV(dva);
3132 	uint64_t offset = DVA_GET_OFFSET(dva);
3133 	uint64_t size = DVA_GET_ASIZE(dva);
3134 	vdev_t *vd;
3135 	metaslab_t *msp;
3136 
3137 	ASSERT(DVA_IS_VALID(dva));
3138 
3139 	if (txg > spa_freeze_txg(spa))
3140 		return;
3141 
3142 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3143 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
3144 		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
3145 		    (u_longlong_t)vdev, (u_longlong_t)offset);
3146 		ASSERT(0);
3147 		return;
3148 	}
3149 
3150 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3151 
3152 	if (DVA_GET_GANG(dva))
3153 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3154 
3155 	mutex_enter(&msp->ms_lock);
3156 
3157 	if (now) {
3158 		range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
3159 		    offset, size);
3160 
3161 		VERIFY(!msp->ms_condensing);
3162 		VERIFY3U(offset, >=, msp->ms_start);
3163 		VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
3164 		VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
3165 		    msp->ms_size);
3166 		VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3167 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3168 		range_tree_add(msp->ms_tree, offset, size);
3169 		msp->ms_max_size = metaslab_block_maxsize(msp);
3170 	} else {
3171 		VERIFY3U(txg, ==, spa->spa_syncing_txg);
3172 		if (range_tree_space(msp->ms_freeingtree) == 0)
3173 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
3174 		range_tree_add(msp->ms_freeingtree, offset, size);
3175 	}
3176 
3177 	mutex_exit(&msp->ms_lock);
3178 }
3179 
3180 /*
3181  * Intent log support: upon opening the pool after a crash, notify the SPA
3182  * of blocks that the intent log has allocated for immediate write, but
3183  * which are still considered free by the SPA because the last transaction
3184  * group didn't commit yet.
3185  */
3186 static int
3187 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
3188 {
3189 	uint64_t vdev = DVA_GET_VDEV(dva);
3190 	uint64_t offset = DVA_GET_OFFSET(dva);
3191 	uint64_t size = DVA_GET_ASIZE(dva);
3192 	vdev_t *vd;
3193 	metaslab_t *msp;
3194 	int error = 0;
3195 
3196 	ASSERT(DVA_IS_VALID(dva));
3197 
3198 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
3199 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
3200 		return (SET_ERROR(ENXIO));
3201 
3202 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3203 
3204 	if (DVA_GET_GANG(dva))
3205 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
3206 
3207 	mutex_enter(&msp->ms_lock);
3208 
3209 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
3210 		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
3211 
3212 	if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
3213 		error = SET_ERROR(ENOENT);
3214 
3215 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
3216 		mutex_exit(&msp->ms_lock);
3217 		return (error);
3218 	}
3219 
3220 	VERIFY(!msp->ms_condensing);
3221 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
3222 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3223 	VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
3224 	range_tree_remove(msp->ms_tree, offset, size);
3225 
3226 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
3227 		if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
3228 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
3229 		range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
3230 	}
3231 
3232 	mutex_exit(&msp->ms_lock);
3233 
3234 	return (0);
3235 }
3236 
3237 /*
3238  * Reserve some allocation slots. The reservation system must be called
3239  * before we call into the allocator. If there aren't any available slots
3240  * then the I/O will be throttled until an I/O completes and its slots are
3241  * freed up. The function returns true if it was successful in placing
3242  * the reservation.
3243  */
3244 boolean_t
3245 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
3246     int flags)
3247 {
3248 	uint64_t available_slots = 0;
3249 	boolean_t slot_reserved = B_FALSE;
3250 
3251 	ASSERT(mc->mc_alloc_throttle_enabled);
3252 	mutex_enter(&mc->mc_lock);
3253 
3254 	uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
3255 	if (reserved_slots < mc->mc_alloc_max_slots)
3256 		available_slots = mc->mc_alloc_max_slots - reserved_slots;
3257 
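	/*
	 * For example, with mc_alloc_max_slots = 10 and 8 slots already
	 * reserved, a request for 3 slots is rejected (and the I/O
	 * throttled) unless it is a gang allocation, which is always
	 * allowed to proceed.
	 */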
3258 	if (slots <= available_slots || GANG_ALLOCATION(flags)) {
3259 		/*
3260 		 * We reserve the slots individually so that we can unreserve
3261 		 * them individually when an I/O completes.
3262 		 */
3263 		for (int d = 0; d < slots; d++) {
3264 			reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
3265 		}
3266 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
3267 		slot_reserved = B_TRUE;
3268 	}
3269 
3270 	mutex_exit(&mc->mc_lock);
3271 	return (slot_reserved);
3272 }
3273 
3274 void
3275 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
3276 {
3277 	ASSERT(mc->mc_alloc_throttle_enabled);
3278 	mutex_enter(&mc->mc_lock);
3279 	for (int d = 0; d < slots; d++) {
3280 		(void) refcount_remove(&mc->mc_alloc_slots, zio);
3281 	}
3282 	mutex_exit(&mc->mc_lock);
3283 }
3284 
3285 int
3286 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
3287     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
3288     zio_alloc_list_t *zal, zio_t *zio)
3289 {
3290 	dva_t *dva = bp->blk_dva;
3291 	dva_t *hintdva = hintbp->blk_dva;
3292 	int error = 0;
3293 
3294 	ASSERT(bp->blk_birth == 0);
3295 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
3296 
3297 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3298 
3299 	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
3300 		spa_config_exit(spa, SCL_ALLOC, FTAG);
3301 		return (SET_ERROR(ENOSPC));
3302 	}
3303 
3304 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
3305 	ASSERT(BP_GET_NDVAS(bp) == 0);
3306 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
3307 	ASSERT3P(zal, !=, NULL);
3308 
3309 	for (int d = 0; d < ndvas; d++) {
3310 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
3311 		    txg, flags, zal);
3312 		if (error != 0) {
3313 			for (d--; d >= 0; d--) {
3314 				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
3315 				metaslab_group_alloc_decrement(spa,
3316 				    DVA_GET_VDEV(&dva[d]), zio, flags);
3317 				bzero(&dva[d], sizeof (dva_t));
3318 			}
3319 			spa_config_exit(spa, SCL_ALLOC, FTAG);
3320 			return (error);
3321 		} else {
3322 			/*
3323 			 * Update the metaslab group's queue depth
3324 			 * based on the newly allocated dva.
3325 			 */
3326 			metaslab_group_alloc_increment(spa,
3327 			    DVA_GET_VDEV(&dva[d]), zio, flags);
3328 		}
3329 
3330 	}
3331 	ASSERT(error == 0);
3332 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
3333 
3334 	spa_config_exit(spa, SCL_ALLOC, FTAG);
3335 
3336 	BP_SET_BIRTH(bp, txg, txg);
3337 
3338 	return (0);
3339 }
3340 
3341 void
3342 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
3343 {
3344 	const dva_t *dva = bp->blk_dva;
3345 	int ndvas = BP_GET_NDVAS(bp);
3346 
3347 	ASSERT(!BP_IS_HOLE(bp));
3348 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
3349 
3350 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
3351 
3352 	for (int d = 0; d < ndvas; d++)
3353 		metaslab_free_dva(spa, &dva[d], txg, now);
3354 
3355 	spa_config_exit(spa, SCL_FREE, FTAG);
3356 }
3357 
3358 int
3359 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
3360 {
3361 	const dva_t *dva = bp->blk_dva;
3362 	int ndvas = BP_GET_NDVAS(bp);
3363 	int error = 0;
3364 
3365 	ASSERT(!BP_IS_HOLE(bp));
3366 
3367 	if (txg != 0) {
3368 		/*
3369 		 * First do a dry run to make sure all DVAs are claimable,
3370 		 * so we don't have to unwind from partial failures below.
3371 		 */
3372 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
3373 			return (error);
3374 	}
3375 
3376 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3377 
3378 	for (int d = 0; d < ndvas; d++)
3379 		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
3380 			break;
3381 
3382 	spa_config_exit(spa, SCL_ALLOC, FTAG);
3383 
3384 	ASSERT(error == 0 || txg == 0);
3385 
3386 	return (error);
3387 }
3388 
3389 void
3390 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
3391 {
3392 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
3393 		return;
3394 
3395 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
3396 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
3397 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
3398 		vdev_t *vd = vdev_lookup_top(spa, vdev);
3399 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
3400 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
3401 		metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
3402 
3403 		if (msp->ms_loaded)
3404 			range_tree_verify(msp->ms_tree, offset, size);
3405 
3406 		range_tree_verify(msp->ms_freeingtree, offset, size);
3407 		range_tree_verify(msp->ms_freedtree, offset, size);
3408 		for (int j = 0; j < TXG_DEFER_SIZE; j++)
3409 			range_tree_verify(msp->ms_defertree[j], offset, size);
3410 	}
3411 	spa_config_exit(spa, SCL_VDEV, FTAG);
3412 }
3413