xref: /freebsd/sys/contrib/openzfs/module/zfs/metaslab.c (revision 315ee00fa9616b0a192b6834911f98bcf5316a6b)
1eda14cbcSMatt Macy /*
2eda14cbcSMatt Macy  * CDDL HEADER START
3eda14cbcSMatt Macy  *
4eda14cbcSMatt Macy  * The contents of this file are subject to the terms of the
5eda14cbcSMatt Macy  * Common Development and Distribution License (the "License").
6eda14cbcSMatt Macy  * You may not use this file except in compliance with the License.
7eda14cbcSMatt Macy  *
8eda14cbcSMatt Macy  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9271171e0SMartin Matuska  * or https://opensource.org/licenses/CDDL-1.0.
10eda14cbcSMatt Macy  * See the License for the specific language governing permissions
11eda14cbcSMatt Macy  * and limitations under the License.
12eda14cbcSMatt Macy  *
13eda14cbcSMatt Macy  * When distributing Covered Code, include this CDDL HEADER in each
14eda14cbcSMatt Macy  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15eda14cbcSMatt Macy  * If applicable, add the following below this CDDL HEADER, with the
16eda14cbcSMatt Macy  * fields enclosed by brackets "[]" replaced with your own identifying
17eda14cbcSMatt Macy  * information: Portions Copyright [yyyy] [name of copyright owner]
18eda14cbcSMatt Macy  *
19eda14cbcSMatt Macy  * CDDL HEADER END
20eda14cbcSMatt Macy  */
21eda14cbcSMatt Macy /*
22eda14cbcSMatt Macy  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23eda14cbcSMatt Macy  * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
24eda14cbcSMatt Macy  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
252c48331dSMatt Macy  * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
26eda14cbcSMatt Macy  * Copyright (c) 2017, Intel Corporation.
27eda14cbcSMatt Macy  */
28eda14cbcSMatt Macy 
29eda14cbcSMatt Macy #include <sys/zfs_context.h>
30eda14cbcSMatt Macy #include <sys/dmu.h>
31eda14cbcSMatt Macy #include <sys/dmu_tx.h>
32eda14cbcSMatt Macy #include <sys/space_map.h>
33eda14cbcSMatt Macy #include <sys/metaslab_impl.h>
34eda14cbcSMatt Macy #include <sys/vdev_impl.h>
357877fdebSMatt Macy #include <sys/vdev_draid.h>
36eda14cbcSMatt Macy #include <sys/zio.h>
37eda14cbcSMatt Macy #include <sys/spa_impl.h>
38eda14cbcSMatt Macy #include <sys/zfeature.h>
39eda14cbcSMatt Macy #include <sys/vdev_indirect_mapping.h>
40eda14cbcSMatt Macy #include <sys/zap.h>
41eda14cbcSMatt Macy #include <sys/btree.h>
42eda14cbcSMatt Macy 
43eda14cbcSMatt Macy #define	WITH_DF_BLOCK_ALLOCATOR
44eda14cbcSMatt Macy 
45eda14cbcSMatt Macy #define	GANG_ALLOCATION(flags) \
46eda14cbcSMatt Macy 	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
47eda14cbcSMatt Macy 
48eda14cbcSMatt Macy /*
49eda14cbcSMatt Macy  * Metaslab granularity, in bytes. This is roughly similar to what would be
50eda14cbcSMatt Macy  * referred to as the "stripe size" in traditional RAID arrays. In normal
51716fd348SMartin Matuska  * operation, we will try to write this amount of data to each disk before
52716fd348SMartin Matuska  * moving on to the next top-level vdev.
53eda14cbcSMatt Macy  */
54dbd5678dSMartin Matuska static uint64_t metaslab_aliquot = 1024 * 1024;
55eda14cbcSMatt Macy 
56eda14cbcSMatt Macy /*
57eda14cbcSMatt Macy  * For testing, make some blocks above a certain size be gang blocks.
58eda14cbcSMatt Macy  */
59dbd5678dSMartin Matuska uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;
60eda14cbcSMatt Macy 
61eda14cbcSMatt Macy /*
62*315ee00fSMartin Matuska  * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
63*315ee00fSMartin Matuska  */
64*315ee00fSMartin Matuska uint_t metaslab_force_ganging_pct = 3;
65*315ee00fSMartin Matuska 
66*315ee00fSMartin Matuska /*
67eda14cbcSMatt Macy  * In pools where the log space map feature is not enabled we touch
68eda14cbcSMatt Macy  * multiple metaslabs (and their respective space maps) with each
69eda14cbcSMatt Macy  * transaction group. Thus, we benefit from having a small space map
70eda14cbcSMatt Macy  * block size since it allows us to issue more I/O operations scattered
71eda14cbcSMatt Macy  * around the disk. So a sane default for the space map block size
72eda14cbcSMatt Macy  * is 8~16K.
73eda14cbcSMatt Macy  */
74eda14cbcSMatt Macy int zfs_metaslab_sm_blksz_no_log = (1 << 14);
75eda14cbcSMatt Macy 
76eda14cbcSMatt Macy /*
77eda14cbcSMatt Macy  * When the log space map feature is enabled, we accumulate a lot of
78eda14cbcSMatt Macy  * changes per metaslab that are flushed once in a while so we benefit
79eda14cbcSMatt Macy  * from a bigger block size like 128K for the metaslab space maps.
80eda14cbcSMatt Macy  */
81eda14cbcSMatt Macy int zfs_metaslab_sm_blksz_with_log = (1 << 17);
82eda14cbcSMatt Macy 
83eda14cbcSMatt Macy /*
84eda14cbcSMatt Macy  * The in-core space map representation is more compact than its on-disk form.
85eda14cbcSMatt Macy  * The zfs_condense_pct determines how much more compact the in-core
86eda14cbcSMatt Macy  * space map representation must be before we compact it on-disk.
87eda14cbcSMatt Macy  * Values should be greater than or equal to 100.
88eda14cbcSMatt Macy  */
89be181ee2SMartin Matuska uint_t zfs_condense_pct = 200;
90eda14cbcSMatt Macy 
91eda14cbcSMatt Macy /*
92eda14cbcSMatt Macy  * Condensing a metaslab is not guaranteed to actually reduce the amount of
93eda14cbcSMatt Macy  * space used on disk. In particular, a space map uses data in increments of
94eda14cbcSMatt Macy  * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
95eda14cbcSMatt Macy  * same number of blocks after condensing. Since the goal of condensing is to
96eda14cbcSMatt Macy  * reduce the number of IOPs required to read the space map, we only want to
97eda14cbcSMatt Macy  * condense when we can be sure we will reduce the number of blocks used by the
98eda14cbcSMatt Macy  * space map. Unfortunately, we cannot precisely compute whether or not this is
99eda14cbcSMatt Macy  * the case in metaslab_should_condense since we are holding ms_lock. Instead,
100eda14cbcSMatt Macy  * we apply the following heuristic: do not condense a spacemap unless the
101eda14cbcSMatt Macy  * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
102eda14cbcSMatt Macy  * blocks.
103eda14cbcSMatt Macy  */
104e92ffd9bSMartin Matuska static const int zfs_metaslab_condense_block_threshold = 4;
105eda14cbcSMatt Macy 
106eda14cbcSMatt Macy /*
107eda14cbcSMatt Macy  * The zfs_mg_noalloc_threshold defines which metaslab groups should
108eda14cbcSMatt Macy  * be eligible for allocation. The value is defined as a percentage of
109eda14cbcSMatt Macy  * free space. Metaslab groups that have more free space than
110eda14cbcSMatt Macy  * zfs_mg_noalloc_threshold are always eligible for allocations. Once
111eda14cbcSMatt Macy  * a metaslab group's free space is less than or equal to the
112eda14cbcSMatt Macy  * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
113eda14cbcSMatt Macy  * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
114eda14cbcSMatt Macy  * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
115eda14cbcSMatt Macy  * groups are allowed to accept allocations. Gang blocks are always
116eda14cbcSMatt Macy  * eligible to allocate on any metaslab group. The default value of 0 means
117eda14cbcSMatt Macy  * no metaslab group will be excluded based on this criterion.
118eda14cbcSMatt Macy  */
119be181ee2SMartin Matuska static uint_t zfs_mg_noalloc_threshold = 0;
120eda14cbcSMatt Macy 
121eda14cbcSMatt Macy /*
122eda14cbcSMatt Macy  * Metaslab groups are considered eligible for allocations if their
123eda14cbcSMatt Macy  * fragmentation metric (measured as a percentage) is less than or
124eda14cbcSMatt Macy  * equal to zfs_mg_fragmentation_threshold. If a metaslab group
125eda14cbcSMatt Macy  * exceeds this threshold then it will be skipped unless all metaslab
126eda14cbcSMatt Macy  * groups within the metaslab class have also crossed this threshold.
127eda14cbcSMatt Macy  *
128eda14cbcSMatt Macy  * This tunable was introduced to avoid edge cases where we continue
129eda14cbcSMatt Macy  * allocating from very fragmented disks in our pool while other, less
130eda14cbcSMatt Macy  * fragmented disks, exists. On the other hand, if all disks in the
131eda14cbcSMatt Macy  * pool are uniformly approaching the threshold, the threshold can
132eda14cbcSMatt Macy  * be a speed bump in performance, where we keep switching the disks
133eda14cbcSMatt Macy  * that we allocate from (e.g. we allocate some segments from disk A
134eda14cbcSMatt Macy  * making it bypassing the threshold while freeing segments from disk
135eda14cbcSMatt Macy  * B getting its fragmentation below the threshold).
136eda14cbcSMatt Macy  *
137eda14cbcSMatt Macy  * Empirically, we've seen that our vdev selection for allocations is
138eda14cbcSMatt Macy  * good enough that fragmentation increases uniformly across all vdevs
139eda14cbcSMatt Macy  * the majority of the time. Thus we set the threshold percentage high
140eda14cbcSMatt Macy  * enough to avoid hitting the speed bump on pools that are being pushed
141eda14cbcSMatt Macy  * to the edge.
142eda14cbcSMatt Macy  */
143be181ee2SMartin Matuska static uint_t zfs_mg_fragmentation_threshold = 95;
144eda14cbcSMatt Macy 
145eda14cbcSMatt Macy /*
146eda14cbcSMatt Macy  * Allow metaslabs to keep their active state as long as their fragmentation
147eda14cbcSMatt Macy  * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
148eda14cbcSMatt Macy  * active metaslab that exceeds this threshold will no longer keep its active
149eda14cbcSMatt Macy  * status allowing better metaslabs to be selected.
150eda14cbcSMatt Macy  */
151be181ee2SMartin Matuska static uint_t zfs_metaslab_fragmentation_threshold = 70;
152eda14cbcSMatt Macy 
153eda14cbcSMatt Macy /*
154eda14cbcSMatt Macy  * When set will load all metaslabs when pool is first opened.
155eda14cbcSMatt Macy  */
156e92ffd9bSMartin Matuska int metaslab_debug_load = B_FALSE;
157eda14cbcSMatt Macy 
158eda14cbcSMatt Macy /*
159eda14cbcSMatt Macy  * When set will prevent metaslabs from being unloaded.
160eda14cbcSMatt Macy  */
161e92ffd9bSMartin Matuska static int metaslab_debug_unload = B_FALSE;
162eda14cbcSMatt Macy 
163eda14cbcSMatt Macy /*
164eda14cbcSMatt Macy  * Minimum size which forces the dynamic allocator to change
165eda14cbcSMatt Macy  * it's allocation strategy.  Once the space map cannot satisfy
166eda14cbcSMatt Macy  * an allocation of this size then it switches to using more
167eda14cbcSMatt Macy  * aggressive strategy (i.e search by size rather than offset).
168eda14cbcSMatt Macy  */
169eda14cbcSMatt Macy uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
170eda14cbcSMatt Macy 
171eda14cbcSMatt Macy /*
172eda14cbcSMatt Macy  * The minimum free space, in percent, which must be available
173eda14cbcSMatt Macy  * in a space map to continue allocations in a first-fit fashion.
174eda14cbcSMatt Macy  * Once the space map's free space drops below this level we dynamically
175eda14cbcSMatt Macy  * switch to using best-fit allocations.
176eda14cbcSMatt Macy  */
177be181ee2SMartin Matuska uint_t metaslab_df_free_pct = 4;
178eda14cbcSMatt Macy 
179eda14cbcSMatt Macy /*
180eda14cbcSMatt Macy  * Maximum distance to search forward from the last offset. Without this
181eda14cbcSMatt Macy  * limit, fragmented pools can see >100,000 iterations and
182eda14cbcSMatt Macy  * metaslab_block_picker() becomes the performance limiting factor on
183eda14cbcSMatt Macy  * high-performance storage.
184eda14cbcSMatt Macy  *
185eda14cbcSMatt Macy  * With the default setting of 16MB, we typically see less than 500
186eda14cbcSMatt Macy  * iterations, even with very fragmented, ashift=9 pools. The maximum number
187eda14cbcSMatt Macy  * of iterations possible is:
188eda14cbcSMatt Macy  *     metaslab_df_max_search / (2 * (1<<ashift))
189eda14cbcSMatt Macy  * With the default setting of 16MB this is 16*1024 (with ashift=9) or
190eda14cbcSMatt Macy  * 2048 (with ashift=12).
191eda14cbcSMatt Macy  */
192be181ee2SMartin Matuska static uint_t metaslab_df_max_search = 16 * 1024 * 1024;
193eda14cbcSMatt Macy 
194eda14cbcSMatt Macy /*
195eda14cbcSMatt Macy  * Forces the metaslab_block_picker function to search for at least this many
196eda14cbcSMatt Macy  * segments forwards until giving up on finding a segment that the allocation
197eda14cbcSMatt Macy  * will fit into.
198eda14cbcSMatt Macy  */
199e92ffd9bSMartin Matuska static const uint32_t metaslab_min_search_count = 100;
200eda14cbcSMatt Macy 
201eda14cbcSMatt Macy /*
202eda14cbcSMatt Macy  * If we are not searching forward (due to metaslab_df_max_search,
203eda14cbcSMatt Macy  * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
204eda14cbcSMatt Macy  * controls what segment is used.  If it is set, we will use the largest free
205eda14cbcSMatt Macy  * segment.  If it is not set, we will use a segment of exactly the requested
206eda14cbcSMatt Macy  * size (or larger).
207eda14cbcSMatt Macy  */
208e92ffd9bSMartin Matuska static int metaslab_df_use_largest_segment = B_FALSE;
209eda14cbcSMatt Macy 
210eda14cbcSMatt Macy /*
211eda14cbcSMatt Macy  * Percentage of all cpus that can be used by the metaslab taskq.
212eda14cbcSMatt Macy  */
213eda14cbcSMatt Macy int metaslab_load_pct = 50;
214eda14cbcSMatt Macy 
215eda14cbcSMatt Macy /*
216eda14cbcSMatt Macy  * These tunables control how long a metaslab will remain loaded after the
217eda14cbcSMatt Macy  * last allocation from it.  A metaslab can't be unloaded until at least
218eda14cbcSMatt Macy  * metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds
219eda14cbcSMatt Macy  * have elapsed.  However, zfs_metaslab_mem_limit may cause it to be
220eda14cbcSMatt Macy  * unloaded sooner.  These settings are intended to be generous -- to keep
221eda14cbcSMatt Macy  * metaslabs loaded for a long time, reducing the rate of metaslab loading.
222eda14cbcSMatt Macy  */
223be181ee2SMartin Matuska static uint_t metaslab_unload_delay = 32;
224be181ee2SMartin Matuska static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
225eda14cbcSMatt Macy 
226eda14cbcSMatt Macy /*
227eda14cbcSMatt Macy  * Max number of metaslabs per group to preload.
228eda14cbcSMatt Macy  */
229be181ee2SMartin Matuska uint_t metaslab_preload_limit = 10;
230eda14cbcSMatt Macy 
231eda14cbcSMatt Macy /*
232eda14cbcSMatt Macy  * Enable/disable preloading of metaslab.
233eda14cbcSMatt Macy  */
234e92ffd9bSMartin Matuska static int metaslab_preload_enabled = B_TRUE;
235eda14cbcSMatt Macy 
236eda14cbcSMatt Macy /*
237eda14cbcSMatt Macy  * Enable/disable fragmentation weighting on metaslabs.
238eda14cbcSMatt Macy  */
239e92ffd9bSMartin Matuska static int metaslab_fragmentation_factor_enabled = B_TRUE;
240eda14cbcSMatt Macy 
241eda14cbcSMatt Macy /*
242eda14cbcSMatt Macy  * Enable/disable lba weighting (i.e. outer tracks are given preference).
243eda14cbcSMatt Macy  */
244e92ffd9bSMartin Matuska static int metaslab_lba_weighting_enabled = B_TRUE;
245eda14cbcSMatt Macy 
246eda14cbcSMatt Macy /*
247eda14cbcSMatt Macy  * Enable/disable metaslab group biasing.
248eda14cbcSMatt Macy  */
249e92ffd9bSMartin Matuska static int metaslab_bias_enabled = B_TRUE;
250eda14cbcSMatt Macy 
251eda14cbcSMatt Macy /*
252eda14cbcSMatt Macy  * Enable/disable remapping of indirect DVAs to their concrete vdevs.
253eda14cbcSMatt Macy  */
254e92ffd9bSMartin Matuska static const boolean_t zfs_remap_blkptr_enable = B_TRUE;
255eda14cbcSMatt Macy 
256eda14cbcSMatt Macy /*
257eda14cbcSMatt Macy  * Enable/disable segment-based metaslab selection.
258eda14cbcSMatt Macy  */
259e92ffd9bSMartin Matuska static int zfs_metaslab_segment_weight_enabled = B_TRUE;
260eda14cbcSMatt Macy 
261eda14cbcSMatt Macy /*
262eda14cbcSMatt Macy  * When using segment-based metaslab selection, we will continue
263eda14cbcSMatt Macy  * allocating from the active metaslab until we have exhausted
264eda14cbcSMatt Macy  * zfs_metaslab_switch_threshold of its buckets.
265eda14cbcSMatt Macy  */
266e92ffd9bSMartin Matuska static int zfs_metaslab_switch_threshold = 2;
267eda14cbcSMatt Macy 
268eda14cbcSMatt Macy /*
269eda14cbcSMatt Macy  * Internal switch to enable/disable the metaslab allocation tracing
270eda14cbcSMatt Macy  * facility.
271eda14cbcSMatt Macy  */
272e92ffd9bSMartin Matuska static const boolean_t metaslab_trace_enabled = B_FALSE;
273eda14cbcSMatt Macy 
274eda14cbcSMatt Macy /*
275eda14cbcSMatt Macy  * Maximum entries that the metaslab allocation tracing facility will keep
276eda14cbcSMatt Macy  * in a given list when running in non-debug mode. We limit the number
277eda14cbcSMatt Macy  * of entries in non-debug mode to prevent us from using up too much memory.
278eda14cbcSMatt Macy  * The limit should be sufficiently large that we don't expect any allocation
279eda14cbcSMatt Macy  * to every exceed this value. In debug mode, the system will panic if this
280eda14cbcSMatt Macy  * limit is ever reached allowing for further investigation.
281eda14cbcSMatt Macy  */
282e92ffd9bSMartin Matuska static const uint64_t metaslab_trace_max_entries = 5000;
283eda14cbcSMatt Macy 
284eda14cbcSMatt Macy /*
285eda14cbcSMatt Macy  * Maximum number of metaslabs per group that can be disabled
286eda14cbcSMatt Macy  * simultaneously.
287eda14cbcSMatt Macy  */
288e92ffd9bSMartin Matuska static const int max_disabled_ms = 3;
289eda14cbcSMatt Macy 
290eda14cbcSMatt Macy /*
291eda14cbcSMatt Macy  * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
292eda14cbcSMatt Macy  * To avoid 64-bit overflow, don't set above UINT32_MAX.
293eda14cbcSMatt Macy  */
294dbd5678dSMartin Matuska static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */
295eda14cbcSMatt Macy 
296eda14cbcSMatt Macy /*
297eda14cbcSMatt Macy  * Maximum percentage of memory to use on storing loaded metaslabs. If loading
298eda14cbcSMatt Macy  * a metaslab would take it over this percentage, the oldest selected metaslab
299eda14cbcSMatt Macy  * is automatically unloaded.
300eda14cbcSMatt Macy  */
301be181ee2SMartin Matuska static uint_t zfs_metaslab_mem_limit = 25;
302eda14cbcSMatt Macy 
303eda14cbcSMatt Macy /*
304eda14cbcSMatt Macy  * Force the per-metaslab range trees to use 64-bit integers to store
305eda14cbcSMatt Macy  * segments. Used for debugging purposes.
306eda14cbcSMatt Macy  */
307e92ffd9bSMartin Matuska static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;
308eda14cbcSMatt Macy 
309eda14cbcSMatt Macy /*
310eda14cbcSMatt Macy  * By default we only store segments over a certain size in the size-sorted
311eda14cbcSMatt Macy  * metaslab trees (ms_allocatable_by_size and
312eda14cbcSMatt Macy  * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
313eda14cbcSMatt Macy  * improves load and unload times at the cost of causing us to use slightly
314eda14cbcSMatt Macy  * larger segments than we would otherwise in some cases.
315eda14cbcSMatt Macy  */
316e92ffd9bSMartin Matuska static const uint32_t metaslab_by_size_min_shift = 14;
317eda14cbcSMatt Macy 
3187877fdebSMatt Macy /*
3197877fdebSMatt Macy  * If not set, we will first try normal allocation.  If that fails then
3207877fdebSMatt Macy  * we will do a gang allocation.  If that fails then we will do a "try hard"
3217877fdebSMatt Macy  * gang allocation.  If that fails then we will have a multi-layer gang
3227877fdebSMatt Macy  * block.
3237877fdebSMatt Macy  *
3247877fdebSMatt Macy  * If set, we will first try normal allocation.  If that fails then
3257877fdebSMatt Macy  * we will do a "try hard" allocation.  If that fails we will do a gang
3267877fdebSMatt Macy  * allocation.  If that fails we will do a "try hard" gang allocation.  If
3277877fdebSMatt Macy  * that fails then we will have a multi-layer gang block.
3287877fdebSMatt Macy  */
329e92ffd9bSMartin Matuska static int zfs_metaslab_try_hard_before_gang = B_FALSE;
3307877fdebSMatt Macy 
3317877fdebSMatt Macy /*
3327877fdebSMatt Macy  * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
3337877fdebSMatt Macy  * metaslabs.  This improves performance, especially when there are many
3347877fdebSMatt Macy  * metaslabs per vdev and the allocation can't actually be satisfied (so we
3357877fdebSMatt Macy  * would otherwise iterate all the metaslabs).  If there is a metaslab with a
3367877fdebSMatt Macy  * worse weight but it can actually satisfy the allocation, we won't find it
3377877fdebSMatt Macy  * until trying hard.  This may happen if the worse metaslab is not loaded
3387877fdebSMatt Macy  * (and the true weight is better than we have calculated), or due to weight
3397877fdebSMatt Macy  * bucketization.  E.g. we are looking for a 60K segment, and the best
3407877fdebSMatt Macy  * metaslabs all have free segments in the 32-63K bucket, but the best
3417877fdebSMatt Macy  * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
3427877fdebSMatt Macy  * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
3437877fdebSMatt Macy  * bucket, and therefore a lower weight).
3447877fdebSMatt Macy  */
345be181ee2SMartin Matuska static uint_t zfs_metaslab_find_max_tries = 100;
3467877fdebSMatt Macy 
347eda14cbcSMatt Macy static uint64_t metaslab_weight(metaslab_t *, boolean_t);
348eda14cbcSMatt Macy static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
349eda14cbcSMatt Macy static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
350eda14cbcSMatt Macy static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
351eda14cbcSMatt Macy 
352eda14cbcSMatt Macy static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
353eda14cbcSMatt Macy static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
354eda14cbcSMatt Macy static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
355eda14cbcSMatt Macy static unsigned int metaslab_idx_func(multilist_t *, void *);
356eda14cbcSMatt Macy static void metaslab_evict(metaslab_t *, uint64_t);
357eda14cbcSMatt Macy static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg);
358eda14cbcSMatt Macy kmem_cache_t *metaslab_alloc_trace_cache;
359eda14cbcSMatt Macy 
360eda14cbcSMatt Macy typedef struct metaslab_stats {
361eda14cbcSMatt Macy 	kstat_named_t metaslabstat_trace_over_limit;
362eda14cbcSMatt Macy 	kstat_named_t metaslabstat_reload_tree;
3637877fdebSMatt Macy 	kstat_named_t metaslabstat_too_many_tries;
3647877fdebSMatt Macy 	kstat_named_t metaslabstat_try_hard;
365eda14cbcSMatt Macy } metaslab_stats_t;
366eda14cbcSMatt Macy 
367eda14cbcSMatt Macy static metaslab_stats_t metaslab_stats = {
368eda14cbcSMatt Macy 	{ "trace_over_limit",		KSTAT_DATA_UINT64 },
369eda14cbcSMatt Macy 	{ "reload_tree",		KSTAT_DATA_UINT64 },
3707877fdebSMatt Macy 	{ "too_many_tries",		KSTAT_DATA_UINT64 },
3717877fdebSMatt Macy 	{ "try_hard",			KSTAT_DATA_UINT64 },
372eda14cbcSMatt Macy };
373eda14cbcSMatt Macy 
374eda14cbcSMatt Macy #define	METASLABSTAT_BUMP(stat) \
375eda14cbcSMatt Macy 	atomic_inc_64(&metaslab_stats.stat.value.ui64);
376eda14cbcSMatt Macy 
377eda14cbcSMatt Macy 
378e92ffd9bSMartin Matuska static kstat_t *metaslab_ksp;
379eda14cbcSMatt Macy 
380eda14cbcSMatt Macy void
381eda14cbcSMatt Macy metaslab_stat_init(void)
382eda14cbcSMatt Macy {
383eda14cbcSMatt Macy 	ASSERT(metaslab_alloc_trace_cache == NULL);
384eda14cbcSMatt Macy 	metaslab_alloc_trace_cache = kmem_cache_create(
385eda14cbcSMatt Macy 	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
386eda14cbcSMatt Macy 	    0, NULL, NULL, NULL, NULL, NULL, 0);
387eda14cbcSMatt Macy 	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
388eda14cbcSMatt Macy 	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
389eda14cbcSMatt Macy 	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
390eda14cbcSMatt Macy 	if (metaslab_ksp != NULL) {
391eda14cbcSMatt Macy 		metaslab_ksp->ks_data = &metaslab_stats;
392eda14cbcSMatt Macy 		kstat_install(metaslab_ksp);
393eda14cbcSMatt Macy 	}
394eda14cbcSMatt Macy }
395eda14cbcSMatt Macy 
396eda14cbcSMatt Macy void
397eda14cbcSMatt Macy metaslab_stat_fini(void)
398eda14cbcSMatt Macy {
399eda14cbcSMatt Macy 	if (metaslab_ksp != NULL) {
400eda14cbcSMatt Macy 		kstat_delete(metaslab_ksp);
401eda14cbcSMatt Macy 		metaslab_ksp = NULL;
402eda14cbcSMatt Macy 	}
403eda14cbcSMatt Macy 
404eda14cbcSMatt Macy 	kmem_cache_destroy(metaslab_alloc_trace_cache);
405eda14cbcSMatt Macy 	metaslab_alloc_trace_cache = NULL;
406eda14cbcSMatt Macy }
407eda14cbcSMatt Macy 
408eda14cbcSMatt Macy /*
409eda14cbcSMatt Macy  * ==========================================================================
410eda14cbcSMatt Macy  * Metaslab classes
411eda14cbcSMatt Macy  * ==========================================================================
412eda14cbcSMatt Macy  */
413eda14cbcSMatt Macy metaslab_class_t *
414e92ffd9bSMartin Matuska metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
415eda14cbcSMatt Macy {
416eda14cbcSMatt Macy 	metaslab_class_t *mc;
417eda14cbcSMatt Macy 
4187877fdebSMatt Macy 	mc = kmem_zalloc(offsetof(metaslab_class_t,
4197877fdebSMatt Macy 	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);
420eda14cbcSMatt Macy 
421eda14cbcSMatt Macy 	mc->mc_spa = spa;
422eda14cbcSMatt Macy 	mc->mc_ops = ops;
423eda14cbcSMatt Macy 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
4243ff01b23SMartin Matuska 	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
425eda14cbcSMatt Macy 	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
4267877fdebSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
4277877fdebSMatt Macy 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
4287877fdebSMatt Macy 		mca->mca_rotor = NULL;
4297877fdebSMatt Macy 		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
4307877fdebSMatt Macy 	}
431eda14cbcSMatt Macy 
432eda14cbcSMatt Macy 	return (mc);
433eda14cbcSMatt Macy }
434eda14cbcSMatt Macy 
435eda14cbcSMatt Macy void
436eda14cbcSMatt Macy metaslab_class_destroy(metaslab_class_t *mc)
437eda14cbcSMatt Macy {
4387877fdebSMatt Macy 	spa_t *spa = mc->mc_spa;
4397877fdebSMatt Macy 
440eda14cbcSMatt Macy 	ASSERT(mc->mc_alloc == 0);
441eda14cbcSMatt Macy 	ASSERT(mc->mc_deferred == 0);
442eda14cbcSMatt Macy 	ASSERT(mc->mc_space == 0);
443eda14cbcSMatt Macy 	ASSERT(mc->mc_dspace == 0);
444eda14cbcSMatt Macy 
4457877fdebSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
4467877fdebSMatt Macy 		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
4477877fdebSMatt Macy 		ASSERT(mca->mca_rotor == NULL);
4487877fdebSMatt Macy 		zfs_refcount_destroy(&mca->mca_alloc_slots);
4497877fdebSMatt Macy 	}
450eda14cbcSMatt Macy 	mutex_destroy(&mc->mc_lock);
4513ff01b23SMartin Matuska 	multilist_destroy(&mc->mc_metaslab_txg_list);
4527877fdebSMatt Macy 	kmem_free(mc, offsetof(metaslab_class_t,
4537877fdebSMatt Macy 	    mc_allocator[spa->spa_alloc_count]));
454eda14cbcSMatt Macy }
455eda14cbcSMatt Macy 
456eda14cbcSMatt Macy int
457eda14cbcSMatt Macy metaslab_class_validate(metaslab_class_t *mc)
458eda14cbcSMatt Macy {
459eda14cbcSMatt Macy 	metaslab_group_t *mg;
460eda14cbcSMatt Macy 	vdev_t *vd;
461eda14cbcSMatt Macy 
462eda14cbcSMatt Macy 	/*
463eda14cbcSMatt Macy 	 * Must hold one of the spa_config locks.
464eda14cbcSMatt Macy 	 */
465eda14cbcSMatt Macy 	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
466eda14cbcSMatt Macy 	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
467eda14cbcSMatt Macy 
4687877fdebSMatt Macy 	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
469eda14cbcSMatt Macy 		return (0);
470eda14cbcSMatt Macy 
471eda14cbcSMatt Macy 	do {
472eda14cbcSMatt Macy 		vd = mg->mg_vd;
473eda14cbcSMatt Macy 		ASSERT(vd->vdev_mg != NULL);
474eda14cbcSMatt Macy 		ASSERT3P(vd->vdev_top, ==, vd);
475eda14cbcSMatt Macy 		ASSERT3P(mg->mg_class, ==, mc);
476eda14cbcSMatt Macy 		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
4777877fdebSMatt Macy 	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);
478eda14cbcSMatt Macy 
479eda14cbcSMatt Macy 	return (0);
480eda14cbcSMatt Macy }
481eda14cbcSMatt Macy 
482eda14cbcSMatt Macy static void
483eda14cbcSMatt Macy metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
484eda14cbcSMatt Macy     int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
485eda14cbcSMatt Macy {
486eda14cbcSMatt Macy 	atomic_add_64(&mc->mc_alloc, alloc_delta);
487eda14cbcSMatt Macy 	atomic_add_64(&mc->mc_deferred, defer_delta);
488eda14cbcSMatt Macy 	atomic_add_64(&mc->mc_space, space_delta);
489eda14cbcSMatt Macy 	atomic_add_64(&mc->mc_dspace, dspace_delta);
490eda14cbcSMatt Macy }
491eda14cbcSMatt Macy 
492eda14cbcSMatt Macy uint64_t
493eda14cbcSMatt Macy metaslab_class_get_alloc(metaslab_class_t *mc)
494eda14cbcSMatt Macy {
495eda14cbcSMatt Macy 	return (mc->mc_alloc);
496eda14cbcSMatt Macy }
497eda14cbcSMatt Macy 
498eda14cbcSMatt Macy uint64_t
499eda14cbcSMatt Macy metaslab_class_get_deferred(metaslab_class_t *mc)
500eda14cbcSMatt Macy {
501eda14cbcSMatt Macy 	return (mc->mc_deferred);
502eda14cbcSMatt Macy }
503eda14cbcSMatt Macy 
504eda14cbcSMatt Macy uint64_t
505eda14cbcSMatt Macy metaslab_class_get_space(metaslab_class_t *mc)
506eda14cbcSMatt Macy {
507eda14cbcSMatt Macy 	return (mc->mc_space);
508eda14cbcSMatt Macy }
509eda14cbcSMatt Macy 
510eda14cbcSMatt Macy uint64_t
511eda14cbcSMatt Macy metaslab_class_get_dspace(metaslab_class_t *mc)
512eda14cbcSMatt Macy {
513eda14cbcSMatt Macy 	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
514eda14cbcSMatt Macy }
515eda14cbcSMatt Macy 
516eda14cbcSMatt Macy void
517eda14cbcSMatt Macy metaslab_class_histogram_verify(metaslab_class_t *mc)
518eda14cbcSMatt Macy {
519eda14cbcSMatt Macy 	spa_t *spa = mc->mc_spa;
520eda14cbcSMatt Macy 	vdev_t *rvd = spa->spa_root_vdev;
521eda14cbcSMatt Macy 	uint64_t *mc_hist;
522eda14cbcSMatt Macy 	int i;
523eda14cbcSMatt Macy 
524eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
525eda14cbcSMatt Macy 		return;
526eda14cbcSMatt Macy 
527eda14cbcSMatt Macy 	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
528eda14cbcSMatt Macy 	    KM_SLEEP);
529eda14cbcSMatt Macy 
530184c1b94SMartin Matuska 	mutex_enter(&mc->mc_lock);
531eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
532eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
533184c1b94SMartin Matuska 		metaslab_group_t *mg = vdev_get_mg(tvd, mc);
534eda14cbcSMatt Macy 
535eda14cbcSMatt Macy 		/*
536eda14cbcSMatt Macy 		 * Skip any holes, uninitialized top-levels, or
537eda14cbcSMatt Macy 		 * vdevs that are not in this metalab class.
538eda14cbcSMatt Macy 		 */
539eda14cbcSMatt Macy 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
540eda14cbcSMatt Macy 		    mg->mg_class != mc) {
541eda14cbcSMatt Macy 			continue;
542eda14cbcSMatt Macy 		}
543eda14cbcSMatt Macy 
544184c1b94SMartin Matuska 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
545184c1b94SMartin Matuska 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
546184c1b94SMartin Matuska 
547eda14cbcSMatt Macy 		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
548eda14cbcSMatt Macy 			mc_hist[i] += mg->mg_histogram[i];
549eda14cbcSMatt Macy 	}
550eda14cbcSMatt Macy 
551184c1b94SMartin Matuska 	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
552eda14cbcSMatt Macy 		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
553184c1b94SMartin Matuska 	}
554eda14cbcSMatt Macy 
555184c1b94SMartin Matuska 	mutex_exit(&mc->mc_lock);
556eda14cbcSMatt Macy 	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
557eda14cbcSMatt Macy }
558eda14cbcSMatt Macy 
559eda14cbcSMatt Macy /*
560eda14cbcSMatt Macy  * Calculate the metaslab class's fragmentation metric. The metric
561eda14cbcSMatt Macy  * is weighted based on the space contribution of each metaslab group.
562eda14cbcSMatt Macy  * The return value will be a number between 0 and 100 (inclusive), or
563eda14cbcSMatt Macy  * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
564eda14cbcSMatt Macy  * zfs_frag_table for more information about the metric.
565eda14cbcSMatt Macy  */
566eda14cbcSMatt Macy uint64_t
567eda14cbcSMatt Macy metaslab_class_fragmentation(metaslab_class_t *mc)
568eda14cbcSMatt Macy {
569eda14cbcSMatt Macy 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
570eda14cbcSMatt Macy 	uint64_t fragmentation = 0;
571eda14cbcSMatt Macy 
572eda14cbcSMatt Macy 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
573eda14cbcSMatt Macy 
574eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
575eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
576eda14cbcSMatt Macy 		metaslab_group_t *mg = tvd->vdev_mg;
577eda14cbcSMatt Macy 
578eda14cbcSMatt Macy 		/*
579eda14cbcSMatt Macy 		 * Skip any holes, uninitialized top-levels,
580eda14cbcSMatt Macy 		 * or vdevs that are not in this metalab class.
581eda14cbcSMatt Macy 		 */
582eda14cbcSMatt Macy 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
583eda14cbcSMatt Macy 		    mg->mg_class != mc) {
584eda14cbcSMatt Macy 			continue;
585eda14cbcSMatt Macy 		}
586eda14cbcSMatt Macy 
587eda14cbcSMatt Macy 		/*
588eda14cbcSMatt Macy 		 * If a metaslab group does not contain a fragmentation
589eda14cbcSMatt Macy 		 * metric then just bail out.
590eda14cbcSMatt Macy 		 */
591eda14cbcSMatt Macy 		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
592eda14cbcSMatt Macy 			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
593eda14cbcSMatt Macy 			return (ZFS_FRAG_INVALID);
594eda14cbcSMatt Macy 		}
595eda14cbcSMatt Macy 
596eda14cbcSMatt Macy 		/*
597eda14cbcSMatt Macy 		 * Determine how much this metaslab_group is contributing
598eda14cbcSMatt Macy 		 * to the overall pool fragmentation metric.
599eda14cbcSMatt Macy 		 */
600eda14cbcSMatt Macy 		fragmentation += mg->mg_fragmentation *
601eda14cbcSMatt Macy 		    metaslab_group_get_space(mg);
602eda14cbcSMatt Macy 	}
603eda14cbcSMatt Macy 	fragmentation /= metaslab_class_get_space(mc);
604eda14cbcSMatt Macy 
605eda14cbcSMatt Macy 	ASSERT3U(fragmentation, <=, 100);
606eda14cbcSMatt Macy 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
607eda14cbcSMatt Macy 	return (fragmentation);
608eda14cbcSMatt Macy }
609eda14cbcSMatt Macy 
610eda14cbcSMatt Macy /*
611eda14cbcSMatt Macy  * Calculate the amount of expandable space that is available in
612eda14cbcSMatt Macy  * this metaslab class. If a device is expanded then its expandable
613eda14cbcSMatt Macy  * space will be the amount of allocatable space that is currently not
614eda14cbcSMatt Macy  * part of this metaslab class.
615eda14cbcSMatt Macy  */
616eda14cbcSMatt Macy uint64_t
617eda14cbcSMatt Macy metaslab_class_expandable_space(metaslab_class_t *mc)
618eda14cbcSMatt Macy {
619eda14cbcSMatt Macy 	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
620eda14cbcSMatt Macy 	uint64_t space = 0;
621eda14cbcSMatt Macy 
622eda14cbcSMatt Macy 	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
623eda14cbcSMatt Macy 	for (int c = 0; c < rvd->vdev_children; c++) {
624eda14cbcSMatt Macy 		vdev_t *tvd = rvd->vdev_child[c];
625eda14cbcSMatt Macy 		metaslab_group_t *mg = tvd->vdev_mg;
626eda14cbcSMatt Macy 
627eda14cbcSMatt Macy 		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
628eda14cbcSMatt Macy 		    mg->mg_class != mc) {
629eda14cbcSMatt Macy 			continue;
630eda14cbcSMatt Macy 		}
631eda14cbcSMatt Macy 
632eda14cbcSMatt Macy 		/*
633eda14cbcSMatt Macy 		 * Calculate if we have enough space to add additional
634eda14cbcSMatt Macy 		 * metaslabs. We report the expandable space in terms
635eda14cbcSMatt Macy 		 * of the metaslab size since that's the unit of expansion.
636eda14cbcSMatt Macy 		 */
637eda14cbcSMatt Macy 		space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
638eda14cbcSMatt Macy 		    1ULL << tvd->vdev_ms_shift);
639eda14cbcSMatt Macy 	}
640eda14cbcSMatt Macy 	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
641eda14cbcSMatt Macy 	return (space);
642eda14cbcSMatt Macy }
643eda14cbcSMatt Macy 
644eda14cbcSMatt Macy void
645eda14cbcSMatt Macy metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
646eda14cbcSMatt Macy {
6473ff01b23SMartin Matuska 	multilist_t *ml = &mc->mc_metaslab_txg_list;
648eda14cbcSMatt Macy 	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
649eda14cbcSMatt Macy 		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
650eda14cbcSMatt Macy 		metaslab_t *msp = multilist_sublist_head(mls);
651eda14cbcSMatt Macy 		multilist_sublist_unlock(mls);
652eda14cbcSMatt Macy 		while (msp != NULL) {
653eda14cbcSMatt Macy 			mutex_enter(&msp->ms_lock);
654eda14cbcSMatt Macy 
655eda14cbcSMatt Macy 			/*
656eda14cbcSMatt Macy 			 * If the metaslab has been removed from the list
657eda14cbcSMatt Macy 			 * (which could happen if we were at the memory limit
658eda14cbcSMatt Macy 			 * and it was evicted during this loop), then we can't
659eda14cbcSMatt Macy 			 * proceed and we should restart the sublist.
660eda14cbcSMatt Macy 			 */
661eda14cbcSMatt Macy 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
662eda14cbcSMatt Macy 				mutex_exit(&msp->ms_lock);
663eda14cbcSMatt Macy 				i--;
664eda14cbcSMatt Macy 				break;
665eda14cbcSMatt Macy 			}
666eda14cbcSMatt Macy 			mls = multilist_sublist_lock(ml, i);
667eda14cbcSMatt Macy 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
668eda14cbcSMatt Macy 			multilist_sublist_unlock(mls);
669eda14cbcSMatt Macy 			if (txg >
670eda14cbcSMatt Macy 			    msp->ms_selected_txg + metaslab_unload_delay &&
671eda14cbcSMatt Macy 			    gethrtime() > msp->ms_selected_time +
672eda14cbcSMatt Macy 			    (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) {
673eda14cbcSMatt Macy 				metaslab_evict(msp, txg);
674eda14cbcSMatt Macy 			} else {
675eda14cbcSMatt Macy 				/*
676eda14cbcSMatt Macy 				 * Once we've hit a metaslab selected too
677eda14cbcSMatt Macy 				 * recently to evict, we're done evicting for
678eda14cbcSMatt Macy 				 * now.
679eda14cbcSMatt Macy 				 */
680eda14cbcSMatt Macy 				mutex_exit(&msp->ms_lock);
681eda14cbcSMatt Macy 				break;
682eda14cbcSMatt Macy 			}
683eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
684eda14cbcSMatt Macy 			msp = next_msp;
685eda14cbcSMatt Macy 		}
686eda14cbcSMatt Macy 	}
687eda14cbcSMatt Macy }
688eda14cbcSMatt Macy 
689eda14cbcSMatt Macy static int
690eda14cbcSMatt Macy metaslab_compare(const void *x1, const void *x2)
691eda14cbcSMatt Macy {
692eda14cbcSMatt Macy 	const metaslab_t *m1 = (const metaslab_t *)x1;
693eda14cbcSMatt Macy 	const metaslab_t *m2 = (const metaslab_t *)x2;
694eda14cbcSMatt Macy 
695eda14cbcSMatt Macy 	int sort1 = 0;
696eda14cbcSMatt Macy 	int sort2 = 0;
697eda14cbcSMatt Macy 	if (m1->ms_allocator != -1 && m1->ms_primary)
698eda14cbcSMatt Macy 		sort1 = 1;
699eda14cbcSMatt Macy 	else if (m1->ms_allocator != -1 && !m1->ms_primary)
700eda14cbcSMatt Macy 		sort1 = 2;
701eda14cbcSMatt Macy 	if (m2->ms_allocator != -1 && m2->ms_primary)
702eda14cbcSMatt Macy 		sort2 = 1;
703eda14cbcSMatt Macy 	else if (m2->ms_allocator != -1 && !m2->ms_primary)
704eda14cbcSMatt Macy 		sort2 = 2;
705eda14cbcSMatt Macy 
706eda14cbcSMatt Macy 	/*
707eda14cbcSMatt Macy 	 * Sort inactive metaslabs first, then primaries, then secondaries. When
708eda14cbcSMatt Macy 	 * selecting a metaslab to allocate from, an allocator first tries its
709eda14cbcSMatt Macy 	 * primary, then secondary active metaslab. If it doesn't have active
710eda14cbcSMatt Macy 	 * metaslabs, or can't allocate from them, it searches for an inactive
711eda14cbcSMatt Macy 	 * metaslab to activate. If it can't find a suitable one, it will steal
712eda14cbcSMatt Macy 	 * a primary or secondary metaslab from another allocator.
713eda14cbcSMatt Macy 	 */
714eda14cbcSMatt Macy 	if (sort1 < sort2)
715eda14cbcSMatt Macy 		return (-1);
716eda14cbcSMatt Macy 	if (sort1 > sort2)
717eda14cbcSMatt Macy 		return (1);
718eda14cbcSMatt Macy 
719eda14cbcSMatt Macy 	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
720eda14cbcSMatt Macy 	if (likely(cmp))
721eda14cbcSMatt Macy 		return (cmp);
722eda14cbcSMatt Macy 
723eda14cbcSMatt Macy 	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
724eda14cbcSMatt Macy 
725eda14cbcSMatt Macy 	return (TREE_CMP(m1->ms_start, m2->ms_start));
726eda14cbcSMatt Macy }
727eda14cbcSMatt Macy 
728eda14cbcSMatt Macy /*
729eda14cbcSMatt Macy  * ==========================================================================
730eda14cbcSMatt Macy  * Metaslab groups
731eda14cbcSMatt Macy  * ==========================================================================
732eda14cbcSMatt Macy  */
733eda14cbcSMatt Macy /*
734eda14cbcSMatt Macy  * Update the allocatable flag and the metaslab group's capacity.
735eda14cbcSMatt Macy  * The allocatable flag is set to true if the capacity is below
736eda14cbcSMatt Macy  * the zfs_mg_noalloc_threshold or has a fragmentation value that is
737eda14cbcSMatt Macy  * greater than zfs_mg_fragmentation_threshold. If a metaslab group
738eda14cbcSMatt Macy  * transitions from allocatable to non-allocatable or vice versa then the
739eda14cbcSMatt Macy  * metaslab group's class is updated to reflect the transition.
740eda14cbcSMatt Macy  */
741eda14cbcSMatt Macy static void
742eda14cbcSMatt Macy metaslab_group_alloc_update(metaslab_group_t *mg)
743eda14cbcSMatt Macy {
744eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
745eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
746eda14cbcSMatt Macy 	vdev_stat_t *vs = &vd->vdev_stat;
747eda14cbcSMatt Macy 	boolean_t was_allocatable;
748eda14cbcSMatt Macy 	boolean_t was_initialized;
749eda14cbcSMatt Macy 
750eda14cbcSMatt Macy 	ASSERT(vd == vd->vdev_top);
751eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
752eda14cbcSMatt Macy 	    SCL_ALLOC);
753eda14cbcSMatt Macy 
754eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
755eda14cbcSMatt Macy 	was_allocatable = mg->mg_allocatable;
756eda14cbcSMatt Macy 	was_initialized = mg->mg_initialized;
757eda14cbcSMatt Macy 
758eda14cbcSMatt Macy 	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
759eda14cbcSMatt Macy 	    (vs->vs_space + 1);
760eda14cbcSMatt Macy 
761eda14cbcSMatt Macy 	mutex_enter(&mc->mc_lock);
762eda14cbcSMatt Macy 
763eda14cbcSMatt Macy 	/*
764eda14cbcSMatt Macy 	 * If the metaslab group was just added then it won't
765eda14cbcSMatt Macy 	 * have any space until we finish syncing out this txg.
766eda14cbcSMatt Macy 	 * At that point we will consider it initialized and available
767eda14cbcSMatt Macy 	 * for allocations.  We also don't consider non-activated
768eda14cbcSMatt Macy 	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
769eda14cbcSMatt Macy 	 * to be initialized, because they can't be used for allocation.
770eda14cbcSMatt Macy 	 */
771eda14cbcSMatt Macy 	mg->mg_initialized = metaslab_group_initialized(mg);
772eda14cbcSMatt Macy 	if (!was_initialized && mg->mg_initialized) {
773eda14cbcSMatt Macy 		mc->mc_groups++;
774eda14cbcSMatt Macy 	} else if (was_initialized && !mg->mg_initialized) {
775eda14cbcSMatt Macy 		ASSERT3U(mc->mc_groups, >, 0);
776eda14cbcSMatt Macy 		mc->mc_groups--;
777eda14cbcSMatt Macy 	}
778eda14cbcSMatt Macy 	if (mg->mg_initialized)
779eda14cbcSMatt Macy 		mg->mg_no_free_space = B_FALSE;
780eda14cbcSMatt Macy 
781eda14cbcSMatt Macy 	/*
782eda14cbcSMatt Macy 	 * A metaslab group is considered allocatable if it has plenty
783eda14cbcSMatt Macy 	 * of free space or is not heavily fragmented. We only take
784eda14cbcSMatt Macy 	 * fragmentation into account if the metaslab group has a valid
785eda14cbcSMatt Macy 	 * fragmentation metric (i.e. a value between 0 and 100).
786eda14cbcSMatt Macy 	 */
787eda14cbcSMatt Macy 	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
788eda14cbcSMatt Macy 	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
789eda14cbcSMatt Macy 	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
790eda14cbcSMatt Macy 	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
791eda14cbcSMatt Macy 
792eda14cbcSMatt Macy 	/*
793eda14cbcSMatt Macy 	 * The mc_alloc_groups maintains a count of the number of
794eda14cbcSMatt Macy 	 * groups in this metaslab class that are still above the
795eda14cbcSMatt Macy 	 * zfs_mg_noalloc_threshold. This is used by the allocating
796eda14cbcSMatt Macy 	 * threads to determine if they should avoid allocations to
797eda14cbcSMatt Macy 	 * a given group. The allocator will avoid allocations to a group
798eda14cbcSMatt Macy 	 * if that group has reached or is below the zfs_mg_noalloc_threshold
799eda14cbcSMatt Macy 	 * and there are still other groups that are above the threshold.
800eda14cbcSMatt Macy 	 * When a group transitions from allocatable to non-allocatable or
801eda14cbcSMatt Macy 	 * vice versa we update the metaslab class to reflect that change.
802eda14cbcSMatt Macy 	 * When the mc_alloc_groups value drops to 0 that means that all
803eda14cbcSMatt Macy 	 * groups have reached the zfs_mg_noalloc_threshold making all groups
804eda14cbcSMatt Macy 	 * eligible for allocations. This effectively means that all devices
805eda14cbcSMatt Macy 	 * are balanced again.
806eda14cbcSMatt Macy 	 */
807eda14cbcSMatt Macy 	if (was_allocatable && !mg->mg_allocatable)
808eda14cbcSMatt Macy 		mc->mc_alloc_groups--;
809eda14cbcSMatt Macy 	else if (!was_allocatable && mg->mg_allocatable)
810eda14cbcSMatt Macy 		mc->mc_alloc_groups++;
811eda14cbcSMatt Macy 	mutex_exit(&mc->mc_lock);
812eda14cbcSMatt Macy 
813eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
814eda14cbcSMatt Macy }
815eda14cbcSMatt Macy 
816eda14cbcSMatt Macy int
817eda14cbcSMatt Macy metaslab_sort_by_flushed(const void *va, const void *vb)
818eda14cbcSMatt Macy {
819eda14cbcSMatt Macy 	const metaslab_t *a = va;
820eda14cbcSMatt Macy 	const metaslab_t *b = vb;
821eda14cbcSMatt Macy 
822eda14cbcSMatt Macy 	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
823eda14cbcSMatt Macy 	if (likely(cmp))
824eda14cbcSMatt Macy 		return (cmp);
825eda14cbcSMatt Macy 
826eda14cbcSMatt Macy 	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
827eda14cbcSMatt Macy 	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
828eda14cbcSMatt Macy 	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
829eda14cbcSMatt Macy 	if (cmp)
830eda14cbcSMatt Macy 		return (cmp);
831eda14cbcSMatt Macy 
832eda14cbcSMatt Macy 	return (TREE_CMP(a->ms_id, b->ms_id));
833eda14cbcSMatt Macy }
834eda14cbcSMatt Macy 
835eda14cbcSMatt Macy metaslab_group_t *
836eda14cbcSMatt Macy metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
837eda14cbcSMatt Macy {
838eda14cbcSMatt Macy 	metaslab_group_t *mg;
839eda14cbcSMatt Macy 
8407877fdebSMatt Macy 	mg = kmem_zalloc(offsetof(metaslab_group_t,
8417877fdebSMatt Macy 	    mg_allocator[allocators]), KM_SLEEP);
842eda14cbcSMatt Macy 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
843eda14cbcSMatt Macy 	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
844eda14cbcSMatt Macy 	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
845eda14cbcSMatt Macy 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
846eda14cbcSMatt Macy 	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
847eda14cbcSMatt Macy 	mg->mg_vd = vd;
848eda14cbcSMatt Macy 	mg->mg_class = mc;
849eda14cbcSMatt Macy 	mg->mg_activation_count = 0;
850eda14cbcSMatt Macy 	mg->mg_initialized = B_FALSE;
851eda14cbcSMatt Macy 	mg->mg_no_free_space = B_TRUE;
852eda14cbcSMatt Macy 	mg->mg_allocators = allocators;
853eda14cbcSMatt Macy 
854eda14cbcSMatt Macy 	for (int i = 0; i < allocators; i++) {
855eda14cbcSMatt Macy 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
856eda14cbcSMatt Macy 		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
857eda14cbcSMatt Macy 	}
858eda14cbcSMatt Macy 
859eda14cbcSMatt Macy 	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
860eda14cbcSMatt Macy 	    maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
861eda14cbcSMatt Macy 
862eda14cbcSMatt Macy 	return (mg);
863eda14cbcSMatt Macy }
864eda14cbcSMatt Macy 
865eda14cbcSMatt Macy void
866eda14cbcSMatt Macy metaslab_group_destroy(metaslab_group_t *mg)
867eda14cbcSMatt Macy {
868eda14cbcSMatt Macy 	ASSERT(mg->mg_prev == NULL);
869eda14cbcSMatt Macy 	ASSERT(mg->mg_next == NULL);
870eda14cbcSMatt Macy 	/*
871eda14cbcSMatt Macy 	 * We may have gone below zero with the activation count
872eda14cbcSMatt Macy 	 * either because we never activated in the first place or
873eda14cbcSMatt Macy 	 * because we're done, and possibly removing the vdev.
874eda14cbcSMatt Macy 	 */
875eda14cbcSMatt Macy 	ASSERT(mg->mg_activation_count <= 0);
876eda14cbcSMatt Macy 
877eda14cbcSMatt Macy 	taskq_destroy(mg->mg_taskq);
878eda14cbcSMatt Macy 	avl_destroy(&mg->mg_metaslab_tree);
879eda14cbcSMatt Macy 	mutex_destroy(&mg->mg_lock);
880eda14cbcSMatt Macy 	mutex_destroy(&mg->mg_ms_disabled_lock);
881eda14cbcSMatt Macy 	cv_destroy(&mg->mg_ms_disabled_cv);
882eda14cbcSMatt Macy 
883eda14cbcSMatt Macy 	for (int i = 0; i < mg->mg_allocators; i++) {
884eda14cbcSMatt Macy 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
885eda14cbcSMatt Macy 		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
886eda14cbcSMatt Macy 	}
8877877fdebSMatt Macy 	kmem_free(mg, offsetof(metaslab_group_t,
8887877fdebSMatt Macy 	    mg_allocator[mg->mg_allocators]));
889eda14cbcSMatt Macy }
890eda14cbcSMatt Macy 
891eda14cbcSMatt Macy void
892eda14cbcSMatt Macy metaslab_group_activate(metaslab_group_t *mg)
893eda14cbcSMatt Macy {
894eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
8957877fdebSMatt Macy 	spa_t *spa = mc->mc_spa;
896eda14cbcSMatt Macy 	metaslab_group_t *mgprev, *mgnext;
897eda14cbcSMatt Macy 
8987877fdebSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);
899eda14cbcSMatt Macy 
900eda14cbcSMatt Macy 	ASSERT(mg->mg_prev == NULL);
901eda14cbcSMatt Macy 	ASSERT(mg->mg_next == NULL);
902eda14cbcSMatt Macy 	ASSERT(mg->mg_activation_count <= 0);
903eda14cbcSMatt Macy 
904eda14cbcSMatt Macy 	if (++mg->mg_activation_count <= 0)
905eda14cbcSMatt Macy 		return;
906eda14cbcSMatt Macy 
907716fd348SMartin Matuska 	mg->mg_aliquot = metaslab_aliquot * MAX(1,
908716fd348SMartin Matuska 	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
909eda14cbcSMatt Macy 	metaslab_group_alloc_update(mg);
910eda14cbcSMatt Macy 
9117877fdebSMatt Macy 	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
912eda14cbcSMatt Macy 		mg->mg_prev = mg;
913eda14cbcSMatt Macy 		mg->mg_next = mg;
914eda14cbcSMatt Macy 	} else {
915eda14cbcSMatt Macy 		mgnext = mgprev->mg_next;
916eda14cbcSMatt Macy 		mg->mg_prev = mgprev;
917eda14cbcSMatt Macy 		mg->mg_next = mgnext;
918eda14cbcSMatt Macy 		mgprev->mg_next = mg;
919eda14cbcSMatt Macy 		mgnext->mg_prev = mg;
920eda14cbcSMatt Macy 	}
9217877fdebSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9227877fdebSMatt Macy 		mc->mc_allocator[i].mca_rotor = mg;
9237877fdebSMatt Macy 		mg = mg->mg_next;
9247877fdebSMatt Macy 	}
925eda14cbcSMatt Macy }
926eda14cbcSMatt Macy 
927eda14cbcSMatt Macy /*
928eda14cbcSMatt Macy  * Passivate a metaslab group and remove it from the allocation rotor.
929eda14cbcSMatt Macy  * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
930eda14cbcSMatt Macy  * a metaslab group. This function will momentarily drop spa_config_locks
931eda14cbcSMatt Macy  * that are lower than the SCL_ALLOC lock (see comment below).
932eda14cbcSMatt Macy  */
933eda14cbcSMatt Macy void
934eda14cbcSMatt Macy metaslab_group_passivate(metaslab_group_t *mg)
935eda14cbcSMatt Macy {
936eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
937eda14cbcSMatt Macy 	spa_t *spa = mc->mc_spa;
938eda14cbcSMatt Macy 	metaslab_group_t *mgprev, *mgnext;
939eda14cbcSMatt Macy 	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
940eda14cbcSMatt Macy 
941eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
942eda14cbcSMatt Macy 	    (SCL_ALLOC | SCL_ZIO));
943eda14cbcSMatt Macy 
944eda14cbcSMatt Macy 	if (--mg->mg_activation_count != 0) {
9457877fdebSMatt Macy 		for (int i = 0; i < spa->spa_alloc_count; i++)
9467877fdebSMatt Macy 			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
947eda14cbcSMatt Macy 		ASSERT(mg->mg_prev == NULL);
948eda14cbcSMatt Macy 		ASSERT(mg->mg_next == NULL);
949eda14cbcSMatt Macy 		ASSERT(mg->mg_activation_count < 0);
950eda14cbcSMatt Macy 		return;
951eda14cbcSMatt Macy 	}
952eda14cbcSMatt Macy 
953eda14cbcSMatt Macy 	/*
954eda14cbcSMatt Macy 	 * The spa_config_lock is an array of rwlocks, ordered as
955eda14cbcSMatt Macy 	 * follows (from highest to lowest):
956eda14cbcSMatt Macy 	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
957eda14cbcSMatt Macy 	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
958eda14cbcSMatt Macy 	 * (For more information about the spa_config_lock see spa_misc.c)
959eda14cbcSMatt Macy 	 * The higher the lock, the broader its coverage. When we passivate
960eda14cbcSMatt Macy 	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
961eda14cbcSMatt Macy 	 * config locks. However, the metaslab group's taskq might be trying
962eda14cbcSMatt Macy 	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
963eda14cbcSMatt Macy 	 * lower locks to allow the I/O to complete. At a minimum,
964eda14cbcSMatt Macy 	 * we continue to hold the SCL_ALLOC lock, which prevents any future
965eda14cbcSMatt Macy 	 * allocations from taking place and any changes to the vdev tree.
966eda14cbcSMatt Macy 	 */
967eda14cbcSMatt Macy 	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
968eda14cbcSMatt Macy 	taskq_wait_outstanding(mg->mg_taskq, 0);
969eda14cbcSMatt Macy 	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
970eda14cbcSMatt Macy 	metaslab_group_alloc_update(mg);
971eda14cbcSMatt Macy 	for (int i = 0; i < mg->mg_allocators; i++) {
972eda14cbcSMatt Macy 		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
973eda14cbcSMatt Macy 		metaslab_t *msp = mga->mga_primary;
974eda14cbcSMatt Macy 		if (msp != NULL) {
975eda14cbcSMatt Macy 			mutex_enter(&msp->ms_lock);
976eda14cbcSMatt Macy 			metaslab_passivate(msp,
977eda14cbcSMatt Macy 			    metaslab_weight_from_range_tree(msp));
978eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
979eda14cbcSMatt Macy 		}
980eda14cbcSMatt Macy 		msp = mga->mga_secondary;
981eda14cbcSMatt Macy 		if (msp != NULL) {
982eda14cbcSMatt Macy 			mutex_enter(&msp->ms_lock);
983eda14cbcSMatt Macy 			metaslab_passivate(msp,
984eda14cbcSMatt Macy 			    metaslab_weight_from_range_tree(msp));
985eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
986eda14cbcSMatt Macy 		}
987eda14cbcSMatt Macy 	}
988eda14cbcSMatt Macy 
989eda14cbcSMatt Macy 	mgprev = mg->mg_prev;
990eda14cbcSMatt Macy 	mgnext = mg->mg_next;
991eda14cbcSMatt Macy 
992eda14cbcSMatt Macy 	if (mg == mgnext) {
9937877fdebSMatt Macy 		mgnext = NULL;
994eda14cbcSMatt Macy 	} else {
995eda14cbcSMatt Macy 		mgprev->mg_next = mgnext;
996eda14cbcSMatt Macy 		mgnext->mg_prev = mgprev;
997eda14cbcSMatt Macy 	}
9987877fdebSMatt Macy 	for (int i = 0; i < spa->spa_alloc_count; i++) {
9997877fdebSMatt Macy 		if (mc->mc_allocator[i].mca_rotor == mg)
10007877fdebSMatt Macy 			mc->mc_allocator[i].mca_rotor = mgnext;
10017877fdebSMatt Macy 	}
1002eda14cbcSMatt Macy 
1003eda14cbcSMatt Macy 	mg->mg_prev = NULL;
1004eda14cbcSMatt Macy 	mg->mg_next = NULL;
1005eda14cbcSMatt Macy }
1006eda14cbcSMatt Macy 
1007eda14cbcSMatt Macy boolean_t
1008eda14cbcSMatt Macy metaslab_group_initialized(metaslab_group_t *mg)
1009eda14cbcSMatt Macy {
1010eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
1011eda14cbcSMatt Macy 	vdev_stat_t *vs = &vd->vdev_stat;
1012eda14cbcSMatt Macy 
1013eda14cbcSMatt Macy 	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
1014eda14cbcSMatt Macy }
1015eda14cbcSMatt Macy 
1016eda14cbcSMatt Macy uint64_t
1017eda14cbcSMatt Macy metaslab_group_get_space(metaslab_group_t *mg)
1018eda14cbcSMatt Macy {
1019184c1b94SMartin Matuska 	/*
1020184c1b94SMartin Matuska 	 * Note that the number of nodes in mg_metaslab_tree may be one less
1021184c1b94SMartin Matuska 	 * than vdev_ms_count, due to the embedded log metaslab.
1022184c1b94SMartin Matuska 	 */
1023184c1b94SMartin Matuska 	mutex_enter(&mg->mg_lock);
1024184c1b94SMartin Matuska 	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
1025184c1b94SMartin Matuska 	mutex_exit(&mg->mg_lock);
1026184c1b94SMartin Matuska 	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
1027eda14cbcSMatt Macy }
1028eda14cbcSMatt Macy 
1029eda14cbcSMatt Macy void
1030eda14cbcSMatt Macy metaslab_group_histogram_verify(metaslab_group_t *mg)
1031eda14cbcSMatt Macy {
1032eda14cbcSMatt Macy 	uint64_t *mg_hist;
1033184c1b94SMartin Matuska 	avl_tree_t *t = &mg->mg_metaslab_tree;
1034184c1b94SMartin Matuska 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1035eda14cbcSMatt Macy 
1036eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
1037eda14cbcSMatt Macy 		return;
1038eda14cbcSMatt Macy 
1039eda14cbcSMatt Macy 	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
1040eda14cbcSMatt Macy 	    KM_SLEEP);
1041eda14cbcSMatt Macy 
1042eda14cbcSMatt Macy 	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
1043eda14cbcSMatt Macy 	    SPACE_MAP_HISTOGRAM_SIZE + ashift);
1044eda14cbcSMatt Macy 
1045184c1b94SMartin Matuska 	mutex_enter(&mg->mg_lock);
1046184c1b94SMartin Matuska 	for (metaslab_t *msp = avl_first(t);
1047184c1b94SMartin Matuska 	    msp != NULL; msp = AVL_NEXT(t, msp)) {
1048184c1b94SMartin Matuska 		VERIFY3P(msp->ms_group, ==, mg);
1049184c1b94SMartin Matuska 		/* skip if not active */
1050184c1b94SMartin Matuska 		if (msp->ms_sm == NULL)
1051eda14cbcSMatt Macy 			continue;
1052eda14cbcSMatt Macy 
1053184c1b94SMartin Matuska 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1054eda14cbcSMatt Macy 			mg_hist[i + ashift] +=
1055eda14cbcSMatt Macy 			    msp->ms_sm->sm_phys->smp_histogram[i];
1056eda14cbcSMatt Macy 		}
1057184c1b94SMartin Matuska 	}
1058eda14cbcSMatt Macy 
1059184c1b94SMartin Matuska 	for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
1060eda14cbcSMatt Macy 		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
1061eda14cbcSMatt Macy 
1062184c1b94SMartin Matuska 	mutex_exit(&mg->mg_lock);
1063184c1b94SMartin Matuska 
1064eda14cbcSMatt Macy 	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
1065eda14cbcSMatt Macy }
1066eda14cbcSMatt Macy 
1067eda14cbcSMatt Macy static void
1068eda14cbcSMatt Macy metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
1069eda14cbcSMatt Macy {
1070eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
1071eda14cbcSMatt Macy 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1072eda14cbcSMatt Macy 
1073eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1074eda14cbcSMatt Macy 	if (msp->ms_sm == NULL)
1075eda14cbcSMatt Macy 		return;
1076eda14cbcSMatt Macy 
1077eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
1078184c1b94SMartin Matuska 	mutex_enter(&mc->mc_lock);
1079eda14cbcSMatt Macy 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1080184c1b94SMartin Matuska 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1081184c1b94SMartin Matuska 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1082eda14cbcSMatt Macy 		mg->mg_histogram[i + ashift] +=
1083eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i];
1084eda14cbcSMatt Macy 		mc->mc_histogram[i + ashift] +=
1085eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i];
1086eda14cbcSMatt Macy 	}
1087184c1b94SMartin Matuska 	mutex_exit(&mc->mc_lock);
1088eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
1089eda14cbcSMatt Macy }
1090eda14cbcSMatt Macy 
1091eda14cbcSMatt Macy void
1092eda14cbcSMatt Macy metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
1093eda14cbcSMatt Macy {
1094eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
1095eda14cbcSMatt Macy 	uint64_t ashift = mg->mg_vd->vdev_ashift;
1096eda14cbcSMatt Macy 
1097eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1098eda14cbcSMatt Macy 	if (msp->ms_sm == NULL)
1099eda14cbcSMatt Macy 		return;
1100eda14cbcSMatt Macy 
1101eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
1102184c1b94SMartin Matuska 	mutex_enter(&mc->mc_lock);
1103eda14cbcSMatt Macy 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1104eda14cbcSMatt Macy 		ASSERT3U(mg->mg_histogram[i + ashift], >=,
1105eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1106eda14cbcSMatt Macy 		ASSERT3U(mc->mc_histogram[i + ashift], >=,
1107eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i]);
1108184c1b94SMartin Matuska 		IMPLY(mg == mg->mg_vd->vdev_log_mg,
1109184c1b94SMartin Matuska 		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
1110eda14cbcSMatt Macy 
1111eda14cbcSMatt Macy 		mg->mg_histogram[i + ashift] -=
1112eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i];
1113eda14cbcSMatt Macy 		mc->mc_histogram[i + ashift] -=
1114eda14cbcSMatt Macy 		    msp->ms_sm->sm_phys->smp_histogram[i];
1115eda14cbcSMatt Macy 	}
1116184c1b94SMartin Matuska 	mutex_exit(&mc->mc_lock);
1117eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
1118eda14cbcSMatt Macy }
1119eda14cbcSMatt Macy 
1120eda14cbcSMatt Macy static void
1121eda14cbcSMatt Macy metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
1122eda14cbcSMatt Macy {
1123eda14cbcSMatt Macy 	ASSERT(msp->ms_group == NULL);
1124eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
1125eda14cbcSMatt Macy 	msp->ms_group = mg;
1126eda14cbcSMatt Macy 	msp->ms_weight = 0;
1127eda14cbcSMatt Macy 	avl_add(&mg->mg_metaslab_tree, msp);
1128eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
1129eda14cbcSMatt Macy 
1130eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
1131eda14cbcSMatt Macy 	metaslab_group_histogram_add(mg, msp);
1132eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
1133eda14cbcSMatt Macy }
1134eda14cbcSMatt Macy 
1135eda14cbcSMatt Macy static void
1136eda14cbcSMatt Macy metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
1137eda14cbcSMatt Macy {
1138eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
1139eda14cbcSMatt Macy 	metaslab_group_histogram_remove(mg, msp);
1140eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
1141eda14cbcSMatt Macy 
1142eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
1143eda14cbcSMatt Macy 	ASSERT(msp->ms_group == mg);
1144eda14cbcSMatt Macy 	avl_remove(&mg->mg_metaslab_tree, msp);
1145eda14cbcSMatt Macy 
1146eda14cbcSMatt Macy 	metaslab_class_t *mc = msp->ms_group->mg_class;
1147eda14cbcSMatt Macy 	multilist_sublist_t *mls =
11483ff01b23SMartin Matuska 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
1149eda14cbcSMatt Macy 	if (multilist_link_active(&msp->ms_class_txg_node))
1150eda14cbcSMatt Macy 		multilist_sublist_remove(mls, msp);
1151eda14cbcSMatt Macy 	multilist_sublist_unlock(mls);
1152eda14cbcSMatt Macy 
1153eda14cbcSMatt Macy 	msp->ms_group = NULL;
1154eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
1155eda14cbcSMatt Macy }
1156eda14cbcSMatt Macy 
1157eda14cbcSMatt Macy static void
1158eda14cbcSMatt Macy metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1159eda14cbcSMatt Macy {
1160eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1161eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&mg->mg_lock));
1162eda14cbcSMatt Macy 	ASSERT(msp->ms_group == mg);
1163eda14cbcSMatt Macy 
1164eda14cbcSMatt Macy 	avl_remove(&mg->mg_metaslab_tree, msp);
1165eda14cbcSMatt Macy 	msp->ms_weight = weight;
1166eda14cbcSMatt Macy 	avl_add(&mg->mg_metaslab_tree, msp);
1167eda14cbcSMatt Macy 
1168eda14cbcSMatt Macy }
1169eda14cbcSMatt Macy 
1170eda14cbcSMatt Macy static void
1171eda14cbcSMatt Macy metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
1172eda14cbcSMatt Macy {
1173eda14cbcSMatt Macy 	/*
1174eda14cbcSMatt Macy 	 * Although in principle the weight can be any value, in
1175eda14cbcSMatt Macy 	 * practice we do not use values in the range [1, 511].
1176eda14cbcSMatt Macy 	 */
1177eda14cbcSMatt Macy 	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
1178eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1179eda14cbcSMatt Macy 
1180eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
1181eda14cbcSMatt Macy 	metaslab_group_sort_impl(mg, msp, weight);
1182eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
1183eda14cbcSMatt Macy }
1184eda14cbcSMatt Macy 
1185eda14cbcSMatt Macy /*
1186eda14cbcSMatt Macy  * Calculate the fragmentation for a given metaslab group. We can use
1187eda14cbcSMatt Macy  * a simple average here since all metaslabs within the group must have
1188eda14cbcSMatt Macy  * the same size. The return value will be a value between 0 and 100
1189eda14cbcSMatt Macy  * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this
1190eda14cbcSMatt Macy  * group have a fragmentation metric.
1191eda14cbcSMatt Macy  */
1192eda14cbcSMatt Macy uint64_t
1193eda14cbcSMatt Macy metaslab_group_fragmentation(metaslab_group_t *mg)
1194eda14cbcSMatt Macy {
1195eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
1196eda14cbcSMatt Macy 	uint64_t fragmentation = 0;
1197eda14cbcSMatt Macy 	uint64_t valid_ms = 0;
1198eda14cbcSMatt Macy 
1199eda14cbcSMatt Macy 	for (int m = 0; m < vd->vdev_ms_count; m++) {
1200eda14cbcSMatt Macy 		metaslab_t *msp = vd->vdev_ms[m];
1201eda14cbcSMatt Macy 
1202eda14cbcSMatt Macy 		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
1203eda14cbcSMatt Macy 			continue;
1204eda14cbcSMatt Macy 		if (msp->ms_group != mg)
1205eda14cbcSMatt Macy 			continue;
1206eda14cbcSMatt Macy 
1207eda14cbcSMatt Macy 		valid_ms++;
1208eda14cbcSMatt Macy 		fragmentation += msp->ms_fragmentation;
1209eda14cbcSMatt Macy 	}
1210eda14cbcSMatt Macy 
1211eda14cbcSMatt Macy 	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
1212eda14cbcSMatt Macy 		return (ZFS_FRAG_INVALID);
1213eda14cbcSMatt Macy 
1214eda14cbcSMatt Macy 	fragmentation /= valid_ms;
1215eda14cbcSMatt Macy 	ASSERT3U(fragmentation, <=, 100);
1216eda14cbcSMatt Macy 	return (fragmentation);
1217eda14cbcSMatt Macy }
1218eda14cbcSMatt Macy 
1219eda14cbcSMatt Macy /*
1220eda14cbcSMatt Macy  * Determine if a given metaslab group should skip allocations. A metaslab
1221eda14cbcSMatt Macy  * group should avoid allocations if its free capacity is less than the
1222eda14cbcSMatt Macy  * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
1223eda14cbcSMatt Macy  * zfs_mg_fragmentation_threshold and there is at least one metaslab group
1224eda14cbcSMatt Macy  * that can still handle allocations. If the allocation throttle is enabled
1225eda14cbcSMatt Macy  * then we skip allocations to devices that have reached their maximum
1226eda14cbcSMatt Macy  * allocation queue depth unless the selected metaslab group is the only
1227eda14cbcSMatt Macy  * eligible group remaining.
1228eda14cbcSMatt Macy  */
1229eda14cbcSMatt Macy static boolean_t
1230eda14cbcSMatt Macy metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
123115f0b8c3SMartin Matuska     int flags, uint64_t psize, int allocator, int d)
1232eda14cbcSMatt Macy {
1233eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
1234eda14cbcSMatt Macy 	metaslab_class_t *mc = mg->mg_class;
1235eda14cbcSMatt Macy 
1236eda14cbcSMatt Macy 	/*
1237eda14cbcSMatt Macy 	 * We can only consider skipping this metaslab group if it's
1238eda14cbcSMatt Macy 	 * in the normal metaslab class and there are other metaslab
1239eda14cbcSMatt Macy 	 * groups to select from. Otherwise, we always consider it eligible
1240eda14cbcSMatt Macy 	 * for allocations.
1241eda14cbcSMatt Macy 	 */
1242eda14cbcSMatt Macy 	if ((mc != spa_normal_class(spa) &&
1243eda14cbcSMatt Macy 	    mc != spa_special_class(spa) &&
1244eda14cbcSMatt Macy 	    mc != spa_dedup_class(spa)) ||
1245eda14cbcSMatt Macy 	    mc->mc_groups <= 1)
1246eda14cbcSMatt Macy 		return (B_TRUE);
1247eda14cbcSMatt Macy 
1248eda14cbcSMatt Macy 	/*
1249eda14cbcSMatt Macy 	 * If the metaslab group's mg_allocatable flag is set (see comments
1250eda14cbcSMatt Macy 	 * in metaslab_group_alloc_update() for more information) and
1251eda14cbcSMatt Macy 	 * the allocation throttle is disabled then allow allocations to this
1252eda14cbcSMatt Macy 	 * device. However, if the allocation throttle is enabled then
12537877fdebSMatt Macy 	 * check if we have reached our allocation limit (mga_alloc_queue_depth)
1254eda14cbcSMatt Macy 	 * to determine if we should allow allocations to this metaslab group.
1255eda14cbcSMatt Macy 	 * If all metaslab groups are no longer considered allocatable
1256eda14cbcSMatt Macy 	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
1257eda14cbcSMatt Macy 	 * gang block size then we allow allocations on this metaslab group
1258eda14cbcSMatt Macy 	 * regardless of the mg_allocatable or throttle settings.
1259eda14cbcSMatt Macy 	 */
1260eda14cbcSMatt Macy 	if (mg->mg_allocatable) {
1261eda14cbcSMatt Macy 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
1262eda14cbcSMatt Macy 		int64_t qdepth;
1263eda14cbcSMatt Macy 		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;
1264eda14cbcSMatt Macy 
1265eda14cbcSMatt Macy 		if (!mc->mc_alloc_throttle_enabled)
1266eda14cbcSMatt Macy 			return (B_TRUE);
1267eda14cbcSMatt Macy 
1268eda14cbcSMatt Macy 		/*
1269eda14cbcSMatt Macy 		 * If this metaslab group does not have any free space, then
1270eda14cbcSMatt Macy 		 * there is no point in looking further.
1271eda14cbcSMatt Macy 		 */
1272eda14cbcSMatt Macy 		if (mg->mg_no_free_space)
1273eda14cbcSMatt Macy 			return (B_FALSE);
1274eda14cbcSMatt Macy 
1275eda14cbcSMatt Macy 		/*
127615f0b8c3SMartin Matuska 		 * Some allocations (e.g., those coming from device removal
127715f0b8c3SMartin Matuska 		 * where the * allocations are not even counted in the
127815f0b8c3SMartin Matuska 		 * metaslab * allocation queues) are allowed to bypass
127915f0b8c3SMartin Matuska 		 * the throttle.
128015f0b8c3SMartin Matuska 		 */
128115f0b8c3SMartin Matuska 		if (flags & METASLAB_DONT_THROTTLE)
128215f0b8c3SMartin Matuska 			return (B_TRUE);
128315f0b8c3SMartin Matuska 
128415f0b8c3SMartin Matuska 		/*
1285eda14cbcSMatt Macy 		 * Relax allocation throttling for ditto blocks.  Due to
1286eda14cbcSMatt Macy 		 * random imbalances in allocation it tends to push copies
1287eda14cbcSMatt Macy 		 * to one vdev, that looks a bit better at the moment.
1288eda14cbcSMatt Macy 		 */
1289eda14cbcSMatt Macy 		qmax = qmax * (4 + d) / 4;
1290eda14cbcSMatt Macy 
1291eda14cbcSMatt Macy 		qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);
1292eda14cbcSMatt Macy 
1293eda14cbcSMatt Macy 		/*
1294eda14cbcSMatt Macy 		 * If this metaslab group is below its qmax or it's
1295*315ee00fSMartin Matuska 		 * the only allocatable metaslab group, then attempt
1296eda14cbcSMatt Macy 		 * to allocate from it.
1297eda14cbcSMatt Macy 		 */
1298eda14cbcSMatt Macy 		if (qdepth < qmax || mc->mc_alloc_groups == 1)
1299eda14cbcSMatt Macy 			return (B_TRUE);
1300eda14cbcSMatt Macy 		ASSERT3U(mc->mc_alloc_groups, >, 1);
1301eda14cbcSMatt Macy 
1302eda14cbcSMatt Macy 		/*
1303eda14cbcSMatt Macy 		 * Since this metaslab group is at or over its qmax, we
1304eda14cbcSMatt Macy 		 * need to determine if there are metaslab groups after this
1305eda14cbcSMatt Macy 		 * one that might be able to handle this allocation. This is
1306eda14cbcSMatt Macy 		 * racy since we can't hold the locks for all metaslab
1307eda14cbcSMatt Macy 		 * groups at the same time when we make this check.
1308eda14cbcSMatt Macy 		 */
1309eda14cbcSMatt Macy 		for (metaslab_group_t *mgp = mg->mg_next;
1310eda14cbcSMatt Macy 		    mgp != rotor; mgp = mgp->mg_next) {
1311eda14cbcSMatt Macy 			metaslab_group_allocator_t *mgap =
1312eda14cbcSMatt Macy 			    &mgp->mg_allocator[allocator];
1313eda14cbcSMatt Macy 			qmax = mgap->mga_cur_max_alloc_queue_depth;
1314eda14cbcSMatt Macy 			qmax = qmax * (4 + d) / 4;
1315eda14cbcSMatt Macy 			qdepth =
1316eda14cbcSMatt Macy 			    zfs_refcount_count(&mgap->mga_alloc_queue_depth);
1317eda14cbcSMatt Macy 
1318eda14cbcSMatt Macy 			/*
1319eda14cbcSMatt Macy 			 * If there is another metaslab group that
1320eda14cbcSMatt Macy 			 * might be able to handle the allocation, then
1321eda14cbcSMatt Macy 			 * we return false so that we skip this group.
1322eda14cbcSMatt Macy 			 */
1323eda14cbcSMatt Macy 			if (qdepth < qmax && !mgp->mg_no_free_space)
1324eda14cbcSMatt Macy 				return (B_FALSE);
1325eda14cbcSMatt Macy 		}
1326eda14cbcSMatt Macy 
1327eda14cbcSMatt Macy 		/*
1328eda14cbcSMatt Macy 		 * We didn't find another group to handle the allocation
1329eda14cbcSMatt Macy 		 * so we can't skip this metaslab group even though
1330eda14cbcSMatt Macy 		 * we are at or over our qmax.
1331eda14cbcSMatt Macy 		 */
1332eda14cbcSMatt Macy 		return (B_TRUE);
1333eda14cbcSMatt Macy 
1334eda14cbcSMatt Macy 	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
1335eda14cbcSMatt Macy 		return (B_TRUE);
1336eda14cbcSMatt Macy 	}
1337eda14cbcSMatt Macy 	return (B_FALSE);
1338eda14cbcSMatt Macy }
1339eda14cbcSMatt Macy 
1340eda14cbcSMatt Macy /*
1341eda14cbcSMatt Macy  * ==========================================================================
1342eda14cbcSMatt Macy  * Range tree callbacks
1343eda14cbcSMatt Macy  * ==========================================================================
1344eda14cbcSMatt Macy  */
1345eda14cbcSMatt Macy 
1346eda14cbcSMatt Macy /*
1347eda14cbcSMatt Macy  * Comparison function for the private size-ordered tree using 32-bit
1348eda14cbcSMatt Macy  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1349eda14cbcSMatt Macy  */
13504e8d558cSMartin Matuska __attribute__((always_inline)) inline
1351eda14cbcSMatt Macy static int
1352eda14cbcSMatt Macy metaslab_rangesize32_compare(const void *x1, const void *x2)
1353eda14cbcSMatt Macy {
1354eda14cbcSMatt Macy 	const range_seg32_t *r1 = x1;
1355eda14cbcSMatt Macy 	const range_seg32_t *r2 = x2;
1356eda14cbcSMatt Macy 
1357eda14cbcSMatt Macy 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1358eda14cbcSMatt Macy 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1359eda14cbcSMatt Macy 
1360eda14cbcSMatt Macy 	int cmp = TREE_CMP(rs_size1, rs_size2);
1361eda14cbcSMatt Macy 
13624e8d558cSMartin Matuska 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1363eda14cbcSMatt Macy }
1364eda14cbcSMatt Macy 
1365eda14cbcSMatt Macy /*
1366eda14cbcSMatt Macy  * Comparison function for the private size-ordered tree using 64-bit
1367eda14cbcSMatt Macy  * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1368eda14cbcSMatt Macy  */
13694e8d558cSMartin Matuska __attribute__((always_inline)) inline
1370eda14cbcSMatt Macy static int
1371eda14cbcSMatt Macy metaslab_rangesize64_compare(const void *x1, const void *x2)
1372eda14cbcSMatt Macy {
1373eda14cbcSMatt Macy 	const range_seg64_t *r1 = x1;
1374eda14cbcSMatt Macy 	const range_seg64_t *r2 = x2;
1375eda14cbcSMatt Macy 
1376eda14cbcSMatt Macy 	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1377eda14cbcSMatt Macy 	uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1378eda14cbcSMatt Macy 
1379eda14cbcSMatt Macy 	int cmp = TREE_CMP(rs_size1, rs_size2);
1380eda14cbcSMatt Macy 
13814e8d558cSMartin Matuska 	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1382eda14cbcSMatt Macy }
13834e8d558cSMartin Matuska 
1384eda14cbcSMatt Macy typedef struct metaslab_rt_arg {
1385eda14cbcSMatt Macy 	zfs_btree_t *mra_bt;
1386eda14cbcSMatt Macy 	uint32_t mra_floor_shift;
1387eda14cbcSMatt Macy } metaslab_rt_arg_t;
1388eda14cbcSMatt Macy 
1389eda14cbcSMatt Macy struct mssa_arg {
1390eda14cbcSMatt Macy 	range_tree_t *rt;
1391eda14cbcSMatt Macy 	metaslab_rt_arg_t *mra;
1392eda14cbcSMatt Macy };
1393eda14cbcSMatt Macy 
1394eda14cbcSMatt Macy static void
1395eda14cbcSMatt Macy metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1396eda14cbcSMatt Macy {
1397eda14cbcSMatt Macy 	struct mssa_arg *mssap = arg;
1398eda14cbcSMatt Macy 	range_tree_t *rt = mssap->rt;
1399eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = mssap->mra;
1400eda14cbcSMatt Macy 	range_seg_max_t seg = {0};
1401eda14cbcSMatt Macy 	rs_set_start(&seg, rt, start);
1402eda14cbcSMatt Macy 	rs_set_end(&seg, rt, start + size);
1403eda14cbcSMatt Macy 	metaslab_rt_add(rt, &seg, mrap);
1404eda14cbcSMatt Macy }
1405eda14cbcSMatt Macy 
1406eda14cbcSMatt Macy static void
1407eda14cbcSMatt Macy metaslab_size_tree_full_load(range_tree_t *rt)
1408eda14cbcSMatt Macy {
1409eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = rt->rt_arg;
1410eda14cbcSMatt Macy 	METASLABSTAT_BUMP(metaslabstat_reload_tree);
1411eda14cbcSMatt Macy 	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1412eda14cbcSMatt Macy 	mrap->mra_floor_shift = 0;
1413eda14cbcSMatt Macy 	struct mssa_arg arg = {0};
1414eda14cbcSMatt Macy 	arg.rt = rt;
1415eda14cbcSMatt Macy 	arg.mra = mrap;
1416eda14cbcSMatt Macy 	range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1417eda14cbcSMatt Macy }
1418eda14cbcSMatt Macy 
14194e8d558cSMartin Matuska 
14204e8d558cSMartin Matuska ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
14214e8d558cSMartin Matuska     range_seg32_t, metaslab_rangesize32_compare)
14224e8d558cSMartin Matuska 
14234e8d558cSMartin Matuska ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
14244e8d558cSMartin Matuska     range_seg64_t, metaslab_rangesize64_compare)
14254e8d558cSMartin Matuska 
1426eda14cbcSMatt Macy /*
1427eda14cbcSMatt Macy  * Create any block allocator specific components. The current allocators
1428eda14cbcSMatt Macy  * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
1429eda14cbcSMatt Macy  */
1430eda14cbcSMatt Macy static void
1431eda14cbcSMatt Macy metaslab_rt_create(range_tree_t *rt, void *arg)
1432eda14cbcSMatt Macy {
1433eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = arg;
1434eda14cbcSMatt Macy 	zfs_btree_t *size_tree = mrap->mra_bt;
1435eda14cbcSMatt Macy 
1436eda14cbcSMatt Macy 	size_t size;
1437eda14cbcSMatt Macy 	int (*compare) (const void *, const void *);
14384e8d558cSMartin Matuska 	bt_find_in_buf_f bt_find;
1439eda14cbcSMatt Macy 	switch (rt->rt_type) {
1440eda14cbcSMatt Macy 	case RANGE_SEG32:
1441eda14cbcSMatt Macy 		size = sizeof (range_seg32_t);
1442eda14cbcSMatt Macy 		compare = metaslab_rangesize32_compare;
14434e8d558cSMartin Matuska 		bt_find = metaslab_rt_find_rangesize32_in_buf;
1444eda14cbcSMatt Macy 		break;
1445eda14cbcSMatt Macy 	case RANGE_SEG64:
1446eda14cbcSMatt Macy 		size = sizeof (range_seg64_t);
1447eda14cbcSMatt Macy 		compare = metaslab_rangesize64_compare;
14484e8d558cSMartin Matuska 		bt_find = metaslab_rt_find_rangesize64_in_buf;
1449eda14cbcSMatt Macy 		break;
1450eda14cbcSMatt Macy 	default:
1451eda14cbcSMatt Macy 		panic("Invalid range seg type %d", rt->rt_type);
1452eda14cbcSMatt Macy 	}
14534e8d558cSMartin Matuska 	zfs_btree_create(size_tree, compare, bt_find, size);
1454eda14cbcSMatt Macy 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
1455eda14cbcSMatt Macy }
1456eda14cbcSMatt Macy 
1457eda14cbcSMatt Macy static void
1458eda14cbcSMatt Macy metaslab_rt_destroy(range_tree_t *rt, void *arg)
1459eda14cbcSMatt Macy {
1460e92ffd9bSMartin Matuska 	(void) rt;
1461eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = arg;
1462eda14cbcSMatt Macy 	zfs_btree_t *size_tree = mrap->mra_bt;
1463eda14cbcSMatt Macy 
1464eda14cbcSMatt Macy 	zfs_btree_destroy(size_tree);
1465eda14cbcSMatt Macy 	kmem_free(mrap, sizeof (*mrap));
1466eda14cbcSMatt Macy }
1467eda14cbcSMatt Macy 
1468eda14cbcSMatt Macy static void
1469eda14cbcSMatt Macy metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
1470eda14cbcSMatt Macy {
1471eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = arg;
1472eda14cbcSMatt Macy 	zfs_btree_t *size_tree = mrap->mra_bt;
1473eda14cbcSMatt Macy 
1474eda14cbcSMatt Macy 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) <
1475be181ee2SMartin Matuska 	    (1ULL << mrap->mra_floor_shift))
1476eda14cbcSMatt Macy 		return;
1477eda14cbcSMatt Macy 
1478eda14cbcSMatt Macy 	zfs_btree_add(size_tree, rs);
1479eda14cbcSMatt Macy }
1480eda14cbcSMatt Macy 
1481eda14cbcSMatt Macy static void
1482eda14cbcSMatt Macy metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
1483eda14cbcSMatt Macy {
1484eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = arg;
1485eda14cbcSMatt Macy 	zfs_btree_t *size_tree = mrap->mra_bt;
1486eda14cbcSMatt Macy 
1487be181ee2SMartin Matuska 	if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL <<
1488eda14cbcSMatt Macy 	    mrap->mra_floor_shift))
1489eda14cbcSMatt Macy 		return;
1490eda14cbcSMatt Macy 
1491eda14cbcSMatt Macy 	zfs_btree_remove(size_tree, rs);
1492eda14cbcSMatt Macy }
1493eda14cbcSMatt Macy 
1494eda14cbcSMatt Macy static void
1495eda14cbcSMatt Macy metaslab_rt_vacate(range_tree_t *rt, void *arg)
1496eda14cbcSMatt Macy {
1497eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap = arg;
1498eda14cbcSMatt Macy 	zfs_btree_t *size_tree = mrap->mra_bt;
1499eda14cbcSMatt Macy 	zfs_btree_clear(size_tree);
1500eda14cbcSMatt Macy 	zfs_btree_destroy(size_tree);
1501eda14cbcSMatt Macy 
1502eda14cbcSMatt Macy 	metaslab_rt_create(rt, arg);
1503eda14cbcSMatt Macy }
1504eda14cbcSMatt Macy 
1505e92ffd9bSMartin Matuska static const range_tree_ops_t metaslab_rt_ops = {
1506eda14cbcSMatt Macy 	.rtop_create = metaslab_rt_create,
1507eda14cbcSMatt Macy 	.rtop_destroy = metaslab_rt_destroy,
1508eda14cbcSMatt Macy 	.rtop_add = metaslab_rt_add,
1509eda14cbcSMatt Macy 	.rtop_remove = metaslab_rt_remove,
1510eda14cbcSMatt Macy 	.rtop_vacate = metaslab_rt_vacate
1511eda14cbcSMatt Macy };
1512eda14cbcSMatt Macy 
1513eda14cbcSMatt Macy /*
1514eda14cbcSMatt Macy  * ==========================================================================
1515eda14cbcSMatt Macy  * Common allocator routines
1516eda14cbcSMatt Macy  * ==========================================================================
1517eda14cbcSMatt Macy  */
1518eda14cbcSMatt Macy 
1519eda14cbcSMatt Macy /*
1520eda14cbcSMatt Macy  * Return the maximum contiguous segment within the metaslab.
1521eda14cbcSMatt Macy  */
1522eda14cbcSMatt Macy uint64_t
1523eda14cbcSMatt Macy metaslab_largest_allocatable(metaslab_t *msp)
1524eda14cbcSMatt Macy {
1525eda14cbcSMatt Macy 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1526eda14cbcSMatt Macy 	range_seg_t *rs;
1527eda14cbcSMatt Macy 
1528eda14cbcSMatt Macy 	if (t == NULL)
1529eda14cbcSMatt Macy 		return (0);
1530eda14cbcSMatt Macy 	if (zfs_btree_numnodes(t) == 0)
1531eda14cbcSMatt Macy 		metaslab_size_tree_full_load(msp->ms_allocatable);
1532eda14cbcSMatt Macy 
1533eda14cbcSMatt Macy 	rs = zfs_btree_last(t, NULL);
1534eda14cbcSMatt Macy 	if (rs == NULL)
1535eda14cbcSMatt Macy 		return (0);
1536eda14cbcSMatt Macy 
1537eda14cbcSMatt Macy 	return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs,
1538eda14cbcSMatt Macy 	    msp->ms_allocatable));
1539eda14cbcSMatt Macy }
1540eda14cbcSMatt Macy 
1541eda14cbcSMatt Macy /*
1542eda14cbcSMatt Macy  * Return the maximum contiguous segment within the unflushed frees of this
1543eda14cbcSMatt Macy  * metaslab.
1544eda14cbcSMatt Macy  */
1545eda14cbcSMatt Macy static uint64_t
1546eda14cbcSMatt Macy metaslab_largest_unflushed_free(metaslab_t *msp)
1547eda14cbcSMatt Macy {
1548eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1549eda14cbcSMatt Macy 
1550eda14cbcSMatt Macy 	if (msp->ms_unflushed_frees == NULL)
1551eda14cbcSMatt Macy 		return (0);
1552eda14cbcSMatt Macy 
1553eda14cbcSMatt Macy 	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1554eda14cbcSMatt Macy 		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1555eda14cbcSMatt Macy 	range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1556eda14cbcSMatt Macy 	    NULL);
1557eda14cbcSMatt Macy 	if (rs == NULL)
1558eda14cbcSMatt Macy 		return (0);
1559eda14cbcSMatt Macy 
1560eda14cbcSMatt Macy 	/*
1561eda14cbcSMatt Macy 	 * When a range is freed from the metaslab, that range is added to
1562eda14cbcSMatt Macy 	 * both the unflushed frees and the deferred frees. While the block
1563eda14cbcSMatt Macy 	 * will eventually be usable, if the metaslab were loaded the range
1564eda14cbcSMatt Macy 	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1565eda14cbcSMatt Macy 	 * txgs had passed.  As a result, when attempting to estimate an upper
1566eda14cbcSMatt Macy 	 * bound for the largest currently-usable free segment in the
1567eda14cbcSMatt Macy 	 * metaslab, we need to not consider any ranges currently in the defer
1568eda14cbcSMatt Macy 	 * trees. This algorithm approximates the largest available chunk in
1569eda14cbcSMatt Macy 	 * the largest range in the unflushed_frees tree by taking the first
1570eda14cbcSMatt Macy 	 * chunk.  While this may be a poor estimate, it should only remain so
1571eda14cbcSMatt Macy 	 * briefly and should eventually self-correct as frees are no longer
1572eda14cbcSMatt Macy 	 * deferred. Similar logic applies to the ms_freed tree. See
1573eda14cbcSMatt Macy 	 * metaslab_load() for more details.
1574eda14cbcSMatt Macy 	 *
1575eda14cbcSMatt Macy 	 * There are two primary sources of inaccuracy in this estimate. Both
1576eda14cbcSMatt Macy 	 * are tolerated for performance reasons. The first source is that we
1577eda14cbcSMatt Macy 	 * only check the largest segment for overlaps. Smaller segments may
1578eda14cbcSMatt Macy 	 * have more favorable overlaps with the other trees, resulting in
1579eda14cbcSMatt Macy 	 * larger usable chunks.  Second, we only look at the first chunk in
1580eda14cbcSMatt Macy 	 * the largest segment; there may be other usable chunks in the
1581eda14cbcSMatt Macy 	 * largest segment, but we ignore them.
1582eda14cbcSMatt Macy 	 */
1583eda14cbcSMatt Macy 	uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees);
1584eda14cbcSMatt Macy 	uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1585eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1586eda14cbcSMatt Macy 		uint64_t start = 0;
1587eda14cbcSMatt Macy 		uint64_t size = 0;
1588eda14cbcSMatt Macy 		boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart,
1589eda14cbcSMatt Macy 		    rsize, &start, &size);
1590eda14cbcSMatt Macy 		if (found) {
1591eda14cbcSMatt Macy 			if (rstart == start)
1592eda14cbcSMatt Macy 				return (0);
1593eda14cbcSMatt Macy 			rsize = start - rstart;
1594eda14cbcSMatt Macy 		}
1595eda14cbcSMatt Macy 	}
1596eda14cbcSMatt Macy 
1597eda14cbcSMatt Macy 	uint64_t start = 0;
1598eda14cbcSMatt Macy 	uint64_t size = 0;
1599eda14cbcSMatt Macy 	boolean_t found = range_tree_find_in(msp->ms_freed, rstart,
1600eda14cbcSMatt Macy 	    rsize, &start, &size);
1601eda14cbcSMatt Macy 	if (found)
1602eda14cbcSMatt Macy 		rsize = start - rstart;
1603eda14cbcSMatt Macy 
1604eda14cbcSMatt Macy 	return (rsize);
1605eda14cbcSMatt Macy }
1606eda14cbcSMatt Macy 
1607eda14cbcSMatt Macy static range_seg_t *
1608eda14cbcSMatt Macy metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start,
1609eda14cbcSMatt Macy     uint64_t size, zfs_btree_index_t *where)
1610eda14cbcSMatt Macy {
1611eda14cbcSMatt Macy 	range_seg_t *rs;
1612eda14cbcSMatt Macy 	range_seg_max_t rsearch;
1613eda14cbcSMatt Macy 
1614eda14cbcSMatt Macy 	rs_set_start(&rsearch, rt, start);
1615eda14cbcSMatt Macy 	rs_set_end(&rsearch, rt, start + size);
1616eda14cbcSMatt Macy 
1617eda14cbcSMatt Macy 	rs = zfs_btree_find(t, &rsearch, where);
1618eda14cbcSMatt Macy 	if (rs == NULL) {
1619eda14cbcSMatt Macy 		rs = zfs_btree_next(t, where, where);
1620eda14cbcSMatt Macy 	}
1621eda14cbcSMatt Macy 
1622eda14cbcSMatt Macy 	return (rs);
1623eda14cbcSMatt Macy }
1624eda14cbcSMatt Macy 
1625eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR) || \
1626eda14cbcSMatt Macy     defined(WITH_CF_BLOCK_ALLOCATOR)
16277877fdebSMatt Macy 
1628eda14cbcSMatt Macy /*
1629eda14cbcSMatt Macy  * This is a helper function that can be used by the allocator to find a
1630eda14cbcSMatt Macy  * suitable block to allocate. This will search the specified B-tree looking
1631eda14cbcSMatt Macy  * for a block that matches the specified criteria.
1632eda14cbcSMatt Macy  */
1633eda14cbcSMatt Macy static uint64_t
1634eda14cbcSMatt Macy metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size,
1635eda14cbcSMatt Macy     uint64_t max_search)
1636eda14cbcSMatt Macy {
1637eda14cbcSMatt Macy 	if (*cursor == 0)
1638eda14cbcSMatt Macy 		*cursor = rt->rt_start;
1639eda14cbcSMatt Macy 	zfs_btree_t *bt = &rt->rt_root;
1640eda14cbcSMatt Macy 	zfs_btree_index_t where;
1641eda14cbcSMatt Macy 	range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where);
1642eda14cbcSMatt Macy 	uint64_t first_found;
1643eda14cbcSMatt Macy 	int count_searched = 0;
1644eda14cbcSMatt Macy 
1645eda14cbcSMatt Macy 	if (rs != NULL)
1646eda14cbcSMatt Macy 		first_found = rs_get_start(rs, rt);
1647eda14cbcSMatt Macy 
1648eda14cbcSMatt Macy 	while (rs != NULL && (rs_get_start(rs, rt) - first_found <=
1649eda14cbcSMatt Macy 	    max_search || count_searched < metaslab_min_search_count)) {
1650eda14cbcSMatt Macy 		uint64_t offset = rs_get_start(rs, rt);
1651eda14cbcSMatt Macy 		if (offset + size <= rs_get_end(rs, rt)) {
1652eda14cbcSMatt Macy 			*cursor = offset + size;
1653eda14cbcSMatt Macy 			return (offset);
1654eda14cbcSMatt Macy 		}
1655eda14cbcSMatt Macy 		rs = zfs_btree_next(bt, &where, &where);
1656eda14cbcSMatt Macy 		count_searched++;
1657eda14cbcSMatt Macy 	}
1658eda14cbcSMatt Macy 
1659eda14cbcSMatt Macy 	*cursor = 0;
1660eda14cbcSMatt Macy 	return (-1ULL);
1661eda14cbcSMatt Macy }
1662eda14cbcSMatt Macy #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */
1663eda14cbcSMatt Macy 
1664eda14cbcSMatt Macy #if defined(WITH_DF_BLOCK_ALLOCATOR)
1665eda14cbcSMatt Macy /*
1666eda14cbcSMatt Macy  * ==========================================================================
1667eda14cbcSMatt Macy  * Dynamic Fit (df) block allocator
1668eda14cbcSMatt Macy  *
1669eda14cbcSMatt Macy  * Search for a free chunk of at least this size, starting from the last
1670eda14cbcSMatt Macy  * offset (for this alignment of block) looking for up to
1671eda14cbcSMatt Macy  * metaslab_df_max_search bytes (16MB).  If a large enough free chunk is not
1672eda14cbcSMatt Macy  * found within 16MB, then return a free chunk of exactly the requested size (or
1673eda14cbcSMatt Macy  * larger).
1674eda14cbcSMatt Macy  *
1675eda14cbcSMatt Macy  * If it seems like searching from the last offset will be unproductive, skip
1676eda14cbcSMatt Macy  * that and just return a free chunk of exactly the requested size (or larger).
1677eda14cbcSMatt Macy  * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct.  This
1678eda14cbcSMatt Macy  * mechanism is probably not very useful and may be removed in the future.
1679eda14cbcSMatt Macy  *
1680eda14cbcSMatt Macy  * The behavior when not searching can be changed to return the largest free
1681eda14cbcSMatt Macy  * chunk, instead of a free chunk of exactly the requested size, by setting
1682eda14cbcSMatt Macy  * metaslab_df_use_largest_segment.
1683eda14cbcSMatt Macy  * ==========================================================================
1684eda14cbcSMatt Macy  */
1685eda14cbcSMatt Macy static uint64_t
1686eda14cbcSMatt Macy metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1687eda14cbcSMatt Macy {
1688eda14cbcSMatt Macy 	/*
1689eda14cbcSMatt Macy 	 * Find the largest power of 2 block size that evenly divides the
1690eda14cbcSMatt Macy 	 * requested size. This is used to try to allocate blocks with similar
1691eda14cbcSMatt Macy 	 * alignment from the same area of the metaslab (i.e. same cursor
1692eda14cbcSMatt Macy 	 * bucket) but it does not guarantee that other allocations sizes
1693eda14cbcSMatt Macy 	 * may exist in the same region.
1694eda14cbcSMatt Macy 	 */
1695eda14cbcSMatt Macy 	uint64_t align = size & -size;
1696eda14cbcSMatt Macy 	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1697eda14cbcSMatt Macy 	range_tree_t *rt = msp->ms_allocatable;
1698be181ee2SMartin Matuska 	uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1699eda14cbcSMatt Macy 	uint64_t offset;
1700eda14cbcSMatt Macy 
1701eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1702eda14cbcSMatt Macy 
1703eda14cbcSMatt Macy 	/*
1704eda14cbcSMatt Macy 	 * If we're running low on space, find a segment based on size,
1705eda14cbcSMatt Macy 	 * rather than iterating based on offset.
1706eda14cbcSMatt Macy 	 */
1707eda14cbcSMatt Macy 	if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1708eda14cbcSMatt Macy 	    free_pct < metaslab_df_free_pct) {
1709eda14cbcSMatt Macy 		offset = -1;
1710eda14cbcSMatt Macy 	} else {
1711eda14cbcSMatt Macy 		offset = metaslab_block_picker(rt,
1712eda14cbcSMatt Macy 		    cursor, size, metaslab_df_max_search);
1713eda14cbcSMatt Macy 	}
1714eda14cbcSMatt Macy 
1715eda14cbcSMatt Macy 	if (offset == -1) {
1716eda14cbcSMatt Macy 		range_seg_t *rs;
1717eda14cbcSMatt Macy 		if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1718eda14cbcSMatt Macy 			metaslab_size_tree_full_load(msp->ms_allocatable);
17197877fdebSMatt Macy 
1720eda14cbcSMatt Macy 		if (metaslab_df_use_largest_segment) {
1721eda14cbcSMatt Macy 			/* use largest free segment */
1722eda14cbcSMatt Macy 			rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1723eda14cbcSMatt Macy 		} else {
1724eda14cbcSMatt Macy 			zfs_btree_index_t where;
1725eda14cbcSMatt Macy 			/* use segment of this size, or next largest */
1726eda14cbcSMatt Macy 			rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1727eda14cbcSMatt Macy 			    rt, msp->ms_start, size, &where);
1728eda14cbcSMatt Macy 		}
1729eda14cbcSMatt Macy 		if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs,
1730eda14cbcSMatt Macy 		    rt)) {
1731eda14cbcSMatt Macy 			offset = rs_get_start(rs, rt);
1732eda14cbcSMatt Macy 			*cursor = offset + size;
1733eda14cbcSMatt Macy 		}
1734eda14cbcSMatt Macy 	}
1735eda14cbcSMatt Macy 
1736eda14cbcSMatt Macy 	return (offset);
1737eda14cbcSMatt Macy }
1738eda14cbcSMatt Macy 
1739e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = {
1740eda14cbcSMatt Macy 	metaslab_df_alloc
1741eda14cbcSMatt Macy };
1742eda14cbcSMatt Macy #endif /* WITH_DF_BLOCK_ALLOCATOR */
1743eda14cbcSMatt Macy 
1744eda14cbcSMatt Macy #if defined(WITH_CF_BLOCK_ALLOCATOR)
1745eda14cbcSMatt Macy /*
1746eda14cbcSMatt Macy  * ==========================================================================
1747eda14cbcSMatt Macy  * Cursor fit block allocator -
1748eda14cbcSMatt Macy  * Select the largest region in the metaslab, set the cursor to the beginning
1749eda14cbcSMatt Macy  * of the range and the cursor_end to the end of the range. As allocations
1750eda14cbcSMatt Macy  * are made advance the cursor. Continue allocating from the cursor until
1751eda14cbcSMatt Macy  * the range is exhausted and then find a new range.
1752eda14cbcSMatt Macy  * ==========================================================================
1753eda14cbcSMatt Macy  */
1754eda14cbcSMatt Macy static uint64_t
1755eda14cbcSMatt Macy metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1756eda14cbcSMatt Macy {
1757eda14cbcSMatt Macy 	range_tree_t *rt = msp->ms_allocatable;
1758eda14cbcSMatt Macy 	zfs_btree_t *t = &msp->ms_allocatable_by_size;
1759eda14cbcSMatt Macy 	uint64_t *cursor = &msp->ms_lbas[0];
1760eda14cbcSMatt Macy 	uint64_t *cursor_end = &msp->ms_lbas[1];
1761eda14cbcSMatt Macy 	uint64_t offset = 0;
1762eda14cbcSMatt Macy 
1763eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1764eda14cbcSMatt Macy 
1765eda14cbcSMatt Macy 	ASSERT3U(*cursor_end, >=, *cursor);
1766eda14cbcSMatt Macy 
1767eda14cbcSMatt Macy 	if ((*cursor + size) > *cursor_end) {
1768eda14cbcSMatt Macy 		range_seg_t *rs;
1769eda14cbcSMatt Macy 
1770eda14cbcSMatt Macy 		if (zfs_btree_numnodes(t) == 0)
1771eda14cbcSMatt Macy 			metaslab_size_tree_full_load(msp->ms_allocatable);
1772eda14cbcSMatt Macy 		rs = zfs_btree_last(t, NULL);
1773eda14cbcSMatt Macy 		if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) <
1774eda14cbcSMatt Macy 		    size)
1775eda14cbcSMatt Macy 			return (-1ULL);
1776eda14cbcSMatt Macy 
1777eda14cbcSMatt Macy 		*cursor = rs_get_start(rs, rt);
1778eda14cbcSMatt Macy 		*cursor_end = rs_get_end(rs, rt);
1779eda14cbcSMatt Macy 	}
1780eda14cbcSMatt Macy 
1781eda14cbcSMatt Macy 	offset = *cursor;
1782eda14cbcSMatt Macy 	*cursor += size;
1783eda14cbcSMatt Macy 
1784eda14cbcSMatt Macy 	return (offset);
1785eda14cbcSMatt Macy }
1786eda14cbcSMatt Macy 
1787e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = {
1788eda14cbcSMatt Macy 	metaslab_cf_alloc
1789eda14cbcSMatt Macy };
1790eda14cbcSMatt Macy #endif /* WITH_CF_BLOCK_ALLOCATOR */
1791eda14cbcSMatt Macy 
1792eda14cbcSMatt Macy #if defined(WITH_NDF_BLOCK_ALLOCATOR)
1793eda14cbcSMatt Macy /*
1794eda14cbcSMatt Macy  * ==========================================================================
1795eda14cbcSMatt Macy  * New dynamic fit allocator -
1796eda14cbcSMatt Macy  * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1797eda14cbcSMatt Macy  * contiguous blocks. If no region is found then just use the largest segment
1798eda14cbcSMatt Macy  * that remains.
1799eda14cbcSMatt Macy  * ==========================================================================
1800eda14cbcSMatt Macy  */
1801eda14cbcSMatt Macy 
1802eda14cbcSMatt Macy /*
1803eda14cbcSMatt Macy  * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1804eda14cbcSMatt Macy  * to request from the allocator.
1805eda14cbcSMatt Macy  */
1806eda14cbcSMatt Macy uint64_t metaslab_ndf_clump_shift = 4;
1807eda14cbcSMatt Macy 
1808eda14cbcSMatt Macy static uint64_t
1809eda14cbcSMatt Macy metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1810eda14cbcSMatt Macy {
1811eda14cbcSMatt Macy 	zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1812eda14cbcSMatt Macy 	range_tree_t *rt = msp->ms_allocatable;
1813eda14cbcSMatt Macy 	zfs_btree_index_t where;
1814eda14cbcSMatt Macy 	range_seg_t *rs;
1815eda14cbcSMatt Macy 	range_seg_max_t rsearch;
1816eda14cbcSMatt Macy 	uint64_t hbit = highbit64(size);
1817eda14cbcSMatt Macy 	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1818eda14cbcSMatt Macy 	uint64_t max_size = metaslab_largest_allocatable(msp);
1819eda14cbcSMatt Macy 
1820eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1821eda14cbcSMatt Macy 
1822eda14cbcSMatt Macy 	if (max_size < size)
1823eda14cbcSMatt Macy 		return (-1ULL);
1824eda14cbcSMatt Macy 
1825eda14cbcSMatt Macy 	rs_set_start(&rsearch, rt, *cursor);
1826eda14cbcSMatt Macy 	rs_set_end(&rsearch, rt, *cursor + size);
1827eda14cbcSMatt Macy 
1828eda14cbcSMatt Macy 	rs = zfs_btree_find(t, &rsearch, &where);
1829eda14cbcSMatt Macy 	if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) {
1830eda14cbcSMatt Macy 		t = &msp->ms_allocatable_by_size;
1831eda14cbcSMatt Macy 
1832eda14cbcSMatt Macy 		rs_set_start(&rsearch, rt, 0);
1833eda14cbcSMatt Macy 		rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1834eda14cbcSMatt Macy 		    metaslab_ndf_clump_shift)));
1835eda14cbcSMatt Macy 
1836eda14cbcSMatt Macy 		rs = zfs_btree_find(t, &rsearch, &where);
1837eda14cbcSMatt Macy 		if (rs == NULL)
1838eda14cbcSMatt Macy 			rs = zfs_btree_next(t, &where, &where);
1839eda14cbcSMatt Macy 		ASSERT(rs != NULL);
1840eda14cbcSMatt Macy 	}
1841eda14cbcSMatt Macy 
1842eda14cbcSMatt Macy 	if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) {
1843eda14cbcSMatt Macy 		*cursor = rs_get_start(rs, rt) + size;
1844eda14cbcSMatt Macy 		return (rs_get_start(rs, rt));
1845eda14cbcSMatt Macy 	}
1846eda14cbcSMatt Macy 	return (-1ULL);
1847eda14cbcSMatt Macy }
1848eda14cbcSMatt Macy 
1849e92ffd9bSMartin Matuska const metaslab_ops_t zfs_metaslab_ops = {
1850eda14cbcSMatt Macy 	metaslab_ndf_alloc
1851eda14cbcSMatt Macy };
1852eda14cbcSMatt Macy #endif /* WITH_NDF_BLOCK_ALLOCATOR */
1853eda14cbcSMatt Macy 
1854eda14cbcSMatt Macy 
1855eda14cbcSMatt Macy /*
1856eda14cbcSMatt Macy  * ==========================================================================
1857eda14cbcSMatt Macy  * Metaslabs
1858eda14cbcSMatt Macy  * ==========================================================================
1859eda14cbcSMatt Macy  */
1860eda14cbcSMatt Macy 
1861eda14cbcSMatt Macy /*
1862eda14cbcSMatt Macy  * Wait for any in-progress metaslab loads to complete.
1863eda14cbcSMatt Macy  */
1864eda14cbcSMatt Macy static void
1865eda14cbcSMatt Macy metaslab_load_wait(metaslab_t *msp)
1866eda14cbcSMatt Macy {
1867eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1868eda14cbcSMatt Macy 
1869eda14cbcSMatt Macy 	while (msp->ms_loading) {
1870eda14cbcSMatt Macy 		ASSERT(!msp->ms_loaded);
1871eda14cbcSMatt Macy 		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1872eda14cbcSMatt Macy 	}
1873eda14cbcSMatt Macy }
1874eda14cbcSMatt Macy 
1875eda14cbcSMatt Macy /*
1876eda14cbcSMatt Macy  * Wait for any in-progress flushing to complete.
1877eda14cbcSMatt Macy  */
1878eda14cbcSMatt Macy static void
1879eda14cbcSMatt Macy metaslab_flush_wait(metaslab_t *msp)
1880eda14cbcSMatt Macy {
1881eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1882eda14cbcSMatt Macy 
1883eda14cbcSMatt Macy 	while (msp->ms_flushing)
1884eda14cbcSMatt Macy 		cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
1885eda14cbcSMatt Macy }
1886eda14cbcSMatt Macy 
1887eda14cbcSMatt Macy static unsigned int
1888eda14cbcSMatt Macy metaslab_idx_func(multilist_t *ml, void *arg)
1889eda14cbcSMatt Macy {
1890eda14cbcSMatt Macy 	metaslab_t *msp = arg;
18912617128aSMartin Matuska 
18922617128aSMartin Matuska 	/*
18932617128aSMartin Matuska 	 * ms_id values are allocated sequentially, so full 64bit
18942617128aSMartin Matuska 	 * division would be a waste of time, so limit it to 32 bits.
18952617128aSMartin Matuska 	 */
18962617128aSMartin Matuska 	return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
1897eda14cbcSMatt Macy }
1898eda14cbcSMatt Macy 
1899eda14cbcSMatt Macy uint64_t
1900eda14cbcSMatt Macy metaslab_allocated_space(metaslab_t *msp)
1901eda14cbcSMatt Macy {
1902eda14cbcSMatt Macy 	return (msp->ms_allocated_space);
1903eda14cbcSMatt Macy }
1904eda14cbcSMatt Macy 
1905eda14cbcSMatt Macy /*
1906eda14cbcSMatt Macy  * Verify that the space accounting on disk matches the in-core range_trees.
1907eda14cbcSMatt Macy  */
1908eda14cbcSMatt Macy static void
1909eda14cbcSMatt Macy metaslab_verify_space(metaslab_t *msp, uint64_t txg)
1910eda14cbcSMatt Macy {
1911eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1912eda14cbcSMatt Macy 	uint64_t allocating = 0;
1913eda14cbcSMatt Macy 	uint64_t sm_free_space, msp_free_space;
1914eda14cbcSMatt Macy 
1915eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
1916eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
1917eda14cbcSMatt Macy 
1918eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
1919eda14cbcSMatt Macy 		return;
1920eda14cbcSMatt Macy 
1921eda14cbcSMatt Macy 	/*
1922eda14cbcSMatt Macy 	 * We can only verify the metaslab space when we're called
1923eda14cbcSMatt Macy 	 * from syncing context with a loaded metaslab that has an
1924eda14cbcSMatt Macy 	 * allocated space map. Calling this in non-syncing context
1925eda14cbcSMatt Macy 	 * does not provide a consistent view of the metaslab since
1926eda14cbcSMatt Macy 	 * we're performing allocations in the future.
1927eda14cbcSMatt Macy 	 */
1928eda14cbcSMatt Macy 	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
1929eda14cbcSMatt Macy 	    !msp->ms_loaded)
1930eda14cbcSMatt Macy 		return;
1931eda14cbcSMatt Macy 
1932eda14cbcSMatt Macy 	/*
1933eda14cbcSMatt Macy 	 * Even though the smp_alloc field can get negative,
1934eda14cbcSMatt Macy 	 * when it comes to a metaslab's space map, that should
1935eda14cbcSMatt Macy 	 * never be the case.
1936eda14cbcSMatt Macy 	 */
1937eda14cbcSMatt Macy 	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
1938eda14cbcSMatt Macy 
1939eda14cbcSMatt Macy 	ASSERT3U(space_map_allocated(msp->ms_sm), >=,
1940eda14cbcSMatt Macy 	    range_tree_space(msp->ms_unflushed_frees));
1941eda14cbcSMatt Macy 
1942eda14cbcSMatt Macy 	ASSERT3U(metaslab_allocated_space(msp), ==,
1943eda14cbcSMatt Macy 	    space_map_allocated(msp->ms_sm) +
1944eda14cbcSMatt Macy 	    range_tree_space(msp->ms_unflushed_allocs) -
1945eda14cbcSMatt Macy 	    range_tree_space(msp->ms_unflushed_frees));
1946eda14cbcSMatt Macy 
1947eda14cbcSMatt Macy 	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
1948eda14cbcSMatt Macy 
1949eda14cbcSMatt Macy 	/*
1950eda14cbcSMatt Macy 	 * Account for future allocations since we would have
1951eda14cbcSMatt Macy 	 * already deducted that space from the ms_allocatable.
1952eda14cbcSMatt Macy 	 */
1953eda14cbcSMatt Macy 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
1954eda14cbcSMatt Macy 		allocating +=
1955eda14cbcSMatt Macy 		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
1956eda14cbcSMatt Macy 	}
1957eda14cbcSMatt Macy 	ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
1958eda14cbcSMatt Macy 	    msp->ms_allocating_total);
1959eda14cbcSMatt Macy 
1960eda14cbcSMatt Macy 	ASSERT3U(msp->ms_deferspace, ==,
1961eda14cbcSMatt Macy 	    range_tree_space(msp->ms_defer[0]) +
1962eda14cbcSMatt Macy 	    range_tree_space(msp->ms_defer[1]));
1963eda14cbcSMatt Macy 
1964eda14cbcSMatt Macy 	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
1965eda14cbcSMatt Macy 	    msp->ms_deferspace + range_tree_space(msp->ms_freed);
1966eda14cbcSMatt Macy 
1967eda14cbcSMatt Macy 	VERIFY3U(sm_free_space, ==, msp_free_space);
1968eda14cbcSMatt Macy }
1969eda14cbcSMatt Macy 
1970eda14cbcSMatt Macy static void
1971eda14cbcSMatt Macy metaslab_aux_histograms_clear(metaslab_t *msp)
1972eda14cbcSMatt Macy {
1973eda14cbcSMatt Macy 	/*
1974eda14cbcSMatt Macy 	 * Auxiliary histograms are only cleared when resetting them,
1975eda14cbcSMatt Macy 	 * which can only happen while the metaslab is loaded.
1976eda14cbcSMatt Macy 	 */
1977eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
1978eda14cbcSMatt Macy 
1979da5137abSMartin Matuska 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
1980eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++)
1981da5137abSMartin Matuska 		memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
1982eda14cbcSMatt Macy }
1983eda14cbcSMatt Macy 
1984eda14cbcSMatt Macy static void
1985eda14cbcSMatt Macy metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
1986eda14cbcSMatt Macy     range_tree_t *rt)
1987eda14cbcSMatt Macy {
1988eda14cbcSMatt Macy 	/*
1989eda14cbcSMatt Macy 	 * This is modeled after space_map_histogram_add(), so refer to that
1990eda14cbcSMatt Macy 	 * function for implementation details. We want this to work like
1991eda14cbcSMatt Macy 	 * the space map histogram, and not the range tree histogram, as we
1992eda14cbcSMatt Macy 	 * are essentially constructing a delta that will be later subtracted
1993eda14cbcSMatt Macy 	 * from the space map histogram.
1994eda14cbcSMatt Macy 	 */
1995eda14cbcSMatt Macy 	int idx = 0;
1996eda14cbcSMatt Macy 	for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
1997eda14cbcSMatt Macy 		ASSERT3U(i, >=, idx + shift);
1998eda14cbcSMatt Macy 		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
1999eda14cbcSMatt Macy 
2000eda14cbcSMatt Macy 		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
2001eda14cbcSMatt Macy 			ASSERT3U(idx + shift, ==, i);
2002eda14cbcSMatt Macy 			idx++;
2003eda14cbcSMatt Macy 			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
2004eda14cbcSMatt Macy 		}
2005eda14cbcSMatt Macy 	}
2006eda14cbcSMatt Macy }
2007eda14cbcSMatt Macy 
2008eda14cbcSMatt Macy /*
2009eda14cbcSMatt Macy  * Called at every sync pass that the metaslab gets synced.
2010eda14cbcSMatt Macy  *
2011eda14cbcSMatt Macy  * The reason is that we want our auxiliary histograms to be updated
2012eda14cbcSMatt Macy  * wherever the metaslab's space map histogram is updated. This way
2013eda14cbcSMatt Macy  * we stay consistent on which parts of the metaslab space map's
2014eda14cbcSMatt Macy  * histogram are currently not available for allocations (e.g because
2015eda14cbcSMatt Macy  * they are in the defer, freed, and freeing trees).
2016eda14cbcSMatt Macy  */
2017eda14cbcSMatt Macy static void
2018eda14cbcSMatt Macy metaslab_aux_histograms_update(metaslab_t *msp)
2019eda14cbcSMatt Macy {
2020eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
2021eda14cbcSMatt Macy 	ASSERT(sm != NULL);
2022eda14cbcSMatt Macy 
2023eda14cbcSMatt Macy 	/*
2024eda14cbcSMatt Macy 	 * This is similar to the metaslab's space map histogram updates
2025eda14cbcSMatt Macy 	 * that take place in metaslab_sync(). The only difference is that
2026eda14cbcSMatt Macy 	 * we only care about segments that haven't made it into the
2027eda14cbcSMatt Macy 	 * ms_allocatable tree yet.
2028eda14cbcSMatt Macy 	 */
2029eda14cbcSMatt Macy 	if (msp->ms_loaded) {
2030eda14cbcSMatt Macy 		metaslab_aux_histograms_clear(msp);
2031eda14cbcSMatt Macy 
2032eda14cbcSMatt Macy 		metaslab_aux_histogram_add(msp->ms_synchist,
2033eda14cbcSMatt Macy 		    sm->sm_shift, msp->ms_freed);
2034eda14cbcSMatt Macy 
2035eda14cbcSMatt Macy 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2036eda14cbcSMatt Macy 			metaslab_aux_histogram_add(msp->ms_deferhist[t],
2037eda14cbcSMatt Macy 			    sm->sm_shift, msp->ms_defer[t]);
2038eda14cbcSMatt Macy 		}
2039eda14cbcSMatt Macy 	}
2040eda14cbcSMatt Macy 
2041eda14cbcSMatt Macy 	metaslab_aux_histogram_add(msp->ms_synchist,
2042eda14cbcSMatt Macy 	    sm->sm_shift, msp->ms_freeing);
2043eda14cbcSMatt Macy }
2044eda14cbcSMatt Macy 
2045eda14cbcSMatt Macy /*
2046eda14cbcSMatt Macy  * Called every time we are done syncing (writing to) the metaslab,
2047eda14cbcSMatt Macy  * i.e. at the end of each sync pass.
2048eda14cbcSMatt Macy  * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2049eda14cbcSMatt Macy  */
2050eda14cbcSMatt Macy static void
2051eda14cbcSMatt Macy metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2052eda14cbcSMatt Macy {
2053eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2054eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
2055eda14cbcSMatt Macy 
2056eda14cbcSMatt Macy 	if (sm == NULL) {
2057eda14cbcSMatt Macy 		/*
2058eda14cbcSMatt Macy 		 * We came here from metaslab_init() when creating/opening a
2059eda14cbcSMatt Macy 		 * pool, looking at a metaslab that hasn't had any allocations
2060eda14cbcSMatt Macy 		 * yet.
2061eda14cbcSMatt Macy 		 */
2062eda14cbcSMatt Macy 		return;
2063eda14cbcSMatt Macy 	}
2064eda14cbcSMatt Macy 
2065eda14cbcSMatt Macy 	/*
2066eda14cbcSMatt Macy 	 * This is similar to the actions that we take for the ms_freed
2067eda14cbcSMatt Macy 	 * and ms_defer trees in metaslab_sync_done().
2068eda14cbcSMatt Macy 	 */
2069eda14cbcSMatt Macy 	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2070eda14cbcSMatt Macy 	if (defer_allowed) {
2071da5137abSMartin Matuska 		memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2072eda14cbcSMatt Macy 		    sizeof (msp->ms_synchist));
2073eda14cbcSMatt Macy 	} else {
2074da5137abSMartin Matuska 		memset(msp->ms_deferhist[hist_index], 0,
2075eda14cbcSMatt Macy 		    sizeof (msp->ms_deferhist[hist_index]));
2076eda14cbcSMatt Macy 	}
2077da5137abSMartin Matuska 	memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2078eda14cbcSMatt Macy }
2079eda14cbcSMatt Macy 
2080eda14cbcSMatt Macy /*
2081eda14cbcSMatt Macy  * Ensure that the metaslab's weight and fragmentation are consistent
2082eda14cbcSMatt Macy  * with the contents of the histogram (either the range tree's histogram
2083eda14cbcSMatt Macy  * or the space map's depending whether the metaslab is loaded).
2084eda14cbcSMatt Macy  */
2085eda14cbcSMatt Macy static void
2086eda14cbcSMatt Macy metaslab_verify_weight_and_frag(metaslab_t *msp)
2087eda14cbcSMatt Macy {
2088eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2089eda14cbcSMatt Macy 
2090eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2091eda14cbcSMatt Macy 		return;
2092eda14cbcSMatt Macy 
2093eda14cbcSMatt Macy 	/*
2094eda14cbcSMatt Macy 	 * We can end up here from vdev_remove_complete(), in which case we
2095eda14cbcSMatt Macy 	 * cannot do these assertions because we hold spa config locks and
2096eda14cbcSMatt Macy 	 * thus we are not allowed to read from the DMU.
2097eda14cbcSMatt Macy 	 *
2098eda14cbcSMatt Macy 	 * We check if the metaslab group has been removed and if that's
2099eda14cbcSMatt Macy 	 * the case we return immediately as that would mean that we are
2100eda14cbcSMatt Macy 	 * here from the aforementioned code path.
2101eda14cbcSMatt Macy 	 */
2102eda14cbcSMatt Macy 	if (msp->ms_group == NULL)
2103eda14cbcSMatt Macy 		return;
2104eda14cbcSMatt Macy 
2105eda14cbcSMatt Macy 	/*
2106eda14cbcSMatt Macy 	 * Devices being removed always return a weight of 0 and leave
2107eda14cbcSMatt Macy 	 * fragmentation and ms_max_size as is - there is nothing for
2108eda14cbcSMatt Macy 	 * us to verify here.
2109eda14cbcSMatt Macy 	 */
2110eda14cbcSMatt Macy 	vdev_t *vd = msp->ms_group->mg_vd;
2111eda14cbcSMatt Macy 	if (vd->vdev_removing)
2112eda14cbcSMatt Macy 		return;
2113eda14cbcSMatt Macy 
2114eda14cbcSMatt Macy 	/*
2115eda14cbcSMatt Macy 	 * If the metaslab is dirty it probably means that we've done
2116eda14cbcSMatt Macy 	 * some allocations or frees that have changed our histograms
2117eda14cbcSMatt Macy 	 * and thus the weight.
2118eda14cbcSMatt Macy 	 */
2119eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++) {
2120eda14cbcSMatt Macy 		if (txg_list_member(&vd->vdev_ms_list, msp, t))
2121eda14cbcSMatt Macy 			return;
2122eda14cbcSMatt Macy 	}
2123eda14cbcSMatt Macy 
2124eda14cbcSMatt Macy 	/*
2125eda14cbcSMatt Macy 	 * This verification checks that our in-memory state is consistent
2126eda14cbcSMatt Macy 	 * with what's on disk. If the pool is read-only then there aren't
2127eda14cbcSMatt Macy 	 * any changes and we just have the initially-loaded state.
2128eda14cbcSMatt Macy 	 */
2129eda14cbcSMatt Macy 	if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2130eda14cbcSMatt Macy 		return;
2131eda14cbcSMatt Macy 
2132eda14cbcSMatt Macy 	/* some extra verification for in-core tree if you can */
2133eda14cbcSMatt Macy 	if (msp->ms_loaded) {
2134eda14cbcSMatt Macy 		range_tree_stat_verify(msp->ms_allocatable);
2135eda14cbcSMatt Macy 		VERIFY(space_map_histogram_verify(msp->ms_sm,
2136eda14cbcSMatt Macy 		    msp->ms_allocatable));
2137eda14cbcSMatt Macy 	}
2138eda14cbcSMatt Macy 
2139eda14cbcSMatt Macy 	uint64_t weight = msp->ms_weight;
2140eda14cbcSMatt Macy 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2141eda14cbcSMatt Macy 	boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2142eda14cbcSMatt Macy 	uint64_t frag = msp->ms_fragmentation;
2143eda14cbcSMatt Macy 	uint64_t max_segsize = msp->ms_max_size;
2144eda14cbcSMatt Macy 
2145eda14cbcSMatt Macy 	msp->ms_weight = 0;
2146eda14cbcSMatt Macy 	msp->ms_fragmentation = 0;
2147eda14cbcSMatt Macy 
2148eda14cbcSMatt Macy 	/*
2149eda14cbcSMatt Macy 	 * This function is used for verification purposes and thus should
2150eda14cbcSMatt Macy 	 * not introduce any side-effects/mutations on the system's state.
2151eda14cbcSMatt Macy 	 *
2152eda14cbcSMatt Macy 	 * Regardless of whether metaslab_weight() thinks this metaslab
2153eda14cbcSMatt Macy 	 * should be active or not, we want to ensure that the actual weight
2154eda14cbcSMatt Macy 	 * (and therefore the value of ms_weight) would be the same if it
2155eda14cbcSMatt Macy 	 * was to be recalculated at this point.
2156eda14cbcSMatt Macy 	 *
2157eda14cbcSMatt Macy 	 * In addition we set the nodirty flag so metaslab_weight() does
2158eda14cbcSMatt Macy 	 * not dirty the metaslab for future TXGs (e.g. when trying to
2159eda14cbcSMatt Macy 	 * force condensing to upgrade the metaslab spacemaps).
2160eda14cbcSMatt Macy 	 */
2161eda14cbcSMatt Macy 	msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2162eda14cbcSMatt Macy 
2163eda14cbcSMatt Macy 	VERIFY3U(max_segsize, ==, msp->ms_max_size);
2164eda14cbcSMatt Macy 
2165eda14cbcSMatt Macy 	/*
2166eda14cbcSMatt Macy 	 * If the weight type changed then there is no point in doing
2167eda14cbcSMatt Macy 	 * verification. Revert fields to their original values.
2168eda14cbcSMatt Macy 	 */
2169eda14cbcSMatt Macy 	if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2170eda14cbcSMatt Macy 	    (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2171eda14cbcSMatt Macy 		msp->ms_fragmentation = frag;
2172eda14cbcSMatt Macy 		msp->ms_weight = weight;
2173eda14cbcSMatt Macy 		return;
2174eda14cbcSMatt Macy 	}
2175eda14cbcSMatt Macy 
2176eda14cbcSMatt Macy 	VERIFY3U(msp->ms_fragmentation, ==, frag);
2177eda14cbcSMatt Macy 	VERIFY3U(msp->ms_weight, ==, weight);
2178eda14cbcSMatt Macy }
2179eda14cbcSMatt Macy 
2180eda14cbcSMatt Macy /*
2181eda14cbcSMatt Macy  * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2182eda14cbcSMatt Macy  * this class that was used longest ago, and attempt to unload it.  We don't
2183eda14cbcSMatt Macy  * want to spend too much time in this loop to prevent performance
2184eda14cbcSMatt Macy  * degradation, and we expect that most of the time this operation will
2185eda14cbcSMatt Macy  * succeed. Between that and the normal unloading processing during txg sync,
2186eda14cbcSMatt Macy  * we expect this to keep the metaslab memory usage under control.
2187eda14cbcSMatt Macy  */
2188eda14cbcSMatt Macy static void
2189eda14cbcSMatt Macy metaslab_potentially_evict(metaslab_class_t *mc)
2190eda14cbcSMatt Macy {
2191eda14cbcSMatt Macy #ifdef _KERNEL
2192eda14cbcSMatt Macy 	uint64_t allmem = arc_all_memory();
2193eda14cbcSMatt Macy 	uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2194eda14cbcSMatt Macy 	uint64_t size =	spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2195be181ee2SMartin Matuska 	uint_t tries = 0;
2196eda14cbcSMatt Macy 	for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
21973ff01b23SMartin Matuska 	    tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2198eda14cbcSMatt Macy 	    tries++) {
2199eda14cbcSMatt Macy 		unsigned int idx = multilist_get_random_index(
22003ff01b23SMartin Matuska 		    &mc->mc_metaslab_txg_list);
2201eda14cbcSMatt Macy 		multilist_sublist_t *mls =
22023ff01b23SMartin Matuska 		    multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx);
2203eda14cbcSMatt Macy 		metaslab_t *msp = multilist_sublist_head(mls);
2204eda14cbcSMatt Macy 		multilist_sublist_unlock(mls);
2205eda14cbcSMatt Macy 		while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2206eda14cbcSMatt Macy 		    inuse * size) {
2207eda14cbcSMatt Macy 			VERIFY3P(mls, ==, multilist_sublist_lock(
22083ff01b23SMartin Matuska 			    &mc->mc_metaslab_txg_list, idx));
2209eda14cbcSMatt Macy 			ASSERT3U(idx, ==,
22103ff01b23SMartin Matuska 			    metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2211eda14cbcSMatt Macy 
2212eda14cbcSMatt Macy 			if (!multilist_link_active(&msp->ms_class_txg_node)) {
2213eda14cbcSMatt Macy 				multilist_sublist_unlock(mls);
2214eda14cbcSMatt Macy 				break;
2215eda14cbcSMatt Macy 			}
2216eda14cbcSMatt Macy 			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2217eda14cbcSMatt Macy 			multilist_sublist_unlock(mls);
2218eda14cbcSMatt Macy 			/*
2219eda14cbcSMatt Macy 			 * If the metaslab is currently loading there are two
2220eda14cbcSMatt Macy 			 * cases. If it's the metaslab we're evicting, we
2221eda14cbcSMatt Macy 			 * can't continue on or we'll panic when we attempt to
2222eda14cbcSMatt Macy 			 * recursively lock the mutex. If it's another
2223eda14cbcSMatt Macy 			 * metaslab that's loading, it can be safely skipped,
2224eda14cbcSMatt Macy 			 * since we know it's very new and therefore not a
2225eda14cbcSMatt Macy 			 * good eviction candidate. We check later once the
2226eda14cbcSMatt Macy 			 * lock is held that the metaslab is fully loaded
2227eda14cbcSMatt Macy 			 * before actually unloading it.
2228eda14cbcSMatt Macy 			 */
2229eda14cbcSMatt Macy 			if (msp->ms_loading) {
2230eda14cbcSMatt Macy 				msp = next_msp;
2231eda14cbcSMatt Macy 				inuse =
2232eda14cbcSMatt Macy 				    spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2233eda14cbcSMatt Macy 				continue;
2234eda14cbcSMatt Macy 			}
2235eda14cbcSMatt Macy 			/*
2236eda14cbcSMatt Macy 			 * We can't unload metaslabs with no spacemap because
2237eda14cbcSMatt Macy 			 * they're not ready to be unloaded yet. We can't
2238eda14cbcSMatt Macy 			 * unload metaslabs with outstanding allocations
2239eda14cbcSMatt Macy 			 * because doing so could cause the metaslab's weight
2240eda14cbcSMatt Macy 			 * to decrease while it's unloaded, which violates an
2241eda14cbcSMatt Macy 			 * invariant that we use to prevent unnecessary
2242eda14cbcSMatt Macy 			 * loading. We also don't unload metaslabs that are
2243eda14cbcSMatt Macy 			 * currently active because they are high-weight
2244eda14cbcSMatt Macy 			 * metaslabs that are likely to be used in the near
2245eda14cbcSMatt Macy 			 * future.
2246eda14cbcSMatt Macy 			 */
2247eda14cbcSMatt Macy 			mutex_enter(&msp->ms_lock);
2248eda14cbcSMatt Macy 			if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2249eda14cbcSMatt Macy 			    msp->ms_allocating_total == 0) {
2250eda14cbcSMatt Macy 				metaslab_unload(msp);
2251eda14cbcSMatt Macy 			}
2252eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
2253eda14cbcSMatt Macy 			msp = next_msp;
2254eda14cbcSMatt Macy 			inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2255eda14cbcSMatt Macy 		}
2256eda14cbcSMatt Macy 	}
2257e92ffd9bSMartin Matuska #else
2258e92ffd9bSMartin Matuska 	(void) mc, (void) zfs_metaslab_mem_limit;
2259eda14cbcSMatt Macy #endif
2260eda14cbcSMatt Macy }
2261eda14cbcSMatt Macy 
2262eda14cbcSMatt Macy static int
2263eda14cbcSMatt Macy metaslab_load_impl(metaslab_t *msp)
2264eda14cbcSMatt Macy {
2265eda14cbcSMatt Macy 	int error = 0;
2266eda14cbcSMatt Macy 
2267eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2268eda14cbcSMatt Macy 	ASSERT(msp->ms_loading);
2269eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
2270eda14cbcSMatt Macy 
2271eda14cbcSMatt Macy 	/*
2272eda14cbcSMatt Macy 	 * We temporarily drop the lock to unblock other operations while we
2273eda14cbcSMatt Macy 	 * are reading the space map. Therefore, metaslab_sync() and
2274eda14cbcSMatt Macy 	 * metaslab_sync_done() can run at the same time as we do.
2275eda14cbcSMatt Macy 	 *
2276eda14cbcSMatt Macy 	 * If we are using the log space maps, metaslab_sync() can't write to
2277eda14cbcSMatt Macy 	 * the metaslab's space map while we are loading as we only write to
2278eda14cbcSMatt Macy 	 * it when we are flushing the metaslab, and that can't happen while
2279eda14cbcSMatt Macy 	 * we are loading it.
2280eda14cbcSMatt Macy 	 *
2281eda14cbcSMatt Macy 	 * If we are not using log space maps though, metaslab_sync() can
2282eda14cbcSMatt Macy 	 * append to the space map while we are loading. Therefore we load
2283eda14cbcSMatt Macy 	 * only entries that existed when we started the load. Additionally,
2284eda14cbcSMatt Macy 	 * metaslab_sync_done() has to wait for the load to complete because
2285eda14cbcSMatt Macy 	 * there are potential races like metaslab_load() loading parts of the
2286eda14cbcSMatt Macy 	 * space map that are currently being appended by metaslab_sync(). If
2287eda14cbcSMatt Macy 	 * we didn't, the ms_allocatable would have entries that
2288eda14cbcSMatt Macy 	 * metaslab_sync_done() would try to re-add later.
2289eda14cbcSMatt Macy 	 *
2290eda14cbcSMatt Macy 	 * That's why before dropping the lock we remember the synced length
2291eda14cbcSMatt Macy 	 * of the metaslab and read up to that point of the space map,
2292eda14cbcSMatt Macy 	 * ignoring entries appended by metaslab_sync() that happen after we
2293eda14cbcSMatt Macy 	 * drop the lock.
2294eda14cbcSMatt Macy 	 */
2295eda14cbcSMatt Macy 	uint64_t length = msp->ms_synced_length;
2296eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
2297eda14cbcSMatt Macy 
2298eda14cbcSMatt Macy 	hrtime_t load_start = gethrtime();
2299eda14cbcSMatt Macy 	metaslab_rt_arg_t *mrap;
2300eda14cbcSMatt Macy 	if (msp->ms_allocatable->rt_arg == NULL) {
2301eda14cbcSMatt Macy 		mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2302eda14cbcSMatt Macy 	} else {
2303eda14cbcSMatt Macy 		mrap = msp->ms_allocatable->rt_arg;
2304eda14cbcSMatt Macy 		msp->ms_allocatable->rt_ops = NULL;
2305eda14cbcSMatt Macy 		msp->ms_allocatable->rt_arg = NULL;
2306eda14cbcSMatt Macy 	}
2307eda14cbcSMatt Macy 	mrap->mra_bt = &msp->ms_allocatable_by_size;
2308eda14cbcSMatt Macy 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2309eda14cbcSMatt Macy 
2310eda14cbcSMatt Macy 	if (msp->ms_sm != NULL) {
2311eda14cbcSMatt Macy 		error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2312eda14cbcSMatt Macy 		    SM_FREE, length);
2313eda14cbcSMatt Macy 
2314eda14cbcSMatt Macy 		/* Now, populate the size-sorted tree. */
2315eda14cbcSMatt Macy 		metaslab_rt_create(msp->ms_allocatable, mrap);
2316eda14cbcSMatt Macy 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2317eda14cbcSMatt Macy 		msp->ms_allocatable->rt_arg = mrap;
2318eda14cbcSMatt Macy 
2319eda14cbcSMatt Macy 		struct mssa_arg arg = {0};
2320eda14cbcSMatt Macy 		arg.rt = msp->ms_allocatable;
2321eda14cbcSMatt Macy 		arg.mra = mrap;
2322eda14cbcSMatt Macy 		range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add,
2323eda14cbcSMatt Macy 		    &arg);
2324eda14cbcSMatt Macy 	} else {
2325eda14cbcSMatt Macy 		/*
2326eda14cbcSMatt Macy 		 * Add the size-sorted tree first, since we don't need to load
2327eda14cbcSMatt Macy 		 * the metaslab from the spacemap.
2328eda14cbcSMatt Macy 		 */
2329eda14cbcSMatt Macy 		metaslab_rt_create(msp->ms_allocatable, mrap);
2330eda14cbcSMatt Macy 		msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2331eda14cbcSMatt Macy 		msp->ms_allocatable->rt_arg = mrap;
2332eda14cbcSMatt Macy 		/*
2333eda14cbcSMatt Macy 		 * The space map has not been allocated yet, so treat
2334eda14cbcSMatt Macy 		 * all the space in the metaslab as free and add it to the
2335eda14cbcSMatt Macy 		 * ms_allocatable tree.
2336eda14cbcSMatt Macy 		 */
2337eda14cbcSMatt Macy 		range_tree_add(msp->ms_allocatable,
2338eda14cbcSMatt Macy 		    msp->ms_start, msp->ms_size);
2339eda14cbcSMatt Macy 
2340f9693befSMartin Matuska 		if (msp->ms_new) {
2341eda14cbcSMatt Macy 			/*
2342eda14cbcSMatt Macy 			 * If the ms_sm doesn't exist, this means that this
2343eda14cbcSMatt Macy 			 * metaslab hasn't gone through metaslab_sync() and
2344eda14cbcSMatt Macy 			 * thus has never been dirtied. So we shouldn't
2345eda14cbcSMatt Macy 			 * expect any unflushed allocs or frees from previous
2346eda14cbcSMatt Macy 			 * TXGs.
2347eda14cbcSMatt Macy 			 */
2348eda14cbcSMatt Macy 			ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
2349eda14cbcSMatt Macy 			ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
2350eda14cbcSMatt Macy 		}
2351eda14cbcSMatt Macy 	}
2352eda14cbcSMatt Macy 
2353eda14cbcSMatt Macy 	/*
2354eda14cbcSMatt Macy 	 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2355eda14cbcSMatt Macy 	 * changing the ms_sm (or log_sm) and the metaslab's range trees
2356eda14cbcSMatt Macy 	 * while we are about to use them and populate the ms_allocatable.
2357eda14cbcSMatt Macy 	 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2358eda14cbcSMatt Macy 	 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2359eda14cbcSMatt Macy 	 */
2360eda14cbcSMatt Macy 	mutex_enter(&msp->ms_sync_lock);
2361eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
2362eda14cbcSMatt Macy 
2363eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
2364eda14cbcSMatt Macy 	ASSERT(!msp->ms_flushing);
2365eda14cbcSMatt Macy 
2366eda14cbcSMatt Macy 	if (error != 0) {
2367eda14cbcSMatt Macy 		mutex_exit(&msp->ms_sync_lock);
2368eda14cbcSMatt Macy 		return (error);
2369eda14cbcSMatt Macy 	}
2370eda14cbcSMatt Macy 
2371eda14cbcSMatt Macy 	ASSERT3P(msp->ms_group, !=, NULL);
2372eda14cbcSMatt Macy 	msp->ms_loaded = B_TRUE;
2373eda14cbcSMatt Macy 
2374eda14cbcSMatt Macy 	/*
2375eda14cbcSMatt Macy 	 * Apply all the unflushed changes to ms_allocatable right
2376eda14cbcSMatt Macy 	 * away so any manipulations we do below have a clear view
2377eda14cbcSMatt Macy 	 * of what is allocated and what is free.
2378eda14cbcSMatt Macy 	 */
2379eda14cbcSMatt Macy 	range_tree_walk(msp->ms_unflushed_allocs,
2380eda14cbcSMatt Macy 	    range_tree_remove, msp->ms_allocatable);
2381eda14cbcSMatt Macy 	range_tree_walk(msp->ms_unflushed_frees,
2382eda14cbcSMatt Macy 	    range_tree_add, msp->ms_allocatable);
2383eda14cbcSMatt Macy 
2384eda14cbcSMatt Macy 	ASSERT3P(msp->ms_group, !=, NULL);
2385eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2386eda14cbcSMatt Macy 	if (spa_syncing_log_sm(spa) != NULL) {
2387eda14cbcSMatt Macy 		ASSERT(spa_feature_is_enabled(spa,
2388eda14cbcSMatt Macy 		    SPA_FEATURE_LOG_SPACEMAP));
2389eda14cbcSMatt Macy 
2390eda14cbcSMatt Macy 		/*
2391eda14cbcSMatt Macy 		 * If we use a log space map we add all the segments
2392eda14cbcSMatt Macy 		 * that are in ms_unflushed_frees so they are available
2393eda14cbcSMatt Macy 		 * for allocation.
2394eda14cbcSMatt Macy 		 *
2395eda14cbcSMatt Macy 		 * ms_allocatable needs to contain all free segments
2396eda14cbcSMatt Macy 		 * that are ready for allocations (thus not segments
2397eda14cbcSMatt Macy 		 * from ms_freeing, ms_freed, and the ms_defer trees).
2398eda14cbcSMatt Macy 		 * But if we grab the lock in this code path at a sync
2399eda14cbcSMatt Macy 		 * pass later that 1, then it also contains the
2400eda14cbcSMatt Macy 		 * segments of ms_freed (they were added to it earlier
2401eda14cbcSMatt Macy 		 * in this path through ms_unflushed_frees). So we
2402eda14cbcSMatt Macy 		 * need to remove all the segments that exist in
2403eda14cbcSMatt Macy 		 * ms_freed from ms_allocatable as they will be added
2404eda14cbcSMatt Macy 		 * later in metaslab_sync_done().
2405eda14cbcSMatt Macy 		 *
2406eda14cbcSMatt Macy 		 * When there's no log space map, the ms_allocatable
2407eda14cbcSMatt Macy 		 * correctly doesn't contain any segments that exist
2408eda14cbcSMatt Macy 		 * in ms_freed [see ms_synced_length].
2409eda14cbcSMatt Macy 		 */
2410eda14cbcSMatt Macy 		range_tree_walk(msp->ms_freed,
2411eda14cbcSMatt Macy 		    range_tree_remove, msp->ms_allocatable);
2412eda14cbcSMatt Macy 	}
2413eda14cbcSMatt Macy 
2414eda14cbcSMatt Macy 	/*
2415eda14cbcSMatt Macy 	 * If we are not using the log space map, ms_allocatable
2416eda14cbcSMatt Macy 	 * contains the segments that exist in the ms_defer trees
2417eda14cbcSMatt Macy 	 * [see ms_synced_length]. Thus we need to remove them
2418eda14cbcSMatt Macy 	 * from ms_allocatable as they will be added again in
2419eda14cbcSMatt Macy 	 * metaslab_sync_done().
2420eda14cbcSMatt Macy 	 *
2421eda14cbcSMatt Macy 	 * If we are using the log space map, ms_allocatable still
2422eda14cbcSMatt Macy 	 * contains the segments that exist in the ms_defer trees.
2423eda14cbcSMatt Macy 	 * Not because it read them through the ms_sm though. But
2424eda14cbcSMatt Macy 	 * because these segments are part of ms_unflushed_frees
2425eda14cbcSMatt Macy 	 * whose segments we add to ms_allocatable earlier in this
2426eda14cbcSMatt Macy 	 * code path.
2427eda14cbcSMatt Macy 	 */
2428eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2429eda14cbcSMatt Macy 		range_tree_walk(msp->ms_defer[t],
2430eda14cbcSMatt Macy 		    range_tree_remove, msp->ms_allocatable);
2431eda14cbcSMatt Macy 	}
2432eda14cbcSMatt Macy 
2433eda14cbcSMatt Macy 	/*
2434eda14cbcSMatt Macy 	 * Call metaslab_recalculate_weight_and_sort() now that the
2435eda14cbcSMatt Macy 	 * metaslab is loaded so we get the metaslab's real weight.
2436eda14cbcSMatt Macy 	 *
2437eda14cbcSMatt Macy 	 * Unless this metaslab was created with older software and
2438eda14cbcSMatt Macy 	 * has not yet been converted to use segment-based weight, we
2439eda14cbcSMatt Macy 	 * expect the new weight to be better or equal to the weight
2440eda14cbcSMatt Macy 	 * that the metaslab had while it was not loaded. This is
2441eda14cbcSMatt Macy 	 * because the old weight does not take into account the
2442eda14cbcSMatt Macy 	 * consolidation of adjacent segments between TXGs. [see
2443eda14cbcSMatt Macy 	 * comment for ms_synchist and ms_deferhist[] for more info]
2444eda14cbcSMatt Macy 	 */
2445eda14cbcSMatt Macy 	uint64_t weight = msp->ms_weight;
2446eda14cbcSMatt Macy 	uint64_t max_size = msp->ms_max_size;
2447eda14cbcSMatt Macy 	metaslab_recalculate_weight_and_sort(msp);
2448eda14cbcSMatt Macy 	if (!WEIGHT_IS_SPACEBASED(weight))
2449eda14cbcSMatt Macy 		ASSERT3U(weight, <=, msp->ms_weight);
2450eda14cbcSMatt Macy 	msp->ms_max_size = metaslab_largest_allocatable(msp);
2451eda14cbcSMatt Macy 	ASSERT3U(max_size, <=, msp->ms_max_size);
2452eda14cbcSMatt Macy 	hrtime_t load_end = gethrtime();
2453eda14cbcSMatt Macy 	msp->ms_load_time = load_end;
2454eda14cbcSMatt Macy 	zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2455eda14cbcSMatt Macy 	    "ms_id %llu, smp_length %llu, "
2456eda14cbcSMatt Macy 	    "unflushed_allocs %llu, unflushed_frees %llu, "
2457eda14cbcSMatt Macy 	    "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2458eda14cbcSMatt Macy 	    "loading_time %lld ms, ms_max_size %llu, "
2459eda14cbcSMatt Macy 	    "max size error %lld, "
2460eda14cbcSMatt Macy 	    "old_weight %llx, new_weight %llx",
246133b8c039SMartin Matuska 	    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
246233b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
246333b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_id,
246433b8c039SMartin Matuska 	    (u_longlong_t)space_map_length(msp->ms_sm),
246533b8c039SMartin Matuska 	    (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
246633b8c039SMartin Matuska 	    (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
246733b8c039SMartin Matuska 	    (u_longlong_t)range_tree_space(msp->ms_freed),
246833b8c039SMartin Matuska 	    (u_longlong_t)range_tree_space(msp->ms_defer[0]),
246933b8c039SMartin Matuska 	    (u_longlong_t)range_tree_space(msp->ms_defer[1]),
2470eda14cbcSMatt Macy 	    (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2471eda14cbcSMatt Macy 	    (longlong_t)((load_end - load_start) / 1000000),
247233b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_max_size,
247333b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_max_size - max_size,
247433b8c039SMartin Matuska 	    (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2475eda14cbcSMatt Macy 
2476eda14cbcSMatt Macy 	metaslab_verify_space(msp, spa_syncing_txg(spa));
2477eda14cbcSMatt Macy 	mutex_exit(&msp->ms_sync_lock);
2478eda14cbcSMatt Macy 	return (0);
2479eda14cbcSMatt Macy }
2480eda14cbcSMatt Macy 
2481eda14cbcSMatt Macy int
2482eda14cbcSMatt Macy metaslab_load(metaslab_t *msp)
2483eda14cbcSMatt Macy {
2484eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2485eda14cbcSMatt Macy 
2486eda14cbcSMatt Macy 	/*
2487eda14cbcSMatt Macy 	 * There may be another thread loading the same metaslab, if that's
2488eda14cbcSMatt Macy 	 * the case just wait until the other thread is done and return.
2489eda14cbcSMatt Macy 	 */
2490eda14cbcSMatt Macy 	metaslab_load_wait(msp);
2491eda14cbcSMatt Macy 	if (msp->ms_loaded)
2492eda14cbcSMatt Macy 		return (0);
2493eda14cbcSMatt Macy 	VERIFY(!msp->ms_loading);
2494eda14cbcSMatt Macy 	ASSERT(!msp->ms_condensing);
2495eda14cbcSMatt Macy 
2496eda14cbcSMatt Macy 	/*
2497eda14cbcSMatt Macy 	 * We set the loading flag BEFORE potentially dropping the lock to
2498eda14cbcSMatt Macy 	 * wait for an ongoing flush (see ms_flushing below). This way other
2499eda14cbcSMatt Macy 	 * threads know that there is already a thread that is loading this
2500eda14cbcSMatt Macy 	 * metaslab.
2501eda14cbcSMatt Macy 	 */
2502eda14cbcSMatt Macy 	msp->ms_loading = B_TRUE;
2503eda14cbcSMatt Macy 
2504eda14cbcSMatt Macy 	/*
2505eda14cbcSMatt Macy 	 * Wait for any in-progress flushing to finish as we drop the ms_lock
2506eda14cbcSMatt Macy 	 * both here (during space_map_load()) and in metaslab_flush() (when
2507eda14cbcSMatt Macy 	 * we flush our changes to the ms_sm).
2508eda14cbcSMatt Macy 	 */
2509eda14cbcSMatt Macy 	if (msp->ms_flushing)
2510eda14cbcSMatt Macy 		metaslab_flush_wait(msp);
2511eda14cbcSMatt Macy 
2512eda14cbcSMatt Macy 	/*
2513eda14cbcSMatt Macy 	 * In the possibility that we were waiting for the metaslab to be
2514eda14cbcSMatt Macy 	 * flushed (where we temporarily dropped the ms_lock), ensure that
2515eda14cbcSMatt Macy 	 * no one else loaded the metaslab somehow.
2516eda14cbcSMatt Macy 	 */
2517eda14cbcSMatt Macy 	ASSERT(!msp->ms_loaded);
2518eda14cbcSMatt Macy 
2519eda14cbcSMatt Macy 	/*
2520eda14cbcSMatt Macy 	 * If we're loading a metaslab in the normal class, consider evicting
2521eda14cbcSMatt Macy 	 * another one to keep our memory usage under the limit defined by the
2522eda14cbcSMatt Macy 	 * zfs_metaslab_mem_limit tunable.
2523eda14cbcSMatt Macy 	 */
2524eda14cbcSMatt Macy 	if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2525eda14cbcSMatt Macy 	    msp->ms_group->mg_class) {
2526eda14cbcSMatt Macy 		metaslab_potentially_evict(msp->ms_group->mg_class);
2527eda14cbcSMatt Macy 	}
2528eda14cbcSMatt Macy 
2529eda14cbcSMatt Macy 	int error = metaslab_load_impl(msp);
2530eda14cbcSMatt Macy 
2531eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2532eda14cbcSMatt Macy 	msp->ms_loading = B_FALSE;
2533eda14cbcSMatt Macy 	cv_broadcast(&msp->ms_load_cv);
2534eda14cbcSMatt Macy 
2535eda14cbcSMatt Macy 	return (error);
2536eda14cbcSMatt Macy }
2537eda14cbcSMatt Macy 
2538eda14cbcSMatt Macy void
2539eda14cbcSMatt Macy metaslab_unload(metaslab_t *msp)
2540eda14cbcSMatt Macy {
2541eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2542eda14cbcSMatt Macy 
2543eda14cbcSMatt Macy 	/*
2544eda14cbcSMatt Macy 	 * This can happen if a metaslab is selected for eviction (in
2545eda14cbcSMatt Macy 	 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2546eda14cbcSMatt Macy 	 * metaslab_class_evict_old).
2547eda14cbcSMatt Macy 	 */
2548eda14cbcSMatt Macy 	if (!msp->ms_loaded)
2549eda14cbcSMatt Macy 		return;
2550eda14cbcSMatt Macy 
2551eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2552eda14cbcSMatt Macy 	msp->ms_loaded = B_FALSE;
2553eda14cbcSMatt Macy 	msp->ms_unload_time = gethrtime();
2554eda14cbcSMatt Macy 
2555eda14cbcSMatt Macy 	msp->ms_activation_weight = 0;
2556eda14cbcSMatt Macy 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2557eda14cbcSMatt Macy 
2558eda14cbcSMatt Macy 	if (msp->ms_group != NULL) {
2559eda14cbcSMatt Macy 		metaslab_class_t *mc = msp->ms_group->mg_class;
2560eda14cbcSMatt Macy 		multilist_sublist_t *mls =
25613ff01b23SMartin Matuska 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2562eda14cbcSMatt Macy 		if (multilist_link_active(&msp->ms_class_txg_node))
2563eda14cbcSMatt Macy 			multilist_sublist_remove(mls, msp);
2564eda14cbcSMatt Macy 		multilist_sublist_unlock(mls);
2565eda14cbcSMatt Macy 
2566eda14cbcSMatt Macy 		spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2567eda14cbcSMatt Macy 		zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2568eda14cbcSMatt Macy 		    "ms_id %llu, weight %llx, "
2569eda14cbcSMatt Macy 		    "selected txg %llu (%llu ms ago), alloc_txg %llu, "
2570eda14cbcSMatt Macy 		    "loaded %llu ms ago, max_size %llu",
257133b8c039SMartin Matuska 		    (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
257233b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
257333b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_id,
257433b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_weight,
257533b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_selected_txg,
257633b8c039SMartin Matuska 		    (u_longlong_t)(msp->ms_unload_time -
257733b8c039SMartin Matuska 		    msp->ms_selected_time) / 1000 / 1000,
257833b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_alloc_txg,
257933b8c039SMartin Matuska 		    (u_longlong_t)(msp->ms_unload_time -
258033b8c039SMartin Matuska 		    msp->ms_load_time) / 1000 / 1000,
258133b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_max_size);
2582eda14cbcSMatt Macy 	}
2583eda14cbcSMatt Macy 
2584eda14cbcSMatt Macy 	/*
2585eda14cbcSMatt Macy 	 * We explicitly recalculate the metaslab's weight based on its space
2586eda14cbcSMatt Macy 	 * map (as it is now not loaded). We want unload metaslabs to always
2587eda14cbcSMatt Macy 	 * have their weights calculated from the space map histograms, while
2588eda14cbcSMatt Macy 	 * loaded ones have it calculated from their in-core range tree
2589eda14cbcSMatt Macy 	 * [see metaslab_load()]. This way, the weight reflects the information
2590eda14cbcSMatt Macy 	 * available in-core, whether it is loaded or not.
2591eda14cbcSMatt Macy 	 *
2592eda14cbcSMatt Macy 	 * If ms_group == NULL means that we came here from metaslab_fini(),
2593eda14cbcSMatt Macy 	 * at which point it doesn't make sense for us to do the recalculation
2594eda14cbcSMatt Macy 	 * and the sorting.
2595eda14cbcSMatt Macy 	 */
2596eda14cbcSMatt Macy 	if (msp->ms_group != NULL)
2597eda14cbcSMatt Macy 		metaslab_recalculate_weight_and_sort(msp);
2598eda14cbcSMatt Macy }
2599eda14cbcSMatt Macy 
2600eda14cbcSMatt Macy /*
2601eda14cbcSMatt Macy  * We want to optimize the memory use of the per-metaslab range
2602eda14cbcSMatt Macy  * trees. To do this, we store the segments in the range trees in
2603eda14cbcSMatt Macy  * units of sectors, zero-indexing from the start of the metaslab. If
2604eda14cbcSMatt Macy  * the vdev_ms_shift - the vdev_ashift is less than 32, we can store
2605eda14cbcSMatt Macy  * the ranges using two uint32_ts, rather than two uint64_ts.
2606eda14cbcSMatt Macy  */
2607eda14cbcSMatt Macy range_seg_type_t
2608eda14cbcSMatt Macy metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2609eda14cbcSMatt Macy     uint64_t *start, uint64_t *shift)
2610eda14cbcSMatt Macy {
2611eda14cbcSMatt Macy 	if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2612eda14cbcSMatt Macy 	    !zfs_metaslab_force_large_segs) {
2613eda14cbcSMatt Macy 		*shift = vdev->vdev_ashift;
2614eda14cbcSMatt Macy 		*start = msp->ms_start;
2615eda14cbcSMatt Macy 		return (RANGE_SEG32);
2616eda14cbcSMatt Macy 	} else {
2617eda14cbcSMatt Macy 		*shift = 0;
2618eda14cbcSMatt Macy 		*start = 0;
2619eda14cbcSMatt Macy 		return (RANGE_SEG64);
2620eda14cbcSMatt Macy 	}
2621eda14cbcSMatt Macy }
2622eda14cbcSMatt Macy 
2623eda14cbcSMatt Macy void
2624eda14cbcSMatt Macy metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2625eda14cbcSMatt Macy {
2626eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2627eda14cbcSMatt Macy 	metaslab_class_t *mc = msp->ms_group->mg_class;
2628eda14cbcSMatt Macy 	multilist_sublist_t *mls =
26293ff01b23SMartin Matuska 	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2630eda14cbcSMatt Macy 	if (multilist_link_active(&msp->ms_class_txg_node))
2631eda14cbcSMatt Macy 		multilist_sublist_remove(mls, msp);
2632eda14cbcSMatt Macy 	msp->ms_selected_txg = txg;
2633eda14cbcSMatt Macy 	msp->ms_selected_time = gethrtime();
2634eda14cbcSMatt Macy 	multilist_sublist_insert_tail(mls, msp);
2635eda14cbcSMatt Macy 	multilist_sublist_unlock(mls);
2636eda14cbcSMatt Macy }
2637eda14cbcSMatt Macy 
2638eda14cbcSMatt Macy void
2639eda14cbcSMatt Macy metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2640eda14cbcSMatt Macy     int64_t defer_delta, int64_t space_delta)
2641eda14cbcSMatt Macy {
2642eda14cbcSMatt Macy 	vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2643eda14cbcSMatt Macy 
2644eda14cbcSMatt Macy 	ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2645eda14cbcSMatt Macy 	ASSERT(vd->vdev_ms_count != 0);
2646eda14cbcSMatt Macy 
2647eda14cbcSMatt Macy 	metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2648eda14cbcSMatt Macy 	    vdev_deflated_space(vd, space_delta));
2649eda14cbcSMatt Macy }
2650eda14cbcSMatt Macy 
2651eda14cbcSMatt Macy int
2652eda14cbcSMatt Macy metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2653eda14cbcSMatt Macy     uint64_t txg, metaslab_t **msp)
2654eda14cbcSMatt Macy {
2655eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
2656eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
2657eda14cbcSMatt Macy 	objset_t *mos = spa->spa_meta_objset;
2658eda14cbcSMatt Macy 	metaslab_t *ms;
2659eda14cbcSMatt Macy 	int error;
2660eda14cbcSMatt Macy 
2661eda14cbcSMatt Macy 	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2662eda14cbcSMatt Macy 	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2663eda14cbcSMatt Macy 	mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2664eda14cbcSMatt Macy 	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2665eda14cbcSMatt Macy 	cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2666eda14cbcSMatt Macy 	multilist_link_init(&ms->ms_class_txg_node);
2667eda14cbcSMatt Macy 
2668eda14cbcSMatt Macy 	ms->ms_id = id;
2669eda14cbcSMatt Macy 	ms->ms_start = id << vd->vdev_ms_shift;
2670eda14cbcSMatt Macy 	ms->ms_size = 1ULL << vd->vdev_ms_shift;
2671eda14cbcSMatt Macy 	ms->ms_allocator = -1;
2672eda14cbcSMatt Macy 	ms->ms_new = B_TRUE;
2673eda14cbcSMatt Macy 
26747877fdebSMatt Macy 	vdev_ops_t *ops = vd->vdev_ops;
26757877fdebSMatt Macy 	if (ops->vdev_op_metaslab_init != NULL)
26767877fdebSMatt Macy 		ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
26777877fdebSMatt Macy 
2678eda14cbcSMatt Macy 	/*
2679eda14cbcSMatt Macy 	 * We only open space map objects that already exist. All others
268081b22a98SMartin Matuska 	 * will be opened when we finally allocate an object for it. For
268181b22a98SMartin Matuska 	 * readonly pools there is no need to open the space map object.
2682eda14cbcSMatt Macy 	 *
2683eda14cbcSMatt Macy 	 * Note:
2684eda14cbcSMatt Macy 	 * When called from vdev_expand(), we can't call into the DMU as
2685eda14cbcSMatt Macy 	 * we are holding the spa_config_lock as a writer and we would
2686eda14cbcSMatt Macy 	 * deadlock [see relevant comment in vdev_metaslab_init()]. in
2687eda14cbcSMatt Macy 	 * that case, the object parameter is zero though, so we won't
2688eda14cbcSMatt Macy 	 * call into the DMU.
2689eda14cbcSMatt Macy 	 */
269081b22a98SMartin Matuska 	if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
269181b22a98SMartin Matuska 	    !spa->spa_read_spacemaps)) {
2692eda14cbcSMatt Macy 		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2693eda14cbcSMatt Macy 		    ms->ms_size, vd->vdev_ashift);
2694eda14cbcSMatt Macy 
2695eda14cbcSMatt Macy 		if (error != 0) {
2696eda14cbcSMatt Macy 			kmem_free(ms, sizeof (metaslab_t));
2697eda14cbcSMatt Macy 			return (error);
2698eda14cbcSMatt Macy 		}
2699eda14cbcSMatt Macy 
2700eda14cbcSMatt Macy 		ASSERT(ms->ms_sm != NULL);
2701eda14cbcSMatt Macy 		ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2702eda14cbcSMatt Macy 	}
2703eda14cbcSMatt Macy 
2704eda14cbcSMatt Macy 	uint64_t shift, start;
2705f9693befSMartin Matuska 	range_seg_type_t type =
2706f9693befSMartin Matuska 	    metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2707eda14cbcSMatt Macy 
2708eda14cbcSMatt Macy 	ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift);
2709f9693befSMartin Matuska 	for (int t = 0; t < TXG_SIZE; t++) {
2710f9693befSMartin Matuska 		ms->ms_allocating[t] = range_tree_create(NULL, type,
2711f9693befSMartin Matuska 		    NULL, start, shift);
2712f9693befSMartin Matuska 	}
2713f9693befSMartin Matuska 	ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift);
2714f9693befSMartin Matuska 	ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift);
2715f9693befSMartin Matuska 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2716f9693befSMartin Matuska 		ms->ms_defer[t] = range_tree_create(NULL, type, NULL,
2717f9693befSMartin Matuska 		    start, shift);
2718f9693befSMartin Matuska 	}
2719f9693befSMartin Matuska 	ms->ms_checkpointing =
2720f9693befSMartin Matuska 	    range_tree_create(NULL, type, NULL, start, shift);
2721f9693befSMartin Matuska 	ms->ms_unflushed_allocs =
2722f9693befSMartin Matuska 	    range_tree_create(NULL, type, NULL, start, shift);
2723f9693befSMartin Matuska 
2724f9693befSMartin Matuska 	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2725f9693befSMartin Matuska 	mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2726f9693befSMartin Matuska 	mrap->mra_floor_shift = metaslab_by_size_min_shift;
2727f9693befSMartin Matuska 	ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops,
2728f9693befSMartin Matuska 	    type, mrap, start, shift);
2729eda14cbcSMatt Macy 
2730eda14cbcSMatt Macy 	ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift);
2731eda14cbcSMatt Macy 
2732eda14cbcSMatt Macy 	metaslab_group_add(mg, ms);
2733eda14cbcSMatt Macy 	metaslab_set_fragmentation(ms, B_FALSE);
2734eda14cbcSMatt Macy 
2735eda14cbcSMatt Macy 	/*
2736eda14cbcSMatt Macy 	 * If we're opening an existing pool (txg == 0) or creating
2737eda14cbcSMatt Macy 	 * a new one (txg == TXG_INITIAL), all space is available now.
2738eda14cbcSMatt Macy 	 * If we're adding space to an existing pool, the new space
2739eda14cbcSMatt Macy 	 * does not become available until after this txg has synced.
2740eda14cbcSMatt Macy 	 * The metaslab's weight will also be initialized when we sync
2741eda14cbcSMatt Macy 	 * out this txg. This ensures that we don't attempt to allocate
2742eda14cbcSMatt Macy 	 * from it before we have initialized it completely.
2743eda14cbcSMatt Macy 	 */
2744eda14cbcSMatt Macy 	if (txg <= TXG_INITIAL) {
2745eda14cbcSMatt Macy 		metaslab_sync_done(ms, 0);
2746eda14cbcSMatt Macy 		metaslab_space_update(vd, mg->mg_class,
2747eda14cbcSMatt Macy 		    metaslab_allocated_space(ms), 0, 0);
2748eda14cbcSMatt Macy 	}
2749eda14cbcSMatt Macy 
2750eda14cbcSMatt Macy 	if (txg != 0) {
2751eda14cbcSMatt Macy 		vdev_dirty(vd, 0, NULL, txg);
2752eda14cbcSMatt Macy 		vdev_dirty(vd, VDD_METASLAB, ms, txg);
2753eda14cbcSMatt Macy 	}
2754eda14cbcSMatt Macy 
2755eda14cbcSMatt Macy 	*msp = ms;
2756eda14cbcSMatt Macy 
2757eda14cbcSMatt Macy 	return (0);
2758eda14cbcSMatt Macy }
2759eda14cbcSMatt Macy 
2760eda14cbcSMatt Macy static void
2761eda14cbcSMatt Macy metaslab_fini_flush_data(metaslab_t *msp)
2762eda14cbcSMatt Macy {
2763eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2764eda14cbcSMatt Macy 
2765eda14cbcSMatt Macy 	if (metaslab_unflushed_txg(msp) == 0) {
2766eda14cbcSMatt Macy 		ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2767eda14cbcSMatt Macy 		    ==, NULL);
2768eda14cbcSMatt Macy 		return;
2769eda14cbcSMatt Macy 	}
2770eda14cbcSMatt Macy 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2771eda14cbcSMatt Macy 
2772eda14cbcSMatt Macy 	mutex_enter(&spa->spa_flushed_ms_lock);
2773eda14cbcSMatt Macy 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2774eda14cbcSMatt Macy 	mutex_exit(&spa->spa_flushed_ms_lock);
2775eda14cbcSMatt Macy 
2776eda14cbcSMatt Macy 	spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2777716fd348SMartin Matuska 	spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2778716fd348SMartin Matuska 	    metaslab_unflushed_dirty(msp));
2779eda14cbcSMatt Macy }
2780eda14cbcSMatt Macy 
2781eda14cbcSMatt Macy uint64_t
2782eda14cbcSMatt Macy metaslab_unflushed_changes_memused(metaslab_t *ms)
2783eda14cbcSMatt Macy {
2784eda14cbcSMatt Macy 	return ((range_tree_numsegs(ms->ms_unflushed_allocs) +
2785eda14cbcSMatt Macy 	    range_tree_numsegs(ms->ms_unflushed_frees)) *
2786eda14cbcSMatt Macy 	    ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2787eda14cbcSMatt Macy }
2788eda14cbcSMatt Macy 
2789eda14cbcSMatt Macy void
2790eda14cbcSMatt Macy metaslab_fini(metaslab_t *msp)
2791eda14cbcSMatt Macy {
2792eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
2793eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
2794eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
2795eda14cbcSMatt Macy 
2796eda14cbcSMatt Macy 	metaslab_fini_flush_data(msp);
2797eda14cbcSMatt Macy 
2798eda14cbcSMatt Macy 	metaslab_group_remove(mg, msp);
2799eda14cbcSMatt Macy 
2800eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
2801eda14cbcSMatt Macy 	VERIFY(msp->ms_group == NULL);
2802f9693befSMartin Matuska 
2803184c1b94SMartin Matuska 	/*
2804f9693befSMartin Matuska 	 * If this metaslab hasn't been through metaslab_sync_done() yet its
2805184c1b94SMartin Matuska 	 * space hasn't been accounted for in its vdev and doesn't need to be
2806184c1b94SMartin Matuska 	 * subtracted.
2807184c1b94SMartin Matuska 	 */
2808f9693befSMartin Matuska 	if (!msp->ms_new) {
2809eda14cbcSMatt Macy 		metaslab_space_update(vd, mg->mg_class,
2810eda14cbcSMatt Macy 		    -metaslab_allocated_space(msp), 0, -msp->ms_size);
2811eda14cbcSMatt Macy 
2812184c1b94SMartin Matuska 	}
2813eda14cbcSMatt Macy 	space_map_close(msp->ms_sm);
2814eda14cbcSMatt Macy 	msp->ms_sm = NULL;
2815eda14cbcSMatt Macy 
2816eda14cbcSMatt Macy 	metaslab_unload(msp);
2817184c1b94SMartin Matuska 
2818eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_allocatable);
2819eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_freeing);
2820eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_freed);
2821eda14cbcSMatt Macy 
2822eda14cbcSMatt Macy 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2823eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp));
2824eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused -=
2825eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp);
2826eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2827eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_unflushed_allocs);
2828184c1b94SMartin Matuska 	range_tree_destroy(msp->ms_checkpointing);
2829eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2830eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_unflushed_frees);
2831eda14cbcSMatt Macy 
2832eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++) {
2833eda14cbcSMatt Macy 		range_tree_destroy(msp->ms_allocating[t]);
2834eda14cbcSMatt Macy 	}
2835eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2836eda14cbcSMatt Macy 		range_tree_destroy(msp->ms_defer[t]);
2837eda14cbcSMatt Macy 	}
2838eda14cbcSMatt Macy 	ASSERT0(msp->ms_deferspace);
2839eda14cbcSMatt Macy 
2840eda14cbcSMatt Macy 	for (int t = 0; t < TXG_SIZE; t++)
2841eda14cbcSMatt Macy 		ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2842eda14cbcSMatt Macy 
2843eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_trim, NULL, NULL);
2844eda14cbcSMatt Macy 	range_tree_destroy(msp->ms_trim);
2845eda14cbcSMatt Macy 
2846eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
2847eda14cbcSMatt Macy 	cv_destroy(&msp->ms_load_cv);
2848eda14cbcSMatt Macy 	cv_destroy(&msp->ms_flush_cv);
2849eda14cbcSMatt Macy 	mutex_destroy(&msp->ms_lock);
2850eda14cbcSMatt Macy 	mutex_destroy(&msp->ms_sync_lock);
2851eda14cbcSMatt Macy 	ASSERT3U(msp->ms_allocator, ==, -1);
2852eda14cbcSMatt Macy 
2853eda14cbcSMatt Macy 	kmem_free(msp, sizeof (metaslab_t));
2854eda14cbcSMatt Macy }
2855eda14cbcSMatt Macy 
2856eda14cbcSMatt Macy #define	FRAGMENTATION_TABLE_SIZE	17
2857eda14cbcSMatt Macy 
2858eda14cbcSMatt Macy /*
2859eda14cbcSMatt Macy  * This table defines a segment size based fragmentation metric that will
2860eda14cbcSMatt Macy  * allow each metaslab to derive its own fragmentation value. This is done
2861eda14cbcSMatt Macy  * by calculating the space in each bucket of the spacemap histogram and
2862eda14cbcSMatt Macy  * multiplying that by the fragmentation metric in this table. Doing
2863eda14cbcSMatt Macy  * this for all buckets and dividing it by the total amount of free
2864eda14cbcSMatt Macy  * space in this metaslab (i.e. the total free space in all buckets) gives
2865eda14cbcSMatt Macy  * us the fragmentation metric. This means that a high fragmentation metric
2866eda14cbcSMatt Macy  * equates to most of the free space being comprised of small segments.
2867eda14cbcSMatt Macy  * Conversely, if the metric is low, then most of the free space is in
2868eda14cbcSMatt Macy  * large segments. A 10% change in fragmentation equates to approximately
2869eda14cbcSMatt Macy  * double the number of segments.
2870eda14cbcSMatt Macy  *
2871eda14cbcSMatt Macy  * This table defines 0% fragmented space using 16MB segments. Testing has
2872eda14cbcSMatt Macy  * shown that segments that are greater than or equal to 16MB do not suffer
2873eda14cbcSMatt Macy  * from drastic performance problems. Using this value, we derive the rest
2874eda14cbcSMatt Macy  * of the table. Since the fragmentation value is never stored on disk, it
2875eda14cbcSMatt Macy  * is possible to change these calculations in the future.
2876eda14cbcSMatt Macy  */
2877e92ffd9bSMartin Matuska static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
2878eda14cbcSMatt Macy 	100,	/* 512B	*/
2879eda14cbcSMatt Macy 	100,	/* 1K	*/
2880eda14cbcSMatt Macy 	98,	/* 2K	*/
2881eda14cbcSMatt Macy 	95,	/* 4K	*/
2882eda14cbcSMatt Macy 	90,	/* 8K	*/
2883eda14cbcSMatt Macy 	80,	/* 16K	*/
2884eda14cbcSMatt Macy 	70,	/* 32K	*/
2885eda14cbcSMatt Macy 	60,	/* 64K	*/
2886eda14cbcSMatt Macy 	50,	/* 128K	*/
2887eda14cbcSMatt Macy 	40,	/* 256K	*/
2888eda14cbcSMatt Macy 	30,	/* 512K	*/
2889eda14cbcSMatt Macy 	20,	/* 1M	*/
2890eda14cbcSMatt Macy 	15,	/* 2M	*/
2891eda14cbcSMatt Macy 	10,	/* 4M	*/
2892eda14cbcSMatt Macy 	5,	/* 8M	*/
2893eda14cbcSMatt Macy 	0	/* 16M	*/
2894eda14cbcSMatt Macy };
2895eda14cbcSMatt Macy 
2896eda14cbcSMatt Macy /*
2897eda14cbcSMatt Macy  * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2898eda14cbcSMatt Macy  * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2899eda14cbcSMatt Macy  * been upgraded and does not support this metric. Otherwise, the return
2900eda14cbcSMatt Macy  * value should be in the range [0, 100].
2901eda14cbcSMatt Macy  */
2902eda14cbcSMatt Macy static void
2903eda14cbcSMatt Macy metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2904eda14cbcSMatt Macy {
2905eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2906eda14cbcSMatt Macy 	uint64_t fragmentation = 0;
2907eda14cbcSMatt Macy 	uint64_t total = 0;
2908eda14cbcSMatt Macy 	boolean_t feature_enabled = spa_feature_is_enabled(spa,
2909eda14cbcSMatt Macy 	    SPA_FEATURE_SPACEMAP_HISTOGRAM);
2910eda14cbcSMatt Macy 
2911eda14cbcSMatt Macy 	if (!feature_enabled) {
2912eda14cbcSMatt Macy 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2913eda14cbcSMatt Macy 		return;
2914eda14cbcSMatt Macy 	}
2915eda14cbcSMatt Macy 
2916eda14cbcSMatt Macy 	/*
2917eda14cbcSMatt Macy 	 * A null space map means that the entire metaslab is free
2918eda14cbcSMatt Macy 	 * and thus is not fragmented.
2919eda14cbcSMatt Macy 	 */
2920eda14cbcSMatt Macy 	if (msp->ms_sm == NULL) {
2921eda14cbcSMatt Macy 		msp->ms_fragmentation = 0;
2922eda14cbcSMatt Macy 		return;
2923eda14cbcSMatt Macy 	}
2924eda14cbcSMatt Macy 
2925eda14cbcSMatt Macy 	/*
2926eda14cbcSMatt Macy 	 * If this metaslab's space map has not been upgraded, flag it
2927eda14cbcSMatt Macy 	 * so that we upgrade next time we encounter it.
2928eda14cbcSMatt Macy 	 */
2929eda14cbcSMatt Macy 	if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2930eda14cbcSMatt Macy 		uint64_t txg = spa_syncing_txg(spa);
2931eda14cbcSMatt Macy 		vdev_t *vd = msp->ms_group->mg_vd;
2932eda14cbcSMatt Macy 
2933eda14cbcSMatt Macy 		/*
2934eda14cbcSMatt Macy 		 * If we've reached the final dirty txg, then we must
2935eda14cbcSMatt Macy 		 * be shutting down the pool. We don't want to dirty
2936eda14cbcSMatt Macy 		 * any data past this point so skip setting the condense
2937eda14cbcSMatt Macy 		 * flag. We can retry this action the next time the pool
2938eda14cbcSMatt Macy 		 * is imported. We also skip marking this metaslab for
2939eda14cbcSMatt Macy 		 * condensing if the caller has explicitly set nodirty.
2940eda14cbcSMatt Macy 		 */
2941eda14cbcSMatt Macy 		if (!nodirty &&
2942eda14cbcSMatt Macy 		    spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
2943eda14cbcSMatt Macy 			msp->ms_condense_wanted = B_TRUE;
2944eda14cbcSMatt Macy 			vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
2945eda14cbcSMatt Macy 			zfs_dbgmsg("txg %llu, requesting force condense: "
294633b8c039SMartin Matuska 			    "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
294733b8c039SMartin Matuska 			    (u_longlong_t)msp->ms_id,
294833b8c039SMartin Matuska 			    (u_longlong_t)vd->vdev_id);
2949eda14cbcSMatt Macy 		}
2950eda14cbcSMatt Macy 		msp->ms_fragmentation = ZFS_FRAG_INVALID;
2951eda14cbcSMatt Macy 		return;
2952eda14cbcSMatt Macy 	}
2953eda14cbcSMatt Macy 
2954eda14cbcSMatt Macy 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
2955eda14cbcSMatt Macy 		uint64_t space = 0;
2956eda14cbcSMatt Macy 		uint8_t shift = msp->ms_sm->sm_shift;
2957eda14cbcSMatt Macy 
2958eda14cbcSMatt Macy 		int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
2959eda14cbcSMatt Macy 		    FRAGMENTATION_TABLE_SIZE - 1);
2960eda14cbcSMatt Macy 
2961eda14cbcSMatt Macy 		if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
2962eda14cbcSMatt Macy 			continue;
2963eda14cbcSMatt Macy 
2964eda14cbcSMatt Macy 		space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
2965eda14cbcSMatt Macy 		total += space;
2966eda14cbcSMatt Macy 
2967eda14cbcSMatt Macy 		ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
2968eda14cbcSMatt Macy 		fragmentation += space * zfs_frag_table[idx];
2969eda14cbcSMatt Macy 	}
2970eda14cbcSMatt Macy 
2971eda14cbcSMatt Macy 	if (total > 0)
2972eda14cbcSMatt Macy 		fragmentation /= total;
2973eda14cbcSMatt Macy 	ASSERT3U(fragmentation, <=, 100);
2974eda14cbcSMatt Macy 
2975eda14cbcSMatt Macy 	msp->ms_fragmentation = fragmentation;
2976eda14cbcSMatt Macy }
2977eda14cbcSMatt Macy 
2978eda14cbcSMatt Macy /*
2979eda14cbcSMatt Macy  * Compute a weight -- a selection preference value -- for the given metaslab.
2980eda14cbcSMatt Macy  * This is based on the amount of free space, the level of fragmentation,
2981eda14cbcSMatt Macy  * the LBA range, and whether the metaslab is loaded.
2982eda14cbcSMatt Macy  */
2983eda14cbcSMatt Macy static uint64_t
2984eda14cbcSMatt Macy metaslab_space_weight(metaslab_t *msp)
2985eda14cbcSMatt Macy {
2986eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
2987eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
2988eda14cbcSMatt Macy 	uint64_t weight, space;
2989eda14cbcSMatt Macy 
2990eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
2991eda14cbcSMatt Macy 
2992eda14cbcSMatt Macy 	/*
2993eda14cbcSMatt Macy 	 * The baseline weight is the metaslab's free space.
2994eda14cbcSMatt Macy 	 */
2995eda14cbcSMatt Macy 	space = msp->ms_size - metaslab_allocated_space(msp);
2996eda14cbcSMatt Macy 
2997eda14cbcSMatt Macy 	if (metaslab_fragmentation_factor_enabled &&
2998eda14cbcSMatt Macy 	    msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2999eda14cbcSMatt Macy 		/*
3000eda14cbcSMatt Macy 		 * Use the fragmentation information to inversely scale
3001eda14cbcSMatt Macy 		 * down the baseline weight. We need to ensure that we
3002eda14cbcSMatt Macy 		 * don't exclude this metaslab completely when it's 100%
3003eda14cbcSMatt Macy 		 * fragmented. To avoid this we reduce the fragmented value
3004eda14cbcSMatt Macy 		 * by 1.
3005eda14cbcSMatt Macy 		 */
3006eda14cbcSMatt Macy 		space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
3007eda14cbcSMatt Macy 
3008eda14cbcSMatt Macy 		/*
3009eda14cbcSMatt Macy 		 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
3010eda14cbcSMatt Macy 		 * this metaslab again. The fragmentation metric may have
3011eda14cbcSMatt Macy 		 * decreased the space to something smaller than
3012eda14cbcSMatt Macy 		 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
3013eda14cbcSMatt Macy 		 * so that we can consume any remaining space.
3014eda14cbcSMatt Macy 		 */
3015eda14cbcSMatt Macy 		if (space > 0 && space < SPA_MINBLOCKSIZE)
3016eda14cbcSMatt Macy 			space = SPA_MINBLOCKSIZE;
3017eda14cbcSMatt Macy 	}
3018eda14cbcSMatt Macy 	weight = space;
3019eda14cbcSMatt Macy 
3020eda14cbcSMatt Macy 	/*
3021eda14cbcSMatt Macy 	 * Modern disks have uniform bit density and constant angular velocity.
3022eda14cbcSMatt Macy 	 * Therefore, the outer recording zones are faster (higher bandwidth)
3023eda14cbcSMatt Macy 	 * than the inner zones by the ratio of outer to inner track diameter,
3024eda14cbcSMatt Macy 	 * which is typically around 2:1.  We account for this by assigning
3025eda14cbcSMatt Macy 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3026eda14cbcSMatt Macy 	 * In effect, this means that we'll select the metaslab with the most
3027eda14cbcSMatt Macy 	 * free bandwidth rather than simply the one with the most free space.
3028eda14cbcSMatt Macy 	 */
3029eda14cbcSMatt Macy 	if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3030eda14cbcSMatt Macy 		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3031eda14cbcSMatt Macy 		ASSERT(weight >= space && weight <= 2 * space);
3032eda14cbcSMatt Macy 	}
3033eda14cbcSMatt Macy 
3034eda14cbcSMatt Macy 	/*
3035eda14cbcSMatt Macy 	 * If this metaslab is one we're actively using, adjust its
3036eda14cbcSMatt Macy 	 * weight to make it preferable to any inactive metaslab so
3037eda14cbcSMatt Macy 	 * we'll polish it off. If the fragmentation on this metaslab
3038eda14cbcSMatt Macy 	 * has exceed our threshold, then don't mark it active.
3039eda14cbcSMatt Macy 	 */
3040eda14cbcSMatt Macy 	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3041eda14cbcSMatt Macy 	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3042eda14cbcSMatt Macy 		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3043eda14cbcSMatt Macy 	}
3044eda14cbcSMatt Macy 
3045eda14cbcSMatt Macy 	WEIGHT_SET_SPACEBASED(weight);
3046eda14cbcSMatt Macy 	return (weight);
3047eda14cbcSMatt Macy }
3048eda14cbcSMatt Macy 
3049eda14cbcSMatt Macy /*
3050eda14cbcSMatt Macy  * Return the weight of the specified metaslab, according to the segment-based
3051eda14cbcSMatt Macy  * weighting algorithm. The metaslab must be loaded. This function can
3052eda14cbcSMatt Macy  * be called within a sync pass since it relies only on the metaslab's
3053eda14cbcSMatt Macy  * range tree which is always accurate when the metaslab is loaded.
3054eda14cbcSMatt Macy  */
3055eda14cbcSMatt Macy static uint64_t
3056eda14cbcSMatt Macy metaslab_weight_from_range_tree(metaslab_t *msp)
3057eda14cbcSMatt Macy {
3058eda14cbcSMatt Macy 	uint64_t weight = 0;
3059eda14cbcSMatt Macy 	uint32_t segments = 0;
3060eda14cbcSMatt Macy 
3061eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3062eda14cbcSMatt Macy 
3063eda14cbcSMatt Macy 	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3064eda14cbcSMatt Macy 	    i--) {
3065eda14cbcSMatt Macy 		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3066eda14cbcSMatt Macy 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3067eda14cbcSMatt Macy 
3068eda14cbcSMatt Macy 		segments <<= 1;
3069eda14cbcSMatt Macy 		segments += msp->ms_allocatable->rt_histogram[i];
3070eda14cbcSMatt Macy 
3071eda14cbcSMatt Macy 		/*
3072eda14cbcSMatt Macy 		 * The range tree provides more precision than the space map
3073eda14cbcSMatt Macy 		 * and must be downgraded so that all values fit within the
3074eda14cbcSMatt Macy 		 * space map's histogram. This allows us to compare loaded
3075eda14cbcSMatt Macy 		 * vs. unloaded metaslabs to determine which metaslab is
3076eda14cbcSMatt Macy 		 * considered "best".
3077eda14cbcSMatt Macy 		 */
3078eda14cbcSMatt Macy 		if (i > max_idx)
3079eda14cbcSMatt Macy 			continue;
3080eda14cbcSMatt Macy 
3081eda14cbcSMatt Macy 		if (segments != 0) {
3082eda14cbcSMatt Macy 			WEIGHT_SET_COUNT(weight, segments);
3083eda14cbcSMatt Macy 			WEIGHT_SET_INDEX(weight, i);
3084eda14cbcSMatt Macy 			WEIGHT_SET_ACTIVE(weight, 0);
3085eda14cbcSMatt Macy 			break;
3086eda14cbcSMatt Macy 		}
3087eda14cbcSMatt Macy 	}
3088eda14cbcSMatt Macy 	return (weight);
3089eda14cbcSMatt Macy }
3090eda14cbcSMatt Macy 
3091eda14cbcSMatt Macy /*
3092eda14cbcSMatt Macy  * Calculate the weight based on the on-disk histogram. Should be applied
3093eda14cbcSMatt Macy  * only to unloaded metaslabs  (i.e no incoming allocations) in-order to
3094eda14cbcSMatt Macy  * give results consistent with the on-disk state
3095eda14cbcSMatt Macy  */
3096eda14cbcSMatt Macy static uint64_t
3097eda14cbcSMatt Macy metaslab_weight_from_spacemap(metaslab_t *msp)
3098eda14cbcSMatt Macy {
3099eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3100eda14cbcSMatt Macy 	ASSERT(!msp->ms_loaded);
3101eda14cbcSMatt Macy 	ASSERT(sm != NULL);
3102eda14cbcSMatt Macy 	ASSERT3U(space_map_object(sm), !=, 0);
3103eda14cbcSMatt Macy 	ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3104eda14cbcSMatt Macy 
3105eda14cbcSMatt Macy 	/*
3106eda14cbcSMatt Macy 	 * Create a joint histogram from all the segments that have made
3107eda14cbcSMatt Macy 	 * it to the metaslab's space map histogram, that are not yet
3108eda14cbcSMatt Macy 	 * available for allocation because they are still in the freeing
3109eda14cbcSMatt Macy 	 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3110eda14cbcSMatt Macy 	 * these segments from the space map's histogram to get a more
3111eda14cbcSMatt Macy 	 * accurate weight.
3112eda14cbcSMatt Macy 	 */
3113eda14cbcSMatt Macy 	uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3114eda14cbcSMatt Macy 	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3115eda14cbcSMatt Macy 		deferspace_histogram[i] += msp->ms_synchist[i];
3116eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3117eda14cbcSMatt Macy 		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3118eda14cbcSMatt Macy 			deferspace_histogram[i] += msp->ms_deferhist[t][i];
3119eda14cbcSMatt Macy 		}
3120eda14cbcSMatt Macy 	}
3121eda14cbcSMatt Macy 
3122eda14cbcSMatt Macy 	uint64_t weight = 0;
3123eda14cbcSMatt Macy 	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3124eda14cbcSMatt Macy 		ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3125eda14cbcSMatt Macy 		    deferspace_histogram[i]);
3126eda14cbcSMatt Macy 		uint64_t count =
3127eda14cbcSMatt Macy 		    sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3128eda14cbcSMatt Macy 		if (count != 0) {
3129eda14cbcSMatt Macy 			WEIGHT_SET_COUNT(weight, count);
3130eda14cbcSMatt Macy 			WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3131eda14cbcSMatt Macy 			WEIGHT_SET_ACTIVE(weight, 0);
3132eda14cbcSMatt Macy 			break;
3133eda14cbcSMatt Macy 		}
3134eda14cbcSMatt Macy 	}
3135eda14cbcSMatt Macy 	return (weight);
3136eda14cbcSMatt Macy }
3137eda14cbcSMatt Macy 
3138eda14cbcSMatt Macy /*
3139eda14cbcSMatt Macy  * Compute a segment-based weight for the specified metaslab. The weight
3140eda14cbcSMatt Macy  * is determined by highest bucket in the histogram. The information
3141eda14cbcSMatt Macy  * for the highest bucket is encoded into the weight value.
3142eda14cbcSMatt Macy  */
3143eda14cbcSMatt Macy static uint64_t
3144eda14cbcSMatt Macy metaslab_segment_weight(metaslab_t *msp)
3145eda14cbcSMatt Macy {
3146eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
3147eda14cbcSMatt Macy 	uint64_t weight = 0;
3148eda14cbcSMatt Macy 	uint8_t shift = mg->mg_vd->vdev_ashift;
3149eda14cbcSMatt Macy 
3150eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3151eda14cbcSMatt Macy 
3152eda14cbcSMatt Macy 	/*
3153eda14cbcSMatt Macy 	 * The metaslab is completely free.
3154eda14cbcSMatt Macy 	 */
3155eda14cbcSMatt Macy 	if (metaslab_allocated_space(msp) == 0) {
3156eda14cbcSMatt Macy 		int idx = highbit64(msp->ms_size) - 1;
3157eda14cbcSMatt Macy 		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3158eda14cbcSMatt Macy 
3159eda14cbcSMatt Macy 		if (idx < max_idx) {
3160eda14cbcSMatt Macy 			WEIGHT_SET_COUNT(weight, 1ULL);
3161eda14cbcSMatt Macy 			WEIGHT_SET_INDEX(weight, idx);
3162eda14cbcSMatt Macy 		} else {
3163eda14cbcSMatt Macy 			WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3164eda14cbcSMatt Macy 			WEIGHT_SET_INDEX(weight, max_idx);
3165eda14cbcSMatt Macy 		}
3166eda14cbcSMatt Macy 		WEIGHT_SET_ACTIVE(weight, 0);
3167eda14cbcSMatt Macy 		ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3168eda14cbcSMatt Macy 		return (weight);
3169eda14cbcSMatt Macy 	}
3170eda14cbcSMatt Macy 
3171eda14cbcSMatt Macy 	ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3172eda14cbcSMatt Macy 
3173eda14cbcSMatt Macy 	/*
3174eda14cbcSMatt Macy 	 * If the metaslab is fully allocated then just make the weight 0.
3175eda14cbcSMatt Macy 	 */
3176eda14cbcSMatt Macy 	if (metaslab_allocated_space(msp) == msp->ms_size)
3177eda14cbcSMatt Macy 		return (0);
3178eda14cbcSMatt Macy 	/*
3179eda14cbcSMatt Macy 	 * If the metaslab is already loaded, then use the range tree to
3180eda14cbcSMatt Macy 	 * determine the weight. Otherwise, we rely on the space map information
3181eda14cbcSMatt Macy 	 * to generate the weight.
3182eda14cbcSMatt Macy 	 */
3183eda14cbcSMatt Macy 	if (msp->ms_loaded) {
3184eda14cbcSMatt Macy 		weight = metaslab_weight_from_range_tree(msp);
3185eda14cbcSMatt Macy 	} else {
3186eda14cbcSMatt Macy 		weight = metaslab_weight_from_spacemap(msp);
3187eda14cbcSMatt Macy 	}
3188eda14cbcSMatt Macy 
3189eda14cbcSMatt Macy 	/*
3190eda14cbcSMatt Macy 	 * If the metaslab was active the last time we calculated its weight
3191eda14cbcSMatt Macy 	 * then keep it active. We want to consume the entire region that
3192eda14cbcSMatt Macy 	 * is associated with this weight.
3193eda14cbcSMatt Macy 	 */
3194eda14cbcSMatt Macy 	if (msp->ms_activation_weight != 0 && weight != 0)
3195eda14cbcSMatt Macy 		WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3196eda14cbcSMatt Macy 	return (weight);
3197eda14cbcSMatt Macy }
3198eda14cbcSMatt Macy 
3199eda14cbcSMatt Macy /*
3200eda14cbcSMatt Macy  * Determine if we should attempt to allocate from this metaslab. If the
3201eda14cbcSMatt Macy  * metaslab is loaded, then we can determine if the desired allocation
3202eda14cbcSMatt Macy  * can be satisfied by looking at the size of the maximum free segment
3203eda14cbcSMatt Macy  * on that metaslab. Otherwise, we make our decision based on the metaslab's
3204eda14cbcSMatt Macy  * weight. For segment-based weighting we can determine the maximum
3205eda14cbcSMatt Macy  * allocation based on the index encoded in its value. For space-based
3206eda14cbcSMatt Macy  * weights we rely on the entire weight (excluding the weight-type bit).
3207eda14cbcSMatt Macy  */
3208eda14cbcSMatt Macy static boolean_t
3209eda14cbcSMatt Macy metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3210eda14cbcSMatt Macy {
3211eda14cbcSMatt Macy 	/*
3212eda14cbcSMatt Macy 	 * If the metaslab is loaded, ms_max_size is definitive and we can use
3213eda14cbcSMatt Macy 	 * the fast check. If it's not, the ms_max_size is a lower bound (once
3214eda14cbcSMatt Macy 	 * set), and we should use the fast check as long as we're not in
3215eda14cbcSMatt Macy 	 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3216eda14cbcSMatt Macy 	 * seconds since the metaslab was unloaded.
3217eda14cbcSMatt Macy 	 */
3218eda14cbcSMatt Macy 	if (msp->ms_loaded ||
3219eda14cbcSMatt Macy 	    (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3220eda14cbcSMatt Macy 	    msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3221eda14cbcSMatt Macy 		return (msp->ms_max_size >= asize);
3222eda14cbcSMatt Macy 
3223eda14cbcSMatt Macy 	boolean_t should_allocate;
3224eda14cbcSMatt Macy 	if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3225eda14cbcSMatt Macy 		/*
3226eda14cbcSMatt Macy 		 * The metaslab segment weight indicates segments in the
3227eda14cbcSMatt Macy 		 * range [2^i, 2^(i+1)), where i is the index in the weight.
3228eda14cbcSMatt Macy 		 * Since the asize might be in the middle of the range, we
3229eda14cbcSMatt Macy 		 * should attempt the allocation if asize < 2^(i+1).
3230eda14cbcSMatt Macy 		 */
3231eda14cbcSMatt Macy 		should_allocate = (asize <
3232eda14cbcSMatt Macy 		    1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3233eda14cbcSMatt Macy 	} else {
3234eda14cbcSMatt Macy 		should_allocate = (asize <=
3235eda14cbcSMatt Macy 		    (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3236eda14cbcSMatt Macy 	}
3237eda14cbcSMatt Macy 
3238eda14cbcSMatt Macy 	return (should_allocate);
3239eda14cbcSMatt Macy }
3240eda14cbcSMatt Macy 
3241eda14cbcSMatt Macy static uint64_t
3242eda14cbcSMatt Macy metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3243eda14cbcSMatt Macy {
3244eda14cbcSMatt Macy 	vdev_t *vd = msp->ms_group->mg_vd;
3245eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
3246eda14cbcSMatt Macy 	uint64_t weight;
3247eda14cbcSMatt Macy 
3248eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3249eda14cbcSMatt Macy 
3250eda14cbcSMatt Macy 	metaslab_set_fragmentation(msp, nodirty);
3251eda14cbcSMatt Macy 
3252eda14cbcSMatt Macy 	/*
3253eda14cbcSMatt Macy 	 * Update the maximum size. If the metaslab is loaded, this will
3254eda14cbcSMatt Macy 	 * ensure that we get an accurate maximum size if newly freed space
3255eda14cbcSMatt Macy 	 * has been added back into the free tree. If the metaslab is
3256eda14cbcSMatt Macy 	 * unloaded, we check if there's a larger free segment in the
3257eda14cbcSMatt Macy 	 * unflushed frees. This is a lower bound on the largest allocatable
3258eda14cbcSMatt Macy 	 * segment size. Coalescing of adjacent entries may reveal larger
3259eda14cbcSMatt Macy 	 * allocatable segments, but we aren't aware of those until loading
3260eda14cbcSMatt Macy 	 * the space map into a range tree.
3261eda14cbcSMatt Macy 	 */
3262eda14cbcSMatt Macy 	if (msp->ms_loaded) {
3263eda14cbcSMatt Macy 		msp->ms_max_size = metaslab_largest_allocatable(msp);
3264eda14cbcSMatt Macy 	} else {
3265eda14cbcSMatt Macy 		msp->ms_max_size = MAX(msp->ms_max_size,
3266eda14cbcSMatt Macy 		    metaslab_largest_unflushed_free(msp));
3267eda14cbcSMatt Macy 	}
3268eda14cbcSMatt Macy 
3269eda14cbcSMatt Macy 	/*
3270eda14cbcSMatt Macy 	 * Segment-based weighting requires space map histogram support.
3271eda14cbcSMatt Macy 	 */
3272eda14cbcSMatt Macy 	if (zfs_metaslab_segment_weight_enabled &&
3273eda14cbcSMatt Macy 	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3274eda14cbcSMatt Macy 	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3275eda14cbcSMatt Macy 	    sizeof (space_map_phys_t))) {
3276eda14cbcSMatt Macy 		weight = metaslab_segment_weight(msp);
3277eda14cbcSMatt Macy 	} else {
3278eda14cbcSMatt Macy 		weight = metaslab_space_weight(msp);
3279eda14cbcSMatt Macy 	}
3280eda14cbcSMatt Macy 	return (weight);
3281eda14cbcSMatt Macy }
3282eda14cbcSMatt Macy 
3283eda14cbcSMatt Macy void
3284eda14cbcSMatt Macy metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3285eda14cbcSMatt Macy {
3286eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3287eda14cbcSMatt Macy 
3288eda14cbcSMatt Macy 	/* note: we preserve the mask (e.g. indication of primary, etc..) */
3289eda14cbcSMatt Macy 	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3290eda14cbcSMatt Macy 	metaslab_group_sort(msp->ms_group, msp,
3291eda14cbcSMatt Macy 	    metaslab_weight(msp, B_FALSE) | was_active);
3292eda14cbcSMatt Macy }
3293eda14cbcSMatt Macy 
3294eda14cbcSMatt Macy static int
3295eda14cbcSMatt Macy metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3296eda14cbcSMatt Macy     int allocator, uint64_t activation_weight)
3297eda14cbcSMatt Macy {
3298eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3299eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3300eda14cbcSMatt Macy 
3301eda14cbcSMatt Macy 	/*
3302eda14cbcSMatt Macy 	 * If we're activating for the claim code, we don't want to actually
3303eda14cbcSMatt Macy 	 * set the metaslab up for a specific allocator.
3304eda14cbcSMatt Macy 	 */
3305eda14cbcSMatt Macy 	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3306eda14cbcSMatt Macy 		ASSERT0(msp->ms_activation_weight);
3307eda14cbcSMatt Macy 		msp->ms_activation_weight = msp->ms_weight;
3308eda14cbcSMatt Macy 		metaslab_group_sort(mg, msp, msp->ms_weight |
3309eda14cbcSMatt Macy 		    activation_weight);
3310eda14cbcSMatt Macy 		return (0);
3311eda14cbcSMatt Macy 	}
3312eda14cbcSMatt Macy 
3313eda14cbcSMatt Macy 	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3314eda14cbcSMatt Macy 	    &mga->mga_primary : &mga->mga_secondary);
3315eda14cbcSMatt Macy 
3316eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
3317eda14cbcSMatt Macy 	if (*mspp != NULL) {
3318eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
3319eda14cbcSMatt Macy 		return (EEXIST);
3320eda14cbcSMatt Macy 	}
3321eda14cbcSMatt Macy 
3322eda14cbcSMatt Macy 	*mspp = msp;
3323eda14cbcSMatt Macy 	ASSERT3S(msp->ms_allocator, ==, -1);
3324eda14cbcSMatt Macy 	msp->ms_allocator = allocator;
3325eda14cbcSMatt Macy 	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3326eda14cbcSMatt Macy 
3327eda14cbcSMatt Macy 	ASSERT0(msp->ms_activation_weight);
3328eda14cbcSMatt Macy 	msp->ms_activation_weight = msp->ms_weight;
3329eda14cbcSMatt Macy 	metaslab_group_sort_impl(mg, msp,
3330eda14cbcSMatt Macy 	    msp->ms_weight | activation_weight);
3331eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
3332eda14cbcSMatt Macy 
3333eda14cbcSMatt Macy 	return (0);
3334eda14cbcSMatt Macy }
3335eda14cbcSMatt Macy 
3336eda14cbcSMatt Macy static int
3337eda14cbcSMatt Macy metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3338eda14cbcSMatt Macy {
3339eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3340eda14cbcSMatt Macy 
3341eda14cbcSMatt Macy 	/*
3342eda14cbcSMatt Macy 	 * The current metaslab is already activated for us so there
3343eda14cbcSMatt Macy 	 * is nothing to do. Already activated though, doesn't mean
3344eda14cbcSMatt Macy 	 * that this metaslab is activated for our allocator nor our
3345eda14cbcSMatt Macy 	 * requested activation weight. The metaslab could have started
3346eda14cbcSMatt Macy 	 * as an active one for our allocator but changed allocators
3347eda14cbcSMatt Macy 	 * while we were waiting to grab its ms_lock or we stole it
3348eda14cbcSMatt Macy 	 * [see find_valid_metaslab()]. This means that there is a
3349eda14cbcSMatt Macy 	 * possibility of passivating a metaslab of another allocator
3350eda14cbcSMatt Macy 	 * or from a different activation mask, from this thread.
3351eda14cbcSMatt Macy 	 */
3352eda14cbcSMatt Macy 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3353eda14cbcSMatt Macy 		ASSERT(msp->ms_loaded);
3354eda14cbcSMatt Macy 		return (0);
3355eda14cbcSMatt Macy 	}
3356eda14cbcSMatt Macy 
3357eda14cbcSMatt Macy 	int error = metaslab_load(msp);
3358eda14cbcSMatt Macy 	if (error != 0) {
3359eda14cbcSMatt Macy 		metaslab_group_sort(msp->ms_group, msp, 0);
3360eda14cbcSMatt Macy 		return (error);
3361eda14cbcSMatt Macy 	}
3362eda14cbcSMatt Macy 
3363eda14cbcSMatt Macy 	/*
3364eda14cbcSMatt Macy 	 * When entering metaslab_load() we may have dropped the
3365eda14cbcSMatt Macy 	 * ms_lock because we were loading this metaslab, or we
3366eda14cbcSMatt Macy 	 * were waiting for another thread to load it for us. In
3367eda14cbcSMatt Macy 	 * that scenario, we recheck the weight of the metaslab
3368eda14cbcSMatt Macy 	 * to see if it was activated by another thread.
3369eda14cbcSMatt Macy 	 *
3370eda14cbcSMatt Macy 	 * If the metaslab was activated for another allocator or
3371eda14cbcSMatt Macy 	 * it was activated with a different activation weight (e.g.
3372eda14cbcSMatt Macy 	 * we wanted to make it a primary but it was activated as
3373eda14cbcSMatt Macy 	 * secondary) we return error (EBUSY).
3374eda14cbcSMatt Macy 	 *
3375eda14cbcSMatt Macy 	 * If the metaslab was activated for the same allocator
3376eda14cbcSMatt Macy 	 * and requested activation mask, skip activating it.
3377eda14cbcSMatt Macy 	 */
3378eda14cbcSMatt Macy 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3379eda14cbcSMatt Macy 		if (msp->ms_allocator != allocator)
3380eda14cbcSMatt Macy 			return (EBUSY);
3381eda14cbcSMatt Macy 
3382eda14cbcSMatt Macy 		if ((msp->ms_weight & activation_weight) == 0)
3383eda14cbcSMatt Macy 			return (SET_ERROR(EBUSY));
3384eda14cbcSMatt Macy 
3385eda14cbcSMatt Macy 		EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3386eda14cbcSMatt Macy 		    msp->ms_primary);
3387eda14cbcSMatt Macy 		return (0);
3388eda14cbcSMatt Macy 	}
3389eda14cbcSMatt Macy 
3390eda14cbcSMatt Macy 	/*
3391eda14cbcSMatt Macy 	 * If the metaslab has literally 0 space, it will have weight 0. In
3392eda14cbcSMatt Macy 	 * that case, don't bother activating it. This can happen if the
3393eda14cbcSMatt Macy 	 * metaslab had space during find_valid_metaslab, but another thread
3394eda14cbcSMatt Macy 	 * loaded it and used all that space while we were waiting to grab the
3395eda14cbcSMatt Macy 	 * lock.
3396eda14cbcSMatt Macy 	 */
3397eda14cbcSMatt Macy 	if (msp->ms_weight == 0) {
3398eda14cbcSMatt Macy 		ASSERT0(range_tree_space(msp->ms_allocatable));
3399eda14cbcSMatt Macy 		return (SET_ERROR(ENOSPC));
3400eda14cbcSMatt Macy 	}
3401eda14cbcSMatt Macy 
3402eda14cbcSMatt Macy 	if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3403eda14cbcSMatt Macy 	    allocator, activation_weight)) != 0) {
3404eda14cbcSMatt Macy 		return (error);
3405eda14cbcSMatt Macy 	}
3406eda14cbcSMatt Macy 
3407eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3408eda14cbcSMatt Macy 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3409eda14cbcSMatt Macy 
3410eda14cbcSMatt Macy 	return (0);
3411eda14cbcSMatt Macy }
3412eda14cbcSMatt Macy 
3413eda14cbcSMatt Macy static void
3414eda14cbcSMatt Macy metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3415eda14cbcSMatt Macy     uint64_t weight)
3416eda14cbcSMatt Macy {
3417eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3418eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3419eda14cbcSMatt Macy 
3420eda14cbcSMatt Macy 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3421eda14cbcSMatt Macy 		metaslab_group_sort(mg, msp, weight);
3422eda14cbcSMatt Macy 		return;
3423eda14cbcSMatt Macy 	}
3424eda14cbcSMatt Macy 
3425eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
3426eda14cbcSMatt Macy 	ASSERT3P(msp->ms_group, ==, mg);
3427eda14cbcSMatt Macy 	ASSERT3S(0, <=, msp->ms_allocator);
3428eda14cbcSMatt Macy 	ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3429eda14cbcSMatt Macy 
3430eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3431eda14cbcSMatt Macy 	if (msp->ms_primary) {
3432eda14cbcSMatt Macy 		ASSERT3P(mga->mga_primary, ==, msp);
3433eda14cbcSMatt Macy 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3434eda14cbcSMatt Macy 		mga->mga_primary = NULL;
3435eda14cbcSMatt Macy 	} else {
3436eda14cbcSMatt Macy 		ASSERT3P(mga->mga_secondary, ==, msp);
3437eda14cbcSMatt Macy 		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3438eda14cbcSMatt Macy 		mga->mga_secondary = NULL;
3439eda14cbcSMatt Macy 	}
3440eda14cbcSMatt Macy 	msp->ms_allocator = -1;
3441eda14cbcSMatt Macy 	metaslab_group_sort_impl(mg, msp, weight);
3442eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
3443eda14cbcSMatt Macy }
3444eda14cbcSMatt Macy 
3445eda14cbcSMatt Macy static void
3446eda14cbcSMatt Macy metaslab_passivate(metaslab_t *msp, uint64_t weight)
3447eda14cbcSMatt Macy {
3448eda14cbcSMatt Macy 	uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3449eda14cbcSMatt Macy 
3450eda14cbcSMatt Macy 	/*
3451eda14cbcSMatt Macy 	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3452eda14cbcSMatt Macy 	 * this metaslab again.  In that case, it had better be empty,
3453eda14cbcSMatt Macy 	 * or we would be leaving space on the table.
3454eda14cbcSMatt Macy 	 */
3455eda14cbcSMatt Macy 	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3456eda14cbcSMatt Macy 	    size >= SPA_MINBLOCKSIZE ||
3457eda14cbcSMatt Macy 	    range_tree_space(msp->ms_allocatable) == 0);
3458eda14cbcSMatt Macy 	ASSERT0(weight & METASLAB_ACTIVE_MASK);
3459eda14cbcSMatt Macy 
3460eda14cbcSMatt Macy 	ASSERT(msp->ms_activation_weight != 0);
3461eda14cbcSMatt Macy 	msp->ms_activation_weight = 0;
3462eda14cbcSMatt Macy 	metaslab_passivate_allocator(msp->ms_group, msp, weight);
3463eda14cbcSMatt Macy 	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3464eda14cbcSMatt Macy }
3465eda14cbcSMatt Macy 
3466eda14cbcSMatt Macy /*
3467eda14cbcSMatt Macy  * Segment-based metaslabs are activated once and remain active until
3468eda14cbcSMatt Macy  * we either fail an allocation attempt (similar to space-based metaslabs)
3469eda14cbcSMatt Macy  * or have exhausted the free space in zfs_metaslab_switch_threshold
3470eda14cbcSMatt Macy  * buckets since the metaslab was activated. This function checks to see
3471eda14cbcSMatt Macy  * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3472eda14cbcSMatt Macy  * metaslab and passivates it proactively. This will allow us to select a
3473eda14cbcSMatt Macy  * metaslab with a larger contiguous region, if any, remaining within this
3474eda14cbcSMatt Macy  * metaslab group. If we're in sync pass > 1, then we continue using this
3475eda14cbcSMatt Macy  * metaslab so that we don't dirty more block and cause more sync passes.
3476eda14cbcSMatt Macy  */
3477eda14cbcSMatt Macy static void
3478eda14cbcSMatt Macy metaslab_segment_may_passivate(metaslab_t *msp)
3479eda14cbcSMatt Macy {
3480eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3481eda14cbcSMatt Macy 
3482eda14cbcSMatt Macy 	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3483eda14cbcSMatt Macy 		return;
3484eda14cbcSMatt Macy 
3485eda14cbcSMatt Macy 	/*
3486eda14cbcSMatt Macy 	 * Since we are in the middle of a sync pass, the most accurate
3487eda14cbcSMatt Macy 	 * information that is accessible to us is the in-core range tree
3488eda14cbcSMatt Macy 	 * histogram; calculate the new weight based on that information.
3489eda14cbcSMatt Macy 	 */
3490eda14cbcSMatt Macy 	uint64_t weight = metaslab_weight_from_range_tree(msp);
3491eda14cbcSMatt Macy 	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3492eda14cbcSMatt Macy 	int current_idx = WEIGHT_GET_INDEX(weight);
3493eda14cbcSMatt Macy 
3494eda14cbcSMatt Macy 	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3495eda14cbcSMatt Macy 		metaslab_passivate(msp, weight);
3496eda14cbcSMatt Macy }
3497eda14cbcSMatt Macy 
3498eda14cbcSMatt Macy static void
3499eda14cbcSMatt Macy metaslab_preload(void *arg)
3500eda14cbcSMatt Macy {
3501eda14cbcSMatt Macy 	metaslab_t *msp = arg;
3502eda14cbcSMatt Macy 	metaslab_class_t *mc = msp->ms_group->mg_class;
3503eda14cbcSMatt Macy 	spa_t *spa = mc->mc_spa;
3504eda14cbcSMatt Macy 	fstrans_cookie_t cookie = spl_fstrans_mark();
3505eda14cbcSMatt Macy 
3506eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3507eda14cbcSMatt Macy 
3508eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
3509eda14cbcSMatt Macy 	(void) metaslab_load(msp);
3510eda14cbcSMatt Macy 	metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3511eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
3512eda14cbcSMatt Macy 	spl_fstrans_unmark(cookie);
3513eda14cbcSMatt Macy }
3514eda14cbcSMatt Macy 
3515eda14cbcSMatt Macy static void
3516eda14cbcSMatt Macy metaslab_group_preload(metaslab_group_t *mg)
3517eda14cbcSMatt Macy {
3518eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
3519eda14cbcSMatt Macy 	metaslab_t *msp;
3520eda14cbcSMatt Macy 	avl_tree_t *t = &mg->mg_metaslab_tree;
3521eda14cbcSMatt Macy 	int m = 0;
3522eda14cbcSMatt Macy 
3523eda14cbcSMatt Macy 	if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
3524eda14cbcSMatt Macy 		taskq_wait_outstanding(mg->mg_taskq, 0);
3525eda14cbcSMatt Macy 		return;
3526eda14cbcSMatt Macy 	}
3527eda14cbcSMatt Macy 
3528eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
3529eda14cbcSMatt Macy 
3530eda14cbcSMatt Macy 	/*
3531eda14cbcSMatt Macy 	 * Load the next potential metaslabs
3532eda14cbcSMatt Macy 	 */
3533eda14cbcSMatt Macy 	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3534eda14cbcSMatt Macy 		ASSERT3P(msp->ms_group, ==, mg);
3535eda14cbcSMatt Macy 
3536eda14cbcSMatt Macy 		/*
3537eda14cbcSMatt Macy 		 * We preload only the maximum number of metaslabs specified
3538eda14cbcSMatt Macy 		 * by metaslab_preload_limit. If a metaslab is being forced
3539eda14cbcSMatt Macy 		 * to condense then we preload it too. This will ensure
3540eda14cbcSMatt Macy 		 * that force condensing happens in the next txg.
3541eda14cbcSMatt Macy 		 */
3542eda14cbcSMatt Macy 		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3543eda14cbcSMatt Macy 			continue;
3544eda14cbcSMatt Macy 		}
3545eda14cbcSMatt Macy 
3546eda14cbcSMatt Macy 		VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
3547eda14cbcSMatt Macy 		    msp, TQ_SLEEP) != TASKQID_INVALID);
3548eda14cbcSMatt Macy 	}
3549eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
3550eda14cbcSMatt Macy }
3551eda14cbcSMatt Macy 
3552eda14cbcSMatt Macy /*
3553eda14cbcSMatt Macy  * Determine if the space map's on-disk footprint is past our tolerance for
3554eda14cbcSMatt Macy  * inefficiency. We would like to use the following criteria to make our
3555eda14cbcSMatt Macy  * decision:
3556eda14cbcSMatt Macy  *
3557eda14cbcSMatt Macy  * 1. Do not condense if the size of the space map object would dramatically
3558eda14cbcSMatt Macy  *    increase as a result of writing out the free space range tree.
3559eda14cbcSMatt Macy  *
3560eda14cbcSMatt Macy  * 2. Condense if the on on-disk space map representation is at least
3561eda14cbcSMatt Macy  *    zfs_condense_pct/100 times the size of the optimal representation
3562eda14cbcSMatt Macy  *    (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB).
3563eda14cbcSMatt Macy  *
3564eda14cbcSMatt Macy  * 3. Do not condense if the on-disk size of the space map does not actually
3565eda14cbcSMatt Macy  *    decrease.
3566eda14cbcSMatt Macy  *
3567eda14cbcSMatt Macy  * Unfortunately, we cannot compute the on-disk size of the space map in this
3568eda14cbcSMatt Macy  * context because we cannot accurately compute the effects of compression, etc.
3569eda14cbcSMatt Macy  * Instead, we apply the heuristic described in the block comment for
3570eda14cbcSMatt Macy  * zfs_metaslab_condense_block_threshold - we only condense if the space used
3571eda14cbcSMatt Macy  * is greater than a threshold number of blocks.
3572eda14cbcSMatt Macy  */
3573eda14cbcSMatt Macy static boolean_t
3574eda14cbcSMatt Macy metaslab_should_condense(metaslab_t *msp)
3575eda14cbcSMatt Macy {
3576eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3577eda14cbcSMatt Macy 	vdev_t *vd = msp->ms_group->mg_vd;
3578be181ee2SMartin Matuska 	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3579eda14cbcSMatt Macy 
3580eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3581eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3582eda14cbcSMatt Macy 	ASSERT(sm != NULL);
3583eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3584eda14cbcSMatt Macy 
3585eda14cbcSMatt Macy 	/*
3586eda14cbcSMatt Macy 	 * We always condense metaslabs that are empty and metaslabs for
3587eda14cbcSMatt Macy 	 * which a condense request has been made.
3588eda14cbcSMatt Macy 	 */
3589eda14cbcSMatt Macy 	if (range_tree_numsegs(msp->ms_allocatable) == 0 ||
3590eda14cbcSMatt Macy 	    msp->ms_condense_wanted)
3591eda14cbcSMatt Macy 		return (B_TRUE);
3592eda14cbcSMatt Macy 
3593eda14cbcSMatt Macy 	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3594eda14cbcSMatt Macy 	uint64_t object_size = space_map_length(sm);
3595eda14cbcSMatt Macy 	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3596eda14cbcSMatt Macy 	    msp->ms_allocatable, SM_NO_VDEVID);
3597eda14cbcSMatt Macy 
3598eda14cbcSMatt Macy 	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3599eda14cbcSMatt Macy 	    object_size > zfs_metaslab_condense_block_threshold * record_size);
3600eda14cbcSMatt Macy }
3601eda14cbcSMatt Macy 
3602eda14cbcSMatt Macy /*
3603eda14cbcSMatt Macy  * Condense the on-disk space map representation to its minimized form.
3604eda14cbcSMatt Macy  * The minimized form consists of a small number of allocations followed
3605eda14cbcSMatt Macy  * by the entries of the free range tree (ms_allocatable). The condensed
3606eda14cbcSMatt Macy  * spacemap contains all the entries of previous TXGs (including those in
3607eda14cbcSMatt Macy  * the pool-wide log spacemaps; thus this is effectively a superset of
3608eda14cbcSMatt Macy  * metaslab_flush()), but this TXG's entries still need to be written.
3609eda14cbcSMatt Macy  */
3610eda14cbcSMatt Macy static void
3611eda14cbcSMatt Macy metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3612eda14cbcSMatt Macy {
3613eda14cbcSMatt Macy 	range_tree_t *condense_tree;
3614eda14cbcSMatt Macy 	space_map_t *sm = msp->ms_sm;
3615eda14cbcSMatt Macy 	uint64_t txg = dmu_tx_get_txg(tx);
3616eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3617eda14cbcSMatt Macy 
3618eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3619eda14cbcSMatt Macy 	ASSERT(msp->ms_loaded);
3620eda14cbcSMatt Macy 	ASSERT(msp->ms_sm != NULL);
3621eda14cbcSMatt Macy 
3622eda14cbcSMatt Macy 	/*
3623eda14cbcSMatt Macy 	 * In order to condense the space map, we need to change it so it
3624eda14cbcSMatt Macy 	 * only describes which segments are currently allocated and free.
3625eda14cbcSMatt Macy 	 *
3626eda14cbcSMatt Macy 	 * All the current free space resides in the ms_allocatable, all
3627eda14cbcSMatt Macy 	 * the ms_defer trees, and all the ms_allocating trees. We ignore
3628eda14cbcSMatt Macy 	 * ms_freed because it is empty because we're in sync pass 1. We
3629eda14cbcSMatt Macy 	 * ignore ms_freeing because these changes are not yet reflected
3630eda14cbcSMatt Macy 	 * in the spacemap (they will be written later this txg).
3631eda14cbcSMatt Macy 	 *
3632eda14cbcSMatt Macy 	 * So to truncate the space map to represent all the entries of
3633eda14cbcSMatt Macy 	 * previous TXGs we do the following:
3634eda14cbcSMatt Macy 	 *
3635eda14cbcSMatt Macy 	 * 1] We create a range tree (condense tree) that is 100% empty.
3636eda14cbcSMatt Macy 	 * 2] We add to it all segments found in the ms_defer trees
3637eda14cbcSMatt Macy 	 *    as those segments are marked as free in the original space
3638eda14cbcSMatt Macy 	 *    map. We do the same with the ms_allocating trees for the same
3639eda14cbcSMatt Macy 	 *    reason. Adding these segments should be a relatively
3640eda14cbcSMatt Macy 	 *    inexpensive operation since we expect these trees to have a
3641eda14cbcSMatt Macy 	 *    small number of nodes.
3642eda14cbcSMatt Macy 	 * 3] We vacate any unflushed allocs, since they are not frees we
3643eda14cbcSMatt Macy 	 *    need to add to the condense tree. Then we vacate any
3644eda14cbcSMatt Macy 	 *    unflushed frees as they should already be part of ms_allocatable.
3645eda14cbcSMatt Macy 	 * 4] At this point, we would ideally like to add all segments
3646eda14cbcSMatt Macy 	 *    in the ms_allocatable tree from the condense tree. This way
3647eda14cbcSMatt Macy 	 *    we would write all the entries of the condense tree as the
3648eda14cbcSMatt Macy 	 *    condensed space map, which would only contain freed
3649eda14cbcSMatt Macy 	 *    segments with everything else assumed to be allocated.
3650eda14cbcSMatt Macy 	 *
3651eda14cbcSMatt Macy 	 *    Doing so can be prohibitively expensive as ms_allocatable can
3652eda14cbcSMatt Macy 	 *    be large, and therefore computationally expensive to add to
3653eda14cbcSMatt Macy 	 *    the condense_tree. Instead we first sync out an entry marking
3654eda14cbcSMatt Macy 	 *    everything as allocated, then the condense_tree and then the
3655eda14cbcSMatt Macy 	 *    ms_allocatable, in the condensed space map. While this is not
3656eda14cbcSMatt Macy 	 *    optimal, it is typically close to optimal and more importantly
3657eda14cbcSMatt Macy 	 *    much cheaper to compute.
3658eda14cbcSMatt Macy 	 *
3659eda14cbcSMatt Macy 	 * 5] Finally, as both of the unflushed trees were written to our
3660eda14cbcSMatt Macy 	 *    new and condensed metaslab space map, we basically flushed
3661eda14cbcSMatt Macy 	 *    all the unflushed changes to disk, thus we call
3662eda14cbcSMatt Macy 	 *    metaslab_flush_update().
3663eda14cbcSMatt Macy 	 */
3664eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3665eda14cbcSMatt Macy 	ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3666eda14cbcSMatt Macy 
3667eda14cbcSMatt Macy 	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
366833b8c039SMartin Matuska 	    "spa %s, smp size %llu, segments %llu, forcing condense=%s",
366933b8c039SMartin Matuska 	    (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
367033b8c039SMartin Matuska 	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
367133b8c039SMartin Matuska 	    spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
367233b8c039SMartin Matuska 	    (u_longlong_t)range_tree_numsegs(msp->ms_allocatable),
3673eda14cbcSMatt Macy 	    msp->ms_condense_wanted ? "TRUE" : "FALSE");
3674eda14cbcSMatt Macy 
3675eda14cbcSMatt Macy 	msp->ms_condense_wanted = B_FALSE;
3676eda14cbcSMatt Macy 
3677eda14cbcSMatt Macy 	range_seg_type_t type;
3678eda14cbcSMatt Macy 	uint64_t shift, start;
3679eda14cbcSMatt Macy 	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3680eda14cbcSMatt Macy 	    &start, &shift);
3681eda14cbcSMatt Macy 
3682eda14cbcSMatt Macy 	condense_tree = range_tree_create(NULL, type, NULL, start, shift);
3683eda14cbcSMatt Macy 
3684eda14cbcSMatt Macy 	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3685eda14cbcSMatt Macy 		range_tree_walk(msp->ms_defer[t],
3686eda14cbcSMatt Macy 		    range_tree_add, condense_tree);
3687eda14cbcSMatt Macy 	}
3688eda14cbcSMatt Macy 
3689eda14cbcSMatt Macy 	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3690eda14cbcSMatt Macy 		range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3691eda14cbcSMatt Macy 		    range_tree_add, condense_tree);
3692eda14cbcSMatt Macy 	}
3693eda14cbcSMatt Macy 
3694eda14cbcSMatt Macy 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3695eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp));
3696eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused -=
3697eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp);
3698eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3699eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3700eda14cbcSMatt Macy 
3701eda14cbcSMatt Macy 	/*
3702eda14cbcSMatt Macy 	 * We're about to drop the metaslab's lock thus allowing other
3703eda14cbcSMatt Macy 	 * consumers to change it's content. Set the metaslab's ms_condensing
3704eda14cbcSMatt Macy 	 * flag to ensure that allocations on this metaslab do not occur
3705eda14cbcSMatt Macy 	 * while we're in the middle of committing it to disk. This is only
3706eda14cbcSMatt Macy 	 * critical for ms_allocatable as all other range trees use per TXG
3707eda14cbcSMatt Macy 	 * views of their content.
3708eda14cbcSMatt Macy 	 */
3709eda14cbcSMatt Macy 	msp->ms_condensing = B_TRUE;
3710eda14cbcSMatt Macy 
3711eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
3712eda14cbcSMatt Macy 	uint64_t object = space_map_object(msp->ms_sm);
3713eda14cbcSMatt Macy 	space_map_truncate(sm,
3714eda14cbcSMatt Macy 	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3715eda14cbcSMatt Macy 	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3716eda14cbcSMatt Macy 
3717eda14cbcSMatt Macy 	/*
3718eda14cbcSMatt Macy 	 * space_map_truncate() may have reallocated the spacemap object.
3719eda14cbcSMatt Macy 	 * If so, update the vdev_ms_array.
3720eda14cbcSMatt Macy 	 */
3721eda14cbcSMatt Macy 	if (space_map_object(msp->ms_sm) != object) {
3722eda14cbcSMatt Macy 		object = space_map_object(msp->ms_sm);
3723eda14cbcSMatt Macy 		dmu_write(spa->spa_meta_objset,
3724eda14cbcSMatt Macy 		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3725eda14cbcSMatt Macy 		    msp->ms_id, sizeof (uint64_t), &object, tx);
3726eda14cbcSMatt Macy 	}
3727eda14cbcSMatt Macy 
3728eda14cbcSMatt Macy 	/*
3729eda14cbcSMatt Macy 	 * Note:
3730eda14cbcSMatt Macy 	 * When the log space map feature is enabled, each space map will
3731eda14cbcSMatt Macy 	 * always have ALLOCS followed by FREES for each sync pass. This is
3732eda14cbcSMatt Macy 	 * typically true even when the log space map feature is disabled,
3733eda14cbcSMatt Macy 	 * except from the case where a metaslab goes through metaslab_sync()
3734eda14cbcSMatt Macy 	 * and gets condensed. In that case the metaslab's space map will have
3735eda14cbcSMatt Macy 	 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3736eda14cbcSMatt Macy 	 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3737eda14cbcSMatt Macy 	 * sync pass 1.
3738eda14cbcSMatt Macy 	 */
3739eda14cbcSMatt Macy 	range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start,
3740eda14cbcSMatt Macy 	    shift);
3741eda14cbcSMatt Macy 	range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3742eda14cbcSMatt Macy 	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3743eda14cbcSMatt Macy 	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3744eda14cbcSMatt Macy 	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3745eda14cbcSMatt Macy 
3746eda14cbcSMatt Macy 	range_tree_vacate(condense_tree, NULL, NULL);
3747eda14cbcSMatt Macy 	range_tree_destroy(condense_tree);
3748eda14cbcSMatt Macy 	range_tree_vacate(tmp_tree, NULL, NULL);
3749eda14cbcSMatt Macy 	range_tree_destroy(tmp_tree);
3750eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
3751eda14cbcSMatt Macy 
3752eda14cbcSMatt Macy 	msp->ms_condensing = B_FALSE;
3753eda14cbcSMatt Macy 	metaslab_flush_update(msp, tx);
3754eda14cbcSMatt Macy }
3755eda14cbcSMatt Macy 
3756716fd348SMartin Matuska static void
3757716fd348SMartin Matuska metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3758716fd348SMartin Matuska {
3759716fd348SMartin Matuska 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3760716fd348SMartin Matuska 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3761716fd348SMartin Matuska 	ASSERT(msp->ms_sm != NULL);
3762716fd348SMartin Matuska 	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3763716fd348SMartin Matuska 	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3764716fd348SMartin Matuska 
3765716fd348SMartin Matuska 	mutex_enter(&spa->spa_flushed_ms_lock);
3766716fd348SMartin Matuska 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3767716fd348SMartin Matuska 	metaslab_set_unflushed_dirty(msp, B_TRUE);
3768716fd348SMartin Matuska 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3769716fd348SMartin Matuska 	mutex_exit(&spa->spa_flushed_ms_lock);
3770716fd348SMartin Matuska 
3771716fd348SMartin Matuska 	spa_log_sm_increment_current_mscount(spa);
3772716fd348SMartin Matuska 	spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3773716fd348SMartin Matuska }
3774716fd348SMartin Matuska 
3775716fd348SMartin Matuska void
3776716fd348SMartin Matuska metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3777716fd348SMartin Matuska {
3778716fd348SMartin Matuska 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3779716fd348SMartin Matuska 	ASSERT(spa_syncing_log_sm(spa) != NULL);
3780716fd348SMartin Matuska 	ASSERT(msp->ms_sm != NULL);
3781716fd348SMartin Matuska 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3782716fd348SMartin Matuska 	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3783716fd348SMartin Matuska 	ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
3784716fd348SMartin Matuska 	ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
3785716fd348SMartin Matuska 
3786716fd348SMartin Matuska 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3787716fd348SMartin Matuska 
3788716fd348SMartin Matuska 	/* update metaslab's position in our flushing tree */
3789716fd348SMartin Matuska 	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3790716fd348SMartin Matuska 	boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3791716fd348SMartin Matuska 	mutex_enter(&spa->spa_flushed_ms_lock);
3792716fd348SMartin Matuska 	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3793716fd348SMartin Matuska 	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3794716fd348SMartin Matuska 	metaslab_set_unflushed_dirty(msp, dirty);
3795716fd348SMartin Matuska 	avl_add(&spa->spa_metaslabs_by_flushed, msp);
3796716fd348SMartin Matuska 	mutex_exit(&spa->spa_flushed_ms_lock);
3797716fd348SMartin Matuska 
3798716fd348SMartin Matuska 	/* update metaslab counts of spa_log_sm_t nodes */
3799716fd348SMartin Matuska 	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3800716fd348SMartin Matuska 	spa_log_sm_increment_current_mscount(spa);
3801716fd348SMartin Matuska 
3802716fd348SMartin Matuska 	/* update log space map summary */
3803716fd348SMartin Matuska 	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
3804716fd348SMartin Matuska 	    ms_prev_flushed_dirty);
3805716fd348SMartin Matuska 	spa_log_summary_add_flushed_metaslab(spa, dirty);
3806716fd348SMartin Matuska 
3807716fd348SMartin Matuska 	/* cleanup obsolete logs if any */
3808716fd348SMartin Matuska 	spa_cleanup_old_sm_logs(spa, tx);
3809716fd348SMartin Matuska }
3810716fd348SMartin Matuska 
3811eda14cbcSMatt Macy /*
3812eda14cbcSMatt Macy  * Called when the metaslab has been flushed (its own spacemap now reflects
3813eda14cbcSMatt Macy  * all the contents of the pool-wide spacemap log). Updates the metaslab's
3814eda14cbcSMatt Macy  * metadata and any pool-wide related log space map data (e.g. summary,
3815eda14cbcSMatt Macy  * obsolete logs, etc..) to reflect that.
3816eda14cbcSMatt Macy  */
3817eda14cbcSMatt Macy static void
3818eda14cbcSMatt Macy metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3819eda14cbcSMatt Macy {
3820eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
3821eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
3822eda14cbcSMatt Macy 
3823eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3824eda14cbcSMatt Macy 
3825eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3826eda14cbcSMatt Macy 
3827eda14cbcSMatt Macy 	/*
3828eda14cbcSMatt Macy 	 * Just because a metaslab got flushed, that doesn't mean that
3829eda14cbcSMatt Macy 	 * it will pass through metaslab_sync_done(). Thus, make sure to
3830eda14cbcSMatt Macy 	 * update ms_synced_length here in case it doesn't.
3831eda14cbcSMatt Macy 	 */
3832eda14cbcSMatt Macy 	msp->ms_synced_length = space_map_length(msp->ms_sm);
3833eda14cbcSMatt Macy 
3834eda14cbcSMatt Macy 	/*
3835eda14cbcSMatt Macy 	 * We may end up here from metaslab_condense() without the
3836eda14cbcSMatt Macy 	 * feature being active. In that case this is a no-op.
3837eda14cbcSMatt Macy 	 */
3838716fd348SMartin Matuska 	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3839716fd348SMartin Matuska 	    metaslab_unflushed_txg(msp) == 0)
3840eda14cbcSMatt Macy 		return;
3841eda14cbcSMatt Macy 
3842716fd348SMartin Matuska 	metaslab_unflushed_bump(msp, tx, B_FALSE);
3843eda14cbcSMatt Macy }
3844eda14cbcSMatt Macy 
3845eda14cbcSMatt Macy boolean_t
3846eda14cbcSMatt Macy metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
3847eda14cbcSMatt Macy {
3848eda14cbcSMatt Macy 	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3849eda14cbcSMatt Macy 
3850eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
3851eda14cbcSMatt Macy 	ASSERT3U(spa_sync_pass(spa), ==, 1);
3852eda14cbcSMatt Macy 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
3853eda14cbcSMatt Macy 
3854eda14cbcSMatt Macy 	ASSERT(msp->ms_sm != NULL);
3855eda14cbcSMatt Macy 	ASSERT(metaslab_unflushed_txg(msp) != 0);
3856eda14cbcSMatt Macy 	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
3857eda14cbcSMatt Macy 
3858eda14cbcSMatt Macy 	/*
3859eda14cbcSMatt Macy 	 * There is nothing wrong with flushing the same metaslab twice, as
3860eda14cbcSMatt Macy 	 * this codepath should work on that case. However, the current
3861eda14cbcSMatt Macy 	 * flushing scheme makes sure to avoid this situation as we would be
3862eda14cbcSMatt Macy 	 * making all these calls without having anything meaningful to write
3863eda14cbcSMatt Macy 	 * to disk. We assert this behavior here.
3864eda14cbcSMatt Macy 	 */
3865eda14cbcSMatt Macy 	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
3866eda14cbcSMatt Macy 
3867eda14cbcSMatt Macy 	/*
3868eda14cbcSMatt Macy 	 * We can not flush while loading, because then we would
3869eda14cbcSMatt Macy 	 * not load the ms_unflushed_{allocs,frees}.
3870eda14cbcSMatt Macy 	 */
3871eda14cbcSMatt Macy 	if (msp->ms_loading)
3872eda14cbcSMatt Macy 		return (B_FALSE);
3873eda14cbcSMatt Macy 
3874eda14cbcSMatt Macy 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3875eda14cbcSMatt Macy 	metaslab_verify_weight_and_frag(msp);
3876eda14cbcSMatt Macy 
3877eda14cbcSMatt Macy 	/*
3878eda14cbcSMatt Macy 	 * Metaslab condensing is effectively flushing. Therefore if the
3879eda14cbcSMatt Macy 	 * metaslab can be condensed we can just condense it instead of
3880eda14cbcSMatt Macy 	 * flushing it.
3881eda14cbcSMatt Macy 	 *
3882eda14cbcSMatt Macy 	 * Note that metaslab_condense() does call metaslab_flush_update()
3883eda14cbcSMatt Macy 	 * so we can just return immediately after condensing. We also
3884eda14cbcSMatt Macy 	 * don't need to care about setting ms_flushing or broadcasting
3885eda14cbcSMatt Macy 	 * ms_flush_cv, even if we temporarily drop the ms_lock in
3886eda14cbcSMatt Macy 	 * metaslab_condense(), as the metaslab is already loaded.
3887eda14cbcSMatt Macy 	 */
3888eda14cbcSMatt Macy 	if (msp->ms_loaded && metaslab_should_condense(msp)) {
3889eda14cbcSMatt Macy 		metaslab_group_t *mg = msp->ms_group;
3890eda14cbcSMatt Macy 
3891eda14cbcSMatt Macy 		/*
3892eda14cbcSMatt Macy 		 * For all histogram operations below refer to the
3893eda14cbcSMatt Macy 		 * comments of metaslab_sync() where we follow a
3894eda14cbcSMatt Macy 		 * similar procedure.
3895eda14cbcSMatt Macy 		 */
3896eda14cbcSMatt Macy 		metaslab_group_histogram_verify(mg);
3897eda14cbcSMatt Macy 		metaslab_class_histogram_verify(mg->mg_class);
3898eda14cbcSMatt Macy 		metaslab_group_histogram_remove(mg, msp);
3899eda14cbcSMatt Macy 
3900eda14cbcSMatt Macy 		metaslab_condense(msp, tx);
3901eda14cbcSMatt Macy 
3902eda14cbcSMatt Macy 		space_map_histogram_clear(msp->ms_sm);
3903eda14cbcSMatt Macy 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
3904eda14cbcSMatt Macy 		ASSERT(range_tree_is_empty(msp->ms_freed));
3905eda14cbcSMatt Macy 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3906eda14cbcSMatt Macy 			space_map_histogram_add(msp->ms_sm,
3907eda14cbcSMatt Macy 			    msp->ms_defer[t], tx);
3908eda14cbcSMatt Macy 		}
3909eda14cbcSMatt Macy 		metaslab_aux_histograms_update(msp);
3910eda14cbcSMatt Macy 
3911eda14cbcSMatt Macy 		metaslab_group_histogram_add(mg, msp);
3912eda14cbcSMatt Macy 		metaslab_group_histogram_verify(mg);
3913eda14cbcSMatt Macy 		metaslab_class_histogram_verify(mg->mg_class);
3914eda14cbcSMatt Macy 
3915eda14cbcSMatt Macy 		metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3916eda14cbcSMatt Macy 
3917eda14cbcSMatt Macy 		/*
3918eda14cbcSMatt Macy 		 * Since we recreated the histogram (and potentially
3919eda14cbcSMatt Macy 		 * the ms_sm too while condensing) ensure that the
3920eda14cbcSMatt Macy 		 * weight is updated too because we are not guaranteed
3921eda14cbcSMatt Macy 		 * that this metaslab is dirty and will go through
3922eda14cbcSMatt Macy 		 * metaslab_sync_done().
3923eda14cbcSMatt Macy 		 */
3924eda14cbcSMatt Macy 		metaslab_recalculate_weight_and_sort(msp);
3925eda14cbcSMatt Macy 		return (B_TRUE);
3926eda14cbcSMatt Macy 	}
3927eda14cbcSMatt Macy 
3928eda14cbcSMatt Macy 	msp->ms_flushing = B_TRUE;
3929eda14cbcSMatt Macy 	uint64_t sm_len_before = space_map_length(msp->ms_sm);
3930eda14cbcSMatt Macy 
3931eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
3932eda14cbcSMatt Macy 	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
3933eda14cbcSMatt Macy 	    SM_NO_VDEVID, tx);
3934eda14cbcSMatt Macy 	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
3935eda14cbcSMatt Macy 	    SM_NO_VDEVID, tx);
3936eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
3937eda14cbcSMatt Macy 
3938eda14cbcSMatt Macy 	uint64_t sm_len_after = space_map_length(msp->ms_sm);
3939eda14cbcSMatt Macy 	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
3940eda14cbcSMatt Macy 		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
3941eda14cbcSMatt Macy 		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
394233b8c039SMartin Matuska 		    "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
394333b8c039SMartin Matuska 		    spa_name(spa),
394433b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
394533b8c039SMartin Matuska 		    (u_longlong_t)msp->ms_id,
394633b8c039SMartin Matuska 		    (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs),
394733b8c039SMartin Matuska 		    (u_longlong_t)range_tree_space(msp->ms_unflushed_frees),
394833b8c039SMartin Matuska 		    (u_longlong_t)(sm_len_after - sm_len_before));
3949eda14cbcSMatt Macy 	}
3950eda14cbcSMatt Macy 
3951eda14cbcSMatt Macy 	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3952eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp));
3953eda14cbcSMatt Macy 	spa->spa_unflushed_stats.sus_memused -=
3954eda14cbcSMatt Macy 	    metaslab_unflushed_changes_memused(msp);
3955eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3956eda14cbcSMatt Macy 	range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3957eda14cbcSMatt Macy 
3958eda14cbcSMatt Macy 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3959eda14cbcSMatt Macy 	metaslab_verify_weight_and_frag(msp);
3960eda14cbcSMatt Macy 
3961eda14cbcSMatt Macy 	metaslab_flush_update(msp, tx);
3962eda14cbcSMatt Macy 
3963eda14cbcSMatt Macy 	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
3964eda14cbcSMatt Macy 	metaslab_verify_weight_and_frag(msp);
3965eda14cbcSMatt Macy 
3966eda14cbcSMatt Macy 	msp->ms_flushing = B_FALSE;
3967eda14cbcSMatt Macy 	cv_broadcast(&msp->ms_flush_cv);
3968eda14cbcSMatt Macy 	return (B_TRUE);
3969eda14cbcSMatt Macy }
3970eda14cbcSMatt Macy 
3971eda14cbcSMatt Macy /*
3972eda14cbcSMatt Macy  * Write a metaslab to disk in the context of the specified transaction group.
3973eda14cbcSMatt Macy  */
3974eda14cbcSMatt Macy void
3975eda14cbcSMatt Macy metaslab_sync(metaslab_t *msp, uint64_t txg)
3976eda14cbcSMatt Macy {
3977eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
3978eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
3979eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
3980eda14cbcSMatt Macy 	objset_t *mos = spa_meta_objset(spa);
3981eda14cbcSMatt Macy 	range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
3982eda14cbcSMatt Macy 	dmu_tx_t *tx;
3983eda14cbcSMatt Macy 
3984eda14cbcSMatt Macy 	ASSERT(!vd->vdev_ishole);
3985eda14cbcSMatt Macy 
3986eda14cbcSMatt Macy 	/*
3987eda14cbcSMatt Macy 	 * This metaslab has just been added so there's no work to do now.
3988eda14cbcSMatt Macy 	 */
3989f9693befSMartin Matuska 	if (msp->ms_new) {
3990f9693befSMartin Matuska 		ASSERT0(range_tree_space(alloctree));
3991f9693befSMartin Matuska 		ASSERT0(range_tree_space(msp->ms_freeing));
3992f9693befSMartin Matuska 		ASSERT0(range_tree_space(msp->ms_freed));
3993f9693befSMartin Matuska 		ASSERT0(range_tree_space(msp->ms_checkpointing));
3994f9693befSMartin Matuska 		ASSERT0(range_tree_space(msp->ms_trim));
3995eda14cbcSMatt Macy 		return;
3996eda14cbcSMatt Macy 	}
3997eda14cbcSMatt Macy 
3998eda14cbcSMatt Macy 	/*
3999eda14cbcSMatt Macy 	 * Normally, we don't want to process a metaslab if there are no
4000eda14cbcSMatt Macy 	 * allocations or frees to perform. However, if the metaslab is being
4001eda14cbcSMatt Macy 	 * forced to condense, it's loaded and we're not beyond the final
4002eda14cbcSMatt Macy 	 * dirty txg, we need to let it through. Not condensing beyond the
4003eda14cbcSMatt Macy 	 * final dirty txg prevents an issue where metaslabs that need to be
4004eda14cbcSMatt Macy 	 * condensed but were loaded for other reasons could cause a panic
4005eda14cbcSMatt Macy 	 * here. By only checking the txg in that branch of the conditional,
4006eda14cbcSMatt Macy 	 * we preserve the utility of the VERIFY statements in all other
4007eda14cbcSMatt Macy 	 * cases.
4008eda14cbcSMatt Macy 	 */
4009eda14cbcSMatt Macy 	if (range_tree_is_empty(alloctree) &&
4010eda14cbcSMatt Macy 	    range_tree_is_empty(msp->ms_freeing) &&
4011eda14cbcSMatt Macy 	    range_tree_is_empty(msp->ms_checkpointing) &&
4012eda14cbcSMatt Macy 	    !(msp->ms_loaded && msp->ms_condense_wanted &&
4013eda14cbcSMatt Macy 	    txg <= spa_final_dirty_txg(spa)))
4014eda14cbcSMatt Macy 		return;
4015eda14cbcSMatt Macy 
4016eda14cbcSMatt Macy 
4017eda14cbcSMatt Macy 	VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4018eda14cbcSMatt Macy 
4019eda14cbcSMatt Macy 	/*
4020eda14cbcSMatt Macy 	 * The only state that can actually be changing concurrently
4021eda14cbcSMatt Macy 	 * with metaslab_sync() is the metaslab's ms_allocatable. No
4022eda14cbcSMatt Macy 	 * other thread can be modifying this txg's alloc, freeing,
4023eda14cbcSMatt Macy 	 * freed, or space_map_phys_t.  We drop ms_lock whenever we
4024eda14cbcSMatt Macy 	 * could call into the DMU, because the DMU can call down to
4025eda14cbcSMatt Macy 	 * us (e.g. via zio_free()) at any time.
4026eda14cbcSMatt Macy 	 *
4027eda14cbcSMatt Macy 	 * The spa_vdev_remove_thread() can be reading metaslab state
4028eda14cbcSMatt Macy 	 * concurrently, and it is locked out by the ms_sync_lock.
4029eda14cbcSMatt Macy 	 * Note that the ms_lock is insufficient for this, because it
4030eda14cbcSMatt Macy 	 * is dropped by space_map_write().
4031eda14cbcSMatt Macy 	 */
4032eda14cbcSMatt Macy 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4033eda14cbcSMatt Macy 
4034eda14cbcSMatt Macy 	/*
4035eda14cbcSMatt Macy 	 * Generate a log space map if one doesn't exist already.
4036eda14cbcSMatt Macy 	 */
4037eda14cbcSMatt Macy 	spa_generate_syncing_log_sm(spa, tx);
4038eda14cbcSMatt Macy 
4039eda14cbcSMatt Macy 	if (msp->ms_sm == NULL) {
4040eda14cbcSMatt Macy 		uint64_t new_object = space_map_alloc(mos,
4041eda14cbcSMatt Macy 		    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4042eda14cbcSMatt Macy 		    zfs_metaslab_sm_blksz_with_log :
4043eda14cbcSMatt Macy 		    zfs_metaslab_sm_blksz_no_log, tx);
4044eda14cbcSMatt Macy 		VERIFY3U(new_object, !=, 0);
4045eda14cbcSMatt Macy 
4046eda14cbcSMatt Macy 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4047eda14cbcSMatt Macy 		    msp->ms_id, sizeof (uint64_t), &new_object, tx);
4048eda14cbcSMatt Macy 
4049eda14cbcSMatt Macy 		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4050eda14cbcSMatt Macy 		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
4051eda14cbcSMatt Macy 		ASSERT(msp->ms_sm != NULL);
4052eda14cbcSMatt Macy 
4053eda14cbcSMatt Macy 		ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs));
4054eda14cbcSMatt Macy 		ASSERT(range_tree_is_empty(msp->ms_unflushed_frees));
4055eda14cbcSMatt Macy 		ASSERT0(metaslab_allocated_space(msp));
4056eda14cbcSMatt Macy 	}
4057eda14cbcSMatt Macy 
4058eda14cbcSMatt Macy 	if (!range_tree_is_empty(msp->ms_checkpointing) &&
4059eda14cbcSMatt Macy 	    vd->vdev_checkpoint_sm == NULL) {
4060eda14cbcSMatt Macy 		ASSERT(spa_has_checkpoint(spa));
4061eda14cbcSMatt Macy 
4062eda14cbcSMatt Macy 		uint64_t new_object = space_map_alloc(mos,
4063eda14cbcSMatt Macy 		    zfs_vdev_standard_sm_blksz, tx);
4064eda14cbcSMatt Macy 		VERIFY3U(new_object, !=, 0);
4065eda14cbcSMatt Macy 
4066eda14cbcSMatt Macy 		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4067eda14cbcSMatt Macy 		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4068eda14cbcSMatt Macy 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4069eda14cbcSMatt Macy 
4070eda14cbcSMatt Macy 		/*
4071eda14cbcSMatt Macy 		 * We save the space map object as an entry in vdev_top_zap
4072eda14cbcSMatt Macy 		 * so it can be retrieved when the pool is reopened after an
4073eda14cbcSMatt Macy 		 * export or through zdb.
4074eda14cbcSMatt Macy 		 */
4075eda14cbcSMatt Macy 		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4076eda14cbcSMatt Macy 		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4077eda14cbcSMatt Macy 		    sizeof (new_object), 1, &new_object, tx));
4078eda14cbcSMatt Macy 	}
4079eda14cbcSMatt Macy 
4080eda14cbcSMatt Macy 	mutex_enter(&msp->ms_sync_lock);
4081eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
4082eda14cbcSMatt Macy 
4083eda14cbcSMatt Macy 	/*
4084eda14cbcSMatt Macy 	 * Note: metaslab_condense() clears the space map's histogram.
4085eda14cbcSMatt Macy 	 * Therefore we must verify and remove this histogram before
4086eda14cbcSMatt Macy 	 * condensing.
4087eda14cbcSMatt Macy 	 */
4088eda14cbcSMatt Macy 	metaslab_group_histogram_verify(mg);
4089eda14cbcSMatt Macy 	metaslab_class_histogram_verify(mg->mg_class);
4090eda14cbcSMatt Macy 	metaslab_group_histogram_remove(mg, msp);
4091eda14cbcSMatt Macy 
4092eda14cbcSMatt Macy 	if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4093eda14cbcSMatt Macy 	    metaslab_should_condense(msp))
4094eda14cbcSMatt Macy 		metaslab_condense(msp, tx);
4095eda14cbcSMatt Macy 
4096eda14cbcSMatt Macy 	/*
4097eda14cbcSMatt Macy 	 * We'll be going to disk to sync our space accounting, thus we
4098eda14cbcSMatt Macy 	 * drop the ms_lock during that time so allocations coming from
4099eda14cbcSMatt Macy 	 * open-context (ZIL) for future TXGs do not block.
4100eda14cbcSMatt Macy 	 */
4101eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
4102eda14cbcSMatt Macy 	space_map_t *log_sm = spa_syncing_log_sm(spa);
4103eda14cbcSMatt Macy 	if (log_sm != NULL) {
4104eda14cbcSMatt Macy 		ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4105716fd348SMartin Matuska 		if (metaslab_unflushed_txg(msp) == 0)
4106716fd348SMartin Matuska 			metaslab_unflushed_add(msp, tx);
4107716fd348SMartin Matuska 		else if (!metaslab_unflushed_dirty(msp))
4108716fd348SMartin Matuska 			metaslab_unflushed_bump(msp, tx, B_TRUE);
4109eda14cbcSMatt Macy 
4110eda14cbcSMatt Macy 		space_map_write(log_sm, alloctree, SM_ALLOC,
4111eda14cbcSMatt Macy 		    vd->vdev_id, tx);
4112eda14cbcSMatt Macy 		space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4113eda14cbcSMatt Macy 		    vd->vdev_id, tx);
4114eda14cbcSMatt Macy 		mutex_enter(&msp->ms_lock);
4115eda14cbcSMatt Macy 
4116eda14cbcSMatt Macy 		ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4117eda14cbcSMatt Macy 		    metaslab_unflushed_changes_memused(msp));
4118eda14cbcSMatt Macy 		spa->spa_unflushed_stats.sus_memused -=
4119eda14cbcSMatt Macy 		    metaslab_unflushed_changes_memused(msp);
4120eda14cbcSMatt Macy 		range_tree_remove_xor_add(alloctree,
4121eda14cbcSMatt Macy 		    msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4122eda14cbcSMatt Macy 		range_tree_remove_xor_add(msp->ms_freeing,
4123eda14cbcSMatt Macy 		    msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4124eda14cbcSMatt Macy 		spa->spa_unflushed_stats.sus_memused +=
4125eda14cbcSMatt Macy 		    metaslab_unflushed_changes_memused(msp);
4126eda14cbcSMatt Macy 	} else {
4127eda14cbcSMatt Macy 		ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4128eda14cbcSMatt Macy 
4129eda14cbcSMatt Macy 		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4130eda14cbcSMatt Macy 		    SM_NO_VDEVID, tx);
4131eda14cbcSMatt Macy 		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4132eda14cbcSMatt Macy 		    SM_NO_VDEVID, tx);
4133eda14cbcSMatt Macy 		mutex_enter(&msp->ms_lock);
4134eda14cbcSMatt Macy 	}
4135eda14cbcSMatt Macy 
4136eda14cbcSMatt Macy 	msp->ms_allocated_space += range_tree_space(alloctree);
4137eda14cbcSMatt Macy 	ASSERT3U(msp->ms_allocated_space, >=,
4138eda14cbcSMatt Macy 	    range_tree_space(msp->ms_freeing));
4139eda14cbcSMatt Macy 	msp->ms_allocated_space -= range_tree_space(msp->ms_freeing);
4140eda14cbcSMatt Macy 
4141eda14cbcSMatt Macy 	if (!range_tree_is_empty(msp->ms_checkpointing)) {
4142eda14cbcSMatt Macy 		ASSERT(spa_has_checkpoint(spa));
4143eda14cbcSMatt Macy 		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4144eda14cbcSMatt Macy 
4145eda14cbcSMatt Macy 		/*
4146eda14cbcSMatt Macy 		 * Since we are doing writes to disk and the ms_checkpointing
4147eda14cbcSMatt Macy 		 * tree won't be changing during that time, we drop the
4148eda14cbcSMatt Macy 		 * ms_lock while writing to the checkpoint space map, for the
4149eda14cbcSMatt Macy 		 * same reason mentioned above.
4150eda14cbcSMatt Macy 		 */
4151eda14cbcSMatt Macy 		mutex_exit(&msp->ms_lock);
4152eda14cbcSMatt Macy 		space_map_write(vd->vdev_checkpoint_sm,
4153eda14cbcSMatt Macy 		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4154eda14cbcSMatt Macy 		mutex_enter(&msp->ms_lock);
4155eda14cbcSMatt Macy 
4156eda14cbcSMatt Macy 		spa->spa_checkpoint_info.sci_dspace +=
4157eda14cbcSMatt Macy 		    range_tree_space(msp->ms_checkpointing);
4158eda14cbcSMatt Macy 		vd->vdev_stat.vs_checkpoint_space +=
4159eda14cbcSMatt Macy 		    range_tree_space(msp->ms_checkpointing);
4160eda14cbcSMatt Macy 		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4161eda14cbcSMatt Macy 		    -space_map_allocated(vd->vdev_checkpoint_sm));
4162eda14cbcSMatt Macy 
4163eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4164eda14cbcSMatt Macy 	}
4165eda14cbcSMatt Macy 
4166eda14cbcSMatt Macy 	if (msp->ms_loaded) {
4167eda14cbcSMatt Macy 		/*
4168eda14cbcSMatt Macy 		 * When the space map is loaded, we have an accurate
4169eda14cbcSMatt Macy 		 * histogram in the range tree. This gives us an opportunity
4170eda14cbcSMatt Macy 		 * to bring the space map's histogram up-to-date so we clear
4171eda14cbcSMatt Macy 		 * it first before updating it.
4172eda14cbcSMatt Macy 		 */
4173eda14cbcSMatt Macy 		space_map_histogram_clear(msp->ms_sm);
4174eda14cbcSMatt Macy 		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4175eda14cbcSMatt Macy 
4176eda14cbcSMatt Macy 		/*
4177eda14cbcSMatt Macy 		 * Since we've cleared the histogram we need to add back
4178eda14cbcSMatt Macy 		 * any free space that has already been processed, plus
4179eda14cbcSMatt Macy 		 * any deferred space. This allows the on-disk histogram
4180eda14cbcSMatt Macy 		 * to accurately reflect all free space even if some space
4181eda14cbcSMatt Macy 		 * is not yet available for allocation (i.e. deferred).
4182eda14cbcSMatt Macy 		 */
4183eda14cbcSMatt Macy 		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4184eda14cbcSMatt Macy 
4185eda14cbcSMatt Macy 		/*
4186eda14cbcSMatt Macy 		 * Add back any deferred free space that has not been
4187eda14cbcSMatt Macy 		 * added back into the in-core free tree yet. This will
4188eda14cbcSMatt Macy 		 * ensure that we don't end up with a space map histogram
4189eda14cbcSMatt Macy 		 * that is completely empty unless the metaslab is fully
4190eda14cbcSMatt Macy 		 * allocated.
4191eda14cbcSMatt Macy 		 */
4192eda14cbcSMatt Macy 		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4193eda14cbcSMatt Macy 			space_map_histogram_add(msp->ms_sm,
4194eda14cbcSMatt Macy 			    msp->ms_defer[t], tx);
4195eda14cbcSMatt Macy 		}
4196eda14cbcSMatt Macy 	}
4197eda14cbcSMatt Macy 
4198eda14cbcSMatt Macy 	/*
4199eda14cbcSMatt Macy 	 * Always add the free space from this sync pass to the space
4200eda14cbcSMatt Macy 	 * map histogram. We want to make sure that the on-disk histogram
4201eda14cbcSMatt Macy 	 * accounts for all free space. If the space map is not loaded,
4202eda14cbcSMatt Macy 	 * then we will lose some accuracy but will correct it the next
4203eda14cbcSMatt Macy 	 * time we load the space map.
4204eda14cbcSMatt Macy 	 */
4205eda14cbcSMatt Macy 	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4206eda14cbcSMatt Macy 	metaslab_aux_histograms_update(msp);
4207eda14cbcSMatt Macy 
4208eda14cbcSMatt Macy 	metaslab_group_histogram_add(mg, msp);
4209eda14cbcSMatt Macy 	metaslab_group_histogram_verify(mg);
4210eda14cbcSMatt Macy 	metaslab_class_histogram_verify(mg->mg_class);
4211eda14cbcSMatt Macy 
4212eda14cbcSMatt Macy 	/*
4213eda14cbcSMatt Macy 	 * For sync pass 1, we avoid traversing this txg's free range tree
4214eda14cbcSMatt Macy 	 * and instead will just swap the pointers for freeing and freed.
4215eda14cbcSMatt Macy 	 * We can safely do this since the freed_tree is guaranteed to be
4216eda14cbcSMatt Macy 	 * empty on the initial pass.
4217eda14cbcSMatt Macy 	 *
4218eda14cbcSMatt Macy 	 * Keep in mind that even if we are currently using a log spacemap
4219eda14cbcSMatt Macy 	 * we want current frees to end up in the ms_allocatable (but not
4220eda14cbcSMatt Macy 	 * get appended to the ms_sm) so their ranges can be reused as usual.
4221eda14cbcSMatt Macy 	 */
4222eda14cbcSMatt Macy 	if (spa_sync_pass(spa) == 1) {
4223eda14cbcSMatt Macy 		range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4224eda14cbcSMatt Macy 		ASSERT0(msp->ms_allocated_this_txg);
4225eda14cbcSMatt Macy 	} else {
4226eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_freeing,
4227eda14cbcSMatt Macy 		    range_tree_add, msp->ms_freed);
4228eda14cbcSMatt Macy 	}
4229eda14cbcSMatt Macy 	msp->ms_allocated_this_txg += range_tree_space(alloctree);
4230eda14cbcSMatt Macy 	range_tree_vacate(alloctree, NULL, NULL);
4231eda14cbcSMatt Macy 
4232eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4233eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4234eda14cbcSMatt Macy 	    & TXG_MASK]));
4235eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freeing));
4236eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4237eda14cbcSMatt Macy 
4238eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
4239eda14cbcSMatt Macy 
4240eda14cbcSMatt Macy 	/*
4241eda14cbcSMatt Macy 	 * Verify that the space map object ID has been recorded in the
4242eda14cbcSMatt Macy 	 * vdev_ms_array.
4243eda14cbcSMatt Macy 	 */
4244eda14cbcSMatt Macy 	uint64_t object;
4245eda14cbcSMatt Macy 	VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4246eda14cbcSMatt Macy 	    msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4247eda14cbcSMatt Macy 	VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4248eda14cbcSMatt Macy 
4249eda14cbcSMatt Macy 	mutex_exit(&msp->ms_sync_lock);
4250eda14cbcSMatt Macy 	dmu_tx_commit(tx);
4251eda14cbcSMatt Macy }
4252eda14cbcSMatt Macy 
4253eda14cbcSMatt Macy static void
4254eda14cbcSMatt Macy metaslab_evict(metaslab_t *msp, uint64_t txg)
4255eda14cbcSMatt Macy {
4256eda14cbcSMatt Macy 	if (!msp->ms_loaded || msp->ms_disabled != 0)
4257eda14cbcSMatt Macy 		return;
4258eda14cbcSMatt Macy 
4259eda14cbcSMatt Macy 	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4260eda14cbcSMatt Macy 		VERIFY0(range_tree_space(
4261eda14cbcSMatt Macy 		    msp->ms_allocating[(txg + t) & TXG_MASK]));
4262eda14cbcSMatt Macy 	}
4263eda14cbcSMatt Macy 	if (msp->ms_allocator != -1)
4264eda14cbcSMatt Macy 		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4265eda14cbcSMatt Macy 
4266eda14cbcSMatt Macy 	if (!metaslab_debug_unload)
4267eda14cbcSMatt Macy 		metaslab_unload(msp);
4268eda14cbcSMatt Macy }
4269eda14cbcSMatt Macy 
4270eda14cbcSMatt Macy /*
4271eda14cbcSMatt Macy  * Called after a transaction group has completely synced to mark
4272eda14cbcSMatt Macy  * all of the metaslab's free space as usable.
4273eda14cbcSMatt Macy  */
4274eda14cbcSMatt Macy void
4275eda14cbcSMatt Macy metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4276eda14cbcSMatt Macy {
4277eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
4278eda14cbcSMatt Macy 	vdev_t *vd = mg->mg_vd;
4279eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
4280eda14cbcSMatt Macy 	range_tree_t **defer_tree;
4281eda14cbcSMatt Macy 	int64_t alloc_delta, defer_delta;
4282eda14cbcSMatt Macy 	boolean_t defer_allowed = B_TRUE;
4283eda14cbcSMatt Macy 
4284eda14cbcSMatt Macy 	ASSERT(!vd->vdev_ishole);
4285eda14cbcSMatt Macy 
4286eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
4287eda14cbcSMatt Macy 
4288f9693befSMartin Matuska 	if (msp->ms_new) {
4289f9693befSMartin Matuska 		/* this is a new metaslab, add its capacity to the vdev */
4290eda14cbcSMatt Macy 		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4291f9693befSMartin Matuska 
4292f9693befSMartin Matuska 		/* there should be no allocations nor frees at this point */
4293f9693befSMartin Matuska 		VERIFY0(msp->ms_allocated_this_txg);
4294f9693befSMartin Matuska 		VERIFY0(range_tree_space(msp->ms_freed));
4295eda14cbcSMatt Macy 	}
4296f9693befSMartin Matuska 
4297eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freeing));
4298eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4299eda14cbcSMatt Macy 
4300eda14cbcSMatt Macy 	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4301eda14cbcSMatt Macy 
4302eda14cbcSMatt Macy 	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4303eda14cbcSMatt Macy 	    metaslab_class_get_alloc(spa_normal_class(spa));
4304eda14cbcSMatt Macy 	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
4305eda14cbcSMatt Macy 		defer_allowed = B_FALSE;
4306eda14cbcSMatt Macy 	}
4307eda14cbcSMatt Macy 
4308eda14cbcSMatt Macy 	defer_delta = 0;
4309eda14cbcSMatt Macy 	alloc_delta = msp->ms_allocated_this_txg -
4310eda14cbcSMatt Macy 	    range_tree_space(msp->ms_freed);
4311eda14cbcSMatt Macy 
4312eda14cbcSMatt Macy 	if (defer_allowed) {
4313eda14cbcSMatt Macy 		defer_delta = range_tree_space(msp->ms_freed) -
4314eda14cbcSMatt Macy 		    range_tree_space(*defer_tree);
4315eda14cbcSMatt Macy 	} else {
4316eda14cbcSMatt Macy 		defer_delta -= range_tree_space(*defer_tree);
4317eda14cbcSMatt Macy 	}
4318eda14cbcSMatt Macy 	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4319eda14cbcSMatt Macy 	    defer_delta, 0);
4320eda14cbcSMatt Macy 
4321eda14cbcSMatt Macy 	if (spa_syncing_log_sm(spa) == NULL) {
4322eda14cbcSMatt Macy 		/*
4323eda14cbcSMatt Macy 		 * If there's a metaslab_load() in progress and we don't have
4324eda14cbcSMatt Macy 		 * a log space map, it means that we probably wrote to the
4325eda14cbcSMatt Macy 		 * metaslab's space map. If this is the case, we need to
4326eda14cbcSMatt Macy 		 * make sure that we wait for the load to complete so that we
4327eda14cbcSMatt Macy 		 * have a consistent view at the in-core side of the metaslab.
4328eda14cbcSMatt Macy 		 */
4329eda14cbcSMatt Macy 		metaslab_load_wait(msp);
4330eda14cbcSMatt Macy 	} else {
4331eda14cbcSMatt Macy 		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4332eda14cbcSMatt Macy 	}
4333eda14cbcSMatt Macy 
4334eda14cbcSMatt Macy 	/*
4335eda14cbcSMatt Macy 	 * When auto-trimming is enabled, free ranges which are added to
4336eda14cbcSMatt Macy 	 * ms_allocatable are also be added to ms_trim.  The ms_trim tree is
4337eda14cbcSMatt Macy 	 * periodically consumed by the vdev_autotrim_thread() which issues
4338eda14cbcSMatt Macy 	 * trims for all ranges and then vacates the tree.  The ms_trim tree
4339eda14cbcSMatt Macy 	 * can be discarded at any time with the sole consequence of recent
4340eda14cbcSMatt Macy 	 * frees not being trimmed.
4341eda14cbcSMatt Macy 	 */
4342eda14cbcSMatt Macy 	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4343eda14cbcSMatt Macy 		range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim);
4344eda14cbcSMatt Macy 		if (!defer_allowed) {
4345eda14cbcSMatt Macy 			range_tree_walk(msp->ms_freed, range_tree_add,
4346eda14cbcSMatt Macy 			    msp->ms_trim);
4347eda14cbcSMatt Macy 		}
4348eda14cbcSMatt Macy 	} else {
4349eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_trim, NULL, NULL);
4350eda14cbcSMatt Macy 	}
4351eda14cbcSMatt Macy 
4352eda14cbcSMatt Macy 	/*
4353eda14cbcSMatt Macy 	 * Move the frees from the defer_tree back to the free
4354eda14cbcSMatt Macy 	 * range tree (if it's loaded). Swap the freed_tree and
4355eda14cbcSMatt Macy 	 * the defer_tree -- this is safe to do because we've
4356eda14cbcSMatt Macy 	 * just emptied out the defer_tree.
4357eda14cbcSMatt Macy 	 */
4358eda14cbcSMatt Macy 	range_tree_vacate(*defer_tree,
4359eda14cbcSMatt Macy 	    msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable);
4360eda14cbcSMatt Macy 	if (defer_allowed) {
4361eda14cbcSMatt Macy 		range_tree_swap(&msp->ms_freed, defer_tree);
4362eda14cbcSMatt Macy 	} else {
4363eda14cbcSMatt Macy 		range_tree_vacate(msp->ms_freed,
4364eda14cbcSMatt Macy 		    msp->ms_loaded ? range_tree_add : NULL,
4365eda14cbcSMatt Macy 		    msp->ms_allocatable);
4366eda14cbcSMatt Macy 	}
4367eda14cbcSMatt Macy 
4368eda14cbcSMatt Macy 	msp->ms_synced_length = space_map_length(msp->ms_sm);
4369eda14cbcSMatt Macy 
4370eda14cbcSMatt Macy 	msp->ms_deferspace += defer_delta;
4371eda14cbcSMatt Macy 	ASSERT3S(msp->ms_deferspace, >=, 0);
4372eda14cbcSMatt Macy 	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4373eda14cbcSMatt Macy 	if (msp->ms_deferspace != 0) {
4374eda14cbcSMatt Macy 		/*
4375eda14cbcSMatt Macy 		 * Keep syncing this metaslab until all deferred frees
4376eda14cbcSMatt Macy 		 * are back in circulation.
4377eda14cbcSMatt Macy 		 */
4378eda14cbcSMatt Macy 		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4379eda14cbcSMatt Macy 	}
4380eda14cbcSMatt Macy 	metaslab_aux_histograms_update_done(msp, defer_allowed);
4381eda14cbcSMatt Macy 
4382eda14cbcSMatt Macy 	if (msp->ms_new) {
4383eda14cbcSMatt Macy 		msp->ms_new = B_FALSE;
4384eda14cbcSMatt Macy 		mutex_enter(&mg->mg_lock);
4385eda14cbcSMatt Macy 		mg->mg_ms_ready++;
4386eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
4387eda14cbcSMatt Macy 	}
4388eda14cbcSMatt Macy 
4389eda14cbcSMatt Macy 	/*
4390eda14cbcSMatt Macy 	 * Re-sort metaslab within its group now that we've adjusted
4391eda14cbcSMatt Macy 	 * its allocatable space.
4392eda14cbcSMatt Macy 	 */
4393eda14cbcSMatt Macy 	metaslab_recalculate_weight_and_sort(msp);
4394eda14cbcSMatt Macy 
4395eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4396eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freeing));
4397eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_freed));
4398eda14cbcSMatt Macy 	ASSERT0(range_tree_space(msp->ms_checkpointing));
4399eda14cbcSMatt Macy 	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4400eda14cbcSMatt Macy 	msp->ms_allocated_this_txg = 0;
4401eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
4402eda14cbcSMatt Macy }
4403eda14cbcSMatt Macy 
4404eda14cbcSMatt Macy void
4405eda14cbcSMatt Macy metaslab_sync_reassess(metaslab_group_t *mg)
4406eda14cbcSMatt Macy {
4407eda14cbcSMatt Macy 	spa_t *spa = mg->mg_class->mc_spa;
4408eda14cbcSMatt Macy 
4409eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4410eda14cbcSMatt Macy 	metaslab_group_alloc_update(mg);
4411eda14cbcSMatt Macy 	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4412eda14cbcSMatt Macy 
4413eda14cbcSMatt Macy 	/*
4414eda14cbcSMatt Macy 	 * Preload the next potential metaslabs but only on active
4415eda14cbcSMatt Macy 	 * metaslab groups. We can get into a state where the metaslab
4416eda14cbcSMatt Macy 	 * is no longer active since we dirty metaslabs as we remove a
4417eda14cbcSMatt Macy 	 * a device, thus potentially making the metaslab group eligible
4418eda14cbcSMatt Macy 	 * for preloading.
4419eda14cbcSMatt Macy 	 */
4420eda14cbcSMatt Macy 	if (mg->mg_activation_count > 0) {
4421eda14cbcSMatt Macy 		metaslab_group_preload(mg);
4422eda14cbcSMatt Macy 	}
4423eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
4424eda14cbcSMatt Macy }
4425eda14cbcSMatt Macy 
4426eda14cbcSMatt Macy /*
4427eda14cbcSMatt Macy  * When writing a ditto block (i.e. more than one DVA for a given BP) on
4428eda14cbcSMatt Macy  * the same vdev as an existing DVA of this BP, then try to allocate it
4429eda14cbcSMatt Macy  * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4430eda14cbcSMatt Macy  */
4431eda14cbcSMatt Macy static boolean_t
4432eda14cbcSMatt Macy metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4433eda14cbcSMatt Macy {
4434eda14cbcSMatt Macy 	uint64_t dva_ms_id;
4435eda14cbcSMatt Macy 
4436eda14cbcSMatt Macy 	if (DVA_GET_ASIZE(dva) == 0)
4437eda14cbcSMatt Macy 		return (B_TRUE);
4438eda14cbcSMatt Macy 
4439eda14cbcSMatt Macy 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4440eda14cbcSMatt Macy 		return (B_TRUE);
4441eda14cbcSMatt Macy 
4442eda14cbcSMatt Macy 	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4443eda14cbcSMatt Macy 
4444eda14cbcSMatt Macy 	return (msp->ms_id != dva_ms_id);
4445eda14cbcSMatt Macy }
4446eda14cbcSMatt Macy 
4447eda14cbcSMatt Macy /*
4448eda14cbcSMatt Macy  * ==========================================================================
4449eda14cbcSMatt Macy  * Metaslab allocation tracing facility
4450eda14cbcSMatt Macy  * ==========================================================================
4451eda14cbcSMatt Macy  */
4452eda14cbcSMatt Macy 
4453eda14cbcSMatt Macy /*
4454eda14cbcSMatt Macy  * Add an allocation trace element to the allocation tracing list.
4455eda14cbcSMatt Macy  */
4456eda14cbcSMatt Macy static void
4457eda14cbcSMatt Macy metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4458eda14cbcSMatt Macy     metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4459eda14cbcSMatt Macy     int allocator)
4460eda14cbcSMatt Macy {
4461eda14cbcSMatt Macy 	metaslab_alloc_trace_t *mat;
4462eda14cbcSMatt Macy 
4463eda14cbcSMatt Macy 	if (!metaslab_trace_enabled)
4464eda14cbcSMatt Macy 		return;
4465eda14cbcSMatt Macy 
4466eda14cbcSMatt Macy 	/*
4467eda14cbcSMatt Macy 	 * When the tracing list reaches its maximum we remove
4468eda14cbcSMatt Macy 	 * the second element in the list before adding a new one.
4469eda14cbcSMatt Macy 	 * By removing the second element we preserve the original
4470eda14cbcSMatt Macy 	 * entry as a clue to what allocations steps have already been
4471eda14cbcSMatt Macy 	 * performed.
4472eda14cbcSMatt Macy 	 */
4473eda14cbcSMatt Macy 	if (zal->zal_size == metaslab_trace_max_entries) {
4474eda14cbcSMatt Macy 		metaslab_alloc_trace_t *mat_next;
4475eda14cbcSMatt Macy #ifdef ZFS_DEBUG
4476eda14cbcSMatt Macy 		panic("too many entries in allocation list");
4477eda14cbcSMatt Macy #endif
4478eda14cbcSMatt Macy 		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4479eda14cbcSMatt Macy 		zal->zal_size--;
4480eda14cbcSMatt Macy 		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4481eda14cbcSMatt Macy 		list_remove(&zal->zal_list, mat_next);
4482eda14cbcSMatt Macy 		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4483eda14cbcSMatt Macy 	}
4484eda14cbcSMatt Macy 
4485eda14cbcSMatt Macy 	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4486eda14cbcSMatt Macy 	list_link_init(&mat->mat_list_node);
4487eda14cbcSMatt Macy 	mat->mat_mg = mg;
4488eda14cbcSMatt Macy 	mat->mat_msp = msp;
4489eda14cbcSMatt Macy 	mat->mat_size = psize;
4490eda14cbcSMatt Macy 	mat->mat_dva_id = dva_id;
4491eda14cbcSMatt Macy 	mat->mat_offset = offset;
4492eda14cbcSMatt Macy 	mat->mat_weight = 0;
4493eda14cbcSMatt Macy 	mat->mat_allocator = allocator;
4494eda14cbcSMatt Macy 
4495eda14cbcSMatt Macy 	if (msp != NULL)
4496eda14cbcSMatt Macy 		mat->mat_weight = msp->ms_weight;
4497eda14cbcSMatt Macy 
4498eda14cbcSMatt Macy 	/*
4499eda14cbcSMatt Macy 	 * The list is part of the zio so locking is not required. Only
4500eda14cbcSMatt Macy 	 * a single thread will perform allocations for a given zio.
4501eda14cbcSMatt Macy 	 */
4502eda14cbcSMatt Macy 	list_insert_tail(&zal->zal_list, mat);
4503eda14cbcSMatt Macy 	zal->zal_size++;
4504eda14cbcSMatt Macy 
4505eda14cbcSMatt Macy 	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4506eda14cbcSMatt Macy }
4507eda14cbcSMatt Macy 
4508eda14cbcSMatt Macy void
4509eda14cbcSMatt Macy metaslab_trace_init(zio_alloc_list_t *zal)
4510eda14cbcSMatt Macy {
4511eda14cbcSMatt Macy 	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4512eda14cbcSMatt Macy 	    offsetof(metaslab_alloc_trace_t, mat_list_node));
4513eda14cbcSMatt Macy 	zal->zal_size = 0;
4514eda14cbcSMatt Macy }
4515eda14cbcSMatt Macy 
4516eda14cbcSMatt Macy void
4517eda14cbcSMatt Macy metaslab_trace_fini(zio_alloc_list_t *zal)
4518eda14cbcSMatt Macy {
4519eda14cbcSMatt Macy 	metaslab_alloc_trace_t *mat;
4520eda14cbcSMatt Macy 
4521eda14cbcSMatt Macy 	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4522eda14cbcSMatt Macy 		kmem_cache_free(metaslab_alloc_trace_cache, mat);
4523eda14cbcSMatt Macy 	list_destroy(&zal->zal_list);
4524eda14cbcSMatt Macy 	zal->zal_size = 0;
4525eda14cbcSMatt Macy }
4526eda14cbcSMatt Macy 
4527eda14cbcSMatt Macy /*
4528eda14cbcSMatt Macy  * ==========================================================================
4529eda14cbcSMatt Macy  * Metaslab block operations
4530eda14cbcSMatt Macy  * ==========================================================================
4531eda14cbcSMatt Macy  */
4532eda14cbcSMatt Macy 
4533eda14cbcSMatt Macy static void
4534a0b956f5SMartin Matuska metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
4535a0b956f5SMartin Matuska     int flags, int allocator)
4536eda14cbcSMatt Macy {
4537eda14cbcSMatt Macy 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4538eda14cbcSMatt Macy 	    (flags & METASLAB_DONT_THROTTLE))
4539eda14cbcSMatt Macy 		return;
4540eda14cbcSMatt Macy 
4541eda14cbcSMatt Macy 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4542eda14cbcSMatt Macy 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4543eda14cbcSMatt Macy 		return;
4544eda14cbcSMatt Macy 
4545eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4546eda14cbcSMatt Macy 	(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
4547eda14cbcSMatt Macy }
4548eda14cbcSMatt Macy 
4549eda14cbcSMatt Macy static void
4550eda14cbcSMatt Macy metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
4551eda14cbcSMatt Macy {
4552eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
45537877fdebSMatt Macy 	metaslab_class_allocator_t *mca =
45547877fdebSMatt Macy 	    &mg->mg_class->mc_allocator[allocator];
4555eda14cbcSMatt Macy 	uint64_t max = mg->mg_max_alloc_queue_depth;
4556eda14cbcSMatt Macy 	uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
4557eda14cbcSMatt Macy 	while (cur < max) {
4558eda14cbcSMatt Macy 		if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
4559eda14cbcSMatt Macy 		    cur, cur + 1) == cur) {
45607877fdebSMatt Macy 			atomic_inc_64(&mca->mca_alloc_max_slots);
4561eda14cbcSMatt Macy 			return;
4562eda14cbcSMatt Macy 		}
4563eda14cbcSMatt Macy 		cur = mga->mga_cur_max_alloc_queue_depth;
4564eda14cbcSMatt Macy 	}
4565eda14cbcSMatt Macy }
4566eda14cbcSMatt Macy 
4567eda14cbcSMatt Macy void
4568a0b956f5SMartin Matuska metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
4569a0b956f5SMartin Matuska     int flags, int allocator, boolean_t io_complete)
4570eda14cbcSMatt Macy {
4571eda14cbcSMatt Macy 	if (!(flags & METASLAB_ASYNC_ALLOC) ||
4572eda14cbcSMatt Macy 	    (flags & METASLAB_DONT_THROTTLE))
4573eda14cbcSMatt Macy 		return;
4574eda14cbcSMatt Macy 
4575eda14cbcSMatt Macy 	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4576eda14cbcSMatt Macy 	if (!mg->mg_class->mc_alloc_throttle_enabled)
4577eda14cbcSMatt Macy 		return;
4578eda14cbcSMatt Macy 
4579eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4580eda14cbcSMatt Macy 	(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
4581eda14cbcSMatt Macy 	if (io_complete)
4582eda14cbcSMatt Macy 		metaslab_group_increment_qdepth(mg, allocator);
4583eda14cbcSMatt Macy }
4584eda14cbcSMatt Macy 
4585eda14cbcSMatt Macy void
4586a0b956f5SMartin Matuska metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
4587eda14cbcSMatt Macy     int allocator)
4588eda14cbcSMatt Macy {
4589eda14cbcSMatt Macy #ifdef ZFS_DEBUG
4590eda14cbcSMatt Macy 	const dva_t *dva = bp->blk_dva;
4591eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
4592eda14cbcSMatt Macy 
4593eda14cbcSMatt Macy 	for (int d = 0; d < ndvas; d++) {
4594eda14cbcSMatt Macy 		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
4595eda14cbcSMatt Macy 		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4596eda14cbcSMatt Macy 		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4597eda14cbcSMatt Macy 		VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
4598eda14cbcSMatt Macy 	}
4599eda14cbcSMatt Macy #endif
4600eda14cbcSMatt Macy }
4601eda14cbcSMatt Macy 
4602eda14cbcSMatt Macy static uint64_t
4603eda14cbcSMatt Macy metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4604eda14cbcSMatt Macy {
4605eda14cbcSMatt Macy 	uint64_t start;
4606eda14cbcSMatt Macy 	range_tree_t *rt = msp->ms_allocatable;
4607eda14cbcSMatt Macy 	metaslab_class_t *mc = msp->ms_group->mg_class;
4608eda14cbcSMatt Macy 
4609eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4610eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
4611eda14cbcSMatt Macy 	VERIFY0(msp->ms_disabled);
4612eda14cbcSMatt Macy 
4613eda14cbcSMatt Macy 	start = mc->mc_ops->msop_alloc(msp, size);
4614eda14cbcSMatt Macy 	if (start != -1ULL) {
4615eda14cbcSMatt Macy 		metaslab_group_t *mg = msp->ms_group;
4616eda14cbcSMatt Macy 		vdev_t *vd = mg->mg_vd;
4617eda14cbcSMatt Macy 
4618eda14cbcSMatt Macy 		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4619eda14cbcSMatt Macy 		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4620eda14cbcSMatt Macy 		VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
4621eda14cbcSMatt Macy 		range_tree_remove(rt, start, size);
4622eda14cbcSMatt Macy 		range_tree_clear(msp->ms_trim, start, size);
4623eda14cbcSMatt Macy 
4624eda14cbcSMatt Macy 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4625eda14cbcSMatt Macy 			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4626eda14cbcSMatt Macy 
4627eda14cbcSMatt Macy 		range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
4628eda14cbcSMatt Macy 		msp->ms_allocating_total += size;
4629eda14cbcSMatt Macy 
4630eda14cbcSMatt Macy 		/* Track the last successful allocation */
4631eda14cbcSMatt Macy 		msp->ms_alloc_txg = txg;
4632eda14cbcSMatt Macy 		metaslab_verify_space(msp, txg);
4633eda14cbcSMatt Macy 	}
4634eda14cbcSMatt Macy 
4635eda14cbcSMatt Macy 	/*
4636eda14cbcSMatt Macy 	 * Now that we've attempted the allocation we need to update the
4637eda14cbcSMatt Macy 	 * metaslab's maximum block size since it may have changed.
4638eda14cbcSMatt Macy 	 */
4639eda14cbcSMatt Macy 	msp->ms_max_size = metaslab_largest_allocatable(msp);
4640eda14cbcSMatt Macy 	return (start);
4641eda14cbcSMatt Macy }
4642eda14cbcSMatt Macy 
4643eda14cbcSMatt Macy /*
4644eda14cbcSMatt Macy  * Find the metaslab with the highest weight that is less than what we've
4645eda14cbcSMatt Macy  * already tried.  In the common case, this means that we will examine each
4646eda14cbcSMatt Macy  * metaslab at most once. Note that concurrent callers could reorder metaslabs
4647eda14cbcSMatt Macy  * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4648eda14cbcSMatt Macy  * activated by another thread, and we fail to allocate from the metaslab we
4649eda14cbcSMatt Macy  * have selected, we may not try the newly-activated metaslab, and instead
4650eda14cbcSMatt Macy  * activate another metaslab.  This is not optimal, but generally does not cause
4651eda14cbcSMatt Macy  * any problems (a possible exception being if every metaslab is completely full
4652eda14cbcSMatt Macy  * except for the newly-activated metaslab which we fail to examine).
4653eda14cbcSMatt Macy  */
4654eda14cbcSMatt Macy static metaslab_t *
4655eda14cbcSMatt Macy find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4656eda14cbcSMatt Macy     dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
4657eda14cbcSMatt Macy     boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4658eda14cbcSMatt Macy     boolean_t *was_active)
4659eda14cbcSMatt Macy {
4660eda14cbcSMatt Macy 	avl_index_t idx;
4661eda14cbcSMatt Macy 	avl_tree_t *t = &mg->mg_metaslab_tree;
4662eda14cbcSMatt Macy 	metaslab_t *msp = avl_find(t, search, &idx);
4663eda14cbcSMatt Macy 	if (msp == NULL)
4664eda14cbcSMatt Macy 		msp = avl_nearest(t, idx, AVL_AFTER);
4665eda14cbcSMatt Macy 
4666be181ee2SMartin Matuska 	uint_t tries = 0;
4667eda14cbcSMatt Macy 	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4668eda14cbcSMatt Macy 		int i;
46697877fdebSMatt Macy 
46707877fdebSMatt Macy 		if (!try_hard && tries > zfs_metaslab_find_max_tries) {
46717877fdebSMatt Macy 			METASLABSTAT_BUMP(metaslabstat_too_many_tries);
46727877fdebSMatt Macy 			return (NULL);
46737877fdebSMatt Macy 		}
46747877fdebSMatt Macy 		tries++;
46757877fdebSMatt Macy 
4676eda14cbcSMatt Macy 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4677eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4678eda14cbcSMatt Macy 			    TRACE_TOO_SMALL, allocator);
4679eda14cbcSMatt Macy 			continue;
4680eda14cbcSMatt Macy 		}
4681eda14cbcSMatt Macy 
4682eda14cbcSMatt Macy 		/*
4683eda14cbcSMatt Macy 		 * If the selected metaslab is condensing or disabled,
4684eda14cbcSMatt Macy 		 * skip it.
4685eda14cbcSMatt Macy 		 */
4686eda14cbcSMatt Macy 		if (msp->ms_condensing || msp->ms_disabled > 0)
4687eda14cbcSMatt Macy 			continue;
4688eda14cbcSMatt Macy 
4689eda14cbcSMatt Macy 		*was_active = msp->ms_allocator != -1;
4690eda14cbcSMatt Macy 		/*
4691eda14cbcSMatt Macy 		 * If we're activating as primary, this is our first allocation
4692eda14cbcSMatt Macy 		 * from this disk, so we don't need to check how close we are.
4693eda14cbcSMatt Macy 		 * If the metaslab under consideration was already active,
4694eda14cbcSMatt Macy 		 * we're getting desperate enough to steal another allocator's
4695eda14cbcSMatt Macy 		 * metaslab, so we still don't care about distances.
4696eda14cbcSMatt Macy 		 */
4697eda14cbcSMatt Macy 		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4698eda14cbcSMatt Macy 			break;
4699eda14cbcSMatt Macy 
4700eda14cbcSMatt Macy 		for (i = 0; i < d; i++) {
4701eda14cbcSMatt Macy 			if (want_unique &&
4702eda14cbcSMatt Macy 			    !metaslab_is_unique(msp, &dva[i]))
4703eda14cbcSMatt Macy 				break;  /* try another metaslab */
4704eda14cbcSMatt Macy 		}
4705eda14cbcSMatt Macy 		if (i == d)
4706eda14cbcSMatt Macy 			break;
4707eda14cbcSMatt Macy 	}
4708eda14cbcSMatt Macy 
4709eda14cbcSMatt Macy 	if (msp != NULL) {
4710eda14cbcSMatt Macy 		search->ms_weight = msp->ms_weight;
4711eda14cbcSMatt Macy 		search->ms_start = msp->ms_start + 1;
4712eda14cbcSMatt Macy 		search->ms_allocator = msp->ms_allocator;
4713eda14cbcSMatt Macy 		search->ms_primary = msp->ms_primary;
4714eda14cbcSMatt Macy 	}
4715eda14cbcSMatt Macy 	return (msp);
4716eda14cbcSMatt Macy }
4717eda14cbcSMatt Macy 
4718eda14cbcSMatt Macy static void
4719eda14cbcSMatt Macy metaslab_active_mask_verify(metaslab_t *msp)
4720eda14cbcSMatt Macy {
4721eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&msp->ms_lock));
4722eda14cbcSMatt Macy 
4723eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4724eda14cbcSMatt Macy 		return;
4725eda14cbcSMatt Macy 
4726eda14cbcSMatt Macy 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4727eda14cbcSMatt Macy 		return;
4728eda14cbcSMatt Macy 
4729eda14cbcSMatt Macy 	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4730eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4731eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4732eda14cbcSMatt Macy 		VERIFY3S(msp->ms_allocator, !=, -1);
4733eda14cbcSMatt Macy 		VERIFY(msp->ms_primary);
4734eda14cbcSMatt Macy 		return;
4735eda14cbcSMatt Macy 	}
4736eda14cbcSMatt Macy 
4737eda14cbcSMatt Macy 	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4738eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4739eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4740eda14cbcSMatt Macy 		VERIFY3S(msp->ms_allocator, !=, -1);
4741eda14cbcSMatt Macy 		VERIFY(!msp->ms_primary);
4742eda14cbcSMatt Macy 		return;
4743eda14cbcSMatt Macy 	}
4744eda14cbcSMatt Macy 
4745eda14cbcSMatt Macy 	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4746eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4747eda14cbcSMatt Macy 		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4748eda14cbcSMatt Macy 		VERIFY3S(msp->ms_allocator, ==, -1);
4749eda14cbcSMatt Macy 		return;
4750eda14cbcSMatt Macy 	}
4751eda14cbcSMatt Macy }
4752eda14cbcSMatt Macy 
4753eda14cbcSMatt Macy static uint64_t
4754eda14cbcSMatt Macy metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
4755eda14cbcSMatt Macy     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
4756eda14cbcSMatt Macy     int allocator, boolean_t try_hard)
4757eda14cbcSMatt Macy {
4758eda14cbcSMatt Macy 	metaslab_t *msp = NULL;
4759eda14cbcSMatt Macy 	uint64_t offset = -1ULL;
4760eda14cbcSMatt Macy 
4761eda14cbcSMatt Macy 	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4762eda14cbcSMatt Macy 	for (int i = 0; i < d; i++) {
4763eda14cbcSMatt Macy 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4764eda14cbcSMatt Macy 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4765eda14cbcSMatt Macy 			activation_weight = METASLAB_WEIGHT_SECONDARY;
4766eda14cbcSMatt Macy 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4767eda14cbcSMatt Macy 		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4768eda14cbcSMatt Macy 			activation_weight = METASLAB_WEIGHT_CLAIM;
4769eda14cbcSMatt Macy 			break;
4770eda14cbcSMatt Macy 		}
4771eda14cbcSMatt Macy 	}
4772eda14cbcSMatt Macy 
4773eda14cbcSMatt Macy 	/*
4774eda14cbcSMatt Macy 	 * If we don't have enough metaslabs active to fill the entire array, we
4775eda14cbcSMatt Macy 	 * just use the 0th slot.
4776eda14cbcSMatt Macy 	 */
4777eda14cbcSMatt Macy 	if (mg->mg_ms_ready < mg->mg_allocators * 3)
4778eda14cbcSMatt Macy 		allocator = 0;
4779eda14cbcSMatt Macy 	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4780eda14cbcSMatt Macy 
4781eda14cbcSMatt Macy 	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4782eda14cbcSMatt Macy 
4783eda14cbcSMatt Macy 	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4784eda14cbcSMatt Macy 	search->ms_weight = UINT64_MAX;
4785eda14cbcSMatt Macy 	search->ms_start = 0;
4786eda14cbcSMatt Macy 	/*
4787eda14cbcSMatt Macy 	 * At the end of the metaslab tree are the already-active metaslabs,
4788eda14cbcSMatt Macy 	 * first the primaries, then the secondaries. When we resume searching
4789eda14cbcSMatt Macy 	 * through the tree, we need to consider ms_allocator and ms_primary so
4790eda14cbcSMatt Macy 	 * we start in the location right after where we left off, and don't
4791eda14cbcSMatt Macy 	 * accidentally loop forever considering the same metaslabs.
4792eda14cbcSMatt Macy 	 */
4793eda14cbcSMatt Macy 	search->ms_allocator = -1;
4794eda14cbcSMatt Macy 	search->ms_primary = B_TRUE;
4795eda14cbcSMatt Macy 	for (;;) {
4796eda14cbcSMatt Macy 		boolean_t was_active = B_FALSE;
4797eda14cbcSMatt Macy 
4798eda14cbcSMatt Macy 		mutex_enter(&mg->mg_lock);
4799eda14cbcSMatt Macy 
4800eda14cbcSMatt Macy 		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4801eda14cbcSMatt Macy 		    mga->mga_primary != NULL) {
4802eda14cbcSMatt Macy 			msp = mga->mga_primary;
4803eda14cbcSMatt Macy 
4804eda14cbcSMatt Macy 			/*
4805eda14cbcSMatt Macy 			 * Even though we don't hold the ms_lock for the
4806eda14cbcSMatt Macy 			 * primary metaslab, those fields should not
4807eda14cbcSMatt Macy 			 * change while we hold the mg_lock. Thus it is
4808eda14cbcSMatt Macy 			 * safe to make assertions on them.
4809eda14cbcSMatt Macy 			 */
4810eda14cbcSMatt Macy 			ASSERT(msp->ms_primary);
4811eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, allocator);
4812eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4813eda14cbcSMatt Macy 
4814eda14cbcSMatt Macy 			was_active = B_TRUE;
4815eda14cbcSMatt Macy 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4816eda14cbcSMatt Macy 		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4817eda14cbcSMatt Macy 		    mga->mga_secondary != NULL) {
4818eda14cbcSMatt Macy 			msp = mga->mga_secondary;
4819eda14cbcSMatt Macy 
4820eda14cbcSMatt Macy 			/*
4821eda14cbcSMatt Macy 			 * See comment above about the similar assertions
4822eda14cbcSMatt Macy 			 * for the primary metaslab.
4823eda14cbcSMatt Macy 			 */
4824eda14cbcSMatt Macy 			ASSERT(!msp->ms_primary);
4825eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, allocator);
4826eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4827eda14cbcSMatt Macy 
4828eda14cbcSMatt Macy 			was_active = B_TRUE;
4829eda14cbcSMatt Macy 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4830eda14cbcSMatt Macy 		} else {
4831eda14cbcSMatt Macy 			msp = find_valid_metaslab(mg, activation_weight, dva, d,
4832eda14cbcSMatt Macy 			    want_unique, asize, allocator, try_hard, zal,
4833eda14cbcSMatt Macy 			    search, &was_active);
4834eda14cbcSMatt Macy 		}
4835eda14cbcSMatt Macy 
4836eda14cbcSMatt Macy 		mutex_exit(&mg->mg_lock);
4837eda14cbcSMatt Macy 		if (msp == NULL) {
4838eda14cbcSMatt Macy 			kmem_free(search, sizeof (*search));
4839eda14cbcSMatt Macy 			return (-1ULL);
4840eda14cbcSMatt Macy 		}
4841eda14cbcSMatt Macy 		mutex_enter(&msp->ms_lock);
4842eda14cbcSMatt Macy 
4843eda14cbcSMatt Macy 		metaslab_active_mask_verify(msp);
4844eda14cbcSMatt Macy 
4845eda14cbcSMatt Macy 		/*
4846eda14cbcSMatt Macy 		 * This code is disabled out because of issues with
4847eda14cbcSMatt Macy 		 * tracepoints in non-gpl kernel modules.
4848eda14cbcSMatt Macy 		 */
4849eda14cbcSMatt Macy #if 0
4850eda14cbcSMatt Macy 		DTRACE_PROBE3(ms__activation__attempt,
4851eda14cbcSMatt Macy 		    metaslab_t *, msp, uint64_t, activation_weight,
4852eda14cbcSMatt Macy 		    boolean_t, was_active);
4853eda14cbcSMatt Macy #endif
4854eda14cbcSMatt Macy 
4855eda14cbcSMatt Macy 		/*
4856eda14cbcSMatt Macy 		 * Ensure that the metaslab we have selected is still
4857eda14cbcSMatt Macy 		 * capable of handling our request. It's possible that
4858eda14cbcSMatt Macy 		 * another thread may have changed the weight while we
4859eda14cbcSMatt Macy 		 * were blocked on the metaslab lock. We check the
4860eda14cbcSMatt Macy 		 * active status first to see if we need to set_selected_txg
4861eda14cbcSMatt Macy 		 * a new metaslab.
4862eda14cbcSMatt Macy 		 */
4863eda14cbcSMatt Macy 		if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4864eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, -1);
4865eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4866eda14cbcSMatt Macy 			continue;
4867eda14cbcSMatt Macy 		}
4868eda14cbcSMatt Macy 
4869eda14cbcSMatt Macy 		/*
4870eda14cbcSMatt Macy 		 * If the metaslab was activated for another allocator
4871eda14cbcSMatt Macy 		 * while we were waiting in the ms_lock above, or it's
4872eda14cbcSMatt Macy 		 * a primary and we're seeking a secondary (or vice versa),
4873eda14cbcSMatt Macy 		 * we go back and select a new metaslab.
4874eda14cbcSMatt Macy 		 */
4875eda14cbcSMatt Macy 		if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4876eda14cbcSMatt Macy 		    (msp->ms_allocator != -1) &&
4877eda14cbcSMatt Macy 		    (msp->ms_allocator != allocator || ((activation_weight ==
4878eda14cbcSMatt Macy 		    METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
4879eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4880eda14cbcSMatt Macy 			ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
4881eda14cbcSMatt Macy 			    msp->ms_allocator != -1);
4882eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4883eda14cbcSMatt Macy 			continue;
4884eda14cbcSMatt Macy 		}
4885eda14cbcSMatt Macy 
4886eda14cbcSMatt Macy 		/*
4887eda14cbcSMatt Macy 		 * This metaslab was used for claiming regions allocated
4888eda14cbcSMatt Macy 		 * by the ZIL during pool import. Once these regions are
4889eda14cbcSMatt Macy 		 * claimed we don't need to keep the CLAIM bit set
4890eda14cbcSMatt Macy 		 * anymore. Passivate this metaslab to zero its activation
4891eda14cbcSMatt Macy 		 * mask.
4892eda14cbcSMatt Macy 		 */
4893eda14cbcSMatt Macy 		if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
4894eda14cbcSMatt Macy 		    activation_weight != METASLAB_WEIGHT_CLAIM) {
4895eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
4896eda14cbcSMatt Macy 			ASSERT3S(msp->ms_allocator, ==, -1);
4897eda14cbcSMatt Macy 			metaslab_passivate(msp, msp->ms_weight &
4898eda14cbcSMatt Macy 			    ~METASLAB_WEIGHT_CLAIM);
4899eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4900eda14cbcSMatt Macy 			continue;
4901eda14cbcSMatt Macy 		}
4902eda14cbcSMatt Macy 
4903eda14cbcSMatt Macy 		metaslab_set_selected_txg(msp, txg);
4904eda14cbcSMatt Macy 
4905eda14cbcSMatt Macy 		int activation_error =
4906eda14cbcSMatt Macy 		    metaslab_activate(msp, allocator, activation_weight);
4907eda14cbcSMatt Macy 		metaslab_active_mask_verify(msp);
4908eda14cbcSMatt Macy 
4909eda14cbcSMatt Macy 		/*
4910eda14cbcSMatt Macy 		 * If the metaslab was activated by another thread for
4911eda14cbcSMatt Macy 		 * another allocator or activation_weight (EBUSY), or it
4912eda14cbcSMatt Macy 		 * failed because another metaslab was assigned as primary
4913eda14cbcSMatt Macy 		 * for this allocator (EEXIST) we continue using this
4914eda14cbcSMatt Macy 		 * metaslab for our allocation, rather than going on to a
4915eda14cbcSMatt Macy 		 * worse metaslab (we waited for that metaslab to be loaded
4916eda14cbcSMatt Macy 		 * after all).
4917eda14cbcSMatt Macy 		 *
4918eda14cbcSMatt Macy 		 * If the activation failed due to an I/O error or ENOSPC we
4919eda14cbcSMatt Macy 		 * skip to the next metaslab.
4920eda14cbcSMatt Macy 		 */
4921eda14cbcSMatt Macy 		boolean_t activated;
4922eda14cbcSMatt Macy 		if (activation_error == 0) {
4923eda14cbcSMatt Macy 			activated = B_TRUE;
4924eda14cbcSMatt Macy 		} else if (activation_error == EBUSY ||
4925eda14cbcSMatt Macy 		    activation_error == EEXIST) {
4926eda14cbcSMatt Macy 			activated = B_FALSE;
4927eda14cbcSMatt Macy 		} else {
4928eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4929eda14cbcSMatt Macy 			continue;
4930eda14cbcSMatt Macy 		}
4931eda14cbcSMatt Macy 		ASSERT(msp->ms_loaded);
4932eda14cbcSMatt Macy 
4933eda14cbcSMatt Macy 		/*
4934eda14cbcSMatt Macy 		 * Now that we have the lock, recheck to see if we should
4935eda14cbcSMatt Macy 		 * continue to use this metaslab for this allocation. The
4936eda14cbcSMatt Macy 		 * the metaslab is now loaded so metaslab_should_allocate()
4937eda14cbcSMatt Macy 		 * can accurately determine if the allocation attempt should
4938eda14cbcSMatt Macy 		 * proceed.
4939eda14cbcSMatt Macy 		 */
4940eda14cbcSMatt Macy 		if (!metaslab_should_allocate(msp, asize, try_hard)) {
4941eda14cbcSMatt Macy 			/* Passivate this metaslab and select a new one. */
4942eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4943eda14cbcSMatt Macy 			    TRACE_TOO_SMALL, allocator);
4944eda14cbcSMatt Macy 			goto next;
4945eda14cbcSMatt Macy 		}
4946eda14cbcSMatt Macy 
4947eda14cbcSMatt Macy 		/*
4948eda14cbcSMatt Macy 		 * If this metaslab is currently condensing then pick again
4949eda14cbcSMatt Macy 		 * as we can't manipulate this metaslab until it's committed
4950eda14cbcSMatt Macy 		 * to disk. If this metaslab is being initialized, we shouldn't
4951eda14cbcSMatt Macy 		 * allocate from it since the allocated region might be
4952eda14cbcSMatt Macy 		 * overwritten after allocation.
4953eda14cbcSMatt Macy 		 */
4954eda14cbcSMatt Macy 		if (msp->ms_condensing) {
4955eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4956eda14cbcSMatt Macy 			    TRACE_CONDENSING, allocator);
4957eda14cbcSMatt Macy 			if (activated) {
4958eda14cbcSMatt Macy 				metaslab_passivate(msp, msp->ms_weight &
4959eda14cbcSMatt Macy 				    ~METASLAB_ACTIVE_MASK);
4960eda14cbcSMatt Macy 			}
4961eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4962eda14cbcSMatt Macy 			continue;
4963eda14cbcSMatt Macy 		} else if (msp->ms_disabled > 0) {
4964eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, msp, asize, d,
4965eda14cbcSMatt Macy 			    TRACE_DISABLED, allocator);
4966eda14cbcSMatt Macy 			if (activated) {
4967eda14cbcSMatt Macy 				metaslab_passivate(msp, msp->ms_weight &
4968eda14cbcSMatt Macy 				    ~METASLAB_ACTIVE_MASK);
4969eda14cbcSMatt Macy 			}
4970eda14cbcSMatt Macy 			mutex_exit(&msp->ms_lock);
4971eda14cbcSMatt Macy 			continue;
4972eda14cbcSMatt Macy 		}
4973eda14cbcSMatt Macy 
4974eda14cbcSMatt Macy 		offset = metaslab_block_alloc(msp, asize, txg);
4975eda14cbcSMatt Macy 		metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
4976eda14cbcSMatt Macy 
4977eda14cbcSMatt Macy 		if (offset != -1ULL) {
4978eda14cbcSMatt Macy 			/* Proactively passivate the metaslab, if needed */
4979eda14cbcSMatt Macy 			if (activated)
4980eda14cbcSMatt Macy 				metaslab_segment_may_passivate(msp);
4981eda14cbcSMatt Macy 			break;
4982eda14cbcSMatt Macy 		}
4983eda14cbcSMatt Macy next:
4984eda14cbcSMatt Macy 		ASSERT(msp->ms_loaded);
4985eda14cbcSMatt Macy 
4986eda14cbcSMatt Macy 		/*
4987eda14cbcSMatt Macy 		 * This code is disabled out because of issues with
4988eda14cbcSMatt Macy 		 * tracepoints in non-gpl kernel modules.
4989eda14cbcSMatt Macy 		 */
4990eda14cbcSMatt Macy #if 0
4991eda14cbcSMatt Macy 		DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
4992eda14cbcSMatt Macy 		    uint64_t, asize);
4993eda14cbcSMatt Macy #endif
4994eda14cbcSMatt Macy 
4995eda14cbcSMatt Macy 		/*
4996eda14cbcSMatt Macy 		 * We were unable to allocate from this metaslab so determine
4997eda14cbcSMatt Macy 		 * a new weight for this metaslab. Now that we have loaded
4998eda14cbcSMatt Macy 		 * the metaslab we can provide a better hint to the metaslab
4999eda14cbcSMatt Macy 		 * selector.
5000eda14cbcSMatt Macy 		 *
5001eda14cbcSMatt Macy 		 * For space-based metaslabs, we use the maximum block size.
5002eda14cbcSMatt Macy 		 * This information is only available when the metaslab
5003eda14cbcSMatt Macy 		 * is loaded and is more accurate than the generic free
5004eda14cbcSMatt Macy 		 * space weight that was calculated by metaslab_weight().
5005eda14cbcSMatt Macy 		 * This information allows us to quickly compare the maximum
5006eda14cbcSMatt Macy 		 * available allocation in the metaslab to the allocation
5007eda14cbcSMatt Macy 		 * size being requested.
5008eda14cbcSMatt Macy 		 *
5009eda14cbcSMatt Macy 		 * For segment-based metaslabs, determine the new weight
5010eda14cbcSMatt Macy 		 * based on the highest bucket in the range tree. We
5011eda14cbcSMatt Macy 		 * explicitly use the loaded segment weight (i.e. the range
5012eda14cbcSMatt Macy 		 * tree histogram) since it contains the space that is
5013eda14cbcSMatt Macy 		 * currently available for allocation and is accurate
5014eda14cbcSMatt Macy 		 * even within a sync pass.
5015eda14cbcSMatt Macy 		 */
5016eda14cbcSMatt Macy 		uint64_t weight;
5017eda14cbcSMatt Macy 		if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
5018eda14cbcSMatt Macy 			weight = metaslab_largest_allocatable(msp);
5019eda14cbcSMatt Macy 			WEIGHT_SET_SPACEBASED(weight);
5020eda14cbcSMatt Macy 		} else {
5021eda14cbcSMatt Macy 			weight = metaslab_weight_from_range_tree(msp);
5022eda14cbcSMatt Macy 		}
5023eda14cbcSMatt Macy 
5024eda14cbcSMatt Macy 		if (activated) {
5025eda14cbcSMatt Macy 			metaslab_passivate(msp, weight);
5026eda14cbcSMatt Macy 		} else {
5027eda14cbcSMatt Macy 			/*
5028eda14cbcSMatt Macy 			 * For the case where we use the metaslab that is
5029eda14cbcSMatt Macy 			 * active for another allocator we want to make
5030eda14cbcSMatt Macy 			 * sure that we retain the activation mask.
5031eda14cbcSMatt Macy 			 *
5032eda14cbcSMatt Macy 			 * Note that we could attempt to use something like
5033eda14cbcSMatt Macy 			 * metaslab_recalculate_weight_and_sort() that
5034eda14cbcSMatt Macy 			 * retains the activation mask here. That function
5035eda14cbcSMatt Macy 			 * uses metaslab_weight() to set the weight though
5036eda14cbcSMatt Macy 			 * which is not as accurate as the calculations
5037eda14cbcSMatt Macy 			 * above.
5038eda14cbcSMatt Macy 			 */
5039eda14cbcSMatt Macy 			weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5040eda14cbcSMatt Macy 			metaslab_group_sort(mg, msp, weight);
5041eda14cbcSMatt Macy 		}
5042eda14cbcSMatt Macy 		metaslab_active_mask_verify(msp);
5043eda14cbcSMatt Macy 
5044eda14cbcSMatt Macy 		/*
5045eda14cbcSMatt Macy 		 * We have just failed an allocation attempt, check
5046eda14cbcSMatt Macy 		 * that metaslab_should_allocate() agrees. Otherwise,
5047eda14cbcSMatt Macy 		 * we may end up in an infinite loop retrying the same
5048eda14cbcSMatt Macy 		 * metaslab.
5049eda14cbcSMatt Macy 		 */
5050eda14cbcSMatt Macy 		ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5051eda14cbcSMatt Macy 
5052eda14cbcSMatt Macy 		mutex_exit(&msp->ms_lock);
5053eda14cbcSMatt Macy 	}
5054eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5055eda14cbcSMatt Macy 	kmem_free(search, sizeof (*search));
5056eda14cbcSMatt Macy 	return (offset);
5057eda14cbcSMatt Macy }
5058eda14cbcSMatt Macy 
5059eda14cbcSMatt Macy static uint64_t
5060eda14cbcSMatt Macy metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
5061eda14cbcSMatt Macy     uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
5062eda14cbcSMatt Macy     int allocator, boolean_t try_hard)
5063eda14cbcSMatt Macy {
5064eda14cbcSMatt Macy 	uint64_t offset;
5065eda14cbcSMatt Macy 	ASSERT(mg->mg_initialized);
5066eda14cbcSMatt Macy 
5067eda14cbcSMatt Macy 	offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique,
5068eda14cbcSMatt Macy 	    dva, d, allocator, try_hard);
5069eda14cbcSMatt Macy 
5070eda14cbcSMatt Macy 	mutex_enter(&mg->mg_lock);
5071eda14cbcSMatt Macy 	if (offset == -1ULL) {
5072eda14cbcSMatt Macy 		mg->mg_failed_allocations++;
5073eda14cbcSMatt Macy 		metaslab_trace_add(zal, mg, NULL, asize, d,
5074eda14cbcSMatt Macy 		    TRACE_GROUP_FAILURE, allocator);
5075eda14cbcSMatt Macy 		if (asize == SPA_GANGBLOCKSIZE) {
5076eda14cbcSMatt Macy 			/*
5077eda14cbcSMatt Macy 			 * This metaslab group was unable to allocate
5078eda14cbcSMatt Macy 			 * the minimum gang block size so it must be out of
5079eda14cbcSMatt Macy 			 * space. We must notify the allocation throttle
5080eda14cbcSMatt Macy 			 * to start skipping allocation attempts to this
5081eda14cbcSMatt Macy 			 * metaslab group until more space becomes available.
5082eda14cbcSMatt Macy 			 * Note: this failure cannot be caused by the
5083eda14cbcSMatt Macy 			 * allocation throttle since the allocation throttle
5084eda14cbcSMatt Macy 			 * is only responsible for skipping devices and
5085eda14cbcSMatt Macy 			 * not failing block allocations.
5086eda14cbcSMatt Macy 			 */
5087eda14cbcSMatt Macy 			mg->mg_no_free_space = B_TRUE;
5088eda14cbcSMatt Macy 		}
5089eda14cbcSMatt Macy 	}
5090eda14cbcSMatt Macy 	mg->mg_allocations++;
5091eda14cbcSMatt Macy 	mutex_exit(&mg->mg_lock);
5092eda14cbcSMatt Macy 	return (offset);
5093eda14cbcSMatt Macy }
5094eda14cbcSMatt Macy 
5095eda14cbcSMatt Macy /*
5096eda14cbcSMatt Macy  * Allocate a block for the specified i/o.
5097eda14cbcSMatt Macy  */
5098eda14cbcSMatt Macy int
5099eda14cbcSMatt Macy metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5100eda14cbcSMatt Macy     dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5101eda14cbcSMatt Macy     zio_alloc_list_t *zal, int allocator)
5102eda14cbcSMatt Macy {
51037877fdebSMatt Macy 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5104*315ee00fSMartin Matuska 	metaslab_group_t *mg, *rotor;
5105eda14cbcSMatt Macy 	vdev_t *vd;
5106eda14cbcSMatt Macy 	boolean_t try_hard = B_FALSE;
5107eda14cbcSMatt Macy 
5108eda14cbcSMatt Macy 	ASSERT(!DVA_IS_VALID(&dva[d]));
5109eda14cbcSMatt Macy 
5110eda14cbcSMatt Macy 	/*
5111eda14cbcSMatt Macy 	 * For testing, make some blocks above a certain size be gang blocks.
5112eda14cbcSMatt Macy 	 * This will result in more split blocks when using device removal,
5113eda14cbcSMatt Macy 	 * and a large number of split blocks coupled with ztest-induced
5114eda14cbcSMatt Macy 	 * damage can result in extremely long reconstruction times.  This
5115eda14cbcSMatt Macy 	 * will also test spilling from special to normal.
5116eda14cbcSMatt Macy 	 */
5117*315ee00fSMartin Matuska 	if (psize >= metaslab_force_ganging &&
5118*315ee00fSMartin Matuska 	    metaslab_force_ganging_pct > 0 &&
5119*315ee00fSMartin Matuska 	    (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5120eda14cbcSMatt Macy 		metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5121eda14cbcSMatt Macy 		    allocator);
5122eda14cbcSMatt Macy 		return (SET_ERROR(ENOSPC));
5123eda14cbcSMatt Macy 	}
5124eda14cbcSMatt Macy 
5125eda14cbcSMatt Macy 	/*
5126eda14cbcSMatt Macy 	 * Start at the rotor and loop through all mgs until we find something.
51277877fdebSMatt Macy 	 * Note that there's no locking on mca_rotor or mca_aliquot because
5128eda14cbcSMatt Macy 	 * nothing actually breaks if we miss a few updates -- we just won't
5129eda14cbcSMatt Macy 	 * allocate quite as evenly.  It all balances out over time.
5130eda14cbcSMatt Macy 	 *
5131eda14cbcSMatt Macy 	 * If we are doing ditto or log blocks, try to spread them across
5132eda14cbcSMatt Macy 	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
5133eda14cbcSMatt Macy 	 * allocated all of our ditto blocks, then try and spread them out on
5134eda14cbcSMatt Macy 	 * that vdev as much as possible.  If it turns out to not be possible,
5135eda14cbcSMatt Macy 	 * gradually lower our standards until anything becomes acceptable.
5136eda14cbcSMatt Macy 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5137eda14cbcSMatt Macy 	 * gives us hope of containing our fault domains to something we're
5138eda14cbcSMatt Macy 	 * able to reason about.  Otherwise, any two top-level vdev failures
5139eda14cbcSMatt Macy 	 * will guarantee the loss of data.  With consecutive allocation,
5140eda14cbcSMatt Macy 	 * only two adjacent top-level vdev failures will result in data loss.
5141eda14cbcSMatt Macy 	 *
5142eda14cbcSMatt Macy 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5143eda14cbcSMatt Macy 	 * ourselves on the same vdev as our gang block header.  That
5144eda14cbcSMatt Macy 	 * way, we can hope for locality in vdev_cache, plus it makes our
5145eda14cbcSMatt Macy 	 * fault domains something tractable.
5146eda14cbcSMatt Macy 	 */
5147eda14cbcSMatt Macy 	if (hintdva) {
5148eda14cbcSMatt Macy 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5149eda14cbcSMatt Macy 
5150eda14cbcSMatt Macy 		/*
5151eda14cbcSMatt Macy 		 * It's possible the vdev we're using as the hint no
5152eda14cbcSMatt Macy 		 * longer exists or its mg has been closed (e.g. by
5153eda14cbcSMatt Macy 		 * device removal).  Consult the rotor when
5154eda14cbcSMatt Macy 		 * all else fails.
5155eda14cbcSMatt Macy 		 */
5156eda14cbcSMatt Macy 		if (vd != NULL && vd->vdev_mg != NULL) {
5157184c1b94SMartin Matuska 			mg = vdev_get_mg(vd, mc);
5158eda14cbcSMatt Macy 
5159dbd5678dSMartin Matuska 			if (flags & METASLAB_HINTBP_AVOID)
5160eda14cbcSMatt Macy 				mg = mg->mg_next;
5161eda14cbcSMatt Macy 		} else {
51627877fdebSMatt Macy 			mg = mca->mca_rotor;
5163eda14cbcSMatt Macy 		}
5164eda14cbcSMatt Macy 	} else if (d != 0) {
5165eda14cbcSMatt Macy 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5166eda14cbcSMatt Macy 		mg = vd->vdev_mg->mg_next;
5167eda14cbcSMatt Macy 	} else {
51687877fdebSMatt Macy 		ASSERT(mca->mca_rotor != NULL);
51697877fdebSMatt Macy 		mg = mca->mca_rotor;
5170eda14cbcSMatt Macy 	}
5171eda14cbcSMatt Macy 
5172eda14cbcSMatt Macy 	/*
5173eda14cbcSMatt Macy 	 * If the hint put us into the wrong metaslab class, or into a
5174eda14cbcSMatt Macy 	 * metaslab group that has been passivated, just follow the rotor.
5175eda14cbcSMatt Macy 	 */
5176eda14cbcSMatt Macy 	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
51777877fdebSMatt Macy 		mg = mca->mca_rotor;
5178eda14cbcSMatt Macy 
5179eda14cbcSMatt Macy 	rotor = mg;
5180eda14cbcSMatt Macy top:
5181eda14cbcSMatt Macy 	do {
5182eda14cbcSMatt Macy 		boolean_t allocatable;
5183eda14cbcSMatt Macy 
5184eda14cbcSMatt Macy 		ASSERT(mg->mg_activation_count == 1);
5185eda14cbcSMatt Macy 		vd = mg->mg_vd;
5186eda14cbcSMatt Macy 
5187eda14cbcSMatt Macy 		/*
5188eda14cbcSMatt Macy 		 * Don't allocate from faulted devices.
5189eda14cbcSMatt Macy 		 */
5190eda14cbcSMatt Macy 		if (try_hard) {
5191eda14cbcSMatt Macy 			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5192eda14cbcSMatt Macy 			allocatable = vdev_allocatable(vd);
5193eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ZIO, FTAG);
5194eda14cbcSMatt Macy 		} else {
5195eda14cbcSMatt Macy 			allocatable = vdev_allocatable(vd);
5196eda14cbcSMatt Macy 		}
5197eda14cbcSMatt Macy 
5198eda14cbcSMatt Macy 		/*
5199eda14cbcSMatt Macy 		 * Determine if the selected metaslab group is eligible
5200eda14cbcSMatt Macy 		 * for allocations. If we're ganging then don't allow
5201eda14cbcSMatt Macy 		 * this metaslab group to skip allocations since that would
5202eda14cbcSMatt Macy 		 * inadvertently return ENOSPC and suspend the pool
5203eda14cbcSMatt Macy 		 * even though space is still available.
5204eda14cbcSMatt Macy 		 */
5205eda14cbcSMatt Macy 		if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
5206eda14cbcSMatt Macy 			allocatable = metaslab_group_allocatable(mg, rotor,
520715f0b8c3SMartin Matuska 			    flags, psize, allocator, d);
5208eda14cbcSMatt Macy 		}
5209eda14cbcSMatt Macy 
5210eda14cbcSMatt Macy 		if (!allocatable) {
5211eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, NULL, psize, d,
5212eda14cbcSMatt Macy 			    TRACE_NOT_ALLOCATABLE, allocator);
5213eda14cbcSMatt Macy 			goto next;
5214eda14cbcSMatt Macy 		}
5215eda14cbcSMatt Macy 
5216eda14cbcSMatt Macy 		ASSERT(mg->mg_initialized);
5217eda14cbcSMatt Macy 
5218eda14cbcSMatt Macy 		/*
5219dbd5678dSMartin Matuska 		 * Avoid writing single-copy data to an unhealthy,
5220eda14cbcSMatt Macy 		 * non-redundant vdev, unless we've already tried all
5221eda14cbcSMatt Macy 		 * other vdevs.
5222eda14cbcSMatt Macy 		 */
5223dbd5678dSMartin Matuska 		if (vd->vdev_state < VDEV_STATE_HEALTHY &&
5224eda14cbcSMatt Macy 		    d == 0 && !try_hard && vd->vdev_children == 0) {
5225eda14cbcSMatt Macy 			metaslab_trace_add(zal, mg, NULL, psize, d,
5226eda14cbcSMatt Macy 			    TRACE_VDEV_ERROR, allocator);
5227eda14cbcSMatt Macy 			goto next;
5228eda14cbcSMatt Macy 		}
5229eda14cbcSMatt Macy 
5230eda14cbcSMatt Macy 		ASSERT(mg->mg_class == mc);
5231eda14cbcSMatt Macy 
5232eda14cbcSMatt Macy 		uint64_t asize = vdev_psize_to_asize(vd, psize);
5233eda14cbcSMatt Macy 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5234eda14cbcSMatt Macy 
5235eda14cbcSMatt Macy 		/*
5236eda14cbcSMatt Macy 		 * If we don't need to try hard, then require that the
5237eda14cbcSMatt Macy 		 * block be on a different metaslab from any other DVAs
5238eda14cbcSMatt Macy 		 * in this BP (unique=true).  If we are trying hard, then
5239eda14cbcSMatt Macy 		 * allow any metaslab to be used (unique=false).
5240eda14cbcSMatt Macy 		 */
5241eda14cbcSMatt Macy 		uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5242eda14cbcSMatt Macy 		    !try_hard, dva, d, allocator, try_hard);
5243eda14cbcSMatt Macy 
5244eda14cbcSMatt Macy 		if (offset != -1ULL) {
5245eda14cbcSMatt Macy 			/*
5246eda14cbcSMatt Macy 			 * If we've just selected this metaslab group,
5247eda14cbcSMatt Macy 			 * figure out whether the corresponding vdev is
5248eda14cbcSMatt Macy 			 * over- or under-used relative to the pool,
5249eda14cbcSMatt Macy 			 * and set an allocation bias to even it out.
5250eda14cbcSMatt Macy 			 *
5251eda14cbcSMatt Macy 			 * Bias is also used to compensate for unequally
5252eda14cbcSMatt Macy 			 * sized vdevs so that space is allocated fairly.
5253eda14cbcSMatt Macy 			 */
52547877fdebSMatt Macy 			if (mca->mca_aliquot == 0 && metaslab_bias_enabled) {
5255eda14cbcSMatt Macy 				vdev_stat_t *vs = &vd->vdev_stat;
5256eda14cbcSMatt Macy 				int64_t vs_free = vs->vs_space - vs->vs_alloc;
5257eda14cbcSMatt Macy 				int64_t mc_free = mc->mc_space - mc->mc_alloc;
5258eda14cbcSMatt Macy 				int64_t ratio;
5259eda14cbcSMatt Macy 
5260eda14cbcSMatt Macy 				/*
5261eda14cbcSMatt Macy 				 * Calculate how much more or less we should
5262eda14cbcSMatt Macy 				 * try to allocate from this device during
5263eda14cbcSMatt Macy 				 * this iteration around the rotor.
5264eda14cbcSMatt Macy 				 *
5265eda14cbcSMatt Macy 				 * This basically introduces a zero-centered
5266eda14cbcSMatt Macy 				 * bias towards the devices with the most
5267eda14cbcSMatt Macy 				 * free space, while compensating for vdev
5268eda14cbcSMatt Macy 				 * size differences.
5269eda14cbcSMatt Macy 				 *
5270eda14cbcSMatt Macy 				 * Examples:
5271eda14cbcSMatt Macy 				 *  vdev V1 = 16M/128M
5272eda14cbcSMatt Macy 				 *  vdev V2 = 16M/128M
5273eda14cbcSMatt Macy 				 *  ratio(V1) = 100% ratio(V2) = 100%
5274eda14cbcSMatt Macy 				 *
5275eda14cbcSMatt Macy 				 *  vdev V1 = 16M/128M
5276eda14cbcSMatt Macy 				 *  vdev V2 = 64M/128M
5277eda14cbcSMatt Macy 				 *  ratio(V1) = 127% ratio(V2) =  72%
5278eda14cbcSMatt Macy 				 *
5279eda14cbcSMatt Macy 				 *  vdev V1 = 16M/128M
5280eda14cbcSMatt Macy 				 *  vdev V2 = 64M/512M
5281eda14cbcSMatt Macy 				 *  ratio(V1) =  40% ratio(V2) = 160%
5282eda14cbcSMatt Macy 				 */
5283eda14cbcSMatt Macy 				ratio = (vs_free * mc->mc_alloc_groups * 100) /
5284eda14cbcSMatt Macy 				    (mc_free + 1);
5285eda14cbcSMatt Macy 				mg->mg_bias = ((ratio - 100) *
5286eda14cbcSMatt Macy 				    (int64_t)mg->mg_aliquot) / 100;
5287eda14cbcSMatt Macy 			} else if (!metaslab_bias_enabled) {
5288eda14cbcSMatt Macy 				mg->mg_bias = 0;
5289eda14cbcSMatt Macy 			}
5290eda14cbcSMatt Macy 
5291*315ee00fSMartin Matuska 			if ((flags & METASLAB_ZIL) ||
52927877fdebSMatt Macy 			    atomic_add_64_nv(&mca->mca_aliquot, asize) >=
5293eda14cbcSMatt Macy 			    mg->mg_aliquot + mg->mg_bias) {
52947877fdebSMatt Macy 				mca->mca_rotor = mg->mg_next;
52957877fdebSMatt Macy 				mca->mca_aliquot = 0;
5296eda14cbcSMatt Macy 			}
5297eda14cbcSMatt Macy 
5298eda14cbcSMatt Macy 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
5299eda14cbcSMatt Macy 			DVA_SET_OFFSET(&dva[d], offset);
5300eda14cbcSMatt Macy 			DVA_SET_GANG(&dva[d],
5301eda14cbcSMatt Macy 			    ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5302eda14cbcSMatt Macy 			DVA_SET_ASIZE(&dva[d], asize);
5303eda14cbcSMatt Macy 
5304eda14cbcSMatt Macy 			return (0);
5305eda14cbcSMatt Macy 		}
5306eda14cbcSMatt Macy next:
53077877fdebSMatt Macy 		mca->mca_rotor = mg->mg_next;
53087877fdebSMatt Macy 		mca->mca_aliquot = 0;
5309eda14cbcSMatt Macy 	} while ((mg = mg->mg_next) != rotor);
5310eda14cbcSMatt Macy 
5311eda14cbcSMatt Macy 	/*
53127877fdebSMatt Macy 	 * If we haven't tried hard, perhaps do so now.
5313eda14cbcSMatt Macy 	 */
53147877fdebSMatt Macy 	if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
53157877fdebSMatt Macy 	    GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
53167877fdebSMatt Macy 	    psize <= 1 << spa->spa_min_ashift)) {
53177877fdebSMatt Macy 		METASLABSTAT_BUMP(metaslabstat_try_hard);
5318eda14cbcSMatt Macy 		try_hard = B_TRUE;
5319eda14cbcSMatt Macy 		goto top;
5320eda14cbcSMatt Macy 	}
5321eda14cbcSMatt Macy 
5322da5137abSMartin Matuska 	memset(&dva[d], 0, sizeof (dva_t));
5323eda14cbcSMatt Macy 
5324eda14cbcSMatt Macy 	metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5325eda14cbcSMatt Macy 	return (SET_ERROR(ENOSPC));
5326eda14cbcSMatt Macy }
5327eda14cbcSMatt Macy 
5328eda14cbcSMatt Macy void
5329eda14cbcSMatt Macy metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5330eda14cbcSMatt Macy     boolean_t checkpoint)
5331eda14cbcSMatt Macy {
5332eda14cbcSMatt Macy 	metaslab_t *msp;
5333eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
5334eda14cbcSMatt Macy 
5335eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
5336eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5337eda14cbcSMatt Macy 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5338eda14cbcSMatt Macy 
5339eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5340eda14cbcSMatt Macy 
5341eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
5342eda14cbcSMatt Macy 	VERIFY3U(offset, >=, msp->ms_start);
5343eda14cbcSMatt Macy 	VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5344eda14cbcSMatt Macy 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5345eda14cbcSMatt Macy 	VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5346eda14cbcSMatt Macy 
5347eda14cbcSMatt Macy 	metaslab_check_free_impl(vd, offset, asize);
5348eda14cbcSMatt Macy 
5349eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5350eda14cbcSMatt Macy 	if (range_tree_is_empty(msp->ms_freeing) &&
5351eda14cbcSMatt Macy 	    range_tree_is_empty(msp->ms_checkpointing)) {
5352eda14cbcSMatt Macy 		vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5353eda14cbcSMatt Macy 	}
5354eda14cbcSMatt Macy 
5355eda14cbcSMatt Macy 	if (checkpoint) {
5356eda14cbcSMatt Macy 		ASSERT(spa_has_checkpoint(spa));
5357eda14cbcSMatt Macy 		range_tree_add(msp->ms_checkpointing, offset, asize);
5358eda14cbcSMatt Macy 	} else {
5359eda14cbcSMatt Macy 		range_tree_add(msp->ms_freeing, offset, asize);
5360eda14cbcSMatt Macy 	}
5361eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5362eda14cbcSMatt Macy }
5363eda14cbcSMatt Macy 
5364eda14cbcSMatt Macy void
5365eda14cbcSMatt Macy metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5366eda14cbcSMatt Macy     uint64_t size, void *arg)
5367eda14cbcSMatt Macy {
5368e92ffd9bSMartin Matuska 	(void) inner_offset;
5369eda14cbcSMatt Macy 	boolean_t *checkpoint = arg;
5370eda14cbcSMatt Macy 
5371eda14cbcSMatt Macy 	ASSERT3P(checkpoint, !=, NULL);
5372eda14cbcSMatt Macy 
5373eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap != NULL)
5374eda14cbcSMatt Macy 		vdev_indirect_mark_obsolete(vd, offset, size);
5375eda14cbcSMatt Macy 	else
5376eda14cbcSMatt Macy 		metaslab_free_impl(vd, offset, size, *checkpoint);
5377eda14cbcSMatt Macy }
5378eda14cbcSMatt Macy 
5379eda14cbcSMatt Macy static void
5380eda14cbcSMatt Macy metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5381eda14cbcSMatt Macy     boolean_t checkpoint)
5382eda14cbcSMatt Macy {
5383eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
5384eda14cbcSMatt Macy 
5385eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5386eda14cbcSMatt Macy 
5387eda14cbcSMatt Macy 	if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5388eda14cbcSMatt Macy 		return;
5389eda14cbcSMatt Macy 
5390eda14cbcSMatt Macy 	if (spa->spa_vdev_removal != NULL &&
5391eda14cbcSMatt Macy 	    spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5392eda14cbcSMatt Macy 	    vdev_is_concrete(vd)) {
5393eda14cbcSMatt Macy 		/*
5394eda14cbcSMatt Macy 		 * Note: we check if the vdev is concrete because when
5395eda14cbcSMatt Macy 		 * we complete the removal, we first change the vdev to be
5396eda14cbcSMatt Macy 		 * an indirect vdev (in open context), and then (in syncing
5397eda14cbcSMatt Macy 		 * context) clear spa_vdev_removal.
5398eda14cbcSMatt Macy 		 */
5399eda14cbcSMatt Macy 		free_from_removing_vdev(vd, offset, size);
5400eda14cbcSMatt Macy 	} else if (vd->vdev_ops->vdev_op_remap != NULL) {
5401eda14cbcSMatt Macy 		vdev_indirect_mark_obsolete(vd, offset, size);
5402eda14cbcSMatt Macy 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5403eda14cbcSMatt Macy 		    metaslab_free_impl_cb, &checkpoint);
5404eda14cbcSMatt Macy 	} else {
5405eda14cbcSMatt Macy 		metaslab_free_concrete(vd, offset, size, checkpoint);
5406eda14cbcSMatt Macy 	}
5407eda14cbcSMatt Macy }
5408eda14cbcSMatt Macy 
5409eda14cbcSMatt Macy typedef struct remap_blkptr_cb_arg {
5410eda14cbcSMatt Macy 	blkptr_t *rbca_bp;
5411eda14cbcSMatt Macy 	spa_remap_cb_t rbca_cb;
5412eda14cbcSMatt Macy 	vdev_t *rbca_remap_vd;
5413eda14cbcSMatt Macy 	uint64_t rbca_remap_offset;
5414eda14cbcSMatt Macy 	void *rbca_cb_arg;
5415eda14cbcSMatt Macy } remap_blkptr_cb_arg_t;
5416eda14cbcSMatt Macy 
5417eda14cbcSMatt Macy static void
5418eda14cbcSMatt Macy remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5419eda14cbcSMatt Macy     uint64_t size, void *arg)
5420eda14cbcSMatt Macy {
5421eda14cbcSMatt Macy 	remap_blkptr_cb_arg_t *rbca = arg;
5422eda14cbcSMatt Macy 	blkptr_t *bp = rbca->rbca_bp;
5423eda14cbcSMatt Macy 
5424eda14cbcSMatt Macy 	/* We can not remap split blocks. */
5425eda14cbcSMatt Macy 	if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5426eda14cbcSMatt Macy 		return;
5427eda14cbcSMatt Macy 	ASSERT0(inner_offset);
5428eda14cbcSMatt Macy 
5429eda14cbcSMatt Macy 	if (rbca->rbca_cb != NULL) {
5430eda14cbcSMatt Macy 		/*
5431eda14cbcSMatt Macy 		 * At this point we know that we are not handling split
5432eda14cbcSMatt Macy 		 * blocks and we invoke the callback on the previous
5433eda14cbcSMatt Macy 		 * vdev which must be indirect.
5434eda14cbcSMatt Macy 		 */
5435eda14cbcSMatt Macy 		ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5436eda14cbcSMatt Macy 
5437eda14cbcSMatt Macy 		rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5438eda14cbcSMatt Macy 		    rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5439eda14cbcSMatt Macy 
5440eda14cbcSMatt Macy 		/* set up remap_blkptr_cb_arg for the next call */
5441eda14cbcSMatt Macy 		rbca->rbca_remap_vd = vd;
5442eda14cbcSMatt Macy 		rbca->rbca_remap_offset = offset;
5443eda14cbcSMatt Macy 	}
5444eda14cbcSMatt Macy 
5445eda14cbcSMatt Macy 	/*
5446eda14cbcSMatt Macy 	 * The phys birth time is that of dva[0].  This ensures that we know
5447eda14cbcSMatt Macy 	 * when each dva was written, so that resilver can determine which
5448eda14cbcSMatt Macy 	 * blocks need to be scrubbed (i.e. those written during the time
5449eda14cbcSMatt Macy 	 * the vdev was offline).  It also ensures that the key used in
5450eda14cbcSMatt Macy 	 * the ARC hash table is unique (i.e. dva[0] + phys_birth).  If
5451eda14cbcSMatt Macy 	 * we didn't change the phys_birth, a lookup in the ARC for a
5452eda14cbcSMatt Macy 	 * remapped BP could find the data that was previously stored at
5453eda14cbcSMatt Macy 	 * this vdev + offset.
5454eda14cbcSMatt Macy 	 */
5455eda14cbcSMatt Macy 	vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5456eda14cbcSMatt Macy 	    DVA_GET_VDEV(&bp->blk_dva[0]));
5457eda14cbcSMatt Macy 	vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5458eda14cbcSMatt Macy 	bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
5459eda14cbcSMatt Macy 	    DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5460eda14cbcSMatt Macy 
5461eda14cbcSMatt Macy 	DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5462eda14cbcSMatt Macy 	DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5463eda14cbcSMatt Macy }
5464eda14cbcSMatt Macy 
5465eda14cbcSMatt Macy /*
5466eda14cbcSMatt Macy  * If the block pointer contains any indirect DVAs, modify them to refer to
5467eda14cbcSMatt Macy  * concrete DVAs.  Note that this will sometimes not be possible, leaving
5468eda14cbcSMatt Macy  * the indirect DVA in place.  This happens if the indirect DVA spans multiple
5469eda14cbcSMatt Macy  * segments in the mapping (i.e. it is a "split block").
5470eda14cbcSMatt Macy  *
5471eda14cbcSMatt Macy  * If the BP was remapped, calls the callback on the original dva (note the
5472eda14cbcSMatt Macy  * callback can be called multiple times if the original indirect DVA refers
5473eda14cbcSMatt Macy  * to another indirect DVA, etc).
5474eda14cbcSMatt Macy  *
5475eda14cbcSMatt Macy  * Returns TRUE if the BP was remapped.
5476eda14cbcSMatt Macy  */
5477eda14cbcSMatt Macy boolean_t
5478eda14cbcSMatt Macy spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5479eda14cbcSMatt Macy {
5480eda14cbcSMatt Macy 	remap_blkptr_cb_arg_t rbca;
5481eda14cbcSMatt Macy 
5482eda14cbcSMatt Macy 	if (!zfs_remap_blkptr_enable)
5483eda14cbcSMatt Macy 		return (B_FALSE);
5484eda14cbcSMatt Macy 
5485eda14cbcSMatt Macy 	if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5486eda14cbcSMatt Macy 		return (B_FALSE);
5487eda14cbcSMatt Macy 
5488eda14cbcSMatt Macy 	/*
5489eda14cbcSMatt Macy 	 * Dedup BP's can not be remapped, because ddt_phys_select() depends
5490eda14cbcSMatt Macy 	 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5491eda14cbcSMatt Macy 	 */
5492eda14cbcSMatt Macy 	if (BP_GET_DEDUP(bp))
5493eda14cbcSMatt Macy 		return (B_FALSE);
5494eda14cbcSMatt Macy 
5495eda14cbcSMatt Macy 	/*
5496eda14cbcSMatt Macy 	 * Gang blocks can not be remapped, because
5497eda14cbcSMatt Macy 	 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5498eda14cbcSMatt Macy 	 * the BP used to read the gang block header (GBH) being the same
5499eda14cbcSMatt Macy 	 * as the DVA[0] that we allocated for the GBH.
5500eda14cbcSMatt Macy 	 */
5501eda14cbcSMatt Macy 	if (BP_IS_GANG(bp))
5502eda14cbcSMatt Macy 		return (B_FALSE);
5503eda14cbcSMatt Macy 
5504eda14cbcSMatt Macy 	/*
5505eda14cbcSMatt Macy 	 * Embedded BP's have no DVA to remap.
5506eda14cbcSMatt Macy 	 */
5507eda14cbcSMatt Macy 	if (BP_GET_NDVAS(bp) < 1)
5508eda14cbcSMatt Macy 		return (B_FALSE);
5509eda14cbcSMatt Macy 
5510eda14cbcSMatt Macy 	/*
5511eda14cbcSMatt Macy 	 * Note: we only remap dva[0].  If we remapped other dvas, we
5512eda14cbcSMatt Macy 	 * would no longer know what their phys birth txg is.
5513eda14cbcSMatt Macy 	 */
5514eda14cbcSMatt Macy 	dva_t *dva = &bp->blk_dva[0];
5515eda14cbcSMatt Macy 
5516eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5517eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5518eda14cbcSMatt Macy 	vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5519eda14cbcSMatt Macy 
5520eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap == NULL)
5521eda14cbcSMatt Macy 		return (B_FALSE);
5522eda14cbcSMatt Macy 
5523eda14cbcSMatt Macy 	rbca.rbca_bp = bp;
5524eda14cbcSMatt Macy 	rbca.rbca_cb = callback;
5525eda14cbcSMatt Macy 	rbca.rbca_remap_vd = vd;
5526eda14cbcSMatt Macy 	rbca.rbca_remap_offset = offset;
5527eda14cbcSMatt Macy 	rbca.rbca_cb_arg = arg;
5528eda14cbcSMatt Macy 
5529eda14cbcSMatt Macy 	/*
5530eda14cbcSMatt Macy 	 * remap_blkptr_cb() will be called in order for each level of
5531eda14cbcSMatt Macy 	 * indirection, until a concrete vdev is reached or a split block is
5532eda14cbcSMatt Macy 	 * encountered. old_vd and old_offset are updated within the callback
5533eda14cbcSMatt Macy 	 * as we go from the one indirect vdev to the next one (either concrete
5534eda14cbcSMatt Macy 	 * or indirect again) in that order.
5535eda14cbcSMatt Macy 	 */
5536eda14cbcSMatt Macy 	vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5537eda14cbcSMatt Macy 
5538eda14cbcSMatt Macy 	/* Check if the DVA wasn't remapped because it is a split block */
5539eda14cbcSMatt Macy 	if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5540eda14cbcSMatt Macy 		return (B_FALSE);
5541eda14cbcSMatt Macy 
5542eda14cbcSMatt Macy 	return (B_TRUE);
5543eda14cbcSMatt Macy }
5544eda14cbcSMatt Macy 
5545eda14cbcSMatt Macy /*
5546eda14cbcSMatt Macy  * Undo the allocation of a DVA which happened in the given transaction group.
5547eda14cbcSMatt Macy  */
5548eda14cbcSMatt Macy void
5549eda14cbcSMatt Macy metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5550eda14cbcSMatt Macy {
5551eda14cbcSMatt Macy 	metaslab_t *msp;
5552eda14cbcSMatt Macy 	vdev_t *vd;
5553eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5554eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5555eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5556eda14cbcSMatt Macy 
5557eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5558eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5559eda14cbcSMatt Macy 
5560eda14cbcSMatt Macy 	if (txg > spa_freeze_txg(spa))
5561eda14cbcSMatt Macy 		return;
5562eda14cbcSMatt Macy 
5563eda14cbcSMatt Macy 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5564eda14cbcSMatt Macy 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5565eda14cbcSMatt Macy 		zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5566eda14cbcSMatt Macy 		    (u_longlong_t)vdev, (u_longlong_t)offset,
5567eda14cbcSMatt Macy 		    (u_longlong_t)size);
5568eda14cbcSMatt Macy 		return;
5569eda14cbcSMatt Macy 	}
5570eda14cbcSMatt Macy 
5571eda14cbcSMatt Macy 	ASSERT(!vd->vdev_removing);
5572eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
5573eda14cbcSMatt Macy 	ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5574eda14cbcSMatt Macy 	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5575eda14cbcSMatt Macy 
5576eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva))
55776db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5578eda14cbcSMatt Macy 
5579eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5580eda14cbcSMatt Macy 
5581eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5582eda14cbcSMatt Macy 	range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5583eda14cbcSMatt Macy 	    offset, size);
5584eda14cbcSMatt Macy 	msp->ms_allocating_total -= size;
5585eda14cbcSMatt Macy 
5586eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
5587eda14cbcSMatt Macy 	VERIFY3U(offset, >=, msp->ms_start);
5588eda14cbcSMatt Macy 	VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5589eda14cbcSMatt Macy 	VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=,
5590eda14cbcSMatt Macy 	    msp->ms_size);
5591eda14cbcSMatt Macy 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5592eda14cbcSMatt Macy 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5593eda14cbcSMatt Macy 	range_tree_add(msp->ms_allocatable, offset, size);
5594eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5595eda14cbcSMatt Macy }
5596eda14cbcSMatt Macy 
5597eda14cbcSMatt Macy /*
5598eda14cbcSMatt Macy  * Free the block represented by the given DVA.
5599eda14cbcSMatt Macy  */
5600eda14cbcSMatt Macy void
5601eda14cbcSMatt Macy metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5602eda14cbcSMatt Macy {
5603eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5604eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5605eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5606eda14cbcSMatt Macy 	vdev_t *vd = vdev_lookup_top(spa, vdev);
5607eda14cbcSMatt Macy 
5608eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5609eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5610eda14cbcSMatt Macy 
5611eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva)) {
56126db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5613eda14cbcSMatt Macy 	}
5614eda14cbcSMatt Macy 
5615eda14cbcSMatt Macy 	metaslab_free_impl(vd, offset, size, checkpoint);
5616eda14cbcSMatt Macy }
5617eda14cbcSMatt Macy 
5618eda14cbcSMatt Macy /*
5619eda14cbcSMatt Macy  * Reserve some allocation slots. The reservation system must be called
5620eda14cbcSMatt Macy  * before we call into the allocator. If there aren't any available slots
5621eda14cbcSMatt Macy  * then the I/O will be throttled until an I/O completes and its slots are
5622eda14cbcSMatt Macy  * freed up. The function returns true if it was successful in placing
5623eda14cbcSMatt Macy  * the reservation.
5624eda14cbcSMatt Macy  */
5625eda14cbcSMatt Macy boolean_t
5626eda14cbcSMatt Macy metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
5627eda14cbcSMatt Macy     zio_t *zio, int flags)
5628eda14cbcSMatt Macy {
56297877fdebSMatt Macy 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
56307877fdebSMatt Macy 	uint64_t max = mca->mca_alloc_max_slots;
5631eda14cbcSMatt Macy 
5632eda14cbcSMatt Macy 	ASSERT(mc->mc_alloc_throttle_enabled);
56333f9d360cSMartin Matuska 	if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) ||
56343f9d360cSMartin Matuska 	    zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) {
5635eda14cbcSMatt Macy 		/*
56361f88aa09SMartin Matuska 		 * The potential race between _count() and _add() is covered
56371f88aa09SMartin Matuska 		 * by the allocator lock in most cases, or irrelevant due to
56381f88aa09SMartin Matuska 		 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others.
56391f88aa09SMartin Matuska 		 * But even if we assume some other non-existing scenario, the
56401f88aa09SMartin Matuska 		 * worst that can happen is few more I/Os get to allocation
56411f88aa09SMartin Matuska 		 * earlier, that is not a problem.
56421f88aa09SMartin Matuska 		 *
5643eda14cbcSMatt Macy 		 * We reserve the slots individually so that we can unreserve
5644eda14cbcSMatt Macy 		 * them individually when an I/O completes.
5645eda14cbcSMatt Macy 		 */
56464e8d558cSMartin Matuska 		zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio);
5647eda14cbcSMatt Macy 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
56483f9d360cSMartin Matuska 		return (B_TRUE);
5649eda14cbcSMatt Macy 	}
56503f9d360cSMartin Matuska 	return (B_FALSE);
5651eda14cbcSMatt Macy }
5652eda14cbcSMatt Macy 
5653eda14cbcSMatt Macy void
5654eda14cbcSMatt Macy metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5655eda14cbcSMatt Macy     int allocator, zio_t *zio)
5656eda14cbcSMatt Macy {
56577877fdebSMatt Macy 	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
56587877fdebSMatt Macy 
5659eda14cbcSMatt Macy 	ASSERT(mc->mc_alloc_throttle_enabled);
56604e8d558cSMartin Matuska 	zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio);
5661eda14cbcSMatt Macy }
5662eda14cbcSMatt Macy 
5663eda14cbcSMatt Macy static int
5664eda14cbcSMatt Macy metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5665eda14cbcSMatt Macy     uint64_t txg)
5666eda14cbcSMatt Macy {
5667eda14cbcSMatt Macy 	metaslab_t *msp;
5668eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
5669eda14cbcSMatt Macy 	int error = 0;
5670eda14cbcSMatt Macy 
5671eda14cbcSMatt Macy 	if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5672eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
5673eda14cbcSMatt Macy 
5674eda14cbcSMatt Macy 	ASSERT3P(vd->vdev_ms, !=, NULL);
5675eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5676eda14cbcSMatt Macy 
5677eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5678eda14cbcSMatt Macy 
5679eda14cbcSMatt Macy 	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5680eda14cbcSMatt Macy 		error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5681eda14cbcSMatt Macy 		if (error == EBUSY) {
5682eda14cbcSMatt Macy 			ASSERT(msp->ms_loaded);
5683eda14cbcSMatt Macy 			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5684eda14cbcSMatt Macy 			error = 0;
5685eda14cbcSMatt Macy 		}
5686eda14cbcSMatt Macy 	}
5687eda14cbcSMatt Macy 
5688eda14cbcSMatt Macy 	if (error == 0 &&
5689eda14cbcSMatt Macy 	    !range_tree_contains(msp->ms_allocatable, offset, size))
5690eda14cbcSMatt Macy 		error = SET_ERROR(ENOENT);
5691eda14cbcSMatt Macy 
5692eda14cbcSMatt Macy 	if (error || txg == 0) {	/* txg == 0 indicates dry run */
5693eda14cbcSMatt Macy 		mutex_exit(&msp->ms_lock);
5694eda14cbcSMatt Macy 		return (error);
5695eda14cbcSMatt Macy 	}
5696eda14cbcSMatt Macy 
5697eda14cbcSMatt Macy 	VERIFY(!msp->ms_condensing);
5698eda14cbcSMatt Macy 	VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5699eda14cbcSMatt Macy 	VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5700eda14cbcSMatt Macy 	VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=,
5701eda14cbcSMatt Macy 	    msp->ms_size);
5702eda14cbcSMatt Macy 	range_tree_remove(msp->ms_allocatable, offset, size);
5703eda14cbcSMatt Macy 	range_tree_clear(msp->ms_trim, offset, size);
5704eda14cbcSMatt Macy 
57057877fdebSMatt Macy 	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(8) */
5706eda14cbcSMatt Macy 		metaslab_class_t *mc = msp->ms_group->mg_class;
5707eda14cbcSMatt Macy 		multilist_sublist_t *mls =
57083ff01b23SMartin Matuska 		    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5709eda14cbcSMatt Macy 		if (!multilist_link_active(&msp->ms_class_txg_node)) {
5710eda14cbcSMatt Macy 			msp->ms_selected_txg = txg;
5711eda14cbcSMatt Macy 			multilist_sublist_insert_head(mls, msp);
5712eda14cbcSMatt Macy 		}
5713eda14cbcSMatt Macy 		multilist_sublist_unlock(mls);
5714eda14cbcSMatt Macy 
5715eda14cbcSMatt Macy 		if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5716eda14cbcSMatt Macy 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
5717eda14cbcSMatt Macy 		range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5718eda14cbcSMatt Macy 		    offset, size);
5719eda14cbcSMatt Macy 		msp->ms_allocating_total += size;
5720eda14cbcSMatt Macy 	}
5721eda14cbcSMatt Macy 
5722eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5723eda14cbcSMatt Macy 
5724eda14cbcSMatt Macy 	return (0);
5725eda14cbcSMatt Macy }
5726eda14cbcSMatt Macy 
5727eda14cbcSMatt Macy typedef struct metaslab_claim_cb_arg_t {
5728eda14cbcSMatt Macy 	uint64_t	mcca_txg;
5729eda14cbcSMatt Macy 	int		mcca_error;
5730eda14cbcSMatt Macy } metaslab_claim_cb_arg_t;
5731eda14cbcSMatt Macy 
5732eda14cbcSMatt Macy static void
5733eda14cbcSMatt Macy metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5734eda14cbcSMatt Macy     uint64_t size, void *arg)
5735eda14cbcSMatt Macy {
5736e92ffd9bSMartin Matuska 	(void) inner_offset;
5737eda14cbcSMatt Macy 	metaslab_claim_cb_arg_t *mcca_arg = arg;
5738eda14cbcSMatt Macy 
5739eda14cbcSMatt Macy 	if (mcca_arg->mcca_error == 0) {
5740eda14cbcSMatt Macy 		mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5741eda14cbcSMatt Macy 		    size, mcca_arg->mcca_txg);
5742eda14cbcSMatt Macy 	}
5743eda14cbcSMatt Macy }
5744eda14cbcSMatt Macy 
5745eda14cbcSMatt Macy int
5746eda14cbcSMatt Macy metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5747eda14cbcSMatt Macy {
5748eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5749eda14cbcSMatt Macy 		metaslab_claim_cb_arg_t arg;
5750eda14cbcSMatt Macy 
5751eda14cbcSMatt Macy 		/*
57527877fdebSMatt Macy 		 * Only zdb(8) can claim on indirect vdevs.  This is used
5753eda14cbcSMatt Macy 		 * to detect leaks of mapped space (that are not accounted
5754eda14cbcSMatt Macy 		 * for in the obsolete counts, spacemap, or bpobj).
5755eda14cbcSMatt Macy 		 */
5756eda14cbcSMatt Macy 		ASSERT(!spa_writeable(vd->vdev_spa));
5757eda14cbcSMatt Macy 		arg.mcca_error = 0;
5758eda14cbcSMatt Macy 		arg.mcca_txg = txg;
5759eda14cbcSMatt Macy 
5760eda14cbcSMatt Macy 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5761eda14cbcSMatt Macy 		    metaslab_claim_impl_cb, &arg);
5762eda14cbcSMatt Macy 
5763eda14cbcSMatt Macy 		if (arg.mcca_error == 0) {
5764eda14cbcSMatt Macy 			arg.mcca_error = metaslab_claim_concrete(vd,
5765eda14cbcSMatt Macy 			    offset, size, txg);
5766eda14cbcSMatt Macy 		}
5767eda14cbcSMatt Macy 		return (arg.mcca_error);
5768eda14cbcSMatt Macy 	} else {
5769eda14cbcSMatt Macy 		return (metaslab_claim_concrete(vd, offset, size, txg));
5770eda14cbcSMatt Macy 	}
5771eda14cbcSMatt Macy }
5772eda14cbcSMatt Macy 
5773eda14cbcSMatt Macy /*
5774eda14cbcSMatt Macy  * Intent log support: upon opening the pool after a crash, notify the SPA
5775eda14cbcSMatt Macy  * of blocks that the intent log has allocated for immediate write, but
5776eda14cbcSMatt Macy  * which are still considered free by the SPA because the last transaction
5777eda14cbcSMatt Macy  * group didn't commit yet.
5778eda14cbcSMatt Macy  */
5779eda14cbcSMatt Macy static int
5780eda14cbcSMatt Macy metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5781eda14cbcSMatt Macy {
5782eda14cbcSMatt Macy 	uint64_t vdev = DVA_GET_VDEV(dva);
5783eda14cbcSMatt Macy 	uint64_t offset = DVA_GET_OFFSET(dva);
5784eda14cbcSMatt Macy 	uint64_t size = DVA_GET_ASIZE(dva);
5785eda14cbcSMatt Macy 	vdev_t *vd;
5786eda14cbcSMatt Macy 
5787eda14cbcSMatt Macy 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5788eda14cbcSMatt Macy 		return (SET_ERROR(ENXIO));
5789eda14cbcSMatt Macy 	}
5790eda14cbcSMatt Macy 
5791eda14cbcSMatt Macy 	ASSERT(DVA_IS_VALID(dva));
5792eda14cbcSMatt Macy 
5793eda14cbcSMatt Macy 	if (DVA_GET_GANG(dva))
57946db169e9SMartin Matuska 		size = vdev_gang_header_asize(vd);
5795eda14cbcSMatt Macy 
5796eda14cbcSMatt Macy 	return (metaslab_claim_impl(vd, offset, size, txg));
5797eda14cbcSMatt Macy }
5798eda14cbcSMatt Macy 
5799eda14cbcSMatt Macy int
5800eda14cbcSMatt Macy metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5801eda14cbcSMatt Macy     int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5802eda14cbcSMatt Macy     zio_alloc_list_t *zal, zio_t *zio, int allocator)
5803eda14cbcSMatt Macy {
5804eda14cbcSMatt Macy 	dva_t *dva = bp->blk_dva;
5805eda14cbcSMatt Macy 	dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5806eda14cbcSMatt Macy 	int error = 0;
5807eda14cbcSMatt Macy 
5808eda14cbcSMatt Macy 	ASSERT(bp->blk_birth == 0);
5809eda14cbcSMatt Macy 	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
5810eda14cbcSMatt Macy 
5811eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5812eda14cbcSMatt Macy 
58137877fdebSMatt Macy 	if (mc->mc_allocator[allocator].mca_rotor == NULL) {
58147877fdebSMatt Macy 		/* no vdevs in this class */
5815eda14cbcSMatt Macy 		spa_config_exit(spa, SCL_ALLOC, FTAG);
5816eda14cbcSMatt Macy 		return (SET_ERROR(ENOSPC));
5817eda14cbcSMatt Macy 	}
5818eda14cbcSMatt Macy 
5819eda14cbcSMatt Macy 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5820eda14cbcSMatt Macy 	ASSERT(BP_GET_NDVAS(bp) == 0);
5821eda14cbcSMatt Macy 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5822eda14cbcSMatt Macy 	ASSERT3P(zal, !=, NULL);
5823eda14cbcSMatt Macy 
5824eda14cbcSMatt Macy 	for (int d = 0; d < ndvas; d++) {
5825eda14cbcSMatt Macy 		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5826eda14cbcSMatt Macy 		    txg, flags, zal, allocator);
5827eda14cbcSMatt Macy 		if (error != 0) {
5828eda14cbcSMatt Macy 			for (d--; d >= 0; d--) {
5829eda14cbcSMatt Macy 				metaslab_unalloc_dva(spa, &dva[d], txg);
5830eda14cbcSMatt Macy 				metaslab_group_alloc_decrement(spa,
5831eda14cbcSMatt Macy 				    DVA_GET_VDEV(&dva[d]), zio, flags,
5832eda14cbcSMatt Macy 				    allocator, B_FALSE);
5833da5137abSMartin Matuska 				memset(&dva[d], 0, sizeof (dva_t));
5834eda14cbcSMatt Macy 			}
5835eda14cbcSMatt Macy 			spa_config_exit(spa, SCL_ALLOC, FTAG);
5836eda14cbcSMatt Macy 			return (error);
5837eda14cbcSMatt Macy 		} else {
5838eda14cbcSMatt Macy 			/*
5839eda14cbcSMatt Macy 			 * Update the metaslab group's queue depth
5840eda14cbcSMatt Macy 			 * based on the newly allocated dva.
5841eda14cbcSMatt Macy 			 */
5842eda14cbcSMatt Macy 			metaslab_group_alloc_increment(spa,
5843eda14cbcSMatt Macy 			    DVA_GET_VDEV(&dva[d]), zio, flags, allocator);
5844eda14cbcSMatt Macy 		}
5845eda14cbcSMatt Macy 	}
5846eda14cbcSMatt Macy 	ASSERT(error == 0);
5847eda14cbcSMatt Macy 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
5848eda14cbcSMatt Macy 
5849eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5850eda14cbcSMatt Macy 
5851eda14cbcSMatt Macy 	BP_SET_BIRTH(bp, txg, 0);
5852eda14cbcSMatt Macy 
5853eda14cbcSMatt Macy 	return (0);
5854eda14cbcSMatt Macy }
5855eda14cbcSMatt Macy 
5856eda14cbcSMatt Macy void
5857eda14cbcSMatt Macy metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5858eda14cbcSMatt Macy {
5859eda14cbcSMatt Macy 	const dva_t *dva = bp->blk_dva;
5860eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
5861eda14cbcSMatt Macy 
5862eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
5863eda14cbcSMatt Macy 	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
5864eda14cbcSMatt Macy 
5865eda14cbcSMatt Macy 	/*
5866eda14cbcSMatt Macy 	 * If we have a checkpoint for the pool we need to make sure that
5867eda14cbcSMatt Macy 	 * the blocks that we free that are part of the checkpoint won't be
5868eda14cbcSMatt Macy 	 * reused until the checkpoint is discarded or we revert to it.
5869eda14cbcSMatt Macy 	 *
5870eda14cbcSMatt Macy 	 * The checkpoint flag is passed down the metaslab_free code path
5871eda14cbcSMatt Macy 	 * and is set whenever we want to add a block to the checkpoint's
5872eda14cbcSMatt Macy 	 * accounting. That is, we "checkpoint" blocks that existed at the
5873eda14cbcSMatt Macy 	 * time the checkpoint was created and are therefore referenced by
5874eda14cbcSMatt Macy 	 * the checkpointed uberblock.
5875eda14cbcSMatt Macy 	 *
5876eda14cbcSMatt Macy 	 * Note that, we don't checkpoint any blocks if the current
5877eda14cbcSMatt Macy 	 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5878eda14cbcSMatt Macy 	 * normally as they will be referenced by the checkpointed uberblock.
5879eda14cbcSMatt Macy 	 */
5880eda14cbcSMatt Macy 	boolean_t checkpoint = B_FALSE;
5881eda14cbcSMatt Macy 	if (bp->blk_birth <= spa->spa_checkpoint_txg &&
5882eda14cbcSMatt Macy 	    spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5883eda14cbcSMatt Macy 		/*
5884eda14cbcSMatt Macy 		 * At this point, if the block is part of the checkpoint
5885eda14cbcSMatt Macy 		 * there is no way it was created in the current txg.
5886eda14cbcSMatt Macy 		 */
5887eda14cbcSMatt Macy 		ASSERT(!now);
5888eda14cbcSMatt Macy 		ASSERT3U(spa_syncing_txg(spa), ==, txg);
5889eda14cbcSMatt Macy 		checkpoint = B_TRUE;
5890eda14cbcSMatt Macy 	}
5891eda14cbcSMatt Macy 
5892eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5893eda14cbcSMatt Macy 
5894eda14cbcSMatt Macy 	for (int d = 0; d < ndvas; d++) {
5895eda14cbcSMatt Macy 		if (now) {
5896eda14cbcSMatt Macy 			metaslab_unalloc_dva(spa, &dva[d], txg);
5897eda14cbcSMatt Macy 		} else {
5898eda14cbcSMatt Macy 			ASSERT3U(txg, ==, spa_syncing_txg(spa));
5899eda14cbcSMatt Macy 			metaslab_free_dva(spa, &dva[d], checkpoint);
5900eda14cbcSMatt Macy 		}
5901eda14cbcSMatt Macy 	}
5902eda14cbcSMatt Macy 
5903eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_FREE, FTAG);
5904eda14cbcSMatt Macy }
5905eda14cbcSMatt Macy 
5906eda14cbcSMatt Macy int
5907eda14cbcSMatt Macy metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5908eda14cbcSMatt Macy {
5909eda14cbcSMatt Macy 	const dva_t *dva = bp->blk_dva;
5910eda14cbcSMatt Macy 	int ndvas = BP_GET_NDVAS(bp);
5911eda14cbcSMatt Macy 	int error = 0;
5912eda14cbcSMatt Macy 
5913eda14cbcSMatt Macy 	ASSERT(!BP_IS_HOLE(bp));
5914eda14cbcSMatt Macy 
5915eda14cbcSMatt Macy 	if (txg != 0) {
5916eda14cbcSMatt Macy 		/*
5917eda14cbcSMatt Macy 		 * First do a dry run to make sure all DVAs are claimable,
5918eda14cbcSMatt Macy 		 * so we don't have to unwind from partial failures below.
5919eda14cbcSMatt Macy 		 */
5920eda14cbcSMatt Macy 		if ((error = metaslab_claim(spa, bp, 0)) != 0)
5921eda14cbcSMatt Macy 			return (error);
5922eda14cbcSMatt Macy 	}
5923eda14cbcSMatt Macy 
5924eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5925eda14cbcSMatt Macy 
5926eda14cbcSMatt Macy 	for (int d = 0; d < ndvas; d++) {
5927eda14cbcSMatt Macy 		error = metaslab_claim_dva(spa, &dva[d], txg);
5928eda14cbcSMatt Macy 		if (error != 0)
5929eda14cbcSMatt Macy 			break;
5930eda14cbcSMatt Macy 	}
5931eda14cbcSMatt Macy 
5932eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_ALLOC, FTAG);
5933eda14cbcSMatt Macy 
5934eda14cbcSMatt Macy 	ASSERT(error == 0 || txg == 0);
5935eda14cbcSMatt Macy 
5936eda14cbcSMatt Macy 	return (error);
5937eda14cbcSMatt Macy }
5938eda14cbcSMatt Macy 
5939eda14cbcSMatt Macy static void
5940eda14cbcSMatt Macy metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5941eda14cbcSMatt Macy     uint64_t size, void *arg)
5942eda14cbcSMatt Macy {
5943e92ffd9bSMartin Matuska 	(void) inner, (void) arg;
5944e92ffd9bSMartin Matuska 
5945eda14cbcSMatt Macy 	if (vd->vdev_ops == &vdev_indirect_ops)
5946eda14cbcSMatt Macy 		return;
5947eda14cbcSMatt Macy 
5948eda14cbcSMatt Macy 	metaslab_check_free_impl(vd, offset, size);
5949eda14cbcSMatt Macy }
5950eda14cbcSMatt Macy 
5951eda14cbcSMatt Macy static void
5952eda14cbcSMatt Macy metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5953eda14cbcSMatt Macy {
5954eda14cbcSMatt Macy 	metaslab_t *msp;
5955eda14cbcSMatt Macy 	spa_t *spa __maybe_unused = vd->vdev_spa;
5956eda14cbcSMatt Macy 
5957eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5958eda14cbcSMatt Macy 		return;
5959eda14cbcSMatt Macy 
5960eda14cbcSMatt Macy 	if (vd->vdev_ops->vdev_op_remap != NULL) {
5961eda14cbcSMatt Macy 		vd->vdev_ops->vdev_op_remap(vd, offset, size,
5962eda14cbcSMatt Macy 		    metaslab_check_free_impl_cb, NULL);
5963eda14cbcSMatt Macy 		return;
5964eda14cbcSMatt Macy 	}
5965eda14cbcSMatt Macy 
5966eda14cbcSMatt Macy 	ASSERT(vdev_is_concrete(vd));
5967eda14cbcSMatt Macy 	ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
5968eda14cbcSMatt Macy 	ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5969eda14cbcSMatt Macy 
5970eda14cbcSMatt Macy 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5971eda14cbcSMatt Macy 
5972eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
5973eda14cbcSMatt Macy 	if (msp->ms_loaded) {
5974eda14cbcSMatt Macy 		range_tree_verify_not_present(msp->ms_allocatable,
5975eda14cbcSMatt Macy 		    offset, size);
5976eda14cbcSMatt Macy 	}
5977eda14cbcSMatt Macy 
5978eda14cbcSMatt Macy 	/*
5979eda14cbcSMatt Macy 	 * Check all segments that currently exist in the freeing pipeline.
5980eda14cbcSMatt Macy 	 *
5981eda14cbcSMatt Macy 	 * It would intuitively make sense to also check the current allocating
5982eda14cbcSMatt Macy 	 * tree since metaslab_unalloc_dva() exists for extents that are
5983eda14cbcSMatt Macy 	 * allocated and freed in the same sync pass within the same txg.
5984eda14cbcSMatt Macy 	 * Unfortunately there are places (e.g. the ZIL) where we allocate a
5985eda14cbcSMatt Macy 	 * segment but then we free part of it within the same txg
5986eda14cbcSMatt Macy 	 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the
5987eda14cbcSMatt Macy 	 * current allocating tree.
5988eda14cbcSMatt Macy 	 */
5989eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_freeing, offset, size);
5990eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
5991eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_freed, offset, size);
5992eda14cbcSMatt Macy 	for (int j = 0; j < TXG_DEFER_SIZE; j++)
5993eda14cbcSMatt Macy 		range_tree_verify_not_present(msp->ms_defer[j], offset, size);
5994eda14cbcSMatt Macy 	range_tree_verify_not_present(msp->ms_trim, offset, size);
5995eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
5996eda14cbcSMatt Macy }
5997eda14cbcSMatt Macy 
5998eda14cbcSMatt Macy void
5999eda14cbcSMatt Macy metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6000eda14cbcSMatt Macy {
6001eda14cbcSMatt Macy 	if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6002eda14cbcSMatt Macy 		return;
6003eda14cbcSMatt Macy 
6004eda14cbcSMatt Macy 	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6005eda14cbcSMatt Macy 	for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6006eda14cbcSMatt Macy 		uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6007eda14cbcSMatt Macy 		vdev_t *vd = vdev_lookup_top(spa, vdev);
6008eda14cbcSMatt Macy 		uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6009eda14cbcSMatt Macy 		uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6010eda14cbcSMatt Macy 
6011eda14cbcSMatt Macy 		if (DVA_GET_GANG(&bp->blk_dva[i]))
60126db169e9SMartin Matuska 			size = vdev_gang_header_asize(vd);
6013eda14cbcSMatt Macy 
6014eda14cbcSMatt Macy 		ASSERT3P(vd, !=, NULL);
6015eda14cbcSMatt Macy 
6016eda14cbcSMatt Macy 		metaslab_check_free_impl(vd, offset, size);
6017eda14cbcSMatt Macy 	}
6018eda14cbcSMatt Macy 	spa_config_exit(spa, SCL_VDEV, FTAG);
6019eda14cbcSMatt Macy }
6020eda14cbcSMatt Macy 
6021eda14cbcSMatt Macy static void
6022eda14cbcSMatt Macy metaslab_group_disable_wait(metaslab_group_t *mg)
6023eda14cbcSMatt Macy {
6024eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6025eda14cbcSMatt Macy 	while (mg->mg_disabled_updating) {
6026eda14cbcSMatt Macy 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6027eda14cbcSMatt Macy 	}
6028eda14cbcSMatt Macy }
6029eda14cbcSMatt Macy 
6030eda14cbcSMatt Macy static void
6031eda14cbcSMatt Macy metaslab_group_disabled_increment(metaslab_group_t *mg)
6032eda14cbcSMatt Macy {
6033eda14cbcSMatt Macy 	ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6034eda14cbcSMatt Macy 	ASSERT(mg->mg_disabled_updating);
6035eda14cbcSMatt Macy 
6036eda14cbcSMatt Macy 	while (mg->mg_ms_disabled >= max_disabled_ms) {
6037eda14cbcSMatt Macy 		cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6038eda14cbcSMatt Macy 	}
6039eda14cbcSMatt Macy 	mg->mg_ms_disabled++;
6040eda14cbcSMatt Macy 	ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6041eda14cbcSMatt Macy }
6042eda14cbcSMatt Macy 
6043eda14cbcSMatt Macy /*
6044eda14cbcSMatt Macy  * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6045eda14cbcSMatt Macy  * We must also track how many metaslabs are currently disabled within a
6046eda14cbcSMatt Macy  * metaslab group and limit them to prevent allocation failures from
6047eda14cbcSMatt Macy  * occurring because all metaslabs are disabled.
6048eda14cbcSMatt Macy  */
6049eda14cbcSMatt Macy void
6050eda14cbcSMatt Macy metaslab_disable(metaslab_t *msp)
6051eda14cbcSMatt Macy {
6052eda14cbcSMatt Macy 	ASSERT(!MUTEX_HELD(&msp->ms_lock));
6053eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6054eda14cbcSMatt Macy 
6055eda14cbcSMatt Macy 	mutex_enter(&mg->mg_ms_disabled_lock);
6056eda14cbcSMatt Macy 
6057eda14cbcSMatt Macy 	/*
6058eda14cbcSMatt Macy 	 * To keep an accurate count of how many threads have disabled
6059eda14cbcSMatt Macy 	 * a specific metaslab group, we only allow one thread to mark
6060eda14cbcSMatt Macy 	 * the metaslab group at a time. This ensures that the value of
6061eda14cbcSMatt Macy 	 * ms_disabled will be accurate when we decide to mark a metaslab
6062eda14cbcSMatt Macy 	 * group as disabled. To do this we force all other threads
6063eda14cbcSMatt Macy 	 * to wait till the metaslab's mg_disabled_updating flag is no
6064eda14cbcSMatt Macy 	 * longer set.
6065eda14cbcSMatt Macy 	 */
6066eda14cbcSMatt Macy 	metaslab_group_disable_wait(mg);
6067eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_TRUE;
6068eda14cbcSMatt Macy 	if (msp->ms_disabled == 0) {
6069eda14cbcSMatt Macy 		metaslab_group_disabled_increment(mg);
6070eda14cbcSMatt Macy 	}
6071eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6072eda14cbcSMatt Macy 	msp->ms_disabled++;
6073eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6074eda14cbcSMatt Macy 
6075eda14cbcSMatt Macy 	mg->mg_disabled_updating = B_FALSE;
6076eda14cbcSMatt Macy 	cv_broadcast(&mg->mg_ms_disabled_cv);
6077eda14cbcSMatt Macy 	mutex_exit(&mg->mg_ms_disabled_lock);
6078eda14cbcSMatt Macy }
6079eda14cbcSMatt Macy 
6080eda14cbcSMatt Macy void
6081eda14cbcSMatt Macy metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6082eda14cbcSMatt Macy {
6083eda14cbcSMatt Macy 	metaslab_group_t *mg = msp->ms_group;
6084eda14cbcSMatt Macy 	spa_t *spa = mg->mg_vd->vdev_spa;
6085eda14cbcSMatt Macy 
6086eda14cbcSMatt Macy 	/*
6087eda14cbcSMatt Macy 	 * Wait for the outstanding IO to be synced to prevent newly
6088eda14cbcSMatt Macy 	 * allocated blocks from being overwritten.  This used by
6089eda14cbcSMatt Macy 	 * initialize and TRIM which are modifying unallocated space.
6090eda14cbcSMatt Macy 	 */
6091eda14cbcSMatt Macy 	if (sync)
6092eda14cbcSMatt Macy 		txg_wait_synced(spa_get_dsl(spa), 0);
6093eda14cbcSMatt Macy 
6094eda14cbcSMatt Macy 	mutex_enter(&mg->mg_ms_disabled_lock);
6095eda14cbcSMatt Macy 	mutex_enter(&msp->ms_lock);
6096eda14cbcSMatt Macy 	if (--msp->ms_disabled == 0) {
6097eda14cbcSMatt Macy 		mg->mg_ms_disabled--;
6098eda14cbcSMatt Macy 		cv_broadcast(&mg->mg_ms_disabled_cv);
6099eda14cbcSMatt Macy 		if (unload)
6100eda14cbcSMatt Macy 			metaslab_unload(msp);
6101eda14cbcSMatt Macy 	}
6102eda14cbcSMatt Macy 	mutex_exit(&msp->ms_lock);
6103eda14cbcSMatt Macy 	mutex_exit(&mg->mg_ms_disabled_lock);
6104eda14cbcSMatt Macy }
6105eda14cbcSMatt Macy 
6106716fd348SMartin Matuska void
6107716fd348SMartin Matuska metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6108716fd348SMartin Matuska {
6109716fd348SMartin Matuska 	ms->ms_unflushed_dirty = dirty;
6110716fd348SMartin Matuska }
6111716fd348SMartin Matuska 
6112eda14cbcSMatt Macy static void
6113eda14cbcSMatt Macy metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6114eda14cbcSMatt Macy {
6115eda14cbcSMatt Macy 	vdev_t *vd = ms->ms_group->mg_vd;
6116eda14cbcSMatt Macy 	spa_t *spa = vd->vdev_spa;
6117eda14cbcSMatt Macy 	objset_t *mos = spa_meta_objset(spa);
6118eda14cbcSMatt Macy 
6119eda14cbcSMatt Macy 	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6120eda14cbcSMatt Macy 
6121eda14cbcSMatt Macy 	metaslab_unflushed_phys_t entry = {
6122eda14cbcSMatt Macy 		.msp_unflushed_txg = metaslab_unflushed_txg(ms),
6123eda14cbcSMatt Macy 	};
6124eda14cbcSMatt Macy 	uint64_t entry_size = sizeof (entry);
6125eda14cbcSMatt Macy 	uint64_t entry_offset = ms->ms_id * entry_size;
6126eda14cbcSMatt Macy 
6127eda14cbcSMatt Macy 	uint64_t object = 0;
6128eda14cbcSMatt Macy 	int err = zap_lookup(mos, vd->vdev_top_zap,
6129eda14cbcSMatt Macy 	    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6130eda14cbcSMatt Macy 	    &object);
6131eda14cbcSMatt Macy 	if (err == ENOENT) {
6132eda14cbcSMatt Macy 		object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6133eda14cbcSMatt Macy 		    SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6134eda14cbcSMatt Macy 		VERIFY0(zap_add(mos, vd->vdev_top_zap,
6135eda14cbcSMatt Macy 		    VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6136eda14cbcSMatt Macy 		    &object, tx));
6137eda14cbcSMatt Macy 	} else {
6138eda14cbcSMatt Macy 		VERIFY0(err);
6139eda14cbcSMatt Macy 	}
6140eda14cbcSMatt Macy 
6141eda14cbcSMatt Macy 	dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6142eda14cbcSMatt Macy 	    &entry, tx);
6143eda14cbcSMatt Macy }
6144eda14cbcSMatt Macy 
6145eda14cbcSMatt Macy void
6146eda14cbcSMatt Macy metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6147eda14cbcSMatt Macy {
6148eda14cbcSMatt Macy 	ms->ms_unflushed_txg = txg;
6149eda14cbcSMatt Macy 	metaslab_update_ondisk_flush_data(ms, tx);
6150eda14cbcSMatt Macy }
6151eda14cbcSMatt Macy 
6152716fd348SMartin Matuska boolean_t
6153716fd348SMartin Matuska metaslab_unflushed_dirty(metaslab_t *ms)
6154716fd348SMartin Matuska {
6155716fd348SMartin Matuska 	return (ms->ms_unflushed_dirty);
6156716fd348SMartin Matuska }
6157716fd348SMartin Matuska 
6158eda14cbcSMatt Macy uint64_t
6159eda14cbcSMatt Macy metaslab_unflushed_txg(metaslab_t *ms)
6160eda14cbcSMatt Macy {
6161eda14cbcSMatt Macy 	return (ms->ms_unflushed_txg);
6162eda14cbcSMatt Macy }
6163eda14cbcSMatt Macy 
6164dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
6165eda14cbcSMatt Macy 	"Allocation granularity (a.k.a. stripe size)");
6166eda14cbcSMatt Macy 
6167eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6168eda14cbcSMatt Macy 	"Load all metaslabs when pool is first opened");
6169eda14cbcSMatt Macy 
6170eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6171eda14cbcSMatt Macy 	"Prevent metaslabs from being unloaded");
6172eda14cbcSMatt Macy 
6173eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6174eda14cbcSMatt Macy 	"Preload potential metaslabs during reassessment");
6175eda14cbcSMatt Macy 
6176be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6177eda14cbcSMatt Macy 	"Delay in txgs after metaslab was last used before unloading");
6178eda14cbcSMatt Macy 
6179be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6180eda14cbcSMatt Macy 	"Delay in milliseconds after metaslab was last used before unloading");
6181eda14cbcSMatt Macy 
6182eda14cbcSMatt Macy /* BEGIN CSTYLED */
6183be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6184eda14cbcSMatt Macy 	"Percentage of metaslab group size that should be free to make it "
6185eda14cbcSMatt Macy 	"eligible for allocation");
6186eda14cbcSMatt Macy 
6187be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6188eda14cbcSMatt Macy 	"Percentage of metaslab group size that should be considered eligible "
6189eda14cbcSMatt Macy 	"for allocations unless all metaslab groups within the metaslab class "
6190eda14cbcSMatt Macy 	"have also crossed this threshold");
6191eda14cbcSMatt Macy 
6192c03c5b1cSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6193c03c5b1cSMartin Matuska 	ZMOD_RW,
6194eda14cbcSMatt Macy 	"Use the fragmentation metric to prefer less fragmented metaslabs");
6195eda14cbcSMatt Macy /* END CSTYLED */
6196eda14cbcSMatt Macy 
6197be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6198c03c5b1cSMartin Matuska 	ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6199c03c5b1cSMartin Matuska 
6200eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6201eda14cbcSMatt Macy 	"Prefer metaslabs with lower LBAs");
6202eda14cbcSMatt Macy 
6203eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6204eda14cbcSMatt Macy 	"Enable metaslab group biasing");
6205eda14cbcSMatt Macy 
6206eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6207eda14cbcSMatt Macy 	ZMOD_RW, "Enable segment-based metaslab selection");
6208eda14cbcSMatt Macy 
6209eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6210eda14cbcSMatt Macy 	"Segment-based metaslab selection maximum buckets before switching");
6211eda14cbcSMatt Macy 
6212dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
6213*315ee00fSMartin Matuska 	"Blocks larger than this size are sometimes forced to be gang blocks");
6214*315ee00fSMartin Matuska 
6215*315ee00fSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
6216*315ee00fSMartin Matuska 	"Percentage of large blocks that will be forced to be gang blocks");
6217eda14cbcSMatt Macy 
6218be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6219eda14cbcSMatt Macy 	"Max distance (bytes) to search forward before using size tree");
6220eda14cbcSMatt Macy 
6221eda14cbcSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6222eda14cbcSMatt Macy 	"When looking in size tree, use largest segment instead of exact fit");
6223eda14cbcSMatt Macy 
6224dbd5678dSMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
6225eda14cbcSMatt Macy 	ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6226eda14cbcSMatt Macy 
6227be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6228eda14cbcSMatt Macy 	"Percentage of memory that can be used to store metaslab range trees");
62297877fdebSMatt Macy 
62307877fdebSMatt Macy ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
62317877fdebSMatt Macy 	ZMOD_RW, "Try hard to allocate before ganging");
62327877fdebSMatt Macy 
6233be181ee2SMartin Matuska ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
62347877fdebSMatt Macy 	"Normally only consider this many of the best metaslabs in each vdev");
6235