// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/brt.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

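/*
 * True if the allocation's flags mark it as part of a gang block: either
 * a gang child allocation or the gang header itself.
 */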
#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab group's per child vdev granularity, in bytes. This is roughly
 * similar to what would be referred to as the "stripe size" in traditional
 * RAID arrays. In normal operation, we will try to write this amount of
 * data to each disk before moving on to the next top-level vdev.
 */
static uint64_t metaslab_aliquot = 2 * 1024 * 1024;

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Of blocks of size >= metaslab_force_ganging, actually gang this percentage
 * of them.
 */
uint_t metaslab_force_ganging_pct = 3;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8K-16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while, so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
uint_t zfs_condense_pct = 200;
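
/*
 * For illustration: with the default zfs_condense_pct of 200, a metaslab's
 * space map becomes a condensing candidate only once its on-disk
 * representation is more than twice (200%) the size of its optimal in-core
 * representation, i.e. condensing is expected to roughly halve it.
 */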

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes more than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static uint_t zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it past the threshold, while freeing segments from disk B
 * brings its fragmentation back below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static uint_t zfs_mg_fragmentation_threshold = 95;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status, allowing better metaslabs to be selected.
 */
static uint_t zfs_metaslab_fragmentation_threshold = 77;

/*
 * When set, will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set, will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
uint_t metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search at least this many
 * segments forward before giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */

/*
 * Max number of metaslabs per group to preload.
 */
uint_t metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable space-based metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Control performance-based metaslab group biasing.
 */
static int metaslab_perf_bias = 1;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */

/*
 * Maximum percentage of memory to use for storing loaded metaslabs. If
 * loading a metaslab would take it over this percentage, the oldest selected
 * metaslab is automatically unloaded.
 */
static uint_t zfs_metaslab_mem_limit = 25;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size (1 <<
 * metaslab_by_size_min_shift bytes) in the size-sorted metaslab trees
 * (ms_allocatable_by_size and ms_unflushed_frees_by_size). This dramatically
 * reduces memory usage and improves load and unload times, at the cost of
 * causing us to use slightly larger segments than we would otherwise in
 * some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;

/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static uint_t zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
    void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit", KSTAT_DATA_UINT64 },
	{ "reload_tree", KSTAT_DATA_UINT64 },
	{ "too_many_tries", KSTAT_DATA_UINT64 },
	{ "try_hard", KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);


static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}


/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops, boolean_t is_log)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mc->mc_is_log = is_log;
	mc->mc_alloc_io_size = SPA_OLD_MAXBLOCKSIZE;
	mc->mc_alloc_max = UINT64_MAX;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mutex_init(&mca->mca_lock, NULL, MUTEX_DEFAULT, NULL);
		avl_create(&mca->mca_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
		mca->mca_rotor = NULL;
		mca->mca_reserved = 0;
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		avl_destroy(&mca->mca_tree);
		mutex_destroy(&mca->mca_lock);
		ASSERT(mca->mca_rotor == NULL);
		ASSERT0(mca->mca_reserved);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}

void
metaslab_class_validate(metaslab_class_t *mc)
{
#ifdef ZFS_DEBUG
	spa_t *spa = mc->mc_spa;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) ||
	    spa_config_held(spa, SCL_ALL, RW_WRITER));

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		metaslab_group_t *mg, *rotor;

		ASSERT0(avl_numnodes(&mca->mca_tree));
		ASSERT0(mca->mca_reserved);

		if ((mg = rotor = mca->mca_rotor) == NULL)
			continue;
		do {
			metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
			vdev_t *vd = mg->mg_vd;

			ASSERT3P(vd->vdev_top, ==, vd);
			ASSERT(vd->vdev_mg == mg || vd->vdev_log_mg == mg);
			ASSERT3P(mg->mg_class, ==, mc);
			ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
			ASSERT0(zfs_refcount_count(&mga->mga_queue_depth));
		} while ((mg = mg->mg_next) != rotor);
	}
#endif
}

/*
 * For each metaslab group in a class, pre-calculate an allocation quota and
 * target queue depth to balance their space usage and write performance.
 * Based on those, pre-calculate the class allocation throttle threshold for
 * optimal saturation. onsync is true once per TXG, in which case we also
 * enable/disable allocation throttling and update the moving average of the
 * maximum I/O size.
 */
void
metaslab_class_balance(metaslab_class_t *mc, boolean_t onsync)
{
	metaslab_group_t *mg, *first;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if (onsync)
		metaslab_class_validate(mc);

	if (mc->mc_groups == 0) {
		if (onsync)
			mc->mc_alloc_throttle_enabled = B_FALSE;
		mc->mc_alloc_max = UINT64_MAX;
		return;
	}

	if (onsync) {
		/*
		 * Moving average of the maximum allocation size; in the
		 * absence of large allocations it shrinks to 1/8 of
		 * metaslab_aliquot.
		 */
		mc->mc_alloc_io_size = (3 * mc->mc_alloc_io_size +
		    metaslab_aliquot / 8) / 4;
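		/*
		 * Illustration: this is an exponential moving average with
		 * 3/4 weight on history, so with no large allocations the
		 * value decays toward metaslab_aliquot / 8 (256KB with the
		 * default 2MB aliquot) by a factor of 3/4 each TXG.
		 */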
		mc->mc_alloc_throttle_enabled = mc->mc_is_log ? 0 :
		    zio_dva_throttle_enabled;
	}

	mg = first = mc->mc_allocator[0].mca_rotor;
	uint64_t children = 0;
	do {
		children += vdev_get_ndisks(mg->mg_vd) -
		    vdev_get_nparity(mg->mg_vd);
	} while ((mg = mg->mg_next) != first);

	uint64_t sum_aliquot = 0;
	do {
		vdev_stat_t *vs = &mg->mg_vd->vdev_stat;
		uint_t ratio;

		/*
		 * Scale allocations per iteration with the average number of
		 * children. Wider vdevs need more sequential allocations to
		 * keep decent per-child I/O size.
		 */
		uint64_t mg_aliquot = MAX(metaslab_aliquot * children /
		    mc->mc_groups, mc->mc_alloc_io_size * 4);
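		/*
		 * For example (illustrative numbers): with the default 2MB
		 * metaslab_aliquot and 4 top-level raidz2 vdevs of 8 disks
		 * each (children = 4 * (8 - 2) = 24), this starts at
		 * 2MB * 24 / 4 = 12MB per group, but never less than four
		 * times the moving average of the allocation I/O size.
		 */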

		/*
		 * Scale allocations per iteration with the vdev capacity,
		 * relative to average. Bigger vdevs should get more to
		 * fill up at the same time as smaller ones.
		 */
		if (mc->mc_space > 0 && vs->vs_space > 0) {
			ratio = vs->vs_space / (mc->mc_space / (mc->mc_groups *
			    256) + 1);
			mg_aliquot = mg_aliquot * ratio / 256;
		}

		/*
		 * Scale allocations per iteration with the vdev's free space
		 * fraction, relative to average. Despite the above, vdevs'
		 * free space fractions may get imbalanced, for example due to
		 * new vdev addition or different performance. We want free
		 * space fractions to be similar to postpone fragmentation.
		 *
		 * At the same time we don't want to throttle vdevs that still
		 * have plenty of free space and appear faster than others,
		 * even if that causes temporary imbalance. Allow them to
		 * allocate more by keeping their allocation queue depth
		 * equivalent to 2.5 full iterations, even if they repeatedly
		 * drain it. Later, as free space shrinks, gradually reduce
		 * the target queue depth, enforcing the free space balance
		 * more strongly.
		 */
		if (metaslab_bias_enabled &&
		    mc->mc_space > 0 && vs->vs_space > 0) {
			uint64_t vs_free = vs->vs_space > vs->vs_alloc ?
			    vs->vs_space - vs->vs_alloc : 0;
			uint64_t mc_free = mc->mc_space > mc->mc_alloc ?
			    mc->mc_space - mc->mc_alloc : 0;
			/*
			 * vs_fr is 16 bit fixed-point free space fraction.
			 * mc_fr is 8 bit fixed-point free space fraction.
			 * ratio as their quotient is 8 bit fixed-point.
			 */
			uint_t vs_fr = vs_free / (vs->vs_space / 65536 + 1);
			uint_t mc_fr = mc_free / (mc->mc_space / 256 + 1);
			ratio = vs_fr / (mc_fr + 1);
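			/*
			 * Example (illustrative numbers): a vdev 60% free in
			 * a class 40% free gives vs_fr ~= 39321 and
			 * mc_fr ~= 102, so ratio ~= 381, i.e. ~1.5x in 8-bit
			 * fixed point: the emptier vdev absorbs about 1.5x
			 * its even share of allocations.
			 */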
			mg->mg_aliquot = mg_aliquot * ratio / 256;
			/* From 2.5x at 25% full to 1x at 75%. */
			ratio = MIN(163840, vs_fr * 3 + 16384);
			mg->mg_queue_target = MAX(mg->mg_aliquot,
			    mg->mg_aliquot * ratio / 65536);
		} else {
			mg->mg_aliquot = mg_aliquot;
			mg->mg_queue_target = mg->mg_aliquot * 2;
		}
		sum_aliquot += mg->mg_aliquot;
	} while ((mg = mg->mg_next) != first);

	/*
	 * Set per-class allocation throttle threshold to 4 iterations through
	 * all the vdevs. This should keep all vdevs busy even if some are
	 * allocating more than we planned for them due to bigger blocks or
	 * better performance.
	 */
	mc->mc_alloc_max = sum_aliquot * 4;
}

static void
metaslab_class_rotate(metaslab_group_t *mg, int allocator, uint64_t psize,
    boolean_t success)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];

	/*
	 * Exit fast if there is nothing to rotate: we are not following
	 * the rotor (copies, gangs, etc.) or somebody already rotated it.
	 */
	if (mc->mc_groups < 2 || mca->mca_rotor != mg)
		return;

	/*
	 * Always rotate in case of allocation error or a log class.
	 */
	if (!success || mc->mc_is_log)
		goto rotate;

	/*
	 * Allocate from this group if we expect the next I/O of the same size
	 * to mostly fit within the allocation quota. Rotate if we expect it
	 * to mostly exceed the target queue depth. Meanwhile, to stripe
	 * between groups in the configured amounts per child even when we
	 * can't reach the target queue depth (i.e. can't saturate the group's
	 * write performance), always rotate after allocating the queue target
	 * bytes.
	 */
	uint64_t naq = atomic_add_64_nv(&mca->mca_aliquot, psize) + psize / 2;
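	/*
	 * The added psize / 2 rounds the comparisons below to the nearest
	 * multiple of the I/O size: "mostly fits" rather than "fits
	 * entirely".
	 */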
	if (naq < mg->mg_aliquot)
		return;
	if (naq >= mg->mg_queue_target)
		goto rotate;
	if (zfs_refcount_count(&mga->mga_queue_depth) + psize + psize / 2 >=
	    mg->mg_queue_target)
		goto rotate;

	/*
	 * When the pool is not too busy, prefer restoring the vdev free space
	 * balance over maximum speed we might not need, so that we have more
	 * flexibility during busier times later.
	 */
	if (metaslab_perf_bias <= 0)
		goto rotate;
	if (metaslab_perf_bias >= 2)
		return;
	spa_t *spa = mc->mc_spa;
	dsl_pool_t *dp = spa_get_dsl(spa);
	if (dp == NULL)
		return;
	uint64_t busy_thresh = zfs_dirty_data_max *
	    (zfs_vdev_async_write_active_min_dirty_percent +
	    zfs_vdev_async_write_active_max_dirty_percent) / 200;
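	/*
	 * With the default min/max active dirty percentages (30 and 60),
	 * busy_thresh works out to 45% of zfs_dirty_data_max; tune those
	 * percentages and this threshold moves with them.
	 */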
	if (dp->dp_dirty_total > busy_thresh || spa_has_pending_synctask(spa))
		return;

rotate:
	mca->mca_rotor = mg->mg_next;
	mca->mca_aliquot = 0;
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN_TYPED(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift, uint64_t);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	uint64_t now = gethrestime_sec();
	/* Round delay up to next second. */
	uint_t delay = (metaslab_unload_delay_ms + 999) / 1000;
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock_idx(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    now > msp->ms_selected_time + delay &&
			    (msp->ms_allocator == -1 ||
			    !metaslab_preload_enabled)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity is
 * above zfs_mg_noalloc_threshold and its fragmentation metric (if valid)
 * is no greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

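	/*
	 * Free space as a percentage of the vdev's total; the +1 avoids
	 * dividing by zero on an empty vdev_stat.
	 */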
	uint64_t free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space and is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_queue_depth);
	}

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
	metaslab_class_balance(mc, B_FALSE);
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	metaslab_class_balance(mc, B_FALSE);
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(ZFS_RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* Skip if not active. */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);

	metaslab_class_t *mc = msp->ms_group->mg_class;
	multilist_sublist_t *mls =
	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
	if (multilist_link_active(&msp->ms_class_txg_node))
		multilist_sublist_remove(mls, msp);
	multilist_sublist_unlock(mls);

	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);

	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. Weight metaslabs
 * by the amount of free space. The return value will be between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0, total_ms = 0;
	uint64_t free, total_free = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_group != mg)
			continue;
		total_ms++;
		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		free = (msp->ms_size - metaslab_allocated_space(msp)) /
		    SPA_MINBLOCKSIZE;	/* To prevent overflows. */
		total_free += free;
		fragmentation += msp->ms_fragmentation * free;
	}

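	/*
	 * Require a valid fragmentation metric on at least half (rounded up)
	 * of this group's metaslabs, and some free space, for the weighted
	 * average below to be meaningful.
	 */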
	if (valid_ms < (total_ms + 1) / 2 || total_free == 0)
		return (ZFS_FRAG_INVALID);

	fragmentation /= total_free;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree using 32-bit
 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
 */
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
	const zfs_range_seg32_t *r1 = x1;
	const zfs_range_seg32_t *r2 = x2;

	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = TREE_CMP(rs_size1, rs_size2);

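	/*
	 * Branchless tie-break: when the sizes differ, !cmp is 0 and the
	 * size comparison decides; when they are equal (cmp == 0), fall back
	 * to comparing start offsets so equal-sized segments remain totally
	 * ordered. The 64-bit variant below does the same.
	 */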
1445 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1446 }
1447
1448 /*
1449 * Comparison function for the private size-ordered tree using 64-bit
1450 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
1451 */
1452 __attribute__((always_inline)) inline
1453 static int
metaslab_rangesize64_compare(const void * x1,const void * x2)1454 metaslab_rangesize64_compare(const void *x1, const void *x2)
1455 {
1456 const zfs_range_seg64_t *r1 = x1;
1457 const zfs_range_seg64_t *r2 = x2;
1458
1459 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
1460 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
1461
1462 int cmp = TREE_CMP(rs_size1, rs_size2);
1463
1464 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
1465 }
1466
1467 typedef struct metaslab_rt_arg {
1468 zfs_btree_t *mra_bt;
1469 uint32_t mra_floor_shift;
1470 } metaslab_rt_arg_t;
1471
1472 struct mssa_arg {
1473 zfs_range_tree_t *rt;
1474 metaslab_rt_arg_t *mra;
1475 };
1476
1477 static void
metaslab_size_sorted_add(void * arg,uint64_t start,uint64_t size)1478 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
1479 {
1480 struct mssa_arg *mssap = arg;
1481 zfs_range_tree_t *rt = mssap->rt;
1482 metaslab_rt_arg_t *mrap = mssap->mra;
1483 zfs_range_seg_max_t seg = {0};
1484 zfs_rs_set_start(&seg, rt, start);
1485 zfs_rs_set_end(&seg, rt, start + size);
1486 metaslab_rt_add(rt, &seg, mrap);
1487 }
1488
1489 static void
metaslab_size_tree_full_load(zfs_range_tree_t * rt)1490 metaslab_size_tree_full_load(zfs_range_tree_t *rt)
1491 {
1492 metaslab_rt_arg_t *mrap = rt->rt_arg;
1493 METASLABSTAT_BUMP(metaslabstat_reload_tree);
1494 ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
1495 mrap->mra_floor_shift = 0;
1496 struct mssa_arg arg = {0};
1497 arg.rt = rt;
1498 arg.mra = mrap;
1499 zfs_range_tree_walk(rt, metaslab_size_sorted_add, &arg);
1500 }
1501
1502
ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,zfs_range_seg32_t,metaslab_rangesize32_compare)1503 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
1504 zfs_range_seg32_t, metaslab_rangesize32_compare)
1505
1506 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
1507 zfs_range_seg64_t, metaslab_rangesize64_compare)
1508
1509 /*
1510 * Create any block allocator specific components. The current allocators
1511 * rely on using both a size-ordered zfs_range_tree_t and an array of
1512 * uint64_t's.
1513 */
1514 static void
1515 metaslab_rt_create(zfs_range_tree_t *rt, void *arg)
1516 {
1517 metaslab_rt_arg_t *mrap = arg;
1518 zfs_btree_t *size_tree = mrap->mra_bt;
1519
1520 size_t size;
1521 int (*compare) (const void *, const void *);
1522 bt_find_in_buf_f bt_find;
1523 switch (rt->rt_type) {
1524 case ZFS_RANGE_SEG32:
1525 size = sizeof (zfs_range_seg32_t);
1526 compare = metaslab_rangesize32_compare;
1527 bt_find = metaslab_rt_find_rangesize32_in_buf;
1528 break;
1529 case ZFS_RANGE_SEG64:
1530 size = sizeof (zfs_range_seg64_t);
1531 compare = metaslab_rangesize64_compare;
1532 bt_find = metaslab_rt_find_rangesize64_in_buf;
1533 break;
1534 default:
1535 panic("Invalid range seg type %d", rt->rt_type);
1536 }
1537 zfs_btree_create(size_tree, compare, bt_find, size);
1538 mrap->mra_floor_shift = metaslab_by_size_min_shift;
1539 }
1540
1541 static void
metaslab_rt_destroy(zfs_range_tree_t * rt,void * arg)1542 metaslab_rt_destroy(zfs_range_tree_t *rt, void *arg)
1543 {
1544 (void) rt;
1545 metaslab_rt_arg_t *mrap = arg;
1546 zfs_btree_t *size_tree = mrap->mra_bt;
1547
1548 zfs_btree_destroy(size_tree);
1549 kmem_free(mrap, sizeof (*mrap));
1550 }
1551
1552 static void
metaslab_rt_add(zfs_range_tree_t * rt,zfs_range_seg_t * rs,void * arg)1553 metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
1554 {
1555 metaslab_rt_arg_t *mrap = arg;
1556 zfs_btree_t *size_tree = mrap->mra_bt;
1557
1558 if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) <
1559 (1ULL << mrap->mra_floor_shift))
1560 return;
1561
1562 zfs_btree_add(size_tree, rs);
1563 }
1564
1565 static void
metaslab_rt_remove(zfs_range_tree_t * rt,zfs_range_seg_t * rs,void * arg)1566 metaslab_rt_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
1567 {
1568 metaslab_rt_arg_t *mrap = arg;
1569 zfs_btree_t *size_tree = mrap->mra_bt;
1570
1571 if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) < (1ULL <<
1572 mrap->mra_floor_shift))
1573 return;
1574
1575 zfs_btree_remove(size_tree, rs);
1576 }
1577
1578 static void
metaslab_rt_vacate(zfs_range_tree_t * rt,void * arg)1579 metaslab_rt_vacate(zfs_range_tree_t *rt, void *arg)
1580 {
1581 metaslab_rt_arg_t *mrap = arg;
1582 zfs_btree_t *size_tree = mrap->mra_bt;
1583 zfs_btree_clear(size_tree);
1584 zfs_btree_destroy(size_tree);
1585
1586 metaslab_rt_create(rt, arg);
1587 }
1588
1589 static const zfs_range_tree_ops_t metaslab_rt_ops = {
1590 .rtop_create = metaslab_rt_create,
1591 .rtop_destroy = metaslab_rt_destroy,
1592 .rtop_add = metaslab_rt_add,
1593 .rtop_remove = metaslab_rt_remove,
1594 .rtop_vacate = metaslab_rt_vacate
1595 };
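
/*
 * To illustrate how this ops vector is used: a range tree wired up with
 * metaslab_rt_ops keeps a size-sorted B-tree in sync with the
 * offset-sorted tree automatically. A minimal sketch (variable names
 * hypothetical; the real wiring appears in metaslab_load_impl() and
 * metaslab_init() below):
 *
 *	metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
 *	mrap->mra_bt = &size_tree;
 *	mrap->mra_floor_shift = metaslab_by_size_min_shift;
 *	rt = zfs_range_tree_create(&metaslab_rt_ops, type, mrap,
 *	    start, shift);
 *
 * Every subsequent zfs_range_tree_add()/_remove() on rt then updates
 * the B-tree through the rtop_add()/rtop_remove() callbacks above,
 * skipping segments below the 1 << mra_floor_shift size floor.
 */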
1596
1597 /*
1598 * ==========================================================================
1599 * Common allocator routines
1600 * ==========================================================================
1601 */
1602
1603 /*
1604 * Return the maximum contiguous segment within the metaslab.
1605 */
1606 uint64_t
1607 metaslab_largest_allocatable(metaslab_t *msp)
1608 {
1609 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1610 zfs_range_seg_t *rs;
1611
1612 if (t == NULL)
1613 return (0);
1614 if (zfs_btree_numnodes(t) == 0)
1615 metaslab_size_tree_full_load(msp->ms_allocatable);
1616
1617 rs = zfs_btree_last(t, NULL);
1618 if (rs == NULL)
1619 return (0);
1620
1621 return (zfs_rs_get_end(rs, msp->ms_allocatable) - zfs_rs_get_start(rs,
1622 msp->ms_allocatable));
1623 }
1624
1625 /*
1626 * Return the maximum contiguous segment within the unflushed frees of this
1627 * metaslab.
1628 */
1629 static uint64_t
1630 metaslab_largest_unflushed_free(metaslab_t *msp)
1631 {
1632 ASSERT(MUTEX_HELD(&msp->ms_lock));
1633
1634 if (msp->ms_unflushed_frees == NULL)
1635 return (0);
1636
1637 if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
1638 metaslab_size_tree_full_load(msp->ms_unflushed_frees);
1639 zfs_range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
1640 NULL);
1641 if (rs == NULL)
1642 return (0);
1643
1644 /*
1645 * When a range is freed from the metaslab, that range is added to
1646 * both the unflushed frees and the deferred frees. While the block
1647 * will eventually be usable, if the metaslab were loaded the range
1648 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
1649 * txgs had passed. As a result, when attempting to estimate an upper
1650 * bound for the largest currently-usable free segment in the
1651 * metaslab, we must not consider any ranges currently in the defer
1652 * trees. This algorithm approximates the largest available chunk in
1653 * the largest range in the unflushed_frees tree by taking the first
1654 * chunk. While this may be a poor estimate, it should only remain so
1655 * briefly and should eventually self-correct as frees are no longer
1656 * deferred. Similar logic applies to the ms_freed tree. See
1657 * metaslab_load() for more details.
1658 *
1659 * There are two primary sources of inaccuracy in this estimate. Both
1660 * are tolerated for performance reasons. The first source is that we
1661 * only check the largest segment for overlaps. Smaller segments may
1662 * have more favorable overlaps with the other trees, resulting in
1663 * larger usable chunks. Second, we only look at the first chunk in
1664 * the largest segment; there may be other usable chunks in the
1665 * largest segment, but we ignore them.
1666 */
1667 uint64_t rstart = zfs_rs_get_start(rs, msp->ms_unflushed_frees);
1668 uint64_t rsize = zfs_rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
1669 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1670 uint64_t start = 0;
1671 uint64_t size = 0;
1672 boolean_t found = zfs_range_tree_find_in(msp->ms_defer[t],
1673 rstart, rsize, &start, &size);
1674 if (found) {
1675 if (rstart == start)
1676 return (0);
1677 rsize = start - rstart;
1678 }
1679 }
1680
1681 uint64_t start = 0;
1682 uint64_t size = 0;
1683 boolean_t found = zfs_range_tree_find_in(msp->ms_freed, rstart,
1684 rsize, &start, &size);
1685 if (found)
1686 rsize = start - rstart;
1687
1688 return (rsize);
1689 }
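
/*
 * Worked example of the clipping above (all numbers hypothetical):
 * suppose the largest unflushed-free segment is [100K, 200K), so
 * rstart = 100K and rsize = 100K. If ms_defer[0] contains [160K, 180K),
 * zfs_range_tree_find_in() reports start = 160K and rsize is clipped to
 * 160K - 100K = 60K. If ms_freed also contains [130K, 140K), rsize is
 * clipped again to 30K, which is what gets returned. Had a defer or
 * freed segment started exactly at rstart, the estimate would be 0.
 */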
1690
1691 static zfs_range_seg_t *
1692 metaslab_block_find(zfs_btree_t *t, zfs_range_tree_t *rt, uint64_t start,
1693 uint64_t size, zfs_btree_index_t *where)
1694 {
1695 zfs_range_seg_t *rs;
1696 zfs_range_seg_max_t rsearch;
1697
1698 zfs_rs_set_start(&rsearch, rt, start);
1699 zfs_rs_set_end(&rsearch, rt, start + size);
1700
1701 rs = zfs_btree_find(t, &rsearch, where);
1702 if (rs == NULL) {
1703 rs = zfs_btree_next(t, where, where);
1704 }
1705
1706 return (rs);
1707 }
1708
1709 /*
1710 * This is a helper function that can be used by the allocator to find a
1711 * suitable block to allocate. This will search the specified B-tree looking
1712 * for a block that matches the specified criteria.
1713 */
1714 static uint64_t
1715 metaslab_block_picker(zfs_range_tree_t *rt, uint64_t *cursor, uint64_t size,
1716 uint64_t max_search)
1717 {
1718 if (*cursor == 0)
1719 *cursor = rt->rt_start;
1720 zfs_btree_t *bt = &rt->rt_root;
1721 zfs_btree_index_t where;
1722 zfs_range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size,
1723 &where);
1724 uint64_t first_found;
1725 int count_searched = 0;
1726
1727 if (rs != NULL)
1728 first_found = zfs_rs_get_start(rs, rt);
1729
1730 while (rs != NULL && (zfs_rs_get_start(rs, rt) - first_found <=
1731 max_search || count_searched < metaslab_min_search_count)) {
1732 uint64_t offset = zfs_rs_get_start(rs, rt);
1733 if (offset + size <= zfs_rs_get_end(rs, rt)) {
1734 *cursor = offset + size;
1735 return (offset);
1736 }
1737 rs = zfs_btree_next(bt, &where, &where);
1738 count_searched++;
1739 }
1740
1741 *cursor = 0;
1742 return (-1ULL);
1743 }
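
/*
 * Example walk (hypothetical numbers): with *cursor = 1M, size = 64K,
 * and max_search = 16M, the search starts from the first segment
 * overlapping or following [1M, 1M + 64K). Segments too small to hold
 * 64K are skipped; the first segment with offset + 64K <= end wins,
 * the cursor advances to offset + 64K, and offset is returned. Once
 * candidates lie more than 16M past the first one found (and at least
 * metaslab_min_search_count segments were examined), the search gives
 * up, resets the cursor, and returns -1ULL so the caller can fall back
 * to a size-based lookup.
 */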
1744
1745 static uint64_t metaslab_df_alloc(metaslab_t *msp, uint64_t size);
1746 static uint64_t metaslab_cf_alloc(metaslab_t *msp, uint64_t size);
1747 static uint64_t metaslab_ndf_alloc(metaslab_t *msp, uint64_t size);
1748 metaslab_ops_t *metaslab_allocator(spa_t *spa);
1749
1750 static metaslab_ops_t metaslab_allocators[] = {
1751 { "dynamic", metaslab_df_alloc },
1752 { "cursor", metaslab_cf_alloc },
1753 { "new-dynamic", metaslab_ndf_alloc },
1754 };
1755
1756 static int
1757 spa_find_allocator_byname(const char *val)
1758 {
1759 int a = ARRAY_SIZE(metaslab_allocators) - 1;
1760 if (strcmp("new-dynamic", val) == 0)
1761 return (-1); /* remove when ndf is working */
1762 for (; a >= 0; a--) {
1763 if (strcmp(val, metaslab_allocators[a].msop_name) == 0)
1764 return (a);
1765 }
1766 return (-1);
1767 }
1768
1769 void
1770 spa_set_allocator(spa_t *spa, const char *allocator)
1771 {
1772 int a = spa_find_allocator_byname(allocator);
1773 if (a < 0) a = 0;
1774 spa->spa_active_allocator = a;
1775 zfs_dbgmsg("spa allocator: %s", metaslab_allocators[a].msop_name);
1776 }
1777
1778 int
1779 spa_get_allocator(spa_t *spa)
1780 {
1781 return (spa->spa_active_allocator);
1782 }
1783
1784 #if defined(_KERNEL)
1785 int
1786 param_set_active_allocator_common(const char *val)
1787 {
1788 char *p;
1789
1790 if (val == NULL)
1791 return (SET_ERROR(EINVAL));
1792
1793 if ((p = strchr(val, '\n')) != NULL)
1794 *p = '\0';
1795
1796 int a = spa_find_allocator_byname(val);
1797 if (a < 0)
1798 return (SET_ERROR(EINVAL));
1799
1800 zfs_active_allocator = metaslab_allocators[a].msop_name;
1801 return (0);
1802 }
1803 #endif
1804
1805 metaslab_ops_t *
1806 metaslab_allocator(spa_t *spa)
1807 {
1808 int allocator = spa_get_allocator(spa);
1809 return (&metaslab_allocators[allocator]);
1810 }
1811
1812 /*
1813 * ==========================================================================
1814 * Dynamic Fit (df) block allocator
1815 *
1816 * Search for a free chunk of at least this size, starting from the last
1817 * offset (for this alignment of block) looking for up to
1818 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not
1819 * found within 16MB, then return a free chunk of exactly the requested size (or
1820 * larger).
1821 *
1822 * If it seems like searching from the last offset will be unproductive, skip
1823 * that and just return a free chunk of exactly the requested size (or larger).
1824 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This
1825 * mechanism is probably not very useful and may be removed in the future.
1826 *
1827 * The behavior when not searching can be changed to return the largest free
1828 * chunk, instead of a free chunk of exactly the requested size, by setting
1829 * metaslab_df_use_largest_segment.
1830 * ==========================================================================
1831 */
1832 static uint64_t
1833 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1834 {
1835 /*
1836 * Find the largest power of 2 block size that evenly divides the
1837 * requested size. This is used to try to allocate blocks with similar
1838 * alignment from the same area of the metaslab (i.e. same cursor
1839 bucket), but it does not guarantee that allocations of other
1840 sizes will not be placed in the same region.
1841 */
1842 uint64_t align = size & -size;
1843 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1844 zfs_range_tree_t *rt = msp->ms_allocatable;
1845 uint_t free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size;
1846 uint64_t offset;
1847
1848 ASSERT(MUTEX_HELD(&msp->ms_lock));
1849
1850 /*
1851 * If we're running low on space, find a segment based on size,
1852 * rather than iterating based on offset.
1853 */
1854 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold ||
1855 free_pct < metaslab_df_free_pct) {
1856 offset = -1;
1857 } else {
1858 offset = metaslab_block_picker(rt,
1859 cursor, size, metaslab_df_max_search);
1860 }
1861
1862 if (offset == -1) {
1863 zfs_range_seg_t *rs;
1864 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0)
1865 metaslab_size_tree_full_load(msp->ms_allocatable);
1866
1867 if (metaslab_df_use_largest_segment) {
1868 /* use largest free segment */
1869 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL);
1870 } else {
1871 zfs_btree_index_t where;
1872 /* use segment of this size, or next largest */
1873 rs = metaslab_block_find(&msp->ms_allocatable_by_size,
1874 rt, msp->ms_start, size, &where);
1875 }
1876 if (rs != NULL && zfs_rs_get_start(rs, rt) + size <=
1877 zfs_rs_get_end(rs, rt)) {
1878 offset = zfs_rs_get_start(rs, rt);
1879 *cursor = offset + size;
1880 }
1881 }
1882
1883 return (offset);
1884 }
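
/*
 * Cursor-bucket example for the alignment math above (size is
 * hypothetical): for size = 0x6000 (24K), size & -size = 0x2000 (8K),
 * so highbit64(align) = 14 and the allocation uses ms_lbas[13]. All
 * sizes whose largest power-of-2 divisor is 8K share that cursor and
 * therefore tend to pack into the same region of the metaslab.
 */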
1885
1886 /*
1887 * ==========================================================================
1888 * Cursor fit block allocator -
1889 * Select the largest region in the metaslab, set the cursor to the beginning
1890 * of the range and the cursor_end to the end of the range. As allocations
1891 * are made advance the cursor. Continue allocating from the cursor until
1892 * the range is exhausted and then find a new range.
1893 * ==========================================================================
1894 */
1895 static uint64_t
1896 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1897 {
1898 zfs_range_tree_t *rt = msp->ms_allocatable;
1899 zfs_btree_t *t = &msp->ms_allocatable_by_size;
1900 uint64_t *cursor = &msp->ms_lbas[0];
1901 uint64_t *cursor_end = &msp->ms_lbas[1];
1902 uint64_t offset = 0;
1903
1904 ASSERT(MUTEX_HELD(&msp->ms_lock));
1905
1906 ASSERT3U(*cursor_end, >=, *cursor);
1907
1908 if ((*cursor + size) > *cursor_end) {
1909 zfs_range_seg_t *rs;
1910
1911 if (zfs_btree_numnodes(t) == 0)
1912 metaslab_size_tree_full_load(msp->ms_allocatable);
1913 rs = zfs_btree_last(t, NULL);
1914 if (rs == NULL || (zfs_rs_get_end(rs, rt) -
1915 zfs_rs_get_start(rs, rt)) < size)
1916 return (-1ULL);
1917
1918 *cursor = zfs_rs_get_start(rs, rt);
1919 *cursor_end = zfs_rs_get_end(rs, rt);
1920 }
1921
1922 offset = *cursor;
1923 *cursor += size;
1924
1925 return (offset);
1926 }
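
/*
 * Example (hypothetical numbers): if the largest free region is
 * [3M, 5M), the first allocation sets *cursor = 3M and
 * *cursor_end = 5M. Subsequent 128K allocations return 3M, 3M + 128K,
 * and so on, until the cursor would pass 5M, at which point the
 * then-largest region is selected and the cursors reset.
 */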
1927
1928 /*
1929 * ==========================================================================
1930 * New dynamic fit allocator -
1931 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1932 * contiguous blocks. If no region is found then just use the largest segment
1933 * that remains.
1934 * ==========================================================================
1935 */
1936
1937 /*
1938 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1939 * to request from the allocator.
1940 */
1941 uint64_t metaslab_ndf_clump_shift = 4;
1942
1943 static uint64_t
1944 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1945 {
1946 zfs_btree_t *t = &msp->ms_allocatable->rt_root;
1947 zfs_range_tree_t *rt = msp->ms_allocatable;
1948 zfs_btree_index_t where;
1949 zfs_range_seg_t *rs;
1950 zfs_range_seg_max_t rsearch;
1951 uint64_t hbit = highbit64(size);
1952 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1953 uint64_t max_size = metaslab_largest_allocatable(msp);
1954
1955 ASSERT(MUTEX_HELD(&msp->ms_lock));
1956
1957 if (max_size < size)
1958 return (-1ULL);
1959
1960 zfs_rs_set_start(&rsearch, rt, *cursor);
1961 zfs_rs_set_end(&rsearch, rt, *cursor + size);
1962
1963 rs = zfs_btree_find(t, &rsearch, &where);
1964 if (rs == NULL || (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) <
1965 size) {
1966 t = &msp->ms_allocatable_by_size;
1967
1968 zfs_rs_set_start(&rsearch, rt, 0);
1969 zfs_rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit +
1970 metaslab_ndf_clump_shift)));
1971
1972 rs = zfs_btree_find(t, &rsearch, &where);
1973 if (rs == NULL)
1974 rs = zfs_btree_next(t, &where, &where);
1975 ASSERT(rs != NULL);
1976 }
1977
1978 if ((zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) >= size) {
1979 *cursor = zfs_rs_get_start(rs, rt) + size;
1980 return (zfs_rs_get_start(rs, rt));
1981 }
1982 return (-1ULL);
1983 }
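
/*
 * Clump-sizing example (size is hypothetical): for an 8K allocation,
 * hbit = highbit64(8K) = 14, so when the cursor lookup fails the
 * fallback searches the size tree for a segment of up to
 * MIN(max_size, 1ULL << (14 + metaslab_ndf_clump_shift)), i.e. up to
 * 256K with the default clump shift of 4, leaving room for a clump of
 * contiguous 8K blocks.
 */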
1984
1985 /*
1986 * ==========================================================================
1987 * Metaslabs
1988 * ==========================================================================
1989 */
1990
1991 /*
1992 * Wait for any in-progress metaslab loads to complete.
1993 */
1994 static void
1995 metaslab_load_wait(metaslab_t *msp)
1996 {
1997 ASSERT(MUTEX_HELD(&msp->ms_lock));
1998
1999 while (msp->ms_loading) {
2000 ASSERT(!msp->ms_loaded);
2001 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
2002 }
2003 }
2004
2005 /*
2006 * Wait for any in-progress flushing to complete.
2007 */
2008 static void
2009 metaslab_flush_wait(metaslab_t *msp)
2010 {
2011 ASSERT(MUTEX_HELD(&msp->ms_lock));
2012
2013 while (msp->ms_flushing)
2014 cv_wait(&msp->ms_flush_cv, &msp->ms_lock);
2015 }
2016
2017 static unsigned int
2018 metaslab_idx_func(multilist_t *ml, void *arg)
2019 {
2020 metaslab_t *msp = arg;
2021
2022 /*
2023 * ms_id values are allocated sequentially, so full 64-bit
2024 * division would be a waste of time; limit it to 32 bits.
2025 */
2026 return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml));
2027 }
2028
2029 uint64_t
2030 metaslab_allocated_space(metaslab_t *msp)
2031 {
2032 return (msp->ms_allocated_space);
2033 }
2034
2035 /*
2036 * Verify that the space accounting on disk matches the in-core range_trees.
2037 */
2038 static void
2039 metaslab_verify_space(metaslab_t *msp, uint64_t txg)
2040 {
2041 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2042 uint64_t allocating = 0;
2043 uint64_t sm_free_space, msp_free_space;
2044
2045 ASSERT(MUTEX_HELD(&msp->ms_lock));
2046 ASSERT(!msp->ms_condensing);
2047
2048 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2049 return;
2050
2051 /*
2052 * We can only verify the metaslab space when we're called
2053 * from syncing context with a loaded metaslab that has an
2054 * allocated space map. Calling this in non-syncing context
2055 * does not provide a consistent view of the metaslab since
2056 * we're performing allocations in the future.
2057 */
2058 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
2059 !msp->ms_loaded)
2060 return;
2061
2062 /*
2063 * Even though the smp_alloc field can get negative,
2064 * when it comes to a metaslab's space map, that should
2065 * never be the case.
2066 */
2067 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);
2068
2069 ASSERT3U(space_map_allocated(msp->ms_sm), >=,
2070 zfs_range_tree_space(msp->ms_unflushed_frees));
2071
2072 ASSERT3U(metaslab_allocated_space(msp), ==,
2073 space_map_allocated(msp->ms_sm) +
2074 zfs_range_tree_space(msp->ms_unflushed_allocs) -
2075 zfs_range_tree_space(msp->ms_unflushed_frees));
2076
2077 sm_free_space = msp->ms_size - metaslab_allocated_space(msp);
2078
2079 /*
2080 * Account for future allocations since we would have
2081 * already deducted that space from the ms_allocatable.
2082 */
2083 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
2084 allocating +=
2085 zfs_range_tree_space(msp->ms_allocating[(txg + t) &
2086 TXG_MASK]);
2087 }
2088 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==,
2089 msp->ms_allocating_total);
2090
2091 ASSERT3U(msp->ms_deferspace, ==,
2092 zfs_range_tree_space(msp->ms_defer[0]) +
2093 zfs_range_tree_space(msp->ms_defer[1]));
2094
2095 msp_free_space = zfs_range_tree_space(msp->ms_allocatable) +
2096 allocating + msp->ms_deferspace +
2097 zfs_range_tree_space(msp->ms_freed);
2098
2099 VERIFY3U(sm_free_space, ==, msp_free_space);
2100 }
2101
2102 static void
2103 metaslab_aux_histograms_clear(metaslab_t *msp)
2104 {
2105 /*
2106 * Auxiliary histograms are only cleared when resetting them,
2107 * which can only happen while the metaslab is loaded.
2108 */
2109 ASSERT(msp->ms_loaded);
2110
2111 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2112 for (int t = 0; t < TXG_DEFER_SIZE; t++)
2113 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t]));
2114 }
2115
2116 static void
2117 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
2118 zfs_range_tree_t *rt)
2119 {
2120 /*
2121 * This is modeled after space_map_histogram_add(), so refer to that
2122 * function for implementation details. We want this to work like
2123 * the space map histogram, and not the range tree histogram, as we
2124 * are essentially constructing a delta that will be later subtracted
2125 * from the space map histogram.
2126 */
2127 int idx = 0;
2128 for (int i = shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
2129 ASSERT3U(i, >=, idx + shift);
2130 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);
2131
2132 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
2133 ASSERT3U(idx + shift, ==, i);
2134 idx++;
2135 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
2136 }
2137 }
2138 }
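
/*
 * Shift example (values hypothetical): with sm_shift = 9, the count of
 * 4K segments at range tree histogram index i = 12 lands at histogram
 * index idx = i - shift = 3. Once idx caps at
 * SPACE_MAP_HISTOGRAM_SIZE - 1, larger range tree buckets keep
 * accumulating into that last slot, scaled by 1 << (i - idx - shift)
 * so the amount of space represented is preserved.
 */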
2139
2140 /*
2141 * Called at every sync pass that the metaslab gets synced.
2142 *
2143 * The reason is that we want our auxiliary histograms to be updated
2144 * whenever the metaslab's space map histogram is updated. This way
2145 * we stay consistent on which parts of the metaslab space map's
2146 * histogram are currently not available for allocations (e.g. because
2147 * they are in the defer, freed, and freeing trees).
2148 */
2149 static void
2150 metaslab_aux_histograms_update(metaslab_t *msp)
2151 {
2152 space_map_t *sm = msp->ms_sm;
2153 ASSERT(sm != NULL);
2154
2155 /*
2156 * This is similar to the metaslab's space map histogram updates
2157 * that take place in metaslab_sync(). The only difference is that
2158 * we only care about segments that haven't made it into the
2159 * ms_allocatable tree yet.
2160 */
2161 if (msp->ms_loaded) {
2162 metaslab_aux_histograms_clear(msp);
2163
2164 metaslab_aux_histogram_add(msp->ms_synchist,
2165 sm->sm_shift, msp->ms_freed);
2166
2167 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2168 metaslab_aux_histogram_add(msp->ms_deferhist[t],
2169 sm->sm_shift, msp->ms_defer[t]);
2170 }
2171 }
2172
2173 metaslab_aux_histogram_add(msp->ms_synchist,
2174 sm->sm_shift, msp->ms_freeing);
2175 }
2176
2177 /*
2178 * Called every time we are done syncing (writing to) the metaslab,
2179 * i.e. at the end of each sync pass.
2180 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
2181 */
2182 static void
2183 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
2184 {
2185 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2186 space_map_t *sm = msp->ms_sm;
2187
2188 if (sm == NULL) {
2189 /*
2190 * We came here from metaslab_init() when creating/opening a
2191 * pool, looking at a metaslab that hasn't had any allocations
2192 * yet.
2193 */
2194 return;
2195 }
2196
2197 /*
2198 * This is similar to the actions that we take for the ms_freed
2199 * and ms_defer trees in metaslab_sync_done().
2200 */
2201 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
2202 if (defer_allowed) {
2203 memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist,
2204 sizeof (msp->ms_synchist));
2205 } else {
2206 memset(msp->ms_deferhist[hist_index], 0,
2207 sizeof (msp->ms_deferhist[hist_index]));
2208 }
2209 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist));
2210 }
2211
2212 /*
2213 * Ensure that the metaslab's weight and fragmentation are consistent
2214 * with the contents of the histogram (either the range tree's histogram
2215 * or the space map's depending whether the metaslab is loaded).
2216 */
2217 static void
2218 metaslab_verify_weight_and_frag(metaslab_t *msp)
2219 {
2220 ASSERT(MUTEX_HELD(&msp->ms_lock));
2221
2222 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
2223 return;
2224
2225 /*
2226 * We can end up here from vdev_remove_complete(), in which case we
2227 * cannot do these assertions because we hold spa config locks and
2228 * thus we are not allowed to read from the DMU.
2229 *
2230 * We check if the metaslab group has been removed and if that's
2231 * the case we return immediately as that would mean that we are
2232 * here from the aforementioned code path.
2233 */
2234 if (msp->ms_group == NULL)
2235 return;
2236
2237 /*
2238 * Devices being removed always return a weight of 0 and leave
2239 * fragmentation and ms_max_size as is - there is nothing for
2240 * us to verify here.
2241 */
2242 vdev_t *vd = msp->ms_group->mg_vd;
2243 if (vd->vdev_removing)
2244 return;
2245
2246 /*
2247 * If the metaslab is dirty it probably means that we've done
2248 * some allocations or frees that have changed our histograms
2249 * and thus the weight.
2250 */
2251 for (int t = 0; t < TXG_SIZE; t++) {
2252 if (txg_list_member(&vd->vdev_ms_list, msp, t))
2253 return;
2254 }
2255
2256 /*
2257 * This verification checks that our in-memory state is consistent
2258 * with what's on disk. If the pool is read-only then there aren't
2259 * any changes and we just have the initially-loaded state.
2260 */
2261 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
2262 return;
2263
2264 /* some extra verification for in-core tree if you can */
2265 if (msp->ms_loaded) {
2266 zfs_range_tree_stat_verify(msp->ms_allocatable);
2267 VERIFY(space_map_histogram_verify(msp->ms_sm,
2268 msp->ms_allocatable));
2269 }
2270
2271 uint64_t weight = msp->ms_weight;
2272 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2273 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
2274 uint64_t frag = msp->ms_fragmentation;
2275 uint64_t max_segsize = msp->ms_max_size;
2276
2277 msp->ms_weight = 0;
2278 msp->ms_fragmentation = 0;
2279
2280 /*
2281 * This function is used for verification purposes and thus should
2282 * not introduce any side-effects/mutations on the system's state.
2283 *
2284 * Regardless of whether metaslab_weight() thinks this metaslab
2285 * should be active or not, we want to ensure that the actual weight
2286 * (and therefore the value of ms_weight) would be the same if it
2287 * was to be recalculated at this point.
2288 *
2289 * In addition we set the nodirty flag so metaslab_weight() does
2290 * not dirty the metaslab for future TXGs (e.g. when trying to
2291 * force condensing to upgrade the metaslab spacemaps).
2292 */
2293 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active;
2294
2295 VERIFY3U(max_segsize, ==, msp->ms_max_size);
2296
2297 /*
2298 * If the weight type changed then there is no point in doing
2299 * verification. Revert fields to their original values.
2300 */
2301 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2302 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2303 msp->ms_fragmentation = frag;
2304 msp->ms_weight = weight;
2305 return;
2306 }
2307
2308 VERIFY3U(msp->ms_fragmentation, ==, frag);
2309 VERIFY3U(msp->ms_weight, ==, weight);
2310 }
2311
2312 /*
2313 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2314 * this class that was used longest ago, and attempt to unload it. We don't
2315 * want to spend too much time in this loop to prevent performance
2316 * degradation, and we expect that most of the time this operation will
2317 * succeed. Between that and the normal unloading processing during txg sync,
2318 * we expect this to keep the metaslab memory usage under control.
2319 */
2320 static void
2321 metaslab_potentially_evict(metaslab_class_t *mc)
2322 {
2323 #ifdef _KERNEL
2324 uint64_t allmem = arc_all_memory();
2325 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2326 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2327 uint_t tries = 0;
2328 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2329 tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2330 tries++) {
2331 unsigned int idx = multilist_get_random_index(
2332 &mc->mc_metaslab_txg_list);
2333 multilist_sublist_t *mls =
2334 multilist_sublist_lock_idx(&mc->mc_metaslab_txg_list, idx);
2335 metaslab_t *msp = multilist_sublist_head(mls);
2336 multilist_sublist_unlock(mls);
2337 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2338 inuse * size) {
2339 VERIFY3P(mls, ==, multilist_sublist_lock_idx(
2340 &mc->mc_metaslab_txg_list, idx));
2341 ASSERT3U(idx, ==,
2342 metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2343
2344 if (!multilist_link_active(&msp->ms_class_txg_node)) {
2345 multilist_sublist_unlock(mls);
2346 break;
2347 }
2348 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2349 multilist_sublist_unlock(mls);
2350 /*
2351 * If the metaslab is currently loading there are two
2352 * cases. If it's the metaslab we're evicting, we
2353 * can't continue on or we'll panic when we attempt to
2354 * recursively lock the mutex. If it's another
2355 * metaslab that's loading, it can be safely skipped,
2356 * since we know it's very new and therefore not a
2357 * good eviction candidate. We check later once the
2358 * lock is held that the metaslab is fully loaded
2359 * before actually unloading it.
2360 */
2361 if (msp->ms_loading) {
2362 msp = next_msp;
2363 inuse =
2364 spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2365 continue;
2366 }
2367 /*
2368 * We can't unload metaslabs with no spacemap because
2369 * they're not ready to be unloaded yet. We can't
2370 * unload metaslabs with outstanding allocations
2371 * because doing so could cause the metaslab's weight
2372 * to decrease while it's unloaded, which violates an
2373 * invariant that we use to prevent unnecessary
2374 * loading. We also don't unload metaslabs that are
2375 * currently active because they are high-weight
2376 * metaslabs that are likely to be used in the near
2377 * future.
2378 */
2379 mutex_enter(&msp->ms_lock);
2380 if (msp->ms_allocator == -1 && msp->ms_sm != NULL &&
2381 msp->ms_allocating_total == 0) {
2382 metaslab_unload(msp);
2383 }
2384 mutex_exit(&msp->ms_lock);
2385 msp = next_msp;
2386 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2387 }
2388 }
2389 #else
2390 (void) mc, (void) zfs_metaslab_mem_limit;
2391 #endif
2392 }
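
/*
 * The trigger for the loop above is, in essence:
 *
 *	arc_all_memory() * zfs_metaslab_mem_limit / 100 <
 *	    cache_entries_in_use * entry_size
 *
 * For example (numbers hypothetical), with 16 GiB of memory and a
 * limit of 25, eviction begins once the btree leaf cache grows past
 * 4 GiB, and proceeds until usage drops back under the limit or the
 * sublist scan budget is exhausted.
 */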
2393
2394 static int
2395 metaslab_load_impl(metaslab_t *msp)
2396 {
2397 int error = 0;
2398
2399 ASSERT(MUTEX_HELD(&msp->ms_lock));
2400 ASSERT(msp->ms_loading);
2401 ASSERT(!msp->ms_condensing);
2402
2403 /*
2404 * We temporarily drop the lock to unblock other operations while we
2405 * are reading the space map. Therefore, metaslab_sync() and
2406 * metaslab_sync_done() can run at the same time as we do.
2407 *
2408 * If we are using the log space maps, metaslab_sync() can't write to
2409 * the metaslab's space map while we are loading as we only write to
2410 * it when we are flushing the metaslab, and that can't happen while
2411 * we are loading it.
2412 *
2413 * If we are not using log space maps though, metaslab_sync() can
2414 * append to the space map while we are loading. Therefore we load
2415 * only entries that existed when we started the load. Additionally,
2416 * metaslab_sync_done() has to wait for the load to complete because
2417 * there are potential races like metaslab_load() loading parts of the
2418 * space map that are currently being appended by metaslab_sync(). If
2419 * we didn't, the ms_allocatable would have entries that
2420 * metaslab_sync_done() would try to re-add later.
2421 *
2422 * That's why before dropping the lock we remember the synced length
2423 * of the metaslab and read up to that point of the space map,
2424 * ignoring entries appended by metaslab_sync() that happen after we
2425 * drop the lock.
2426 */
2427 uint64_t length = msp->ms_synced_length;
2428 mutex_exit(&msp->ms_lock);
2429
2430 hrtime_t load_start = gethrtime();
2431 metaslab_rt_arg_t *mrap;
2432 if (msp->ms_allocatable->rt_arg == NULL) {
2433 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2434 } else {
2435 mrap = msp->ms_allocatable->rt_arg;
2436 msp->ms_allocatable->rt_ops = NULL;
2437 msp->ms_allocatable->rt_arg = NULL;
2438 }
2439 mrap->mra_bt = &msp->ms_allocatable_by_size;
2440 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2441
2442 if (msp->ms_sm != NULL) {
2443 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
2444 SM_FREE, length);
2445
2446 /* Now, populate the size-sorted tree. */
2447 metaslab_rt_create(msp->ms_allocatable, mrap);
2448 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2449 msp->ms_allocatable->rt_arg = mrap;
2450
2451 struct mssa_arg arg = {0};
2452 arg.rt = msp->ms_allocatable;
2453 arg.mra = mrap;
2454 zfs_range_tree_walk(msp->ms_allocatable,
2455 metaslab_size_sorted_add, &arg);
2456 } else {
2457 /*
2458 * Add the size-sorted tree first, since we don't need to load
2459 * the metaslab from the spacemap.
2460 */
2461 metaslab_rt_create(msp->ms_allocatable, mrap);
2462 msp->ms_allocatable->rt_ops = &metaslab_rt_ops;
2463 msp->ms_allocatable->rt_arg = mrap;
2464 /*
2465 * The space map has not been allocated yet, so treat
2466 * all the space in the metaslab as free and add it to the
2467 * ms_allocatable tree.
2468 */
2469 zfs_range_tree_add(msp->ms_allocatable,
2470 msp->ms_start, msp->ms_size);
2471
2472 if (msp->ms_new) {
2473 /*
2474 * If the ms_sm doesn't exist, this means that this
2475 * metaslab hasn't gone through metaslab_sync() and
2476 * thus has never been dirtied. So we shouldn't
2477 * expect any unflushed allocs or frees from previous
2478 * TXGs.
2479 */
2480 ASSERT(zfs_range_tree_is_empty(
2481 msp->ms_unflushed_allocs));
2482 ASSERT(zfs_range_tree_is_empty(
2483 msp->ms_unflushed_frees));
2484 }
2485 }
2486
2487 /*
2488 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
2489 * changing the ms_sm (or log_sm) and the metaslab's range trees
2490 * while we are about to use them and populate the ms_allocatable.
2491 * The ms_lock is insufficient for this because metaslab_sync() doesn't
2492 * hold the ms_lock while writing the ms_checkpointing tree to disk.
2493 */
2494 mutex_enter(&msp->ms_sync_lock);
2495 mutex_enter(&msp->ms_lock);
2496
2497 ASSERT(!msp->ms_condensing);
2498 ASSERT(!msp->ms_flushing);
2499
2500 if (error != 0) {
2501 mutex_exit(&msp->ms_sync_lock);
2502 return (error);
2503 }
2504
2505 ASSERT3P(msp->ms_group, !=, NULL);
2506 msp->ms_loaded = B_TRUE;
2507
2508 /*
2509 * Apply all the unflushed changes to ms_allocatable right
2510 * away so any manipulations we do below have a clear view
2511 * of what is allocated and what is free.
2512 */
2513 zfs_range_tree_walk(msp->ms_unflushed_allocs,
2514 zfs_range_tree_remove, msp->ms_allocatable);
2515 zfs_range_tree_walk(msp->ms_unflushed_frees,
2516 zfs_range_tree_add, msp->ms_allocatable);
2517
2518 ASSERT3P(msp->ms_group, !=, NULL);
2519 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2520 if (spa_syncing_log_sm(spa) != NULL) {
2521 ASSERT(spa_feature_is_enabled(spa,
2522 SPA_FEATURE_LOG_SPACEMAP));
2523
2524 /*
2525 * If we use a log space map we add all the segments
2526 * that are in ms_unflushed_frees so they are available
2527 * for allocation.
2528 *
2529 * ms_allocatable needs to contain all free segments
2530 * that are ready for allocations (thus not segments
2531 * from ms_freeing, ms_freed, and the ms_defer trees).
2532 * But if we grab the lock in this code path at a sync
2533 * pass later than 1, then it also contains the
2534 * segments of ms_freed (they were added to it earlier
2535 * in this path through ms_unflushed_frees). So we
2536 * need to remove all the segments that exist in
2537 * ms_freed from ms_allocatable as they will be added
2538 * later in metaslab_sync_done().
2539 *
2540 * When there's no log space map, the ms_allocatable
2541 * correctly doesn't contain any segments that exist
2542 * in ms_freed [see ms_synced_length].
2543 */
2544 zfs_range_tree_walk(msp->ms_freed,
2545 zfs_range_tree_remove, msp->ms_allocatable);
2546 }
2547
2548 /*
2549 * If we are not using the log space map, ms_allocatable
2550 * contains the segments that exist in the ms_defer trees
2551 * [see ms_synced_length]. Thus we need to remove them
2552 * from ms_allocatable as they will be added again in
2553 * metaslab_sync_done().
2554 *
2555 * If we are using the log space map, ms_allocatable still
2556 * contains the segments that exist in the ms_defer trees,
2557 * not because it read them through the ms_sm, but because
2558 * these segments are part of ms_unflushed_frees, whose
2559 * contents we added to ms_allocatable earlier in this
2560 * code path.
2561 */
2562 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2563 zfs_range_tree_walk(msp->ms_defer[t],
2564 zfs_range_tree_remove, msp->ms_allocatable);
2565 }
2566
2567 /*
2568 * Call metaslab_recalculate_weight_and_sort() now that the
2569 * metaslab is loaded so we get the metaslab's real weight.
2570 *
2571 * Unless this metaslab was created with older software and
2572 * has not yet been converted to use segment-based weight, we
2573 * expect the new weight to be better or equal to the weight
2574 * that the metaslab had while it was not loaded. This is
2575 * because the old weight does not take into account the
2576 * consolidation of adjacent segments between TXGs. [see
2577 * comment for ms_synchist and ms_deferhist[] for more info]
2578 */
2579 uint64_t weight = msp->ms_weight;
2580 uint64_t max_size = msp->ms_max_size;
2581 metaslab_recalculate_weight_and_sort(msp);
2582 if (!WEIGHT_IS_SPACEBASED(weight))
2583 ASSERT3U(weight, <=, msp->ms_weight);
2584 msp->ms_max_size = metaslab_largest_allocatable(msp);
2585 ASSERT3U(max_size, <=, msp->ms_max_size);
2586 hrtime_t load_end = gethrtime();
2587 msp->ms_load_time = load_end;
2588 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, "
2589 "ms_id %llu, smp_length %llu, "
2590 "unflushed_allocs %llu, unflushed_frees %llu, "
2591 "freed %llu, defer %llu + %llu, unloaded time %llu ms, "
2592 "loading_time %lld ms, ms_max_size %llu, "
2593 "max size error %lld, "
2594 "old_weight %llx, new_weight %llx",
2595 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2596 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2597 (u_longlong_t)msp->ms_id,
2598 (u_longlong_t)space_map_length(msp->ms_sm),
2599 (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_allocs),
2600 (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_frees),
2601 (u_longlong_t)zfs_range_tree_space(msp->ms_freed),
2602 (u_longlong_t)zfs_range_tree_space(msp->ms_defer[0]),
2603 (u_longlong_t)zfs_range_tree_space(msp->ms_defer[1]),
2604 (longlong_t)((load_start - msp->ms_unload_time) / 1000000),
2605 (longlong_t)((load_end - load_start) / 1000000),
2606 (u_longlong_t)msp->ms_max_size,
2607 (u_longlong_t)msp->ms_max_size - max_size,
2608 (u_longlong_t)weight, (u_longlong_t)msp->ms_weight);
2609
2610 metaslab_verify_space(msp, spa_syncing_txg(spa));
2611 mutex_exit(&msp->ms_sync_lock);
2612 return (0);
2613 }
2614
2615 int
2616 metaslab_load(metaslab_t *msp)
2617 {
2618 ASSERT(MUTEX_HELD(&msp->ms_lock));
2619
2620 /*
2621 * There may be another thread loading the same metaslab, if that's
2622 * the case just wait until the other thread is done and return.
2623 */
2624 metaslab_load_wait(msp);
2625 if (msp->ms_loaded)
2626 return (0);
2627 VERIFY(!msp->ms_loading);
2628 ASSERT(!msp->ms_condensing);
2629
2630 /*
2631 * We set the loading flag BEFORE potentially dropping the lock to
2632 * wait for an ongoing flush (see ms_flushing below). This way other
2633 * threads know that there is already a thread that is loading this
2634 * metaslab.
2635 */
2636 msp->ms_loading = B_TRUE;
2637
2638 /*
2639 * Wait for any in-progress flushing to finish as we drop the ms_lock
2640 * both here (during space_map_load()) and in metaslab_flush() (when
2641 * we flush our changes to the ms_sm).
2642 */
2643 if (msp->ms_flushing)
2644 metaslab_flush_wait(msp);
2645
2646 /*
2647 * In case we were waiting for the metaslab to be
2648 * flushed (where we temporarily dropped the ms_lock), ensure that
2649 * no one else loaded the metaslab in the meantime.
2650 */
2651 ASSERT(!msp->ms_loaded);
2652
2653 /*
2654 * If we're loading a metaslab in the normal class, consider evicting
2655 * another one to keep our memory usage under the limit defined by the
2656 * zfs_metaslab_mem_limit tunable.
2657 */
2658 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) ==
2659 msp->ms_group->mg_class) {
2660 metaslab_potentially_evict(msp->ms_group->mg_class);
2661 }
2662
2663 int error = metaslab_load_impl(msp);
2664
2665 ASSERT(MUTEX_HELD(&msp->ms_lock));
2666 msp->ms_loading = B_FALSE;
2667 cv_broadcast(&msp->ms_load_cv);
2668
2669 return (error);
2670 }
2671
2672 void
2673 metaslab_unload(metaslab_t *msp)
2674 {
2675 ASSERT(MUTEX_HELD(&msp->ms_lock));
2676
2677 /*
2678 * This can happen if a metaslab is selected for eviction (in
2679 * metaslab_potentially_evict) and then unloaded during spa_sync (via
2680 * metaslab_class_evict_old).
2681 */
2682 if (!msp->ms_loaded)
2683 return;
2684
2685 zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL);
2686 msp->ms_loaded = B_FALSE;
2687 msp->ms_unload_time = gethrtime();
2688
2689 msp->ms_activation_weight = 0;
2690 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
2691
2692 if (msp->ms_group != NULL) {
2693 metaslab_class_t *mc = msp->ms_group->mg_class;
2694 multilist_sublist_t *mls =
2695 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2696 if (multilist_link_active(&msp->ms_class_txg_node))
2697 multilist_sublist_remove(mls, msp);
2698 multilist_sublist_unlock(mls);
2699
2700 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2701 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, "
2702 "ms_id %llu, weight %llx, "
2703 "selected txg %llu (%llu s ago), alloc_txg %llu, "
2704 "loaded %llu ms ago, max_size %llu",
2705 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa),
2706 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
2707 (u_longlong_t)msp->ms_id,
2708 (u_longlong_t)msp->ms_weight,
2709 (u_longlong_t)msp->ms_selected_txg,
2710 (u_longlong_t)(NSEC2SEC(msp->ms_unload_time) -
2711 msp->ms_selected_time),
2712 (u_longlong_t)msp->ms_alloc_txg,
2713 (u_longlong_t)(msp->ms_unload_time -
2714 msp->ms_load_time) / 1000 / 1000,
2715 (u_longlong_t)msp->ms_max_size);
2716 }
2717
2718 /*
2719 * We explicitly recalculate the metaslab's weight based on its space
2720 * map (as it is now not loaded). We want unloaded metaslabs to always
2721 * have their weights calculated from the space map histograms, while
2722 * loaded ones have it calculated from their in-core range tree
2723 * [see metaslab_load()]. This way, the weight reflects the information
2724 * available in-core, whether it is loaded or not.
2725 *
2726 * If ms_group == NULL, it means that we came here from metaslab_fini(),
2727 * at which point it doesn't make sense for us to do the recalculation
2728 * and the sorting.
2729 */
2730 if (msp->ms_group != NULL)
2731 metaslab_recalculate_weight_and_sort(msp);
2732 }
2733
2734 /*
2735 * We want to optimize the memory use of the per-metaslab range
2736 * trees. To do this, we store the segments in the range trees in
2737 * units of sectors, zero-indexed from the start of the metaslab. If
2738 * vdev_ms_shift - vdev_ashift is less than 32, we can store
2739 * the ranges using two uint32_ts, rather than two uint64_ts.
2740 */
2741 zfs_range_seg_type_t
2742 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp,
2743 uint64_t *start, uint64_t *shift)
2744 {
2745 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 &&
2746 !zfs_metaslab_force_large_segs) {
2747 *shift = vdev->vdev_ashift;
2748 *start = msp->ms_start;
2749 return (ZFS_RANGE_SEG32);
2750 } else {
2751 *shift = 0;
2752 *start = 0;
2753 return (ZFS_RANGE_SEG64);
2754 }
2755 }
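
/*
 * Example (vdev geometry hypothetical): with vdev_ms_shift = 29 (512M
 * metaslabs) and vdev_ashift = 9, in-metaslab offsets span
 * 2^(29 - 9) = 2^20 sectors, which fits comfortably in 32 bits, so
 * ZFS_RANGE_SEG32 is chosen and each segment costs two uint32_ts
 * instead of two uint64_ts.
 */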
2756
2757 void
2758 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg)
2759 {
2760 ASSERT(MUTEX_HELD(&msp->ms_lock));
2761 metaslab_class_t *mc = msp->ms_group->mg_class;
2762 multilist_sublist_t *mls =
2763 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
2764 if (multilist_link_active(&msp->ms_class_txg_node))
2765 multilist_sublist_remove(mls, msp);
2766 msp->ms_selected_txg = txg;
2767 msp->ms_selected_time = gethrestime_sec();
2768 multilist_sublist_insert_tail(mls, msp);
2769 multilist_sublist_unlock(mls);
2770 }
2771
2772 void
2773 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
2774 int64_t defer_delta, int64_t space_delta)
2775 {
2776 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
2777
2778 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
2779 ASSERT(vd->vdev_ms_count != 0);
2780
2781 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
2782 vdev_deflated_space(vd, space_delta));
2783 }
2784
2785 int
2786 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object,
2787 uint64_t txg, metaslab_t **msp)
2788 {
2789 vdev_t *vd = mg->mg_vd;
2790 spa_t *spa = vd->vdev_spa;
2791 objset_t *mos = spa->spa_meta_objset;
2792 metaslab_t *ms;
2793 int error;
2794
2795 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
2796 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
2797 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
2798 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
2799 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL);
2800 multilist_link_init(&ms->ms_class_txg_node);
2801
2802 ms->ms_id = id;
2803 ms->ms_start = id << vd->vdev_ms_shift;
2804 ms->ms_size = 1ULL << vd->vdev_ms_shift;
2805 ms->ms_allocator = -1;
2806 ms->ms_new = B_TRUE;
2807
2808 vdev_ops_t *ops = vd->vdev_ops;
2809 if (ops->vdev_op_metaslab_init != NULL)
2810 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size);
2811
2812 /*
2813 * We only open space map objects that already exist. All others
2814 * will be opened when we finally allocate an object for it. For
2815 * readonly pools there is no need to open the space map object.
2816 *
2817 * Note:
2818 * When called from vdev_expand(), we can't call into the DMU as
2819 * we are holding the spa_config_lock as a writer and we would
2820 * deadlock [see relevant comment in vdev_metaslab_init()]. In
2821 * that case, the object parameter is zero though, so we won't
2822 * call into the DMU.
2823 */
2824 if (object != 0 && !(spa->spa_mode == SPA_MODE_READ &&
2825 !spa->spa_read_spacemaps)) {
2826 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
2827 ms->ms_size, vd->vdev_ashift);
2828
2829 if (error != 0) {
2830 kmem_free(ms, sizeof (metaslab_t));
2831 return (error);
2832 }
2833
2834 ASSERT(ms->ms_sm != NULL);
2835 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
2836 }
2837
2838 uint64_t shift, start;
2839 zfs_range_seg_type_t type =
2840 metaslab_calculate_range_tree_type(vd, ms, &start, &shift);
2841
2842 ms->ms_allocatable = zfs_range_tree_create(NULL, type, NULL, start,
2843 shift);
2844 for (int t = 0; t < TXG_SIZE; t++) {
2845 ms->ms_allocating[t] = zfs_range_tree_create(NULL, type,
2846 NULL, start, shift);
2847 }
2848 ms->ms_freeing = zfs_range_tree_create(NULL, type, NULL, start, shift);
2849 ms->ms_freed = zfs_range_tree_create(NULL, type, NULL, start, shift);
2850 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2851 ms->ms_defer[t] = zfs_range_tree_create(NULL, type, NULL,
2852 start, shift);
2853 }
2854 ms->ms_checkpointing =
2855 zfs_range_tree_create(NULL, type, NULL, start, shift);
2856 ms->ms_unflushed_allocs =
2857 zfs_range_tree_create(NULL, type, NULL, start, shift);
2858
2859 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP);
2860 mrap->mra_bt = &ms->ms_unflushed_frees_by_size;
2861 mrap->mra_floor_shift = metaslab_by_size_min_shift;
2862 ms->ms_unflushed_frees = zfs_range_tree_create(&metaslab_rt_ops,
2863 type, mrap, start, shift);
2864
2865 ms->ms_trim = zfs_range_tree_create(NULL, type, NULL, start, shift);
2866
2867 metaslab_group_add(mg, ms);
2868 metaslab_set_fragmentation(ms, B_FALSE);
2869
2870 /*
2871 * If we're opening an existing pool (txg == 0) or creating
2872 * a new one (txg == TXG_INITIAL), all space is available now.
2873 * If we're adding space to an existing pool, the new space
2874 * does not become available until after this txg has synced.
2875 * The metaslab's weight will also be initialized when we sync
2876 * out this txg. This ensures that we don't attempt to allocate
2877 * from it before we have initialized it completely.
2878 */
2879 if (txg <= TXG_INITIAL) {
2880 metaslab_sync_done(ms, 0);
2881 metaslab_space_update(vd, mg->mg_class,
2882 metaslab_allocated_space(ms), 0, 0);
2883 }
2884
2885 if (txg != 0) {
2886 vdev_dirty(vd, 0, NULL, txg);
2887 vdev_dirty(vd, VDD_METASLAB, ms, txg);
2888 }
2889
2890 *msp = ms;
2891
2892 return (0);
2893 }
2894
2895 static void
2896 metaslab_fini_flush_data(metaslab_t *msp)
2897 {
2898 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2899
2900 if (metaslab_unflushed_txg(msp) == 0) {
2901 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL),
2902 ==, NULL);
2903 return;
2904 }
2905 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
2906
2907 mutex_enter(&spa->spa_flushed_ms_lock);
2908 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
2909 mutex_exit(&spa->spa_flushed_ms_lock);
2910
2911 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp));
2912 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp),
2913 metaslab_unflushed_dirty(msp));
2914 }
2915
2916 uint64_t
2917 metaslab_unflushed_changes_memused(metaslab_t *ms)
2918 {
2919 return ((zfs_range_tree_numsegs(ms->ms_unflushed_allocs) +
2920 zfs_range_tree_numsegs(ms->ms_unflushed_frees)) *
2921 ms->ms_unflushed_allocs->rt_root.bt_elem_size);
2922 }
2923
2924 void
2925 metaslab_fini(metaslab_t *msp)
2926 {
2927 metaslab_group_t *mg = msp->ms_group;
2928 vdev_t *vd = mg->mg_vd;
2929 spa_t *spa = vd->vdev_spa;
2930
2931 metaslab_fini_flush_data(msp);
2932
2933 metaslab_group_remove(mg, msp);
2934
2935 mutex_enter(&msp->ms_lock);
2936 VERIFY(msp->ms_group == NULL);
2937
2938 /*
2939 * If this metaslab hasn't been through metaslab_sync_done() yet its
2940 * space hasn't been accounted for in its vdev and doesn't need to be
2941 * subtracted.
2942 */
2943 if (!msp->ms_new) {
2944 metaslab_space_update(vd, mg->mg_class,
2945 -metaslab_allocated_space(msp), 0, -msp->ms_size);
2946
2947 }
2948 space_map_close(msp->ms_sm);
2949 msp->ms_sm = NULL;
2950
2951 metaslab_unload(msp);
2952
2953 zfs_range_tree_destroy(msp->ms_allocatable);
2954 zfs_range_tree_destroy(msp->ms_freeing);
2955 zfs_range_tree_destroy(msp->ms_freed);
2956
2957 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
2958 metaslab_unflushed_changes_memused(msp));
2959 spa->spa_unflushed_stats.sus_memused -=
2960 metaslab_unflushed_changes_memused(msp);
2961 zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
2962 zfs_range_tree_destroy(msp->ms_unflushed_allocs);
2963 zfs_range_tree_destroy(msp->ms_checkpointing);
2964 zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
2965 zfs_range_tree_destroy(msp->ms_unflushed_frees);
2966
2967 for (int t = 0; t < TXG_SIZE; t++) {
2968 zfs_range_tree_destroy(msp->ms_allocating[t]);
2969 }
2970 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2971 zfs_range_tree_destroy(msp->ms_defer[t]);
2972 }
2973 ASSERT0(msp->ms_deferspace);
2974
2975 for (int t = 0; t < TXG_SIZE; t++)
2976 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t));
2977
2978 zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
2979 zfs_range_tree_destroy(msp->ms_trim);
2980
2981 mutex_exit(&msp->ms_lock);
2982 cv_destroy(&msp->ms_load_cv);
2983 cv_destroy(&msp->ms_flush_cv);
2984 mutex_destroy(&msp->ms_lock);
2985 mutex_destroy(&msp->ms_sync_lock);
2986 ASSERT3U(msp->ms_allocator, ==, -1);
2987
2988 kmem_free(msp, sizeof (metaslab_t));
2989 }
2990
2991 /*
2992 * This table defines a segment size based fragmentation metric that will
2993 * allow each metaslab to derive its own fragmentation value. This is done
2994 * by calculating the space in each bucket of the spacemap histogram and
2995 * multiplying that by the fragmentation metric in this table. Doing
2996 * this for all buckets and dividing it by the total amount of free
2997 * space in this metaslab (i.e. the total free space in all buckets) gives
2998 * us the fragmentation metric. This means that a high fragmentation metric
2999 * equates to most of the free space being comprised of small segments.
3000 * Conversely, if the metric is low, then most of the free space is in
3001 * large segments.
3002 *
3003 * This table defines 0% fragmented space using 512M segments. Using this value,
3004 * we derive the rest of the table. This table originally went up to 16MB, but
3005 * with larger recordsizes, larger ashifts, and use of raidz3, it is possible
3006 * to have significantly larger allocations than were previously possible.
3007 * Since the fragmentation value is never stored on disk, it is possible to
3008 * change these calculations in the future.
3009 */
3010 static const int zfs_frag_table[] = {
3011 100, /* 512B */
3012 99, /* 1K */
3013 97, /* 2K */
3014 93, /* 4K */
3015 88, /* 8K */
3016 83, /* 16K */
3017 77, /* 32K */
3018 71, /* 64K */
3019 64, /* 128K */
3020 57, /* 256K */
3021 50, /* 512K */
3022 43, /* 1M */
3023 36, /* 2M */
3024 29, /* 4M */
3025 23, /* 8M */
3026 17, /* 16M */
3027 12, /* 32M */
3028 7, /* 64M */
3029 3, /* 128M */
3030 1, /* 256M */
3031 0, /* 512M */
3032 };
3033 #define FRAGMENTATION_TABLE_SIZE \
3034 (sizeof (zfs_frag_table)/(sizeof (zfs_frag_table[0])))
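
/*
 * Worked example of the metric (histogram hypothetical): a metaslab
 * with 1G of free space in 8K segments (factor 88) and 1G in 1M
 * segments (factor 43) scores (1G * 88 + 1G * 43) / 2G = 65. A
 * metaslab whose free space sits entirely in segments of 512M or
 * larger scores 0.
 */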
3035
3036 /*
3037 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
3038 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
3039 * been upgraded and does not support this metric. Otherwise, the computed
3040 * value will be in the range [0, 100].
3041 */
3042 static void
3043 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
3044 {
3045 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3046 uint64_t fragmentation = 0;
3047 uint64_t total = 0;
3048 boolean_t feature_enabled = spa_feature_is_enabled(spa,
3049 SPA_FEATURE_SPACEMAP_HISTOGRAM);
3050
3051 if (!feature_enabled) {
3052 msp->ms_fragmentation = ZFS_FRAG_INVALID;
3053 return;
3054 }
3055
3056 /*
3057 * A null space map means that the entire metaslab is free
3058 * and thus is not fragmented.
3059 */
3060 if (msp->ms_sm == NULL) {
3061 msp->ms_fragmentation = 0;
3062 return;
3063 }
3064
3065 /*
3066 * If this metaslab's space map has not been upgraded, flag it
3067 * so that we upgrade next time we encounter it.
3068 */
3069 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
3070 uint64_t txg = spa_syncing_txg(spa);
3071 vdev_t *vd = msp->ms_group->mg_vd;
3072
3073 /*
3074 * If we've reached the final dirty txg, then we must
3075 * be shutting down the pool. We don't want to dirty
3076 * any data past this point so skip setting the condense
3077 * flag. We can retry this action the next time the pool
3078 * is imported. We also skip marking this metaslab for
3079 * condensing if the caller has explicitly set nodirty.
3080 */
3081 if (!nodirty &&
3082 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
3083 msp->ms_condense_wanted = B_TRUE;
3084 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
3085 zfs_dbgmsg("txg %llu, requesting force condense: "
3086 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg,
3087 (u_longlong_t)msp->ms_id,
3088 (u_longlong_t)vd->vdev_id);
3089 }
3090 msp->ms_fragmentation = ZFS_FRAG_INVALID;
3091 return;
3092 }
3093
3094 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3095 uint64_t space = 0;
3096 uint8_t shift = msp->ms_sm->sm_shift;
3097
3098 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
3099 FRAGMENTATION_TABLE_SIZE - 1);
3100
3101 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
3102 continue;
3103
3104 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
3105 total += space;
3106
3107 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
3108 fragmentation += space * zfs_frag_table[idx];
3109 }
3110
3111 if (total > 0)
3112 fragmentation /= total;
3113 ASSERT3U(fragmentation, <=, 100);
3114
3115 msp->ms_fragmentation = fragmentation;
3116 }
3117
3118 /*
3119 * Compute a weight -- a selection preference value -- for the given metaslab.
3120 * This is based on the amount of free space, the level of fragmentation,
3121 * the LBA range, and whether the metaslab is loaded.
3122 */
3123 static uint64_t
3124 metaslab_space_weight(metaslab_t *msp)
3125 {
3126 metaslab_group_t *mg = msp->ms_group;
3127 vdev_t *vd = mg->mg_vd;
3128 uint64_t weight, space;
3129
3130 ASSERT(MUTEX_HELD(&msp->ms_lock));
3131
3132 /*
3133 * The baseline weight is the metaslab's free space.
3134 */
3135 space = msp->ms_size - metaslab_allocated_space(msp);
3136
3137 if (metaslab_fragmentation_factor_enabled &&
3138 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
3139 /*
3140 * Use the fragmentation information to inversely scale
3141 * down the baseline weight. We need to ensure that we
3142 * don't exclude this metaslab completely when it's 100%
3143 * fragmented. To avoid this we reduce the fragmented value
3144 * by 1.
3145 */
3146 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
3147
3148 /*
3149 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
3150 * this metaslab again. The fragmentation metric may have
3151 * decreased the space to something smaller than
3152 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
3153 * so that we can consume any remaining space.
3154 */
3155 if (space > 0 && space < SPA_MINBLOCKSIZE)
3156 space = SPA_MINBLOCKSIZE;
3157 }
3158 weight = space;
3159
3160 /*
3161 * Modern disks have uniform bit density and constant angular velocity.
3162 * Therefore, the outer recording zones are faster (higher bandwidth)
3163 * than the inner zones by the ratio of outer to inner track diameter,
3164 * which is typically around 2:1. We account for this by assigning
3165 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
3166 * In effect, this means that we'll select the metaslab with the most
3167 * free bandwidth rather than simply the one with the most free space.
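*
* For example (illustrative): with vdev_ms_count = 200, the metaslab
* at ms_id 0 keeps weight = 2 * space, the one at ms_id 100 gets
* about 1.5 * space, and the highest-numbered one approaches 1 * space.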
3168 */
3169 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
3170 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
3171 ASSERT(weight >= space && weight <= 2 * space);
3172 }
3173
3174 /*
3175 * If this metaslab is one we're actively using, adjust its
3176 * weight to make it preferable to any inactive metaslab so
3177 * we'll polish it off. If the fragmentation on this metaslab
3178 * has exceeded our threshold, then don't mark it active.
3179 */
3180 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
3181 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
3182 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
3183 }
3184
3185 WEIGHT_SET_SPACEBASED(weight);
3186 return (weight);
3187 }
3188
3189 /*
3190 * Return the weight of the specified metaslab, according to the segment-based
3191 * weighting algorithm. The metaslab must be loaded. This function can
3192 * be called within a sync pass since it relies only on the metaslab's
3193 * range tree which is always accurate when the metaslab is loaded.
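*
* Note (informal): while walking buckets from largest to smallest, the
* running segment count is doubled at each step, so a segment of size
* 2^(i+1) is counted as two potential segments of size 2^i. The count
* encoded for the chosen bucket is therefore a capacity estimate, not
* an exact census of equally-sized segments.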
3194 */
3195 static uint64_t
3196 metaslab_weight_from_range_tree(metaslab_t *msp)
3197 {
3198 uint64_t weight = 0;
3199 uint32_t segments = 0;
3200
3201 ASSERT(msp->ms_loaded);
3202
3203 for (int i = ZFS_RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
3204 i--) {
3205 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
3206 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3207
3208 segments <<= 1;
3209 segments += msp->ms_allocatable->rt_histogram[i];
3210
3211 /*
3212 * The range tree provides more precision than the space map
3213 * and must be downgraded so that all values fit within the
3214 * space map's histogram. This allows us to compare loaded
3215 * vs. unloaded metaslabs to determine which metaslab is
3216 * considered "best".
3217 */
3218 if (i > max_idx)
3219 continue;
3220
3221 if (segments != 0) {
3222 WEIGHT_SET_COUNT(weight, segments);
3223 WEIGHT_SET_INDEX(weight, i);
3224 WEIGHT_SET_ACTIVE(weight, 0);
3225 break;
3226 }
3227 }
3228 return (weight);
3229 }
3230
3231 /*
3232 * Calculate the weight based on the on-disk histogram. Should be applied
3233 * only to unloaded metaslabs (i.e. no incoming allocations) in order to
3234 * give results consistent with the on-disk state.
3235 */
3236 static uint64_t
3237 metaslab_weight_from_spacemap(metaslab_t *msp)
3238 {
3239 space_map_t *sm = msp->ms_sm;
3240 ASSERT(!msp->ms_loaded);
3241 ASSERT(sm != NULL);
3242 ASSERT3U(space_map_object(sm), !=, 0);
3243 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3244
3245 /*
3246 * Create a joint histogram from all the segments that have made
3247 * it to the metaslab's space map histogram, that are not yet
3248 * available for allocation because they are still in the freeing
3249 * pipeline (e.g. freeing, freed, and defer trees). Then subtract
3250 * these segments from the space map's histogram to get a more
3251 * accurate weight.
3252 */
3253 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0};
3254 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
3255 deferspace_histogram[i] += msp->ms_synchist[i];
3256 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3257 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
3258 deferspace_histogram[i] += msp->ms_deferhist[t][i];
3259 }
3260 }
3261
3262 uint64_t weight = 0;
3263 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
3264 ASSERT3U(sm->sm_phys->smp_histogram[i], >=,
3265 deferspace_histogram[i]);
3266 uint64_t count =
3267 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i];
3268 if (count != 0) {
3269 WEIGHT_SET_COUNT(weight, count);
3270 WEIGHT_SET_INDEX(weight, i + sm->sm_shift);
3271 WEIGHT_SET_ACTIVE(weight, 0);
3272 break;
3273 }
3274 }
3275 return (weight);
3276 }
3277
3278 /*
3279 * Compute a segment-based weight for the specified metaslab. The weight
3280 * is determined by the highest bucket in the histogram. The information
3281 * for the highest bucket is encoded into the weight value.
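*
* Illustrative example (numbers assumed): if the largest free segments
* fall in the 2^20 (1MB) bucket and there are 3 of them, the weight
* encodes index = 20 and count = 3, so, other things being equal, this
* metaslab sorts ahead of any metaslab whose largest bucket is below 2^20.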
3282 */
3283 static uint64_t
3284 metaslab_segment_weight(metaslab_t *msp)
3285 {
3286 metaslab_group_t *mg = msp->ms_group;
3287 uint64_t weight = 0;
3288 uint8_t shift = mg->mg_vd->vdev_ashift;
3289
3290 ASSERT(MUTEX_HELD(&msp->ms_lock));
3291
3292 /*
3293 * The metaslab is completely free.
3294 */
3295 if (metaslab_allocated_space(msp) == 0) {
3296 int idx = highbit64(msp->ms_size) - 1;
3297 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
3298
3299 if (idx < max_idx) {
3300 WEIGHT_SET_COUNT(weight, 1ULL);
3301 WEIGHT_SET_INDEX(weight, idx);
3302 } else {
3303 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
3304 WEIGHT_SET_INDEX(weight, max_idx);
3305 }
3306 WEIGHT_SET_ACTIVE(weight, 0);
3307 ASSERT(!WEIGHT_IS_SPACEBASED(weight));
3308 return (weight);
3309 }
3310
3311 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
3312
3313 /*
3314 * If the metaslab is fully allocated then just make the weight 0.
3315 */
3316 if (metaslab_allocated_space(msp) == msp->ms_size)
3317 return (0);
3318 /*
3319 * If the metaslab is already loaded, then use the range tree to
3320 * determine the weight. Otherwise, we rely on the space map information
3321 * to generate the weight.
3322 */
3323 if (msp->ms_loaded) {
3324 weight = metaslab_weight_from_range_tree(msp);
3325 } else {
3326 weight = metaslab_weight_from_spacemap(msp);
3327 }
3328
3329 /*
3330 * If the metaslab was active the last time we calculated its weight
3331 * then keep it active. We want to consume the entire region that
3332 * is associated with this weight.
3333 */
3334 if (msp->ms_activation_weight != 0 && weight != 0)
3335 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
3336 return (weight);
3337 }
3338
3339 /*
3340 * Determine if we should attempt to allocate from this metaslab. If the
3341 * metaslab is loaded, then we can determine if the desired allocation
3342 * can be satisfied by looking at the size of the maximum free segment
3343 * on that metaslab. Otherwise, we make our decision based on the metaslab's
3344 * weight. For segment-based weighting we can determine the maximum
3345 * allocation based on the index encoded in its value. For space-based
3346 * weights we rely on the entire weight (excluding the weight-type bit).
3347 */
3348 static boolean_t
3349 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard)
3350 {
3351 /*
3352 * This case will usually but not always get caught by the checks below;
3353 * metaslabs can be loaded by various means, including the trim and
3354 * initialize code. Once that happens, without this check they are
3355 * allocatable even before they finish their first txg sync.
3356 */
3357 if (unlikely(msp->ms_new))
3358 return (B_FALSE);
3359
3360 /*
3361 * If the metaslab is loaded, ms_max_size is definitive and we can use
3362 * the fast check. If it's not, the ms_max_size is a lower bound (once
3363 * set), and we should use the fast check as long as we're not in
3364 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec
3365 * seconds since the metaslab was unloaded.
3366 */
3367 if (msp->ms_loaded ||
3368 (msp->ms_max_size != 0 && !try_hard && gethrtime() <
3369 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec)))
3370 return (msp->ms_max_size >= asize);
3371
3372 boolean_t should_allocate;
3373 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
3374 /*
3375 * The metaslab segment weight indicates segments in the
3376 * range [2^i, 2^(i+1)), where i is the index in the weight.
3377 * Since the asize might be in the middle of the range, we
3378 * should attempt the allocation if asize < 2^(i+1).
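*
* For example (illustrative): an index of 17 advertises free
* segments in [128K, 256K), so a 160K request is worth attempting
* even though 160K > 2^17.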
3379 */
3380 should_allocate = (asize <
3381 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
3382 } else {
3383 should_allocate = (asize <=
3384 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
3385 }
3386
3387 return (should_allocate);
3388 }
3389
3390 static uint64_t
3391 metaslab_weight(metaslab_t *msp, boolean_t nodirty)
3392 {
3393 vdev_t *vd = msp->ms_group->mg_vd;
3394 spa_t *spa = vd->vdev_spa;
3395 uint64_t weight;
3396
3397 ASSERT(MUTEX_HELD(&msp->ms_lock));
3398
3399 metaslab_set_fragmentation(msp, nodirty);
3400
3401 /*
3402 * Update the maximum size. If the metaslab is loaded, this will
3403 * ensure that we get an accurate maximum size if newly freed space
3404 * has been added back into the free tree. If the metaslab is
3405 * unloaded, we check if there's a larger free segment in the
3406 * unflushed frees. This is a lower bound on the largest allocatable
3407 * segment size. Coalescing of adjacent entries may reveal larger
3408 * allocatable segments, but we aren't aware of those until loading
3409 * the space map into a range tree.
3410 */
3411 if (msp->ms_loaded) {
3412 msp->ms_max_size = metaslab_largest_allocatable(msp);
3413 } else {
3414 msp->ms_max_size = MAX(msp->ms_max_size,
3415 metaslab_largest_unflushed_free(msp));
3416 }
3417
3418 /*
3419 * Segment-based weighting requires space map histogram support.
3420 */
3421 if (zfs_metaslab_segment_weight_enabled &&
3422 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
3423 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
3424 sizeof (space_map_phys_t))) {
3425 weight = metaslab_segment_weight(msp);
3426 } else {
3427 weight = metaslab_space_weight(msp);
3428 }
3429 return (weight);
3430 }
3431
3432 void
3433 metaslab_recalculate_weight_and_sort(metaslab_t *msp)
3434 {
3435 ASSERT(MUTEX_HELD(&msp->ms_lock));
3436
3437 /* note: we preserve the mask (e.g. indication of primary, etc.) */
3438 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
3439 metaslab_group_sort(msp->ms_group, msp,
3440 metaslab_weight(msp, B_FALSE) | was_active);
3441 }
3442
3443 static int
3444 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3445 int allocator, uint64_t activation_weight)
3446 {
3447 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
3448 ASSERT(MUTEX_HELD(&msp->ms_lock));
3449
3450 /*
3451 * If we're activating for the claim code, we don't want to actually
3452 * set the metaslab up for a specific allocator.
3453 */
3454 if (activation_weight == METASLAB_WEIGHT_CLAIM) {
3455 ASSERT0(msp->ms_activation_weight);
3456 msp->ms_activation_weight = msp->ms_weight;
3457 metaslab_group_sort(mg, msp, msp->ms_weight |
3458 activation_weight);
3459 return (0);
3460 }
3461
3462 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
3463 &mga->mga_primary : &mga->mga_secondary);
3464
3465 mutex_enter(&mg->mg_lock);
3466 if (*mspp != NULL) {
3467 mutex_exit(&mg->mg_lock);
3468 return (EEXIST);
3469 }
3470
3471 *mspp = msp;
3472 ASSERT3S(msp->ms_allocator, ==, -1);
3473 msp->ms_allocator = allocator;
3474 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);
3475
3476 ASSERT0(msp->ms_activation_weight);
3477 msp->ms_activation_weight = msp->ms_weight;
3478 metaslab_group_sort_impl(mg, msp,
3479 msp->ms_weight | activation_weight);
3480 mutex_exit(&mg->mg_lock);
3481
3482 return (0);
3483 }
3484
3485 static int
3486 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
3487 {
3488 ASSERT(MUTEX_HELD(&msp->ms_lock));
3489
3490 /*
3491 * The current metaslab is already activated for us so there
3492 * is nothing to do. Being activated, though, doesn't mean
3493 * that this metaslab is activated for our allocator or with our
3494 * requested activation weight. The metaslab could have started
3495 * as an active one for our allocator but changed allocators
3496 * while we were waiting to grab its ms_lock or we stole it
3497 * [see find_valid_metaslab()]. This means that this thread
3498 * may end up passivating a metaslab that belongs to another
3499 * allocator or that was activated with a different mask.
3500 */
3501 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3502 ASSERT(msp->ms_loaded);
3503 return (0);
3504 }
3505
3506 int error = metaslab_load(msp);
3507 if (error != 0) {
3508 metaslab_group_sort(msp->ms_group, msp, 0);
3509 return (error);
3510 }
3511
3512 /*
3513 * When entering metaslab_load() we may have dropped the
3514 * ms_lock because we were loading this metaslab, or we
3515 * were waiting for another thread to load it for us. In
3516 * that scenario, we recheck the weight of the metaslab
3517 * to see if it was activated by another thread.
3518 *
3519 * If the metaslab was activated for another allocator or
3520 * it was activated with a different activation weight (e.g.
3521 * we wanted to make it a primary but it was activated as
3522 * secondary) we return error (EBUSY).
3523 *
3524 * If the metaslab was activated for the same allocator
3525 * and requested activation mask, skip activating it.
3526 */
3527 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
3528 if (msp->ms_allocator != allocator)
3529 return (EBUSY);
3530
3531 if ((msp->ms_weight & activation_weight) == 0)
3532 return (SET_ERROR(EBUSY));
3533
3534 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
3535 msp->ms_primary);
3536 return (0);
3537 }
3538
3539 /*
3540 * If the metaslab has literally 0 space, it will have weight 0. In
3541 * that case, don't bother activating it. This can happen if the
3542 * metaslab had space during find_valid_metaslab, but another thread
3543 * loaded it and used all that space while we were waiting to grab the
3544 * lock.
3545 */
3546 if (msp->ms_weight == 0) {
3547 ASSERT0(zfs_range_tree_space(msp->ms_allocatable));
3548 return (SET_ERROR(ENOSPC));
3549 }
3550
3551 if ((error = metaslab_activate_allocator(msp->ms_group, msp,
3552 allocator, activation_weight)) != 0) {
3553 return (error);
3554 }
3555
3556 ASSERT(msp->ms_loaded);
3557 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
3558
3559 return (0);
3560 }
3561
3562 static void
3563 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
3564 uint64_t weight)
3565 {
3566 ASSERT(MUTEX_HELD(&msp->ms_lock));
3567 ASSERT(msp->ms_loaded);
3568
3569 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
3570 metaslab_group_sort(mg, msp, weight);
3571 return;
3572 }
3573
3574 mutex_enter(&mg->mg_lock);
3575 ASSERT3P(msp->ms_group, ==, mg);
3576 ASSERT3S(0, <=, msp->ms_allocator);
3577 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);
3578
3579 metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
3580 if (msp->ms_primary) {
3581 ASSERT3P(mga->mga_primary, ==, msp);
3582 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
3583 mga->mga_primary = NULL;
3584 } else {
3585 ASSERT3P(mga->mga_secondary, ==, msp);
3586 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
3587 mga->mga_secondary = NULL;
3588 }
3589 msp->ms_allocator = -1;
3590 metaslab_group_sort_impl(mg, msp, weight);
3591 mutex_exit(&mg->mg_lock);
3592 }
3593
3594 static void
3595 metaslab_passivate(metaslab_t *msp, uint64_t weight)
3596 {
3597 uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;
3598
3599 /*
3600 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
3601 * this metaslab again. In that case, it had better be empty,
3602 * or we would be leaving space on the table.
3603 */
3604 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
3605 size >= SPA_MINBLOCKSIZE ||
3606 zfs_range_tree_space(msp->ms_allocatable) == 0);
3607 ASSERT0(weight & METASLAB_ACTIVE_MASK);
3608
3609 ASSERT(msp->ms_activation_weight != 0);
3610 msp->ms_activation_weight = 0;
3611 metaslab_passivate_allocator(msp->ms_group, msp, weight);
3612 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
3613 }
3614
3615 /*
3616 * Segment-based metaslabs are activated once and remain active until
3617 * we either fail an allocation attempt (similar to space-based metaslabs)
3618 * or have exhausted the free space in zfs_metaslab_switch_threshold
3619 * buckets since the metaslab was activated. This function checks to see
3620 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
3621 * metaslab and passivates it proactively. This will allow us to select a
3622 * metaslab with a larger contiguous region, if any, remaining within this
3623 * metaslab group. If we're in sync pass > 1, then we continue using this
3624 * metaslab so that we don't dirty more blocks and cause more sync passes.
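*
* For example (illustrative): a metaslab activated with segment
* index 22 while zfs_metaslab_switch_threshold is 2 is passivated
* once its largest remaining bucket drops to index 20 or below.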
3625 */
3626 static void
3627 metaslab_segment_may_passivate(metaslab_t *msp)
3628 {
3629 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3630
3631 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
3632 return;
3633
3634 /*
3635 * As long as the single largest free segment covers the majority of
3636 * the free space, don't consider the metaslab fragmented. This allows
3637 * us to fill new, unfragmented metaslabs completely before switching.
3638 */
3639 if (metaslab_largest_allocatable(msp) >
3640 zfs_range_tree_space(msp->ms_allocatable) * 15 / 16)
3641 return;
3642
3643 /*
3644 * Since we are in the middle of a sync pass, the most accurate
3645 * information that is accessible to us is the in-core range tree
3646 * histogram; calculate the new weight based on that information.
3647 */
3648 uint64_t weight = metaslab_weight_from_range_tree(msp);
3649 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
3650 int current_idx = WEIGHT_GET_INDEX(weight);
3651
3652 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
3653 metaslab_passivate(msp, weight);
3654 }
3655
3656 static void
3657 metaslab_preload(void *arg)
3658 {
3659 metaslab_t *msp = arg;
3660 metaslab_class_t *mc = msp->ms_group->mg_class;
3661 spa_t *spa = mc->mc_spa;
3662 fstrans_cookie_t cookie = spl_fstrans_mark();
3663
3664 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
3665
3666 mutex_enter(&msp->ms_lock);
3667 (void) metaslab_load(msp);
3668 metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
3669 mutex_exit(&msp->ms_lock);
3670 spl_fstrans_unmark(cookie);
3671 }
3672
3673 static void
3674 metaslab_group_preload(metaslab_group_t *mg)
3675 {
3676 spa_t *spa = mg->mg_vd->vdev_spa;
3677 metaslab_t *msp;
3678 avl_tree_t *t = &mg->mg_metaslab_tree;
3679 int m = 0;
3680
3681 if (spa_shutting_down(spa) || !metaslab_preload_enabled)
3682 return;
3683
3684 mutex_enter(&mg->mg_lock);
3685
3686 /*
3687 * Load the next potential metaslabs
3688 */
3689 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
3690 ASSERT3P(msp->ms_group, ==, mg);
3691
3692 /*
3693 * We preload only the maximum number of metaslabs specified
3694 * by metaslab_preload_limit. If a metaslab is being forced
3695 * to condense then we preload it too. This will ensure
3696 * that force condensing happens in the next txg.
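*
* Note (from the dispatch below): the first mg_allocators metaslabs
* are queued with TQ_FRONT so that the ones most likely to be
* activated next are loaded first.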
3697 */
3698 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
3699 continue;
3700 }
3701
3702 VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
3703 msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0))
3704 != TASKQID_INVALID);
3705 }
3706 mutex_exit(&mg->mg_lock);
3707 }
3708
3709 /*
3710 * Determine if the space map's on-disk footprint is past our tolerance for
3711 * inefficiency. We would like to use the following criteria to make our
3712 * decision:
3713 *
3714 * 1. Do not condense if the size of the space map object would dramatically
3715 * increase as a result of writing out the free space range tree.
3716 *
3717 * 2. Condense if the on-disk space map representation is at least
3718 * zfs_condense_pct/100 times the size of the optimal representation
3719 * (i.e. with zfs_condense_pct = 110 and an optimal size of 1MB,
* condense once the on-disk size reaches 1.1MB).
3720 *
3721 * 3. Do not condense if the on-disk size of the space map does not actually
3722 * decrease.
3723 *
3724 * Unfortunately, we cannot compute the on-disk size of the space map in this
3725 * context because we cannot accurately compute the effects of compression, etc.
3726 * Instead, we apply the heuristic described in the block comment for
3727 * zfs_metaslab_condense_block_threshold - we only condense if the space used
3728 * is greater than a threshold number of blocks.
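*
* Illustrative numbers (assumed): with zfs_condense_pct = 200, a space
* map whose on-disk length is 4MB against an estimated optimal size of
* 1.5MB passes criterion 2 (4MB >= 3MB), but is still skipped unless it
* also exceeds the block-count threshold mentioned above.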
3729 */
3730 static boolean_t
3731 metaslab_should_condense(metaslab_t *msp)
3732 {
3733 space_map_t *sm = msp->ms_sm;
3734 vdev_t *vd = msp->ms_group->mg_vd;
3735 uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;
3736
3737 ASSERT(MUTEX_HELD(&msp->ms_lock));
3738 ASSERT(msp->ms_loaded);
3739 ASSERT(sm != NULL);
3740 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);
3741
3742 /*
3743 * We always condense metaslabs that are empty and metaslabs for
3744 * which a condense request has been made.
3745 */
3746 if (zfs_range_tree_numsegs(msp->ms_allocatable) == 0 ||
3747 msp->ms_condense_wanted)
3748 return (B_TRUE);
3749
3750 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
3751 uint64_t object_size = space_map_length(sm);
3752 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
3753 msp->ms_allocatable, SM_NO_VDEVID);
3754
3755 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
3756 object_size > zfs_metaslab_condense_block_threshold * record_size);
3757 }
3758
3759 /*
3760 * Condense the on-disk space map representation to its minimized form.
3761 * The minimized form consists of a small number of allocations followed
3762 * by the entries of the free range tree (ms_allocatable). The condensed
3763 * spacemap contains all the entries of previous TXGs (including those in
3764 * the pool-wide log spacemaps; thus this is effectively a superset of
3765 * metaslab_flush()), but this TXG's entries still need to be written.
3766 */
3767 static void
3768 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
3769 {
3770 zfs_range_tree_t *condense_tree;
3771 space_map_t *sm = msp->ms_sm;
3772 uint64_t txg = dmu_tx_get_txg(tx);
3773 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3774
3775 ASSERT(MUTEX_HELD(&msp->ms_lock));
3776 ASSERT(msp->ms_loaded);
3777 ASSERT(msp->ms_sm != NULL);
3778
3779 /*
3780 * In order to condense the space map, we need to change it so it
3781 * only describes which segments are currently allocated and free.
3782 *
3783 * All the current free space resides in the ms_allocatable, all
3784 * the ms_defer trees, and all the ms_allocating trees. We ignore
3785 * ms_freed because it is empty because we're in sync pass 1. We
3786 * ignore ms_freeing because these changes are not yet reflected
3787 * in the spacemap (they will be written later this txg).
3788 *
3789 * So to truncate the space map to represent all the entries of
3790 * previous TXGs we do the following:
3791 *
3792 * 1] We create a range tree (condense tree) that is 100% empty.
3793 * 2] We add to it all segments found in the ms_defer trees
3794 * as those segments are marked as free in the original space
3795 * map. We do the same with the ms_allocating trees for the same
3796 * reason. Adding these segments should be a relatively
3797 * inexpensive operation since we expect these trees to have a
3798 * small number of nodes.
3799 * 3] We vacate any unflushed allocs, since they are not frees we
3800 * need to add to the condense tree. Then we vacate any
3801 * unflushed frees as they should already be part of ms_allocatable.
3802 * 4] At this point, we would ideally like to add all segments
3803 * in the ms_allocatable tree to the condense tree. This way
3804 * we would write all the entries of the condense tree as the
3805 * condensed space map, which would only contain freed
3806 * segments with everything else assumed to be allocated.
3807 *
3808 * Doing so can be prohibitively expensive as ms_allocatable can
3809 * be large, and therefore computationally expensive to add to
3810 * the condense_tree. Instead we first sync out an entry marking
3811 * everything as allocated, then the condense_tree and then the
3812 * ms_allocatable, in the condensed space map. While this is not
3813 * optimal, it is typically close to optimal and more importantly
3814 * much cheaper to compute.
3815 *
3816 * 5] Finally, as both of the unflushed trees were written to our
3817 * new and condensed metaslab space map, we basically flushed
3818 * all the unflushed changes to disk, thus we call
3819 * metaslab_flush_update().
3820 */
3821 ASSERT3U(spa_sync_pass(spa), ==, 1);
3822 ASSERT(zfs_range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */
3823
3824 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
3825 "spa %s, smp size %llu, segments %llu, forcing condense=%s",
3826 (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
3827 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
3828 spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
3829 (u_longlong_t)zfs_range_tree_numsegs(msp->ms_allocatable),
3830 msp->ms_condense_wanted ? "TRUE" : "FALSE");
3831
3832 msp->ms_condense_wanted = B_FALSE;
3833
3834 zfs_range_seg_type_t type;
3835 uint64_t shift, start;
3836 type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
3837 &start, &shift);
3838
3839 condense_tree = zfs_range_tree_create(NULL, type, NULL, start, shift);
3840
3841 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
3842 zfs_range_tree_walk(msp->ms_defer[t],
3843 zfs_range_tree_add, condense_tree);
3844 }
3845
3846 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
3847 zfs_range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
3848 zfs_range_tree_add, condense_tree);
3849 }
3850
3851 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
3852 metaslab_unflushed_changes_memused(msp));
3853 spa->spa_unflushed_stats.sus_memused -=
3854 metaslab_unflushed_changes_memused(msp);
3855 zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
3856 zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
3857
3858 /*
3859 * We're about to drop the metaslab's lock thus allowing other
3860 * consumers to change its content. Set the metaslab's ms_condensing
3861 * flag to ensure that allocations on this metaslab do not occur
3862 * while we're in the middle of committing it to disk. This is only
3863 * critical for ms_allocatable as all other range trees use per-TXG
3864 * views of their content.
3865 */
3866 msp->ms_condensing = B_TRUE;
3867
3868 mutex_exit(&msp->ms_lock);
3869 uint64_t object = space_map_object(msp->ms_sm);
3870 space_map_truncate(sm,
3871 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
3872 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);
3873
3874 /*
3875 * space_map_truncate() may have reallocated the spacemap object.
3876 * If so, update the vdev_ms_array.
3877 */
3878 if (space_map_object(msp->ms_sm) != object) {
3879 object = space_map_object(msp->ms_sm);
3880 dmu_write(spa->spa_meta_objset,
3881 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
3882 msp->ms_id, sizeof (uint64_t), &object, tx);
3883 }
3884
3885 /*
3886 * Note:
3887 * When the log space map feature is enabled, each space map will
3888 * always have ALLOCS followed by FREES for each sync pass. This is
3889 * typically true even when the log space map feature is disabled,
3890 * except from the case where a metaslab goes through metaslab_sync()
3891 * and gets condensed. In that case the metaslab's space map will have
3892 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS
3893 * followed by FREES (due to space_map_write() in metaslab_sync()) for
3894 * sync pass 1.
3895 */
3896 zfs_range_tree_t *tmp_tree = zfs_range_tree_create(NULL, type, NULL,
3897 start, shift);
3898 zfs_range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
3899 space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
3900 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
3901 space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);
3902
3903 zfs_range_tree_vacate(condense_tree, NULL, NULL);
3904 zfs_range_tree_destroy(condense_tree);
3905 zfs_range_tree_vacate(tmp_tree, NULL, NULL);
3906 zfs_range_tree_destroy(tmp_tree);
3907 mutex_enter(&msp->ms_lock);
3908
3909 msp->ms_condensing = B_FALSE;
3910 metaslab_flush_update(msp, tx);
3911 }
3912
3913 static void
3914 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
3915 {
3916 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3917 ASSERT(spa_syncing_log_sm(spa) != NULL);
3918 ASSERT(msp->ms_sm != NULL);
3919 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
3920 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
3921
3922 mutex_enter(&spa->spa_flushed_ms_lock);
3923 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3924 metaslab_set_unflushed_dirty(msp, B_TRUE);
3925 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3926 mutex_exit(&spa->spa_flushed_ms_lock);
3927
3928 spa_log_sm_increment_current_mscount(spa);
3929 spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
3930 }
3931
3932 void
3933 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
3934 {
3935 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
3936 ASSERT(spa_syncing_log_sm(spa) != NULL);
3937 ASSERT(msp->ms_sm != NULL);
3938 ASSERT(metaslab_unflushed_txg(msp) != 0);
3939 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
3940 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
3941 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
3942
3943 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));
3944
3945 /* update metaslab's position in our flushing tree */
3946 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
3947 boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
3948 mutex_enter(&spa->spa_flushed_ms_lock);
3949 avl_remove(&spa->spa_metaslabs_by_flushed, msp);
3950 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
3951 metaslab_set_unflushed_dirty(msp, dirty);
3952 avl_add(&spa->spa_metaslabs_by_flushed, msp);
3953 mutex_exit(&spa->spa_flushed_ms_lock);
3954
3955 /* update metaslab counts of spa_log_sm_t nodes */
3956 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
3957 spa_log_sm_increment_current_mscount(spa);
3958
3959 /* update log space map summary */
3960 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
3961 ms_prev_flushed_dirty);
3962 spa_log_summary_add_flushed_metaslab(spa, dirty);
3963
3964 /* cleanup obsolete logs if any */
3965 spa_cleanup_old_sm_logs(spa, tx);
3966 }
3967
3968 /*
3969 * Called when the metaslab has been flushed (its own spacemap now reflects
3970 * all the contents of the pool-wide spacemap log). Updates the metaslab's
3971 * metadata and any pool-wide related log space map data (e.g. summary,
3972 * obsolete logs, etc..) to reflect that.
3973 */
3974 static void
3975 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
3976 {
3977 metaslab_group_t *mg = msp->ms_group;
3978 spa_t *spa = mg->mg_vd->vdev_spa;
3979
3980 ASSERT(MUTEX_HELD(&msp->ms_lock));
3981
3982 ASSERT3U(spa_sync_pass(spa), ==, 1);
3983
3984 /*
3985 * Just because a metaslab got flushed, that doesn't mean that
3986 * it will pass through metaslab_sync_done(). Thus, make sure to
3987 * update ms_synced_length here in case it doesn't.
3988 */
3989 msp->ms_synced_length = space_map_length(msp->ms_sm);
3990
3991 /*
3992 * We may end up here from metaslab_condense() without the
3993 * feature being active. In that case this is a no-op.
3994 */
3995 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
3996 metaslab_unflushed_txg(msp) == 0)
3997 return;
3998
3999 metaslab_unflushed_bump(msp, tx, B_FALSE);
4000 }
4001
4002 boolean_t
4003 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
4004 {
4005 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
4006
4007 ASSERT(MUTEX_HELD(&msp->ms_lock));
4008 ASSERT3U(spa_sync_pass(spa), ==, 1);
4009 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4010
4011 ASSERT(msp->ms_sm != NULL);
4012 ASSERT(metaslab_unflushed_txg(msp) != 0);
4013 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);
4014
4015 /*
4016 * There is nothing wrong with flushing the same metaslab twice, as
4017 * this codepath should work in that case. However, the current
4018 * flushing scheme makes sure to avoid this situation as we would be
4019 * making all these calls without having anything meaningful to write
4020 * to disk. We assert this behavior here.
4021 */
4022 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));
4023
4024 /*
4025 * We cannot flush while loading, because then we would
4026 * not load the ms_unflushed_{allocs,frees}.
4027 */
4028 if (msp->ms_loading)
4029 return (B_FALSE);
4030
4031 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4032 metaslab_verify_weight_and_frag(msp);
4033
4034 /*
4035 * Metaslab condensing is effectively flushing. Therefore if the
4036 * metaslab can be condensed we can just condense it instead of
4037 * flushing it.
4038 *
4039 * Note that metaslab_condense() does call metaslab_flush_update()
4040 * so we can just return immediately after condensing. We also
4041 * don't need to care about setting ms_flushing or broadcasting
4042 * ms_flush_cv, even if we temporarily drop the ms_lock in
4043 * metaslab_condense(), as the metaslab is already loaded.
4044 */
4045 if (msp->ms_loaded && metaslab_should_condense(msp)) {
4046 metaslab_group_t *mg = msp->ms_group;
4047
4048 /*
4049 * For all histogram operations below refer to the
4050 * comments of metaslab_sync() where we follow a
4051 * similar procedure.
4052 */
4053 metaslab_group_histogram_verify(mg);
4054 metaslab_class_histogram_verify(mg->mg_class);
4055 metaslab_group_histogram_remove(mg, msp);
4056
4057 metaslab_condense(msp, tx);
4058
4059 space_map_histogram_clear(msp->ms_sm);
4060 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4061 ASSERT(zfs_range_tree_is_empty(msp->ms_freed));
4062 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4063 space_map_histogram_add(msp->ms_sm,
4064 msp->ms_defer[t], tx);
4065 }
4066 metaslab_aux_histograms_update(msp);
4067
4068 metaslab_group_histogram_add(mg, msp);
4069 metaslab_group_histogram_verify(mg);
4070 metaslab_class_histogram_verify(mg->mg_class);
4071
4072 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4073
4074 /*
4075 * Since we recreated the histogram (and potentially
4076 * the ms_sm too while condensing) ensure that the
4077 * weight is updated too because we are not guaranteed
4078 * that this metaslab is dirty and will go through
4079 * metaslab_sync_done().
4080 */
4081 metaslab_recalculate_weight_and_sort(msp);
4082 return (B_TRUE);
4083 }
4084
4085 msp->ms_flushing = B_TRUE;
4086 uint64_t sm_len_before = space_map_length(msp->ms_sm);
4087
4088 mutex_exit(&msp->ms_lock);
4089 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
4090 SM_NO_VDEVID, tx);
4091 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
4092 SM_NO_VDEVID, tx);
4093 mutex_enter(&msp->ms_lock);
4094
4095 uint64_t sm_len_after = space_map_length(msp->ms_sm);
4096 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
4097 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
4098 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
4099 "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
4100 spa_name(spa),
4101 (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
4102 (u_longlong_t)msp->ms_id,
4103 (u_longlong_t)zfs_range_tree_space(
4104 msp->ms_unflushed_allocs),
4105 (u_longlong_t)zfs_range_tree_space(
4106 msp->ms_unflushed_frees),
4107 (u_longlong_t)(sm_len_after - sm_len_before));
4108 }
4109
4110 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4111 metaslab_unflushed_changes_memused(msp));
4112 spa->spa_unflushed_stats.sus_memused -=
4113 metaslab_unflushed_changes_memused(msp);
4114 zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
4115 zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);
4116
4117 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4118 metaslab_verify_weight_and_frag(msp);
4119
4120 metaslab_flush_update(msp, tx);
4121
4122 metaslab_verify_space(msp, dmu_tx_get_txg(tx));
4123 metaslab_verify_weight_and_frag(msp);
4124
4125 msp->ms_flushing = B_FALSE;
4126 cv_broadcast(&msp->ms_flush_cv);
4127 return (B_TRUE);
4128 }
4129
4130 /*
4131 * Write a metaslab to disk in the context of the specified transaction group.
4132 */
4133 void
4134 metaslab_sync(metaslab_t *msp, uint64_t txg)
4135 {
4136 metaslab_group_t *mg = msp->ms_group;
4137 vdev_t *vd = mg->mg_vd;
4138 spa_t *spa = vd->vdev_spa;
4139 objset_t *mos = spa_meta_objset(spa);
4140 zfs_range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
4141 dmu_tx_t *tx;
4142
4143 ASSERT(!vd->vdev_ishole);
4144
4145 /*
4146 * This metaslab has just been added so there's no work to do now.
4147 */
4148 if (msp->ms_new) {
4149 ASSERT0(zfs_range_tree_space(alloctree));
4150 ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4151 ASSERT0(zfs_range_tree_space(msp->ms_freed));
4152 ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4153 ASSERT0(zfs_range_tree_space(msp->ms_trim));
4154 return;
4155 }
4156
4157 /*
4158 * Normally, we don't want to process a metaslab if there are no
4159 * allocations or frees to perform. However, if the metaslab is being
4160 * forced to condense, it's loaded and we're not beyond the final
4161 * dirty txg, we need to let it through. Not condensing beyond the
4162 * final dirty txg prevents an issue where metaslabs that need to be
4163 * condensed but were loaded for other reasons could cause a panic
4164 * here. By only checking the txg in that branch of the conditional,
4165 * we preserve the utility of the VERIFY statements in all other
4166 * cases.
4167 */
4168 if (zfs_range_tree_is_empty(alloctree) &&
4169 zfs_range_tree_is_empty(msp->ms_freeing) &&
4170 zfs_range_tree_is_empty(msp->ms_checkpointing) &&
4171 !(msp->ms_loaded && msp->ms_condense_wanted &&
4172 txg <= spa_final_dirty_txg(spa)))
4173 return;
4174
4175
4176 VERIFY3U(txg, <=, spa_final_dirty_txg(spa));
4177
4178 /*
4179 * The only state that can actually be changing concurrently
4180 * with metaslab_sync() is the metaslab's ms_allocatable. No
4181 * other thread can be modifying this txg's alloc, freeing,
4182 * freed, or space_map_phys_t. We drop ms_lock whenever we
4183 * could call into the DMU, because the DMU can call down to
4184 * us (e.g. via zio_free()) at any time.
4185 *
4186 * The spa_vdev_remove_thread() can be reading metaslab state
4187 * concurrently, and it is locked out by the ms_sync_lock.
4188 * Note that the ms_lock is insufficient for this, because it
4189 * is dropped by space_map_write().
4190 */
4191 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
4192
4193 /*
4194 * Generate a log space map if one doesn't exist already.
4195 */
4196 spa_generate_syncing_log_sm(spa, tx);
4197
4198 if (msp->ms_sm == NULL) {
4199 uint64_t new_object = space_map_alloc(mos,
4200 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
4201 zfs_metaslab_sm_blksz_with_log :
4202 zfs_metaslab_sm_blksz_no_log, tx);
4203 VERIFY3U(new_object, !=, 0);
4204
4205 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
4206 msp->ms_id, sizeof (uint64_t), &new_object, tx);
4207
4208 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
4209 msp->ms_start, msp->ms_size, vd->vdev_ashift));
4210 ASSERT(msp->ms_sm != NULL);
4211
4212 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
4213 ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
4214 ASSERT0(metaslab_allocated_space(msp));
4215 }
4216
4217 if (!zfs_range_tree_is_empty(msp->ms_checkpointing) &&
4218 vd->vdev_checkpoint_sm == NULL) {
4219 ASSERT(spa_has_checkpoint(spa));
4220
4221 uint64_t new_object = space_map_alloc(mos,
4222 zfs_vdev_standard_sm_blksz, tx);
4223 VERIFY3U(new_object, !=, 0);
4224
4225 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
4226 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
4227 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4228
4229 /*
4230 * We save the space map object as an entry in vdev_top_zap
4231 * so it can be retrieved when the pool is reopened after an
4232 * export or through zdb.
4233 */
4234 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
4235 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
4236 sizeof (new_object), 1, &new_object, tx));
4237 }
4238
4239 mutex_enter(&msp->ms_sync_lock);
4240 mutex_enter(&msp->ms_lock);
4241
4242 /*
4243 * Note: metaslab_condense() clears the space map's histogram.
4244 * Therefore we must verify and remove this histogram before
4245 * condensing.
4246 */
4247 metaslab_group_histogram_verify(mg);
4248 metaslab_class_histogram_verify(mg->mg_class);
4249 metaslab_group_histogram_remove(mg, msp);
4250
4251 if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
4252 metaslab_should_condense(msp))
4253 metaslab_condense(msp, tx);
4254
4255 /*
4256 * We'll be going to disk to sync our space accounting, thus we
4257 * drop the ms_lock during that time so allocations coming from
4258 * open-context (ZIL) for future TXGs do not block.
4259 */
4260 mutex_exit(&msp->ms_lock);
4261 space_map_t *log_sm = spa_syncing_log_sm(spa);
4262 if (log_sm != NULL) {
4263 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4264 if (metaslab_unflushed_txg(msp) == 0)
4265 metaslab_unflushed_add(msp, tx);
4266 else if (!metaslab_unflushed_dirty(msp))
4267 metaslab_unflushed_bump(msp, tx, B_TRUE);
4268
4269 space_map_write(log_sm, alloctree, SM_ALLOC,
4270 vd->vdev_id, tx);
4271 space_map_write(log_sm, msp->ms_freeing, SM_FREE,
4272 vd->vdev_id, tx);
4273 mutex_enter(&msp->ms_lock);
4274
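/*
 * Fold this txg's changes into the unflushed trees (a sketch of the
 * intent): each new alloc cancels any overlapping pending unflushed
 * free and the remainder is recorded as an unflushed alloc, and
 * symmetrically for frees, so only the net changes that have not yet
 * reached the metaslab's own space map stay tracked in memory.
 */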
4275 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
4276 metaslab_unflushed_changes_memused(msp));
4277 spa->spa_unflushed_stats.sus_memused -=
4278 metaslab_unflushed_changes_memused(msp);
4279 zfs_range_tree_remove_xor_add(alloctree,
4280 msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
4281 zfs_range_tree_remove_xor_add(msp->ms_freeing,
4282 msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
4283 spa->spa_unflushed_stats.sus_memused +=
4284 metaslab_unflushed_changes_memused(msp);
4285 } else {
4286 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
4287
4288 space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
4289 SM_NO_VDEVID, tx);
4290 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
4291 SM_NO_VDEVID, tx);
4292 mutex_enter(&msp->ms_lock);
4293 }
4294
4295 msp->ms_allocated_space += zfs_range_tree_space(alloctree);
4296 ASSERT3U(msp->ms_allocated_space, >=,
4297 zfs_range_tree_space(msp->ms_freeing));
4298 msp->ms_allocated_space -= zfs_range_tree_space(msp->ms_freeing);
4299
4300 if (!zfs_range_tree_is_empty(msp->ms_checkpointing)) {
4301 ASSERT(spa_has_checkpoint(spa));
4302 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);
4303
4304 /*
4305 * Since we are doing writes to disk and the ms_checkpointing
4306 * tree won't be changing during that time, we drop the
4307 * ms_lock while writing to the checkpoint space map, for the
4308 * same reason mentioned above.
4309 */
4310 mutex_exit(&msp->ms_lock);
4311 space_map_write(vd->vdev_checkpoint_sm,
4312 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
4313 mutex_enter(&msp->ms_lock);
4314
4315 spa->spa_checkpoint_info.sci_dspace +=
4316 zfs_range_tree_space(msp->ms_checkpointing);
4317 vd->vdev_stat.vs_checkpoint_space +=
4318 zfs_range_tree_space(msp->ms_checkpointing);
4319 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
4320 -space_map_allocated(vd->vdev_checkpoint_sm));
4321
4322 zfs_range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
4323 }
4324
4325 if (msp->ms_loaded) {
4326 /*
4327 * When the space map is loaded, we have an accurate
4328 * histogram in the range tree. This gives us an opportunity
4329 * to bring the space map's histogram up-to-date so we clear
4330 * it first before updating it.
4331 */
4332 space_map_histogram_clear(msp->ms_sm);
4333 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
4334
4335 /*
4336 * Since we've cleared the histogram we need to add back
4337 * any free space that has already been processed, plus
4338 * any deferred space. This allows the on-disk histogram
4339 * to accurately reflect all free space even if some space
4340 * is not yet available for allocation (i.e. deferred).
4341 */
4342 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);
4343
4344 /*
4345 * Add back any deferred free space that has not been
4346 * added back into the in-core free tree yet. This will
4347 * ensure that we don't end up with a space map histogram
4348 * that is completely empty unless the metaslab is fully
4349 * allocated.
4350 */
4351 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
4352 space_map_histogram_add(msp->ms_sm,
4353 msp->ms_defer[t], tx);
4354 }
4355 }
4356
4357 /*
4358 * Always add the free space from this sync pass to the space
4359 * map histogram. We want to make sure that the on-disk histogram
4360 * accounts for all free space. If the space map is not loaded,
4361 * then we will lose some accuracy but will correct it the next
4362 * time we load the space map.
4363 */
4364 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
4365 metaslab_aux_histograms_update(msp);
4366
4367 metaslab_group_histogram_add(mg, msp);
4368 metaslab_group_histogram_verify(mg);
4369 metaslab_class_histogram_verify(mg->mg_class);
4370
4371 /*
4372 * For sync pass 1, we avoid traversing this txg's free range tree
4373 * and instead will just swap the pointers for freeing and freed.
4374 * We can safely do this since the freed_tree is guaranteed to be
4375 * empty on the initial pass.
4376 *
4377 * Keep in mind that even if we are currently using a log spacemap
4378 * we want current frees to end up in the ms_allocatable (but not
4379 * get appended to the ms_sm) so their ranges can be reused as usual.
4380 */
4381 if (spa_sync_pass(spa) == 1) {
4382 zfs_range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
4383 ASSERT0(msp->ms_allocated_this_txg);
4384 } else {
4385 zfs_range_tree_vacate(msp->ms_freeing,
4386 zfs_range_tree_add, msp->ms_freed);
4387 }
4388 msp->ms_allocated_this_txg += zfs_range_tree_space(alloctree);
4389 zfs_range_tree_vacate(alloctree, NULL, NULL);
4390
4391 ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4392 ASSERT0(zfs_range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
4393 & TXG_MASK]));
4394 ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4395 ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4396
4397 mutex_exit(&msp->ms_lock);
4398
4399 /*
4400 * Verify that the space map object ID has been recorded in the
4401 * vdev_ms_array.
4402 */
4403 uint64_t object;
4404 VERIFY0(dmu_read(mos, vd->vdev_ms_array,
4405 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
4406 VERIFY3U(object, ==, space_map_object(msp->ms_sm));
4407
4408 mutex_exit(&msp->ms_sync_lock);
4409 dmu_tx_commit(tx);
4410 }
4411
4412 static void
4413 metaslab_evict(metaslab_t *msp, uint64_t txg)
4414 {
4415 if (!msp->ms_loaded || msp->ms_disabled != 0)
4416 return;
4417
4418 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
4419 VERIFY0(zfs_range_tree_space(
4420 msp->ms_allocating[(txg + t) & TXG_MASK]));
4421 }
4422 if (msp->ms_allocator != -1)
4423 metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);
4424
4425 if (!metaslab_debug_unload)
4426 metaslab_unload(msp);
4427 }
4428
4429 /*
4430 * Called after a transaction group has completely synced to mark
4431 * all of the metaslab's free space as usable.
4432 */
4433 void
4434 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
4435 {
4436 metaslab_group_t *mg = msp->ms_group;
4437 vdev_t *vd = mg->mg_vd;
4438 spa_t *spa = vd->vdev_spa;
4439 zfs_range_tree_t **defer_tree;
4440 int64_t alloc_delta, defer_delta;
4441 boolean_t defer_allowed = B_TRUE;
4442
4443 ASSERT(!vd->vdev_ishole);
4444
4445 mutex_enter(&msp->ms_lock);
4446
4447 if (msp->ms_new) {
4448 /* this is a new metaslab, add its capacity to the vdev */
4449 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);
4450
4451 /* there should be no allocations nor frees at this point */
4452 VERIFY0(msp->ms_allocated_this_txg);
4453 VERIFY0(zfs_range_tree_space(msp->ms_freed));
4454 }
4455
4456 ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4457 ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4458
4459 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];
4460
4461 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
4462 metaslab_class_get_alloc(spa_normal_class(spa));
4463 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
4464 vd->vdev_rz_expanding) {
4465 defer_allowed = B_FALSE;
4466 }
4467
4468 defer_delta = 0;
4469 alloc_delta = msp->ms_allocated_this_txg -
4470 zfs_range_tree_space(msp->ms_freed);
4471
4472 if (defer_allowed) {
4473 defer_delta = zfs_range_tree_space(msp->ms_freed) -
4474 zfs_range_tree_space(*defer_tree);
4475 } else {
4476 defer_delta -= zfs_range_tree_space(*defer_tree);
4477 }
4478 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
4479 defer_delta, 0);
4480
4481 if (spa_syncing_log_sm(spa) == NULL) {
4482 /*
4483 * If there's a metaslab_load() in progress and we don't have
4484 * a log space map, it means that we probably wrote to the
4485 * metaslab's space map. If this is the case, we need to
4486 * make sure that we wait for the load to complete so that we
4487 * have a consistent view at the in-core side of the metaslab.
4488 */
4489 metaslab_load_wait(msp);
4490 } else {
4491 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
4492 }
4493
4494 /*
4495 * When auto-trimming is enabled, free ranges which are added to
4496 * ms_allocatable are also added to ms_trim. The ms_trim tree is
4497 * periodically consumed by the vdev_autotrim_thread() which issues
4498 * trims for all ranges and then vacates the tree. The ms_trim tree
4499 * can be discarded at any time with the sole consequence of recent
4500 * frees not being trimmed.
4501 */
4502 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
4503 zfs_range_tree_walk(*defer_tree, zfs_range_tree_add,
4504 msp->ms_trim);
4505 if (!defer_allowed) {
4506 zfs_range_tree_walk(msp->ms_freed, zfs_range_tree_add,
4507 msp->ms_trim);
4508 }
4509 } else {
4510 zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
4511 }
4512
4513 /*
4514 * Move the frees from the defer_tree back to the free
4515 * range tree (if it's loaded). Swap the freed_tree and
4516 * the defer_tree -- this is safe to do because we've
4517 * just emptied out the defer_tree.
4518 */
4519 zfs_range_tree_vacate(*defer_tree,
4520 msp->ms_loaded ? zfs_range_tree_add : NULL, msp->ms_allocatable);
4521 if (defer_allowed) {
4522 zfs_range_tree_swap(&msp->ms_freed, defer_tree);
4523 } else {
4524 zfs_range_tree_vacate(msp->ms_freed,
4525 msp->ms_loaded ? zfs_range_tree_add : NULL,
4526 msp->ms_allocatable);
4527 }
4528
4529 msp->ms_synced_length = space_map_length(msp->ms_sm);
4530
4531 msp->ms_deferspace += defer_delta;
4532 ASSERT3S(msp->ms_deferspace, >=, 0);
4533 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
4534 if (msp->ms_deferspace != 0) {
4535 /*
4536 * Keep syncing this metaslab until all deferred frees
4537 * are back in circulation.
4538 */
4539 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
4540 }
4541 metaslab_aux_histograms_update_done(msp, defer_allowed);
4542
4543 if (msp->ms_new) {
4544 msp->ms_new = B_FALSE;
4545 mutex_enter(&mg->mg_lock);
4546 mg->mg_ms_ready++;
4547 mutex_exit(&mg->mg_lock);
4548 }
4549
4550 /*
4551 * Re-sort metaslab within its group now that we've adjusted
4552 * its allocatable space.
4553 */
4554 metaslab_recalculate_weight_and_sort(msp);
4555
4556 ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
4557 ASSERT0(zfs_range_tree_space(msp->ms_freeing));
4558 ASSERT0(zfs_range_tree_space(msp->ms_freed));
4559 ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
4560 msp->ms_allocating_total -= msp->ms_allocated_this_txg;
4561 msp->ms_allocated_this_txg = 0;
4562 mutex_exit(&msp->ms_lock);
4563 }
4564
4565 void
4566 metaslab_sync_reassess(metaslab_group_t *mg)
4567 {
4568 spa_t *spa = mg->mg_class->mc_spa;
4569
4570 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
4571 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
4572 metaslab_group_alloc_update(mg);
4573
4574 /*
4575 * Preload the next potential metaslabs but only on active
4576 * metaslab groups. We can get into a state where the metaslab
4577 * is no longer active since we dirty metaslabs as we remove
4578 * a device, thus potentially making the metaslab group eligible
4579 * for preloading.
4580 */
4581 if (mg->mg_activation_count > 0) {
4582 metaslab_group_preload(mg);
4583 }
4584 spa_config_exit(spa, SCL_ALLOC, FTAG);
4585 }
4586
4587 /*
4588 * When writing a ditto block (i.e. more than one DVA for a given BP) on
4589 * the same vdev as an existing DVA of this BP, then try to allocate it
4590 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
4591 */
4592 static boolean_t
4593 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
4594 {
4595 uint64_t dva_ms_id;
4596
4597 if (DVA_GET_ASIZE(dva) == 0)
4598 return (B_TRUE);
4599
4600 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
4601 return (B_TRUE);
4602
4603 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
4604
4605 return (msp->ms_id != dva_ms_id);
4606 }
4607
4608 /*
4609 * ==========================================================================
4610 * Metaslab allocation tracing facility
4611 * ==========================================================================
4612 */
4613
4614 /*
4615 * Add an allocation trace element to the allocation tracing list.
4616 */
4617 static void
4618 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
4619 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
4620 int allocator)
4621 {
4622 metaslab_alloc_trace_t *mat;
4623
4624 if (!metaslab_trace_enabled)
4625 return;
4626
4627 /*
4628 * When the tracing list reaches its maximum we remove
4629 * the second element in the list before adding a new one.
4630 * By removing the second element we preserve the original
4631 * entry as a clue to what allocation steps have already been
4632 * performed.
4633 */
4634 if (zal->zal_size == metaslab_trace_max_entries) {
4635 metaslab_alloc_trace_t *mat_next;
4636 #ifdef ZFS_DEBUG
4637 panic("too many entries in allocation list");
4638 #endif
4639 METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
4640 zal->zal_size--;
4641 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
4642 list_remove(&zal->zal_list, mat_next);
4643 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
4644 }
4645
4646 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
4647 list_link_init(&mat->mat_list_node);
4648 mat->mat_mg = mg;
4649 mat->mat_msp = msp;
4650 mat->mat_size = psize;
4651 mat->mat_dva_id = dva_id;
4652 mat->mat_offset = offset;
4653 mat->mat_weight = 0;
4654 mat->mat_allocator = allocator;
4655
4656 if (msp != NULL)
4657 mat->mat_weight = msp->ms_weight;
4658
4659 /*
4660 * The list is part of the zio so locking is not required. Only
4661 * a single thread will perform allocations for a given zio.
4662 */
4663 list_insert_tail(&zal->zal_list, mat);
4664 zal->zal_size++;
4665
4666 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
4667 }
4668
4669 void
4670 metaslab_trace_init(zio_alloc_list_t *zal)
4671 {
4672 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
4673 offsetof(metaslab_alloc_trace_t, mat_list_node));
4674 zal->zal_size = 0;
4675 }
4676
4677 void
4678 metaslab_trace_fini(zio_alloc_list_t *zal)
4679 {
4680 metaslab_alloc_trace_t *mat;
4681
4682 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
4683 kmem_cache_free(metaslab_alloc_trace_cache, mat);
4684 list_destroy(&zal->zal_list);
4685 zal->zal_size = 0;
4686 }
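
/*
 * Illustrative sketch (not part of the build): the expected lifecycle of a
 * zio_alloc_list_t. The list lives in the zio and a single thread performs
 * allocations for it, so no locking is needed: init once, append an entry
 * per allocation attempt, tear down when the zio completes. The values
 * passed to metaslab_trace_add() below are hypothetical.
 */
#if 0
zio_alloc_list_t zal;

metaslab_trace_init(&zal);
metaslab_trace_add(&zal, mg, msp, SPA_MINBLOCKSIZE, 0,
    TRACE_TOO_SMALL, 0);
metaslab_trace_fini(&zal);
#endif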
4687
4688 /*
4689 * ==========================================================================
4690 * Metaslab block operations
4691 * ==========================================================================
4692 */
4693
4694 static void
4695 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, int allocator,
4696 int flags, uint64_t psize, const void *tag)
4697 {
4698 if (!(flags & METASLAB_ASYNC_ALLOC))
4699 return;
4700
4701 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4702 if (!mg->mg_class->mc_alloc_throttle_enabled)
4703 return;
4704
4705 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4706 (void) zfs_refcount_add_many(&mga->mga_queue_depth, psize, tag);
4707 }
4708
4709 void
4710 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, int allocator,
4711 int flags, uint64_t psize, const void *tag)
4712 {
4713 if (!(flags & METASLAB_ASYNC_ALLOC))
4714 return;
4715
4716 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
4717 if (!mg->mg_class->mc_alloc_throttle_enabled)
4718 return;
4719
4720 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4721 (void) zfs_refcount_remove_many(&mga->mga_queue_depth, psize, tag);
4722 }
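
/*
 * Illustrative sketch (not part of the build): the increment and decrement
 * above must pair up around an async allocation so that mga_queue_depth
 * tracks the bytes still in flight per allocator. The variables used here
 * (vdev_id, allocator, psize, zio) are hypothetical.
 */
#if 0
metaslab_group_alloc_increment(spa, vdev_id, allocator,
    METASLAB_ASYNC_ALLOC, psize, zio);
/* ... the allocation is issued and later completes ... */
metaslab_group_alloc_decrement(spa, vdev_id, allocator,
    METASLAB_ASYNC_ALLOC, psize, zio);
#endif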
4723
4724 static uint64_t
4725 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
4726 {
4727 uint64_t start;
4728 zfs_range_tree_t *rt = msp->ms_allocatable;
4729 metaslab_class_t *mc = msp->ms_group->mg_class;
4730
4731 ASSERT(MUTEX_HELD(&msp->ms_lock));
4732 VERIFY(!msp->ms_condensing);
4733 VERIFY0(msp->ms_disabled);
4734 VERIFY0(msp->ms_new);
4735
4736 start = mc->mc_ops->msop_alloc(msp, size);
4737 if (start != -1ULL) {
4738 metaslab_group_t *mg = msp->ms_group;
4739 vdev_t *vd = mg->mg_vd;
4740
4741 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
4742 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
4743 VERIFY3U(zfs_range_tree_space(rt) - size, <=, msp->ms_size);
4744 zfs_range_tree_remove(rt, start, size);
4745 zfs_range_tree_clear(msp->ms_trim, start, size);
4746
4747 if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
4748 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
4749
4750 zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], start,
4751 size);
4752 msp->ms_allocating_total += size;
4753
4754 /* Track the last successful allocation */
4755 msp->ms_alloc_txg = txg;
4756 metaslab_verify_space(msp, txg);
4757 }
4758
4759 /*
4760 * Now that we've attempted the allocation we need to update the
4761 * metaslab's maximum block size since it may have changed.
4762 */
4763 msp->ms_max_size = metaslab_largest_allocatable(msp);
4764 return (start);
4765 }
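
/*
 * A note on the (txg & TXG_MASK) indexing above: ms_allocating is a ring
 * of TXG_SIZE range trees, one per in-flight txg. For example, with
 * TXG_SIZE == 4:
 *
 *	txg 100 -> ms_allocating[0]
 *	txg 101 -> ms_allocating[1]
 *	txg 102 -> ms_allocating[2]
 *	txg 103 -> ms_allocating[3]
 *	txg 104 -> ms_allocating[0]	(slot reusable once txg 100 synced)
 */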
4766
4767 /*
4768 * Find the metaslab with the highest weight that is less than what we've
4769 * already tried. In the common case, this means that we will examine each
4770 * metaslab at most once. Note that concurrent callers could reorder metaslabs
4771 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
4772 * activated by another thread, and we fail to allocate from the metaslab we
4773 * have selected, we may not try the newly-activated metaslab, and instead
4774 * activate another metaslab. This is not optimal, but generally does not cause
4775 * any problems (a possible exception being if every metaslab is completely full
4776 * except for the newly-activated metaslab which we fail to examine).
4777 */
4778 static metaslab_t *
4779 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
4780 dva_t *dva, int d, uint64_t asize, int allocator,
4781 boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
4782 boolean_t *was_active)
4783 {
4784 avl_index_t idx;
4785 avl_tree_t *t = &mg->mg_metaslab_tree;
4786 metaslab_t *msp = avl_find(t, search, &idx);
4787 if (msp == NULL)
4788 msp = avl_nearest(t, idx, AVL_AFTER);
4789
4790 uint_t tries = 0;
4791 for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
4792 int i;
4793
4794 if (!try_hard && tries > zfs_metaslab_find_max_tries) {
4795 METASLABSTAT_BUMP(metaslabstat_too_many_tries);
4796 return (NULL);
4797 }
4798 tries++;
4799
4800 if (!metaslab_should_allocate(msp, asize, try_hard)) {
4801 metaslab_trace_add(zal, mg, msp, asize, d,
4802 TRACE_TOO_SMALL, allocator);
4803 continue;
4804 }
4805
4806 /*
4807 * If the selected metaslab is condensing or disabled, or
4808 * hasn't gone through a metaslab_sync_done(), then skip it.
4809 */
4810 if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
4811 continue;
4812
4813 *was_active = msp->ms_allocator != -1;
4814 /*
4815 * If we're activating as primary, this is our first allocation
4816 * from this disk, so we don't need to check how close we are.
4817 * If the metaslab under consideration was already active,
4818 * we're getting desperate enough to steal another allocator's
4819 * metaslab, so we still don't care about distances.
4820 */
4821 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
4822 break;
4823
4824 if (!try_hard) {
4825 for (i = 0; i < d; i++) {
4826 if (!metaslab_is_unique(msp, &dva[i]))
4827 break; /* try another metaslab */
4828 }
4829 if (i == d)
4830 break;
4831 }
4832 }
4833
4834 if (msp != NULL) {
4835 search->ms_weight = msp->ms_weight;
4836 search->ms_start = msp->ms_start + 1;
4837 search->ms_allocator = msp->ms_allocator;
4838 search->ms_primary = msp->ms_primary;
4839 }
4840 return (msp);
4841 }
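
/*
 * Illustrative sketch (not part of the build): the dummy "search" node
 * trick used above. After examining a metaslab, the caller copies its sort
 * fields into the search key and bumps ms_start by one so that the next
 * avl_find() resumes strictly after the last metaslab examined instead of
 * rescanning from the head of the tree.
 */
#if 0
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;	/* sorts strictly after msp */
msp = avl_find(&mg->mg_metaslab_tree, search, &idx);
if (msp == NULL)
	msp = avl_nearest(&mg->mg_metaslab_tree, idx, AVL_AFTER);
#endif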
4842
4843 static void
4844 metaslab_active_mask_verify(metaslab_t *msp)
4845 {
4846 ASSERT(MUTEX_HELD(&msp->ms_lock));
4847
4848 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
4849 return;
4850
4851 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
4852 return;
4853
4854 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
4855 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4856 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4857 VERIFY3S(msp->ms_allocator, !=, -1);
4858 VERIFY(msp->ms_primary);
4859 return;
4860 }
4861
4862 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
4863 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4864 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
4865 VERIFY3S(msp->ms_allocator, !=, -1);
4866 VERIFY(!msp->ms_primary);
4867 return;
4868 }
4869
4870 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
4871 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
4872 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
4873 VERIFY3S(msp->ms_allocator, ==, -1);
4874 return;
4875 }
4876 }
4877
4878 static uint64_t
4879 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
4880 uint64_t asize, uint64_t txg, dva_t *dva, int d, int allocator,
4881 boolean_t try_hard)
4882 {
4883 metaslab_t *msp = NULL;
4884 uint64_t offset = -1ULL;
4885
4886 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
4887 for (int i = 0; i < d; i++) {
4888 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4889 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4890 activation_weight = METASLAB_WEIGHT_SECONDARY;
4891 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4892 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
4893 activation_weight = METASLAB_WEIGHT_CLAIM;
4894 break;
4895 }
4896 }
4897
4898 /*
4899 * If we don't have enough metaslabs active to fill the entire array, we
4900 * just use the 0th slot.
4901 */
4902 if (mg->mg_ms_ready < mg->mg_allocators * 3)
4903 allocator = 0;
4904 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
4905
4906 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);
4907
4908 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
4909 search->ms_weight = UINT64_MAX;
4910 search->ms_start = 0;
4911 /*
4912 * At the end of the metaslab tree are the already-active metaslabs,
4913 * first the primaries, then the secondaries. When we resume searching
4914 * through the tree, we need to consider ms_allocator and ms_primary so
4915 * we start in the location right after where we left off, and don't
4916 * accidentally loop forever considering the same metaslabs.
4917 */
4918 search->ms_allocator = -1;
4919 search->ms_primary = B_TRUE;
4920 for (;;) {
4921 boolean_t was_active = B_FALSE;
4922
4923 mutex_enter(&mg->mg_lock);
4924
4925 if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
4926 mga->mga_primary != NULL) {
4927 msp = mga->mga_primary;
4928
4929 /*
4930 * Even though we don't hold the ms_lock for the
4931 * primary metaslab, those fields should not
4932 * change while we hold the mg_lock. Thus it is
4933 * safe to make assertions on them.
4934 */
4935 ASSERT(msp->ms_primary);
4936 ASSERT3S(msp->ms_allocator, ==, allocator);
4937 ASSERT(msp->ms_loaded);
4938
4939 was_active = B_TRUE;
4940 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4941 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
4942 mga->mga_secondary != NULL) {
4943 msp = mga->mga_secondary;
4944
4945 /*
4946 * See comment above about the similar assertions
4947 * for the primary metaslab.
4948 */
4949 ASSERT(!msp->ms_primary);
4950 ASSERT3S(msp->ms_allocator, ==, allocator);
4951 ASSERT(msp->ms_loaded);
4952
4953 was_active = B_TRUE;
4954 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
4955 } else {
4956 msp = find_valid_metaslab(mg, activation_weight, dva, d,
4957 asize, allocator, try_hard, zal, search,
4958 &was_active);
4959 }
4960
4961 mutex_exit(&mg->mg_lock);
4962 if (msp == NULL)
4963 break;
4964 mutex_enter(&msp->ms_lock);
4965
4966 metaslab_active_mask_verify(msp);
4967
4968 /*
4969 * This code is disabled because of issues with
4970 * tracepoints in non-GPL kernel modules.
4971 */
4972 #if 0
4973 DTRACE_PROBE3(ms__activation__attempt,
4974 metaslab_t *, msp, uint64_t, activation_weight,
4975 boolean_t, was_active);
4976 #endif
4977
4978 /*
4979 * Ensure that the metaslab we have selected is still
4980 * capable of handling our request. It's possible that
4981 * another thread may have changed the weight while we
4982 * were blocked on the metaslab lock. We check the
4983 * active status first to see if we need to select
4984 * a new metaslab.
4985 */
4986 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
4987 ASSERT3S(msp->ms_allocator, ==, -1);
4988 mutex_exit(&msp->ms_lock);
4989 continue;
4990 }
4991
4992 /*
4993 * If the metaslab was activated for another allocator
4994 * while we were waiting in the ms_lock above, or it's
4995 * a primary and we're seeking a secondary (or vice versa),
4996 * we go back and select a new metaslab.
4997 */
4998 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) &&
4999 (msp->ms_allocator != -1) &&
5000 (msp->ms_allocator != allocator || ((activation_weight ==
5001 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) {
5002 ASSERT(msp->ms_loaded);
5003 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) ||
5004 msp->ms_allocator != -1);
5005 mutex_exit(&msp->ms_lock);
5006 continue;
5007 }
5008
5009 /*
5010 * This metaslab was used for claiming regions allocated
5011 * by the ZIL during pool import. Once these regions are
5012 * claimed we don't need to keep the CLAIM bit set
5013 * anymore. Passivate this metaslab to zero its activation
5014 * mask.
5015 */
5016 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM &&
5017 activation_weight != METASLAB_WEIGHT_CLAIM) {
5018 ASSERT(msp->ms_loaded);
5019 ASSERT3S(msp->ms_allocator, ==, -1);
5020 metaslab_passivate(msp, msp->ms_weight &
5021 ~METASLAB_WEIGHT_CLAIM);
5022 mutex_exit(&msp->ms_lock);
5023 continue;
5024 }
5025
5026 metaslab_set_selected_txg(msp, txg);
5027
5028 int activation_error =
5029 metaslab_activate(msp, allocator, activation_weight);
5030 metaslab_active_mask_verify(msp);
5031
5032 /*
5033 * If the metaslab was activated by another thread for
5034 * another allocator or activation_weight (EBUSY), or it
5035 * failed because another metaslab was assigned as primary
5036 * for this allocator (EEXIST) we continue using this
5037 * metaslab for our allocation, rather than going on to a
5038 * worse metaslab (we waited for that metaslab to be loaded
5039 * after all).
5040 *
5041 * If the activation failed due to an I/O error or ENOSPC we
5042 * skip to the next metaslab.
5043 */
5044 boolean_t activated;
5045 if (activation_error == 0) {
5046 activated = B_TRUE;
5047 } else if (activation_error == EBUSY ||
5048 activation_error == EEXIST) {
5049 activated = B_FALSE;
5050 } else {
5051 mutex_exit(&msp->ms_lock);
5052 continue;
5053 }
5054 ASSERT(msp->ms_loaded);
5055
5056 /*
5057 * Now that we have the lock, recheck to see if we should
5058 * continue to use this metaslab for this allocation. The
5059 * metaslab is now loaded so metaslab_should_allocate()
5060 * can accurately determine if the allocation attempt should
5061 * proceed.
5062 */
5063 if (!metaslab_should_allocate(msp, asize, try_hard)) {
5064 /* Passivate this metaslab and select a new one. */
5065 metaslab_trace_add(zal, mg, msp, asize, d,
5066 TRACE_TOO_SMALL, allocator);
5067 goto next;
5068 }
5069
5070 /*
5071 * If this metaslab is currently condensing then pick again
5072 * as we can't manipulate this metaslab until it's committed
5073 * to disk. If this metaslab is being initialized, we shouldn't
5074 * allocate from it since the allocated region might be
5075 * overwritten after allocation.
5076 */
5077 if (msp->ms_condensing) {
5078 metaslab_trace_add(zal, mg, msp, asize, d,
5079 TRACE_CONDENSING, allocator);
5080 if (activated) {
5081 metaslab_passivate(msp, msp->ms_weight &
5082 ~METASLAB_ACTIVE_MASK);
5083 }
5084 mutex_exit(&msp->ms_lock);
5085 continue;
5086 } else if (msp->ms_disabled > 0) {
5087 metaslab_trace_add(zal, mg, msp, asize, d,
5088 TRACE_DISABLED, allocator);
5089 if (activated) {
5090 metaslab_passivate(msp, msp->ms_weight &
5091 ~METASLAB_ACTIVE_MASK);
5092 }
5093 mutex_exit(&msp->ms_lock);
5094 continue;
5095 }
5096
5097 offset = metaslab_block_alloc(msp, asize, txg);
5098 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator);
5099
5100 if (offset != -1ULL) {
5101 /* Proactively passivate the metaslab, if needed */
5102 if (activated)
5103 metaslab_segment_may_passivate(msp);
5104 mutex_exit(&msp->ms_lock);
5105 break;
5106 }
5107 next:
5108 ASSERT(msp->ms_loaded);
5109
5110 /*
5111 * This code is disabled because of issues with
5112 * tracepoints in non-GPL kernel modules.
5113 */
5114 #if 0
5115 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp,
5116 uint64_t, asize);
5117 #endif
5118
5119 /*
5120 * We were unable to allocate from this metaslab so determine
5121 * a new weight for this metaslab. Now that we have loaded
5122 * the metaslab we can provide a better hint to the metaslab
5123 * selector.
5124 *
5125 * For space-based metaslabs, we use the maximum block size.
5126 * This information is only available when the metaslab
5127 * is loaded and is more accurate than the generic free
5128 * space weight that was calculated by metaslab_weight().
5129 * This information allows us to quickly compare the maximum
5130 * available allocation in the metaslab to the allocation
5131 * size being requested.
5132 *
5133 * For segment-based metaslabs, determine the new weight
5134 * based on the highest bucket in the range tree. We
5135 * explicitly use the loaded segment weight (i.e. the range
5136 * tree histogram) since it contains the space that is
5137 * currently available for allocation and is accurate
5138 * even within a sync pass.
5139 */
5140 uint64_t weight;
5141 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
5142 weight = metaslab_largest_allocatable(msp);
5143 WEIGHT_SET_SPACEBASED(weight);
5144 } else {
5145 weight = metaslab_weight_from_range_tree(msp);
5146 }
5147
5148 if (activated) {
5149 metaslab_passivate(msp, weight);
5150 } else {
5151 /*
5152 * For the case where we use the metaslab that is
5153 * active for another allocator we want to make
5154 * sure that we retain the activation mask.
5155 *
5156 * Note that we could attempt to use something like
5157 * metaslab_recalculate_weight_and_sort() that
5158 * retains the activation mask here. That function
5159 * uses metaslab_weight() to set the weight though
5160 * which is not as accurate as the calculations
5161 * above.
5162 */
5163 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK;
5164 metaslab_group_sort(mg, msp, weight);
5165 }
5166 metaslab_active_mask_verify(msp);
5167
5168 /*
5169 * We have just failed an allocation attempt, check
5170 * that metaslab_should_allocate() agrees. Otherwise,
5171 * we may end up in an infinite loop retrying the same
5172 * metaslab.
5173 */
5174 ASSERT(!metaslab_should_allocate(msp, asize, try_hard));
5175
5176 mutex_exit(&msp->ms_lock);
5177 }
5178 kmem_free(search, sizeof (*search));
5179
5180 if (offset == -1ULL) {
5181 metaslab_trace_add(zal, mg, NULL, asize, d,
5182 TRACE_GROUP_FAILURE, allocator);
5183 if (asize <= vdev_get_min_alloc(mg->mg_vd)) {
5184 /*
5185 * This metaslab group was unable to allocate
5186 * the minimum block size so it must be out of
5187 * space. Notify the allocation throttle to
5188 * skip allocation attempts to this group until
5189 * more space becomes available.
5190 */
5191 mg->mg_no_free_space = B_TRUE;
5192 }
5193 }
5194 return (offset);
5195 }
5196
5197 static boolean_t
5198 metaslab_group_allocatable(spa_t *spa, metaslab_group_t *mg, uint64_t psize,
5199 int d, int flags, boolean_t try_hard, zio_alloc_list_t *zal, int allocator)
5200 {
5201 metaslab_class_t *mc = mg->mg_class;
5202 vdev_t *vd = mg->mg_vd;
5203 boolean_t allocatable;
5204
5205 /*
5206 * Don't allocate from faulted devices.
5207 */
5208 if (try_hard)
5209 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
5210 allocatable = vdev_allocatable(vd);
5211 if (try_hard)
5212 spa_config_exit(spa, SCL_ZIO, FTAG);
5213 if (!allocatable) {
5214 metaslab_trace_add(zal, mg, NULL, psize, d,
5215 TRACE_NOT_ALLOCATABLE, allocator);
5216 return (B_FALSE);
5217 }
5218
5219 if (!try_hard) {
5220 /*
5221 * Avoid vdevs with too little free space or too much fragmentation.
5222 */
5223 if (!GANG_ALLOCATION(flags) && (mg->mg_no_free_space ||
5224 (!mg->mg_allocatable && mc->mc_alloc_groups > 0))) {
5225 metaslab_trace_add(zal, mg, NULL, psize, d,
5226 TRACE_NOT_ALLOCATABLE, allocator);
5227 return (B_FALSE);
5228 }
5229
5230 /*
5231 * Avoid writing single-copy data to an unhealthy,
5232 * non-redundant vdev.
5233 */
5234 if (d == 0 && vd->vdev_state < VDEV_STATE_HEALTHY &&
5235 vd->vdev_children == 0) {
5236 metaslab_trace_add(zal, mg, NULL, psize, d,
5237 TRACE_VDEV_ERROR, allocator);
5238 return (B_FALSE);
5239 }
5240 }
5241
5242 return (B_TRUE);
5243 }
5244
5245 /*
5246 * Allocate a block for the specified i/o.
5247 */
5248 int
5249 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
5250 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
5251 zio_alloc_list_t *zal, int allocator)
5252 {
5253 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator];
5254 metaslab_group_t *mg = NULL, *rotor;
5255 vdev_t *vd;
5256 boolean_t try_hard = B_FALSE;
5257
5258 ASSERT(!DVA_IS_VALID(&dva[d]));
5259
5260 /*
5261 * For testing, make some blocks above a certain size be gang blocks.
5262 * This will result in more split blocks when using device removal,
5263 * and a large number of split blocks coupled with ztest-induced
5264 * damage can result in extremely long reconstruction times. This
5265 * will also test spilling from special to normal.
5266 */
5267 if (psize >= metaslab_force_ganging &&
5268 metaslab_force_ganging_pct > 0 &&
5269 (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) {
5270 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG,
5271 allocator);
5272 return (SET_ERROR(ENOSPC));
5273 }
5274
5275 /*
5276 * Start at the rotor and loop through all mgs until we find something.
5277 * Note that there's no locking on mca_rotor or mca_aliquot because
5278 * nothing actually breaks if we miss a few updates -- we just won't
5279 * allocate quite as evenly. It all balances out over time.
5280 *
5281 * If we are doing ditto or log blocks, try to spread them across
5282 * consecutive vdevs. If we're forced to reuse a vdev before we've
5283 * allocated all of our ditto blocks, then try and spread them out on
5284 * that vdev as much as possible. If it turns out to not be possible,
5285 * gradually lower our standards until anything becomes acceptable.
5286 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
5287 * gives us hope of containing our fault domains to something we're
5288 * able to reason about. Otherwise, any two top-level vdev failures
5289 * will guarantee the loss of data. With consecutive allocation,
5290 * only two adjacent top-level vdev failures will result in data loss.
5291 *
5292 * If we are doing gang blocks (hintdva is non-NULL), try to keep
5293 * ourselves on the same vdev as our gang block header. It makes our
5294 * fault domains something tractable.
5295 */
5296 if (hintdva && DVA_IS_VALID(&hintdva[d])) {
5297 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
5298 mg = vdev_get_mg(vd, mc);
5299 }
5300 if (mg == NULL && d != 0) {
5301 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
5302 mg = vdev_get_mg(vd, mc)->mg_next;
5303 }
5304 if (mg == NULL || mg->mg_class != mc || mg->mg_activation_count <= 0) {
5305 ASSERT(mca->mca_rotor != NULL);
5306 mg = mca->mca_rotor;
5307 }
5308
5309 rotor = mg;
5310 top:
5311 do {
5312 ASSERT(mg->mg_activation_count == 1);
5313 ASSERT(mg->mg_class == mc);
5314
5315 if (!metaslab_group_allocatable(spa, mg, psize, d, flags,
5316 try_hard, zal, allocator))
5317 goto next;
5318
5319 vd = mg->mg_vd;
5320 uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg);
5321 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
5322 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
5323 dva, d, allocator, try_hard);
5324
5325 if (offset != -1ULL) {
5326 metaslab_class_rotate(mg, allocator, psize, B_TRUE);
5327
5328 DVA_SET_VDEV(&dva[d], vd->vdev_id);
5329 DVA_SET_OFFSET(&dva[d], offset);
5330 DVA_SET_GANG(&dva[d],
5331 ((flags & METASLAB_GANG_HEADER) ? 1 : 0));
5332 DVA_SET_ASIZE(&dva[d], asize);
5333 return (0);
5334 }
5335 next:
5336 metaslab_class_rotate(mg, allocator, psize, B_FALSE);
5337 } while ((mg = mg->mg_next) != rotor);
5338
5339 /*
5340 * If we haven't tried hard, perhaps do so now.
5341 */
5342 if (!try_hard && (zfs_metaslab_try_hard_before_gang ||
5343 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 ||
5344 psize <= spa->spa_min_alloc)) {
5345 METASLABSTAT_BUMP(metaslabstat_try_hard);
5346 try_hard = B_TRUE;
5347 goto top;
5348 }
5349
5350 memset(&dva[d], 0, sizeof (dva_t));
5351
5352 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator);
5353 return (SET_ERROR(ENOSPC));
5354 }
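
/*
 * Illustrative sketch (not part of the build): the rotor traversal pattern
 * used above. Starting from an arbitrary group, walk the circular mg_next
 * list exactly once; if nothing was found, a second pass with try_hard set
 * relaxes the early-bailout checks before giving up with ENOSPC.
 */
#if 0
metaslab_group_t *rotor = mg;
do {
	/* ... attempt an allocation from mg ... */
} while ((mg = mg->mg_next) != rotor);
#endif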
5355
5356 void
5357 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
5358 boolean_t checkpoint)
5359 {
5360 metaslab_t *msp;
5361 spa_t *spa = vd->vdev_spa;
5362 int m = offset >> vd->vdev_ms_shift;
5363
5364 ASSERT(vdev_is_concrete(vd));
5365 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5366 VERIFY3U(m, <, vd->vdev_ms_count);
5367
5368 msp = vd->vdev_ms[m];
5369
5370 VERIFY(!msp->ms_condensing);
5371 VERIFY3U(offset, >=, msp->ms_start);
5372 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
5373 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5374 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
5375
5376 metaslab_check_free_impl(vd, offset, asize);
5377
5378 mutex_enter(&msp->ms_lock);
5379 if (zfs_range_tree_is_empty(msp->ms_freeing) &&
5380 zfs_range_tree_is_empty(msp->ms_checkpointing)) {
5381 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa));
5382 }
5383
5384 if (checkpoint) {
5385 ASSERT(spa_has_checkpoint(spa));
5386 zfs_range_tree_add(msp->ms_checkpointing, offset, asize);
5387 } else {
5388 zfs_range_tree_add(msp->ms_freeing, offset, asize);
5389 }
5390 mutex_exit(&msp->ms_lock);
5391 }
5392
5393 void
5394 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5395 uint64_t size, void *arg)
5396 {
5397 (void) inner_offset;
5398 boolean_t *checkpoint = arg;
5399
5400 ASSERT3P(checkpoint, !=, NULL);
5401
5402 if (vd->vdev_ops->vdev_op_remap != NULL)
5403 vdev_indirect_mark_obsolete(vd, offset, size);
5404 else
5405 metaslab_free_impl(vd, offset, size, *checkpoint);
5406 }
5407
5408 static void
5409 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
5410 boolean_t checkpoint)
5411 {
5412 spa_t *spa = vd->vdev_spa;
5413
5414 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5415
5416 if (spa_syncing_txg(spa) > spa_freeze_txg(spa))
5417 return;
5418
5419 if (spa->spa_vdev_removal != NULL &&
5420 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id &&
5421 vdev_is_concrete(vd)) {
5422 /*
5423 * Note: we check if the vdev is concrete because when
5424 * we complete the removal, we first change the vdev to be
5425 * an indirect vdev (in open context), and then (in syncing
5426 * context) clear spa_vdev_removal.
5427 */
5428 free_from_removing_vdev(vd, offset, size);
5429 } else if (vd->vdev_ops->vdev_op_remap != NULL) {
5430 vdev_indirect_mark_obsolete(vd, offset, size);
5431 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5432 metaslab_free_impl_cb, &checkpoint);
5433 } else {
5434 metaslab_free_concrete(vd, offset, size, checkpoint);
5435 }
5436 }
5437
5438 typedef struct remap_blkptr_cb_arg {
5439 blkptr_t *rbca_bp;
5440 spa_remap_cb_t rbca_cb;
5441 vdev_t *rbca_remap_vd;
5442 uint64_t rbca_remap_offset;
5443 void *rbca_cb_arg;
5444 } remap_blkptr_cb_arg_t;
5445
5446 static void
5447 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5448 uint64_t size, void *arg)
5449 {
5450 remap_blkptr_cb_arg_t *rbca = arg;
5451 blkptr_t *bp = rbca->rbca_bp;
5452
5453 /* We cannot remap split blocks. */
5454 if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
5455 return;
5456 ASSERT0(inner_offset);
5457
5458 if (rbca->rbca_cb != NULL) {
5459 /*
5460 * At this point we know that we are not handling split
5461 * blocks and we invoke the callback on the previous
5462 * vdev which must be indirect.
5463 */
5464 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
5465
5466 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
5467 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
5468
5469 /* set up remap_blkptr_cb_arg for the next call */
5470 rbca->rbca_remap_vd = vd;
5471 rbca->rbca_remap_offset = offset;
5472 }
5473
5474 /*
5475 * The phys birth time is that of dva[0]. This ensures that we know
5476 * when each dva was written, so that resilver can determine which
5477 * blocks need to be scrubbed (i.e. those written during the time
5478 * the vdev was offline). It also ensures that the key used in
5479 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
5480 * we didn't change the phys_birth, a lookup in the ARC for a
5481 * remapped BP could find the data that was previously stored at
5482 * this vdev + offset.
5483 */
5484 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
5485 DVA_GET_VDEV(&bp->blk_dva[0]));
5486 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
5487 uint64_t physical_birth = vdev_indirect_births_physbirth(vib,
5488 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
5489 BP_SET_PHYSICAL_BIRTH(bp, physical_birth);
5490
5491 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
5492 DVA_SET_OFFSET(&bp->blk_dva[0], offset);
5493 }
5494
5495 /*
5496 * If the block pointer contains any indirect DVAs, modify them to refer to
5497 * concrete DVAs. Note that this will sometimes not be possible, leaving
5498 * the indirect DVA in place. This happens if the indirect DVA spans multiple
5499 * segments in the mapping (i.e. it is a "split block").
5500 *
5501 * If the BP was remapped, calls the callback on the original dva (note the
5502 * callback can be called multiple times if the original indirect DVA refers
5503 * to another indirect DVA, etc).
5504 *
5505 * Returns TRUE if the BP was remapped.
5506 */
5507 boolean_t
5508 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
5509 {
5510 remap_blkptr_cb_arg_t rbca;
5511
5512 if (!zfs_remap_blkptr_enable)
5513 return (B_FALSE);
5514
5515 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
5516 return (B_FALSE);
5517
5518 /*
5519 * Dedup BPs cannot be remapped, because ddt_phys_select() depends
5520 * on DVA[0] being the same in the BP as in the DDT (dedup table).
5521 */
5522 if (BP_GET_DEDUP(bp))
5523 return (B_FALSE);
5524
5525 /*
5526 * Gang blocks cannot be remapped, because
5527 * zio_checksum_gang_verifier() depends on the DVA[0] that's in
5528 * the BP used to read the gang block header (GBH) being the same
5529 * as the DVA[0] that we allocated for the GBH.
5530 */
5531 if (BP_IS_GANG(bp))
5532 return (B_FALSE);
5533
5534 /*
5535 * Embedded BPs have no DVA to remap.
5536 */
5537 if (BP_GET_NDVAS(bp) < 1)
5538 return (B_FALSE);
5539
5540 /*
5541 * Cloned blocks cannot be remapped since BRT depends on the specific
5542 * vdev id and offset in the DVA[0] for its reference counting.
5543 */
5544 if (!BP_IS_METADATA(bp) && brt_maybe_exists(spa, bp))
5545 return (B_FALSE);
5546
5547 /*
5548 * Note: we only remap dva[0]. If we remapped other dvas, we
5549 * would no longer know what their phys birth txg is.
5550 */
5551 dva_t *dva = &bp->blk_dva[0];
5552
5553 uint64_t offset = DVA_GET_OFFSET(dva);
5554 uint64_t size = DVA_GET_ASIZE(dva);
5555 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
5556
5557 if (vd->vdev_ops->vdev_op_remap == NULL)
5558 return (B_FALSE);
5559
5560 rbca.rbca_bp = bp;
5561 rbca.rbca_cb = callback;
5562 rbca.rbca_remap_vd = vd;
5563 rbca.rbca_remap_offset = offset;
5564 rbca.rbca_cb_arg = arg;
5565
5566 /*
5567 * remap_blkptr_cb() will be called in order for each level of
5568 * indirection, until a concrete vdev is reached or a split block is
5569 * encountered. rbca_remap_vd and rbca_remap_offset are updated within
5570 * the callback as we go from one indirect vdev to the next (either
5571 * concrete or indirect again) in that order.
5572 */
5573 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
5574
5575 /* Check if the DVA wasn't remapped because it is a split block */
5576 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
5577 return (B_FALSE);
5578
5579 return (B_TRUE);
5580 }
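
/*
 * Illustrative sketch (not part of the build): a caller that wants to
 * learn the original (indirect) location of a remapped BP can pass a
 * callback; the callback name and body here are hypothetical.
 */
#if 0
static void
example_remap_cb(uint64_t vdev_id, uint64_t offset, uint64_t size,
    void *arg)
{
	/* e.g. account the now-obsolete mapping segment */
}

if (spa_remap_blkptr(spa, bp, example_remap_cb, NULL)) {
	/* bp->blk_dva[0] now refers to a concrete vdev */
}
#endif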
5581
5582 /*
5583 * Undo the allocation of a DVA which happened in the given transaction group.
5584 */
5585 void
5586 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5587 {
5588 metaslab_t *msp;
5589 vdev_t *vd;
5590 uint64_t vdev = DVA_GET_VDEV(dva);
5591 uint64_t offset = DVA_GET_OFFSET(dva);
5592 uint64_t size = DVA_GET_ASIZE(dva);
5593
5594 ASSERT(DVA_IS_VALID(dva));
5595 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5596
5597 if (txg > spa_freeze_txg(spa))
5598 return;
5599
5600 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
5601 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
5602 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
5603 (u_longlong_t)vdev, (u_longlong_t)offset,
5604 (u_longlong_t)size);
5605 return;
5606 }
5607
5608 ASSERT(!vd->vdev_removing);
5609 ASSERT(vdev_is_concrete(vd));
5610 ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
5611 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
5612
5613 if (DVA_GET_GANG(dva))
5614 size = vdev_gang_header_asize(vd);
5615
5616 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5617
5618 mutex_enter(&msp->ms_lock);
5619 zfs_range_tree_remove(msp->ms_allocating[txg & TXG_MASK],
5620 offset, size);
5621 msp->ms_allocating_total -= size;
5622
5623 VERIFY(!msp->ms_condensing);
5624 VERIFY3U(offset, >=, msp->ms_start);
5625 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
5626 VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) + size, <=,
5627 msp->ms_size);
5628 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5629 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5630 zfs_range_tree_add(msp->ms_allocatable, offset, size);
5631 mutex_exit(&msp->ms_lock);
5632 }
5633
5634 /*
5635 * Free the block represented by the given DVA.
5636 */
5637 void
5638 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint)
5639 {
5640 uint64_t vdev = DVA_GET_VDEV(dva);
5641 uint64_t offset = DVA_GET_OFFSET(dva);
5642 uint64_t size = DVA_GET_ASIZE(dva);
5643 vdev_t *vd = vdev_lookup_top(spa, vdev);
5644
5645 ASSERT(DVA_IS_VALID(dva));
5646 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
5647
5648 if (DVA_GET_GANG(dva)) {
5649 size = vdev_gang_header_asize(vd);
5650 }
5651
5652 metaslab_free_impl(vd, offset, size, checkpoint);
5653 }
5654
5655 /*
5656 * Reserve some allocation slots. The reservation system must be called
5657 * before we call into the allocator. If there aren't any available slots
5658 * then the I/O will be throttled until an I/O completes and its slots are
5659 * freed up. The function returns true if it was successful in placing
5660 * the reservation.
5661 */
5662 boolean_t
5663 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
5664 boolean_t must, boolean_t *more)
5665 {
5666 metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
5667
5668 ASSERT(mc->mc_alloc_throttle_enabled);
5669 if (mc->mc_alloc_io_size < zio->io_size) {
5670 mc->mc_alloc_io_size = zio->io_size;
5671 metaslab_class_balance(mc, B_FALSE);
5672 }
5673 if (must || mca->mca_reserved <= mc->mc_alloc_max) {
5674 /*
5675 * The potential race between compare and add is covered by the
5676 * allocator lock in most cases, or irrelevant when must is set.
5677 * But even if we assume some other, non-existent scenario, the
5678 * worst that can happen is that a few more I/Os reach allocation
5679 * earlier, which is not a problem.
5680 */
5681 int64_t delta = slots * zio->io_size;
5682 *more = (atomic_add_64_nv(&mca->mca_reserved, delta) <=
5683 mc->mc_alloc_max);
5684 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
5685 return (B_TRUE);
5686 }
5687 *more = B_FALSE;
5688 return (B_FALSE);
5689 }
5690
5691 boolean_t
5692 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
5693 zio_t *zio)
5694 {
5695 metaslab_class_allocator_t *mca = &mc->mc_allocator[zio->io_allocator];
5696
5697 ASSERT(mc->mc_alloc_throttle_enabled);
5698 int64_t delta = slots * zio->io_size;
5699 return (atomic_add_64_nv(&mca->mca_reserved, -delta) <=
5700 mc->mc_alloc_max);
5701 }
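
/*
 * Illustrative sketch (not part of the build): reserve and unreserve must
 * pair up for a throttled zio. mca_reserved is a byte count, so slots are
 * scaled by io_size on both sides; "more" reports whether further
 * reservations would still fit under mc_alloc_max.
 */
#if 0
boolean_t more;
if (metaslab_class_throttle_reserve(mc, slots, zio, B_FALSE, &more)) {
	/* ... perform the allocation ... */
	(void) metaslab_class_throttle_unreserve(mc, slots, zio);
}
#endif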
5702
5703 static int
5704 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
5705 uint64_t txg)
5706 {
5707 metaslab_t *msp;
5708 spa_t *spa = vd->vdev_spa;
5709 int error = 0;
5710
5711 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
5712 return (SET_ERROR(ENXIO));
5713
5714 ASSERT3P(vd->vdev_ms, !=, NULL);
5715 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
5716
5717 mutex_enter(&msp->ms_lock);
5718
5719 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) {
5720 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM);
5721 if (error == EBUSY) {
5722 ASSERT(msp->ms_loaded);
5723 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
5724 error = 0;
5725 }
5726 }
5727
5728 if (error == 0 &&
5729 !zfs_range_tree_contains(msp->ms_allocatable, offset, size))
5730 error = SET_ERROR(ENOENT);
5731
5732 if (error || txg == 0) { /* txg == 0 indicates dry run */
5733 mutex_exit(&msp->ms_lock);
5734 return (error);
5735 }
5736
5737 VERIFY(!msp->ms_condensing);
5738 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
5739 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
5740 VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) - size, <=,
5741 msp->ms_size);
5742 zfs_range_tree_remove(msp->ms_allocatable, offset, size);
5743 zfs_range_tree_clear(msp->ms_trim, offset, size);
5744
5745 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */
5746 metaslab_class_t *mc = msp->ms_group->mg_class;
5747 multilist_sublist_t *mls =
5748 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
5749 if (!multilist_link_active(&msp->ms_class_txg_node)) {
5750 msp->ms_selected_txg = txg;
5751 multilist_sublist_insert_head(mls, msp);
5752 }
5753 multilist_sublist_unlock(mls);
5754
5755 if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
5756 vdev_dirty(vd, VDD_METASLAB, msp, txg);
5757 zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK],
5758 offset, size);
5759 msp->ms_allocating_total += size;
5760 }
5761
5762 mutex_exit(&msp->ms_lock);
5763
5764 return (0);
5765 }
5766
5767 typedef struct metaslab_claim_cb_arg_t {
5768 uint64_t mcca_txg;
5769 int mcca_error;
5770 } metaslab_claim_cb_arg_t;
5771
5772 static void
5773 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
5774 uint64_t size, void *arg)
5775 {
5776 (void) inner_offset;
5777 metaslab_claim_cb_arg_t *mcca_arg = arg;
5778
5779 if (mcca_arg->mcca_error == 0) {
5780 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
5781 size, mcca_arg->mcca_txg);
5782 }
5783 }
5784
5785 int
5786 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
5787 {
5788 if (vd->vdev_ops->vdev_op_remap != NULL) {
5789 metaslab_claim_cb_arg_t arg;
5790
5791 /*
5792 * Only zdb(8) can claim on indirect vdevs. This is used
5793 * to detect leaks of mapped space (that are not accounted
5794 * for in the obsolete counts, spacemap, or bpobj).
5795 */
5796 ASSERT(!spa_writeable(vd->vdev_spa));
5797 arg.mcca_error = 0;
5798 arg.mcca_txg = txg;
5799
5800 vd->vdev_ops->vdev_op_remap(vd, offset, size,
5801 metaslab_claim_impl_cb, &arg);
5802
5803 if (arg.mcca_error == 0) {
5804 arg.mcca_error = metaslab_claim_concrete(vd,
5805 offset, size, txg);
5806 }
5807 return (arg.mcca_error);
5808 } else {
5809 return (metaslab_claim_concrete(vd, offset, size, txg));
5810 }
5811 }
5812
5813 /*
5814 * Intent log support: upon opening the pool after a crash, notify the SPA
5815 * of blocks that the intent log has allocated for immediate write, but
5816 * which are still considered free by the SPA because the last transaction
5817 * group didn't commit yet.
5818 */
5819 static int
5820 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
5821 {
5822 uint64_t vdev = DVA_GET_VDEV(dva);
5823 uint64_t offset = DVA_GET_OFFSET(dva);
5824 uint64_t size = DVA_GET_ASIZE(dva);
5825 vdev_t *vd;
5826
5827 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
5828 return (SET_ERROR(ENXIO));
5829 }
5830
5831 ASSERT(DVA_IS_VALID(dva));
5832
5833 if (DVA_GET_GANG(dva))
5834 size = vdev_gang_header_asize(vd);
5835
5836 return (metaslab_claim_impl(vd, offset, size, txg));
5837 }
5838
5839 int
5840 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
5841 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
5842 zio_alloc_list_t *zal, int allocator, const void *tag)
5843 {
5844 dva_t *dva = bp->blk_dva;
5845 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL;
5846 int error = 0;
5847
5848 ASSERT0(BP_GET_LOGICAL_BIRTH(bp));
5849 ASSERT0(BP_GET_PHYSICAL_BIRTH(bp));
5850
5851 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5852
5853 if (mc->mc_allocator[allocator].mca_rotor == NULL) {
5854 /* no vdevs in this class */
5855 spa_config_exit(spa, SCL_ALLOC, FTAG);
5856 return (SET_ERROR(ENOSPC));
5857 }
5858
5859 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
5860 ASSERT(BP_GET_NDVAS(bp) == 0);
5861 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
5862 ASSERT3P(zal, !=, NULL);
5863
5864 for (int d = 0; d < ndvas; d++) {
5865 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
5866 txg, flags, zal, allocator);
5867 if (error != 0) {
5868 for (d--; d >= 0; d--) {
5869 metaslab_unalloc_dva(spa, &dva[d], txg);
5870 metaslab_group_alloc_decrement(spa,
5871 DVA_GET_VDEV(&dva[d]), allocator, flags,
5872 psize, tag);
5873 memset(&dva[d], 0, sizeof (dva_t));
5874 }
5875 spa_config_exit(spa, SCL_ALLOC, FTAG);
5876 return (error);
5877 } else {
5878 /*
5879 * Update the metaslab group's queue depth
5880 * based on the newly allocated dva.
5881 */
5882 metaslab_group_alloc_increment(spa,
5883 DVA_GET_VDEV(&dva[d]), allocator, flags, psize,
5884 tag);
5885 }
5886 }
5887 ASSERT(error == 0);
5888 ASSERT(BP_GET_NDVAS(bp) == ndvas);
5889
5890 spa_config_exit(spa, SCL_ALLOC, FTAG);
5891
5892 BP_SET_BIRTH(bp, txg, 0);
5893
5894 return (0);
5895 }
5896
5897 void
5898 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
5899 {
5900 const dva_t *dva = bp->blk_dva;
5901 int ndvas = BP_GET_NDVAS(bp);
5902
5903 ASSERT(!BP_IS_HOLE(bp));
5904 ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa));
5905
5906 /*
5907 * If we have a checkpoint for the pool we need to make sure that
5908 * the blocks that we free that are part of the checkpoint won't be
5909 * reused until the checkpoint is discarded or we revert to it.
5910 *
5911 * The checkpoint flag is passed down the metaslab_free code path
5912 * and is set whenever we want to add a block to the checkpoint's
5913 * accounting. That is, we "checkpoint" blocks that existed at the
5914 * time the checkpoint was created and are therefore referenced by
5915 * the checkpointed uberblock.
5916 *
5917 * Note that we don't checkpoint any blocks if the current
5918 * syncing txg <= spa_checkpoint_txg. We want these frees to sync
5919 * normally as they will be referenced by the checkpointed uberblock.
5920 */
5921 boolean_t checkpoint = B_FALSE;
5922 if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg &&
5923 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) {
5924 /*
5925 * At this point, if the block is part of the checkpoint
5926 * there is no way it was created in the current txg.
5927 */
5928 ASSERT(!now);
5929 ASSERT3U(spa_syncing_txg(spa), ==, txg);
5930 checkpoint = B_TRUE;
5931 }
5932
5933 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
5934
5935 for (int d = 0; d < ndvas; d++) {
5936 if (now) {
5937 metaslab_unalloc_dva(spa, &dva[d], txg);
5938 } else {
5939 ASSERT3U(txg, ==, spa_syncing_txg(spa));
5940 metaslab_free_dva(spa, &dva[d], checkpoint);
5941 }
5942 }
5943
5944 spa_config_exit(spa, SCL_FREE, FTAG);
5945 }
5946
5947 int
5948 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
5949 {
5950 const dva_t *dva = bp->blk_dva;
5951 int ndvas = BP_GET_NDVAS(bp);
5952 int error = 0;
5953
5954 ASSERT(!BP_IS_HOLE(bp));
5955
5956 if (txg != 0) {
5957 /*
5958 * First do a dry run to make sure all DVAs are claimable,
5959 * so we don't have to unwind from partial failures below.
5960 */
5961 if ((error = metaslab_claim(spa, bp, 0)) != 0)
5962 return (error);
5963 }
5964
5965 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
5966
5967 for (int d = 0; d < ndvas; d++) {
5968 error = metaslab_claim_dva(spa, &dva[d], txg);
5969 if (error != 0)
5970 break;
5971 }
5972
5973 spa_config_exit(spa, SCL_ALLOC, FTAG);
5974
5975 ASSERT(error == 0 || txg == 0);
5976
5977 return (error);
5978 }
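
/*
 * Illustrative sketch (not part of the build): txg == 0 requests a dry
 * run, which is how the loop above avoids ever having to unwind a partial
 * claim.
 */
#if 0
if (metaslab_claim(spa, bp, 0) == 0)	/* dry run: nothing is modified */
	VERIFY0(metaslab_claim(spa, bp, txg));	/* then claim for real */
#endif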
5979
5980 static void
5981 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
5982 uint64_t size, void *arg)
5983 {
5984 (void) inner, (void) arg;
5985
5986 if (vd->vdev_ops == &vdev_indirect_ops)
5987 return;
5988
5989 metaslab_check_free_impl(vd, offset, size);
5990 }
5991
5992 static void
5993 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
5994 {
5995 metaslab_t *msp;
5996 spa_t *spa __maybe_unused = vd->vdev_spa;
5997
5998 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
5999 return;
6000
6001 if (vd->vdev_ops->vdev_op_remap != NULL) {
6002 vd->vdev_ops->vdev_op_remap(vd, offset, size,
6003 metaslab_check_free_impl_cb, NULL);
6004 return;
6005 }
6006
6007 ASSERT(vdev_is_concrete(vd));
6008 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
6009 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
6010
6011 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
6012
6013 mutex_enter(&msp->ms_lock);
6014 if (msp->ms_loaded) {
6015 zfs_range_tree_verify_not_present(msp->ms_allocatable,
6016 offset, size);
6017 }
6018
6019 /*
6020 * Check all segments that currently exist in the freeing pipeline.
6021 *
6022 * It would intuitively make sense to also check the current allocating
6023 * tree since metaslab_unalloc_dva() exists for extents that are
6024 * allocated and freed in the same sync pass within the same txg.
6025 * Unfortunately there are places (e.g. the ZIL) where we allocate a
6026 * segment but then we free part of it within the same txg
6027 * [see zil_sync()]. Thus, we don't call
6028 * zfs_range_tree_verify_not_present() on the current allocating tree.
6029 */
6030 zfs_range_tree_verify_not_present(msp->ms_freeing, offset, size);
6031 zfs_range_tree_verify_not_present(msp->ms_checkpointing, offset, size);
6032 zfs_range_tree_verify_not_present(msp->ms_freed, offset, size);
6033 for (int j = 0; j < TXG_DEFER_SIZE; j++)
6034 zfs_range_tree_verify_not_present(msp->ms_defer[j], offset,
6035 size);
6036 zfs_range_tree_verify_not_present(msp->ms_trim, offset, size);
6037 mutex_exit(&msp->ms_lock);
6038 }
6039
6040 void
6041 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
6042 {
6043 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
6044 return;
6045
6046 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
6047 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
6048 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
6049 vdev_t *vd = vdev_lookup_top(spa, vdev);
6050 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
6051 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
6052
6053 if (DVA_GET_GANG(&bp->blk_dva[i]))
6054 size = vdev_gang_header_asize(vd);
6055
6056 ASSERT3P(vd, !=, NULL);
6057
6058 metaslab_check_free_impl(vd, offset, size);
6059 }
6060 spa_config_exit(spa, SCL_VDEV, FTAG);
6061 }
6062
6063 static void
6064 metaslab_group_disable_wait(metaslab_group_t *mg)
6065 {
6066 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6067 while (mg->mg_disabled_updating) {
6068 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6069 }
6070 }
6071
6072 static void
6073 metaslab_group_disabled_increment(metaslab_group_t *mg)
6074 {
6075 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock));
6076 ASSERT(mg->mg_disabled_updating);
6077
6078 while (mg->mg_ms_disabled >= max_disabled_ms) {
6079 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock);
6080 }
6081 mg->mg_ms_disabled++;
6082 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms);
6083 }
6084
6085 /*
6086 * Mark the metaslab as disabled to prevent any allocations on this metaslab.
6087 * We must also track how many metaslabs are currently disabled within a
6088 * metaslab group and limit them to prevent allocation failures from
6089 * occurring because all metaslabs are disabled.
6090 */
6091 void
6092 metaslab_disable(metaslab_t *msp)
6093 {
6094 ASSERT(!MUTEX_HELD(&msp->ms_lock));
6095 metaslab_group_t *mg = msp->ms_group;
6096
6097 mutex_enter(&mg->mg_ms_disabled_lock);
6098
6099 /*
6100 * To keep an accurate count of how many threads have disabled
6101 * a specific metaslab group, we only allow one thread to mark
6102 * the metaslab group at a time. This ensures that the value of
6103 * ms_disabled will be accurate when we decide to mark a metaslab
6104 * group as disabled. To do this we force all other threads
6105 * to wait until the metaslab group's mg_disabled_updating flag
6106 * is no longer set.
6107 */
6108 metaslab_group_disable_wait(mg);
6109 mg->mg_disabled_updating = B_TRUE;
6110 if (msp->ms_disabled == 0) {
6111 metaslab_group_disabled_increment(mg);
6112 }
6113 mutex_enter(&msp->ms_lock);
6114 msp->ms_disabled++;
6115 mutex_exit(&msp->ms_lock);
6116
6117 mg->mg_disabled_updating = B_FALSE;
6118 cv_broadcast(&mg->mg_ms_disabled_cv);
6119 mutex_exit(&mg->mg_ms_disabled_lock);
6120 }
6121
6122 void
6123 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload)
6124 {
6125 metaslab_group_t *mg = msp->ms_group;
6126 spa_t *spa = mg->mg_vd->vdev_spa;
6127
6128 /*
6129 * Wait for the outstanding IO to be synced to prevent newly
6130 * allocated blocks from being overwritten. This is used by
6131 * initialize and TRIM, which modify unallocated space.
6132 */
6133 if (sync)
6134 txg_wait_synced(spa_get_dsl(spa), 0);
6135
6136 mutex_enter(&mg->mg_ms_disabled_lock);
6137 mutex_enter(&msp->ms_lock);
6138 if (--msp->ms_disabled == 0) {
6139 mg->mg_ms_disabled--;
6140 cv_broadcast(&mg->mg_ms_disabled_cv);
6141 if (unload)
6142 metaslab_unload(msp);
6143 }
6144 mutex_exit(&msp->ms_lock);
6145 mutex_exit(&mg->mg_ms_disabled_lock);
6146 }
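
/*
 * Illustrative sketch (not part of the build): the disable/enable pairing
 * used by initialize and TRIM. While disabled, no new allocations land in
 * the metaslab, so the caller may safely modify unallocated space.
 */
#if 0
metaslab_disable(msp);
/* ... initialize or TRIM unallocated regions ... */
metaslab_enable(msp, B_TRUE, B_FALSE);	/* wait for sync, keep loaded */
#endif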
6147
6148 void
6149 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty)
6150 {
6151 ms->ms_unflushed_dirty = dirty;
6152 }
6153
6154 static void
6155 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx)
6156 {
6157 vdev_t *vd = ms->ms_group->mg_vd;
6158 spa_t *spa = vd->vdev_spa;
6159 objset_t *mos = spa_meta_objset(spa);
6160
6161 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
6162
6163 metaslab_unflushed_phys_t entry = {
6164 .msp_unflushed_txg = metaslab_unflushed_txg(ms),
6165 };
6166 uint64_t entry_size = sizeof (entry);
6167 uint64_t entry_offset = ms->ms_id * entry_size;
6168
6169 uint64_t object = 0;
6170 int err = zap_lookup(mos, vd->vdev_top_zap,
6171 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6172 &object);
6173 if (err == ENOENT) {
6174 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA,
6175 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx);
6176 VERIFY0(zap_add(mos, vd->vdev_top_zap,
6177 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1,
6178 &object, tx));
6179 } else {
6180 VERIFY0(err);
6181 }
6182
6183 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size,
6184 &entry, tx);
6185 }
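
/*
 * A note on the entry_offset arithmetic above: the per-metaslab flush
 * records form a flat array in a single MOS object, so an entry's byte
 * offset is just its metaslab id scaled by the fixed entry size:
 *
 *	entry_offset = ms_id * sizeof (metaslab_unflushed_phys_t)
 *
 * e.g. ms_id 3 with an 8-byte entry lands at byte offset 24.
 */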
6186
6187 void
6188 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx)
6189 {
6190 ms->ms_unflushed_txg = txg;
6191 metaslab_update_ondisk_flush_data(ms, tx);
6192 }
6193
6194 boolean_t
6195 metaslab_unflushed_dirty(metaslab_t *ms)
6196 {
6197 return (ms->ms_unflushed_dirty);
6198 }
6199
6200 uint64_t
6201 metaslab_unflushed_txg(metaslab_t *ms)
6202 {
6203 return (ms->ms_unflushed_txg);
6204 }
6205
6206 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW,
6207 "Allocation granularity (a.k.a. stripe size)");
6208
6209 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW,
6210 "Load all metaslabs when pool is first opened");
6211
6212 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW,
6213 "Prevent metaslabs from being unloaded");
6214
6215 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW,
6216 "Preload potential metaslabs during reassessment");
6217
6218 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW,
6219 "Max number of metaslabs per group to preload");
6220
6221 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW,
6222 "Delay in txgs after metaslab was last used before unloading");
6223
6224 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW,
6225 "Delay in milliseconds after metaslab was last used before unloading");
6226
6227 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW,
6228 "Percentage of metaslab group size that should be free to make it "
6229 "eligible for allocation");
6230
6231 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW,
6232 "Percentage of metaslab group size that should be considered eligible "
6233 "for allocations unless all metaslab groups within the metaslab class "
6234 "have also crossed this threshold");
6235
6236 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT,
6237 ZMOD_RW,
6238 "Use the fragmentation metric to prefer less fragmented metaslabs");
6239
6240 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT,
6241 ZMOD_RW, "Fragmentation for metaslab to allow allocation");
6242
6243 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW,
6244 "Prefer metaslabs with lower LBAs");
6245
6246 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW,
6247 "Enable space-based metaslab group biasing");
6248
6249 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, perf_bias, INT, ZMOD_RW,
6250 "Enable performance-based metaslab group biasing");
6251
6252 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT,
6253 ZMOD_RW, "Enable segment-based metaslab selection");
6254
6255 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW,
6256 "Segment-based metaslab selection maximum buckets before switching");
6257
6258 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW,
6259 "Blocks larger than this size are sometimes forced to be gang blocks");
6260
6261 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW,
6262 "Percentage of large blocks that will be forced to be gang blocks");
6263
6264 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW,
6265 "Max distance (bytes) to search forward before using size tree");
6266
6267 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW,
6268 "When looking in size tree, use largest segment instead of exact fit");
6269
6270 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64,
6271 ZMOD_RW, "How long to trust the cached max chunk size of a metaslab");
6272
6273 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW,
6274 "Percentage of memory that can be used to store metaslab range trees");
6275
6276 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT,
6277 ZMOD_RW, "Try hard to allocate before ganging");
6278
6279 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW,
6280 "Normally only consider this many of the best metaslabs in each vdev");
6281
6282 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator,
6283 param_set_active_allocator, param_get_charp, ZMOD_RW,
6284 "SPA active allocator");
6285