1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/dmu.h>
29 #include <sys/dmu_tx.h>
30 #include <sys/space_map.h>
31 #include <sys/metaslab_impl.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/zio.h>
34 #include <sys/spa_impl.h>
35 #include <sys/zfeature.h>
36
37 /*
38 * Allow allocations to switch to gang blocks quickly. We do this to
39 * avoid having to load lots of space_maps in a given txg. There are,
40 * however, some cases where we want to avoid "fast" ganging and instead
41 * we want to do an exhaustive search of all metaslabs on this device.
42 * Currently we don't allow any gang, slog, or dump device related allocations
43 * to "fast" gang.
44 */
45 #define CAN_FASTGANG(flags) \
46 (!((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER | \
47 METASLAB_GANG_AVOID)))
48
49 #define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
50 #define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
51 #define METASLAB_ACTIVE_MASK \
52 (METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
53
54 uint64_t metaslab_aliquot = 512ULL << 10;
55 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
56
57 /*
58 * The in-core space map representation is more compact than its on-disk form.
59 * The zfs_condense_pct determines how much more compact the in-core
60 * space_map representation must be before we compact it on-disk.
61 * Values should be greater than or equal to 100.
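 * For example, with the default value of 200 below, the on-disk space map
 * must be at least twice the size of its minimal in-core representation
 * before we consider condensing it (see metaslab_should_condense()).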
62 */
63 int zfs_condense_pct = 200;
64
65 /*
66 * Condensing a metaslab is not guaranteed to actually reduce the amount of
67 * space used on disk. In particular, a space map uses data in increments of
68 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
69 * same number of blocks after condensing. Since the goal of condensing is to
70 * reduce the number of IOPs required to read the space map, we only want to
71 * condense when we can be sure we will reduce the number of blocks used by the
72 * space map. Unfortunately, we cannot precisely compute whether or not this is
73 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
74 * we apply the following heuristic: do not condense a spacemap unless the
75 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
76 * blocks.
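 * For example, assuming a space map block size of 4K, the default threshold
 * of 4 below means we only consider condensing a space map once it occupies
 * more than 16K on disk.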
77 */
78 int zfs_metaslab_condense_block_threshold = 4;
79
80 /*
81 * The zfs_mg_noalloc_threshold defines which metaslab groups should
82 * be eligible for allocation. The value is defined as a percentage of
83 * free space. Metaslab groups that have more free space than
84 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
85 * a metaslab group's free space is less than or equal to the
86 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
87 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
88 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
89 * groups are allowed to accept allocations. Gang blocks are always
90 * eligible to allocate on any metaslab group. The default value of 0 means
91 * no metaslab group will be excluded based on this criterion.
92 */
93 int zfs_mg_noalloc_threshold = 0;
94
95 /*
96 * Metaslab groups are considered eligible for allocations if their
97 * fragmentation metric (measured as a percentage) is less than or equal to
98 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
99 * then it will be skipped unless all metaslab groups within the metaslab
100 * class have also crossed this threshold.
101 */
102 int zfs_mg_fragmentation_threshold = 85;
103
104 /*
105 * Allow metaslabs to keep their active state as long as their fragmentation
106 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
107 * active metaslab that exceeds this threshold will no longer keep its active
108 * status allowing better metaslabs to be selected.
109 */
110 int zfs_metaslab_fragmentation_threshold = 70;
111
112 /*
113 * When set, we load all metaslabs when the pool is first opened.
114 */
115 int metaslab_debug_load = 0;
116
117 /*
118 * When set, we prevent metaslabs from being unloaded.
119 */
120 int metaslab_debug_unload = 0;
121
122 /*
123 * Minimum size which forces the dynamic allocator to change
124 * its allocation strategy. Once the space map cannot satisfy
125 * an allocation of this size then it switches to using a more
126 * aggressive strategy (i.e. search by size rather than offset).
127 */
128 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
129
130 /*
131 * The minimum free space, in percent, which must be available
132 * in a space map to continue allocations in a first-fit fashion.
133 * Once the space_map's free space drops below this level we dynamically
134 * switch to using best-fit allocations.
135 */
136 int metaslab_df_free_pct = 4;
137
138 /*
139 * A metaslab is considered "free" if it contains a contiguous
140 * segment which is greater than metaslab_min_alloc_size.
141 */
142 uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;
143
144 /*
145 * Percentage of all cpus that can be used by the metaslab taskq.
146 */
147 int metaslab_load_pct = 50;
148
149 /*
150 * Determines how many txgs a metaslab may remain loaded without having any
151 * allocations from it. As long as a metaslab continues to be used we will
152 * keep it loaded.
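 * With TXG_SIZE of 4, the default below allows a metaslab to sit idle for
 * 8 txgs before it becomes eligible for unloading.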
153 */
154 int metaslab_unload_delay = TXG_SIZE * 2;
155
156 /*
157 * Max number of metaslabs per group to preload.
158 */
159 int metaslab_preload_limit = SPA_DVAS_PER_BP;
160
161 /*
162 * Enable/disable preloading of metaslabs.
163 */
164 boolean_t metaslab_preload_enabled = B_TRUE;
165
166 /*
167 * Enable/disable fragmentation weighting on metaslabs.
168 */
169 boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;
170
171 /*
172 * Enable/disable lba weighting (i.e. outer tracks are given preference).
173 */
174 boolean_t metaslab_lba_weighting_enabled = B_TRUE;
175
176 /*
177 * Enable/disable metaslab group biasing.
178 */
179 boolean_t metaslab_bias_enabled = B_TRUE;
180
181 static uint64_t metaslab_fragmentation(metaslab_t *);
182
183 /*
184 * ==========================================================================
185 * Metaslab classes
186 * ==========================================================================
187 */
188 metaslab_class_t *
189 metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
190 {
191 metaslab_class_t *mc;
192
193 mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
194
195 mc->mc_spa = spa;
196 mc->mc_rotor = NULL;
197 mc->mc_ops = ops;
198
199 return (mc);
200 }
201
202 void
203 metaslab_class_destroy(metaslab_class_t *mc)
204 {
205 ASSERT(mc->mc_rotor == NULL);
206 ASSERT(mc->mc_alloc == 0);
207 ASSERT(mc->mc_deferred == 0);
208 ASSERT(mc->mc_space == 0);
209 ASSERT(mc->mc_dspace == 0);
210
211 kmem_free(mc, sizeof (metaslab_class_t));
212 }
213
214 int
215 metaslab_class_validate(metaslab_class_t *mc)
216 {
217 metaslab_group_t *mg;
218 vdev_t *vd;
219
220 /*
221 * Must hold one of the spa_config locks.
222 */
223 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
224 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
225
226 if ((mg = mc->mc_rotor) == NULL)
227 return (0);
228
229 do {
230 vd = mg->mg_vd;
231 ASSERT(vd->vdev_mg != NULL);
232 ASSERT3P(vd->vdev_top, ==, vd);
233 ASSERT3P(mg->mg_class, ==, mc);
234 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
235 } while ((mg = mg->mg_next) != mc->mc_rotor);
236
237 return (0);
238 }
239
240 void
241 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
242 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
243 {
244 atomic_add_64(&mc->mc_alloc, alloc_delta);
245 atomic_add_64(&mc->mc_deferred, defer_delta);
246 atomic_add_64(&mc->mc_space, space_delta);
247 atomic_add_64(&mc->mc_dspace, dspace_delta);
248 }
249
250 uint64_t
251 metaslab_class_get_alloc(metaslab_class_t *mc)
252 {
253 return (mc->mc_alloc);
254 }
255
256 uint64_t
257 metaslab_class_get_deferred(metaslab_class_t *mc)
258 {
259 return (mc->mc_deferred);
260 }
261
262 uint64_t
263 metaslab_class_get_space(metaslab_class_t *mc)
264 {
265 return (mc->mc_space);
266 }
267
268 uint64_t
269 metaslab_class_get_dspace(metaslab_class_t *mc)
270 {
271 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
272 }
273
274 void
275 metaslab_class_histogram_verify(metaslab_class_t *mc)
276 {
277 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
278 uint64_t *mc_hist;
279 int i;
280
281 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
282 return;
283
284 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
285 KM_SLEEP);
286
287 for (int c = 0; c < rvd->vdev_children; c++) {
288 vdev_t *tvd = rvd->vdev_child[c];
289 metaslab_group_t *mg = tvd->vdev_mg;
290
291 /*
292 * Skip any holes, uninitialized top-levels, or
293 * vdevs that are not in this metaslab class.
294 */
295 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
296 mg->mg_class != mc) {
297 continue;
298 }
299
300 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
301 mc_hist[i] += mg->mg_histogram[i];
302 }
303
304 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
305 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
306
307 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
308 }
309
310 /*
311 * Calculate the metaslab class's fragmentation metric. The metric
312 * is weighted based on the space contribution of each metaslab group.
313 * The return value will be a number between 0 and 100 (inclusive), or
314 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
315 * zfs_frag_table for more information about the metric.
316 */
317 uint64_t
318 metaslab_class_fragmentation(metaslab_class_t *mc)
319 {
320 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
321 uint64_t fragmentation = 0;
322
323 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
324
325 for (int c = 0; c < rvd->vdev_children; c++) {
326 vdev_t *tvd = rvd->vdev_child[c];
327 metaslab_group_t *mg = tvd->vdev_mg;
328
329 /*
330 * Skip any holes, uninitialized top-levels, or
331 * vdevs that are not in this metaslab class.
332 */
333 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
334 mg->mg_class != mc) {
335 continue;
336 }
337
338 /*
339 * If a metaslab group does not contain a fragmentation
340 * metric then just bail out.
341 */
342 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
343 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
344 return (ZFS_FRAG_INVALID);
345 }
346
347 /*
348 * Determine how much this metaslab_group is contributing
349 * to the overall pool fragmentation metric.
350 */
351 fragmentation += mg->mg_fragmentation *
352 metaslab_group_get_space(mg);
353 }
354 fragmentation /= metaslab_class_get_space(mc);
355
356 ASSERT3U(fragmentation, <=, 100);
357 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
358 return (fragmentation);
359 }
360
361 /*
362 * Calculate the amount of expandable space that is available in
363 * this metaslab class. If a device is expanded then its expandable
364 * space will be the amount of allocatable space that is currently not
365 * part of this metaslab class.
366 */
367 uint64_t
368 metaslab_class_expandable_space(metaslab_class_t *mc)
369 {
370 vdev_t *rvd = mc->mc_spa->spa_root_vdev;
371 uint64_t space = 0;
372
373 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
374 for (int c = 0; c < rvd->vdev_children; c++) {
375 vdev_t *tvd = rvd->vdev_child[c];
376 metaslab_group_t *mg = tvd->vdev_mg;
377
378 if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
379 mg->mg_class != mc) {
380 continue;
381 }
382
383 space += tvd->vdev_max_asize - tvd->vdev_asize;
384 }
385 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
386 return (space);
387 }
388
389 /*
390 * ==========================================================================
391 * Metaslab groups
392 * ==========================================================================
393 */
394 static int
395 metaslab_compare(const void *x1, const void *x2)
396 {
397 const metaslab_t *m1 = x1;
398 const metaslab_t *m2 = x2;
399
400 if (m1->ms_weight < m2->ms_weight)
401 return (1);
402 if (m1->ms_weight > m2->ms_weight)
403 return (-1);
404
405 /*
406 * If the weights are identical, use the offset to force uniqueness.
407 */
408 if (m1->ms_start < m2->ms_start)
409 return (-1);
410 if (m1->ms_start > m2->ms_start)
411 return (1);
412
413 ASSERT3P(m1, ==, m2);
414
415 return (0);
416 }
417
418 /*
419 * Update the allocatable flag and the metaslab group's capacity.
420 * The allocatable flag is set to true if the capacity is below
421 * the zfs_mg_noalloc_threshold. If a metaslab group transitions
422 * from allocatable to non-allocatable or vice versa then the metaslab
423 * group's class is updated to reflect the transition.
424 */
425 static void
426 metaslab_group_alloc_update(metaslab_group_t *mg)
427 {
428 vdev_t *vd = mg->mg_vd;
429 metaslab_class_t *mc = mg->mg_class;
430 vdev_stat_t *vs = &vd->vdev_stat;
431 boolean_t was_allocatable;
432
433 ASSERT(vd == vd->vdev_top);
434
435 mutex_enter(&mg->mg_lock);
436 was_allocatable = mg->mg_allocatable;
437
438 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
439 (vs->vs_space + 1);
440
441 /*
442 * A metaslab group is considered allocatable if it has plenty
443 * of free space or is not heavily fragmented. We only take
444 * fragmentation into account if the metaslab group has a valid
445 * fragmentation metric (i.e. a value between 0 and 100).
446 */
447 mg->mg_allocatable = (mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
448 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
449 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
450
451 /*
452 * The mc_alloc_groups maintains a count of the number of
453 * groups in this metaslab class that are still above the
454 * zfs_mg_noalloc_threshold. This is used by the allocating
455 * threads to determine if they should avoid allocations to
456 * a given group. The allocator will avoid allocations to a group
457 * if that group has reached or is below the zfs_mg_noalloc_threshold
458 * and there are still other groups that are above the threshold.
459 * When a group transitions from allocatable to non-allocatable or
460 * vice versa we update the metaslab class to reflect that change.
461 * When the mc_alloc_groups value drops to 0 that means that all
462 * groups have reached the zfs_mg_noalloc_threshold making all groups
463 * eligible for allocations. This effectively means that all devices
464 * are balanced again.
465 */
466 if (was_allocatable && !mg->mg_allocatable)
467 mc->mc_alloc_groups--;
468 else if (!was_allocatable && mg->mg_allocatable)
469 mc->mc_alloc_groups++;
470
471 mutex_exit(&mg->mg_lock);
472 }
473
474 metaslab_group_t *
475 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
476 {
477 metaslab_group_t *mg;
478
479 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
480 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
481 avl_create(&mg->mg_metaslab_tree, metaslab_compare,
482 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
483 mg->mg_vd = vd;
484 mg->mg_class = mc;
485 mg->mg_activation_count = 0;
486
487 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
488 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);
489
490 return (mg);
491 }
492
493 void
494 metaslab_group_destroy(metaslab_group_t *mg)
495 {
496 ASSERT(mg->mg_prev == NULL);
497 ASSERT(mg->mg_next == NULL);
498 /*
499 * We may have gone below zero with the activation count
500 * either because we never activated in the first place or
501 * because we're done, and possibly removing the vdev.
502 */
503 ASSERT(mg->mg_activation_count <= 0);
504
505 taskq_destroy(mg->mg_taskq);
506 avl_destroy(&mg->mg_metaslab_tree);
507 mutex_destroy(&mg->mg_lock);
508 kmem_free(mg, sizeof (metaslab_group_t));
509 }
510
511 void
512 metaslab_group_activate(metaslab_group_t *mg)
513 {
514 metaslab_class_t *mc = mg->mg_class;
515 metaslab_group_t *mgprev, *mgnext;
516
517 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
518
519 ASSERT(mc->mc_rotor != mg);
520 ASSERT(mg->mg_prev == NULL);
521 ASSERT(mg->mg_next == NULL);
522 ASSERT(mg->mg_activation_count <= 0);
523
524 if (++mg->mg_activation_count <= 0)
525 return;
526
527 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
528 metaslab_group_alloc_update(mg);
529
530 if ((mgprev = mc->mc_rotor) == NULL) {
531 mg->mg_prev = mg;
532 mg->mg_next = mg;
533 } else {
534 mgnext = mgprev->mg_next;
535 mg->mg_prev = mgprev;
536 mg->mg_next = mgnext;
537 mgprev->mg_next = mg;
538 mgnext->mg_prev = mg;
539 }
540 mc->mc_rotor = mg;
541 }
542
543 void
544 metaslab_group_passivate(metaslab_group_t *mg)
545 {
546 metaslab_class_t *mc = mg->mg_class;
547 metaslab_group_t *mgprev, *mgnext;
548
549 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
550
551 if (--mg->mg_activation_count != 0) {
552 ASSERT(mc->mc_rotor != mg);
553 ASSERT(mg->mg_prev == NULL);
554 ASSERT(mg->mg_next == NULL);
555 ASSERT(mg->mg_activation_count < 0);
556 return;
557 }
558
559 taskq_wait(mg->mg_taskq);
560 metaslab_group_alloc_update(mg);
561
562 mgprev = mg->mg_prev;
563 mgnext = mg->mg_next;
564
565 if (mg == mgnext) {
566 mc->mc_rotor = NULL;
567 } else {
568 mc->mc_rotor = mgnext;
569 mgprev->mg_next = mgnext;
570 mgnext->mg_prev = mgprev;
571 }
572
573 mg->mg_prev = NULL;
574 mg->mg_next = NULL;
575 }
576
577 uint64_t
578 metaslab_group_get_space(metaslab_group_t *mg)
579 {
580 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
581 }
582
583 void
584 metaslab_group_histogram_verify(metaslab_group_t *mg)
585 {
586 uint64_t *mg_hist;
587 vdev_t *vd = mg->mg_vd;
588 uint64_t ashift = vd->vdev_ashift;
589 int i;
590
591 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
592 return;
593
594 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
595 KM_SLEEP);
596
597 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
598 SPACE_MAP_HISTOGRAM_SIZE + ashift);
599
600 for (int m = 0; m < vd->vdev_ms_count; m++) {
601 metaslab_t *msp = vd->vdev_ms[m];
602
603 if (msp->ms_sm == NULL)
604 continue;
605
606 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
607 mg_hist[i + ashift] +=
608 msp->ms_sm->sm_phys->smp_histogram[i];
609 }
610
611 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
612 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
613
614 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
615 }
616
617 static void
618 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
619 {
620 metaslab_class_t *mc = mg->mg_class;
621 uint64_t ashift = mg->mg_vd->vdev_ashift;
622
623 ASSERT(MUTEX_HELD(&msp->ms_lock));
624 if (msp->ms_sm == NULL)
625 return;
626
627 mutex_enter(&mg->mg_lock);
628 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
629 mg->mg_histogram[i + ashift] +=
630 msp->ms_sm->sm_phys->smp_histogram[i];
631 mc->mc_histogram[i + ashift] +=
632 msp->ms_sm->sm_phys->smp_histogram[i];
633 }
634 mutex_exit(&mg->mg_lock);
635 }
636
637 void
638 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
639 {
640 metaslab_class_t *mc = mg->mg_class;
641 uint64_t ashift = mg->mg_vd->vdev_ashift;
642
643 ASSERT(MUTEX_HELD(&msp->ms_lock));
644 if (msp->ms_sm == NULL)
645 return;
646
647 mutex_enter(&mg->mg_lock);
648 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
649 ASSERT3U(mg->mg_histogram[i + ashift], >=,
650 msp->ms_sm->sm_phys->smp_histogram[i]);
651 ASSERT3U(mc->mc_histogram[i + ashift], >=,
652 msp->ms_sm->sm_phys->smp_histogram[i]);
653
654 mg->mg_histogram[i + ashift] -=
655 msp->ms_sm->sm_phys->smp_histogram[i];
656 mc->mc_histogram[i + ashift] -=
657 msp->ms_sm->sm_phys->smp_histogram[i];
658 }
659 mutex_exit(&mg->mg_lock);
660 }
661
662 static void
663 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
664 {
665 ASSERT(msp->ms_group == NULL);
666 mutex_enter(&mg->mg_lock);
667 msp->ms_group = mg;
668 msp->ms_weight = 0;
669 avl_add(&mg->mg_metaslab_tree, msp);
670 mutex_exit(&mg->mg_lock);
671
672 mutex_enter(&msp->ms_lock);
673 metaslab_group_histogram_add(mg, msp);
674 mutex_exit(&msp->ms_lock);
675 }
676
677 static void
678 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
679 {
680 mutex_enter(&msp->ms_lock);
681 metaslab_group_histogram_remove(mg, msp);
682 mutex_exit(&msp->ms_lock);
683
684 mutex_enter(&mg->mg_lock);
685 ASSERT(msp->ms_group == mg);
686 avl_remove(&mg->mg_metaslab_tree, msp);
687 msp->ms_group = NULL;
688 mutex_exit(&mg->mg_lock);
689 }
690
691 static void
692 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
693 {
694 /*
695 * Although in principle the weight can be any value, in
696 * practice we do not use values in the range [1, 511].
697 */
698 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
699 ASSERT(MUTEX_HELD(&msp->ms_lock));
700
701 mutex_enter(&mg->mg_lock);
702 ASSERT(msp->ms_group == mg);
703 avl_remove(&mg->mg_metaslab_tree, msp);
704 msp->ms_weight = weight;
705 avl_add(&mg->mg_metaslab_tree, msp);
706 mutex_exit(&mg->mg_lock);
707 }
708
709 /*
710 * Calculate the fragmentation for a given metaslab group. We can use
711 * a simple average here since all metaslabs within the group must have
712 * the same size. The return value will be a value between 0 and 100
713 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
714 * group have a fragmentation metric.
715 */
716 uint64_t
717 metaslab_group_fragmentation(metaslab_group_t *mg)
718 {
719 vdev_t *vd = mg->mg_vd;
720 uint64_t fragmentation = 0;
721 uint64_t valid_ms = 0;
722
723 for (int m = 0; m < vd->vdev_ms_count; m++) {
724 metaslab_t *msp = vd->vdev_ms[m];
725
726 if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
727 continue;
728
729 valid_ms++;
730 fragmentation += msp->ms_fragmentation;
731 }
732
733 if (valid_ms <= vd->vdev_ms_count / 2)
734 return (ZFS_FRAG_INVALID);
735
736 fragmentation /= valid_ms;
737 ASSERT3U(fragmentation, <=, 100);
738 return (fragmentation);
739 }
740
741 /*
742 * Determine if a given metaslab group should skip allocations. A metaslab
743 * group should avoid allocations if its free capacity is less than the
744 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
745 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
746 * that can still handle allocations.
747 */
748 static boolean_t
749 metaslab_group_allocatable(metaslab_group_t *mg)
750 {
751 vdev_t *vd = mg->mg_vd;
752 spa_t *spa = vd->vdev_spa;
753 metaslab_class_t *mc = mg->mg_class;
754
755 /*
756 * We use two key metrics to determine if a metaslab group is
757 * considered allocatable -- free space and fragmentation. If
758 * the free space is greater than the free space threshold and
759 * the fragmentation is less than the fragmentation threshold then
760 * consider the group allocatable. There are two cases when we will
761 * not consider these key metrics. The first is if the group is
762 * associated with a slog device and the second is if all groups
763 * in this metaslab class have already been considered ineligible
764 * for allocations.
765 */
766 return ((mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
767 (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
768 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)) ||
769 mc != spa_normal_class(spa) || mc->mc_alloc_groups == 0);
770 }
771
772 /*
773 * ==========================================================================
774 * Range tree callbacks
775 * ==========================================================================
776 */
777
778 /*
779 * Comparison function for the private size-ordered tree. Tree is sorted
780 * by size, larger sizes at the end of the tree.
781 */
782 static int
783 metaslab_rangesize_compare(const void *x1, const void *x2)
784 {
785 const range_seg_t *r1 = x1;
786 const range_seg_t *r2 = x2;
787 uint64_t rs_size1 = r1->rs_end - r1->rs_start;
788 uint64_t rs_size2 = r2->rs_end - r2->rs_start;
789
790 if (rs_size1 < rs_size2)
791 return (-1);
792 if (rs_size1 > rs_size2)
793 return (1);
794
795 if (r1->rs_start < r2->rs_start)
796 return (-1);
797
798 if (r1->rs_start > r2->rs_start)
799 return (1);
800
801 return (0);
802 }
803
804 /*
805 * Create any block allocator specific components. The current allocators
806 * rely on using both a size-ordered range_tree_t and an array of uint64_t's.
807 */
808 static void
809 metaslab_rt_create(range_tree_t *rt, void *arg)
810 {
811 metaslab_t *msp = arg;
812
813 ASSERT3P(rt->rt_arg, ==, msp);
814 ASSERT(msp->ms_tree == NULL);
815
816 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
817 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
818 }
819
820 /*
821 * Destroy the block allocator specific components.
822 */
823 static void
824 metaslab_rt_destroy(range_tree_t *rt, void *arg)
825 {
826 metaslab_t *msp = arg;
827
828 ASSERT3P(rt->rt_arg, ==, msp);
829 ASSERT3P(msp->ms_tree, ==, rt);
830 ASSERT0(avl_numnodes(&msp->ms_size_tree));
831
832 avl_destroy(&msp->ms_size_tree);
833 }
834
835 static void
836 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg)
837 {
838 metaslab_t *msp = arg;
839
840 ASSERT3P(rt->rt_arg, ==, msp);
841 ASSERT3P(msp->ms_tree, ==, rt);
842 VERIFY(!msp->ms_condensing);
843 avl_add(&msp->ms_size_tree, rs);
844 }
845
846 static void
847 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
848 {
849 metaslab_t *msp = arg;
850
851 ASSERT3P(rt->rt_arg, ==, msp);
852 ASSERT3P(msp->ms_tree, ==, rt);
853 VERIFY(!msp->ms_condensing);
854 avl_remove(&msp->ms_size_tree, rs);
855 }
856
857 static void
858 metaslab_rt_vacate(range_tree_t *rt, void *arg)
859 {
860 metaslab_t *msp = arg;
861
862 ASSERT3P(rt->rt_arg, ==, msp);
863 ASSERT3P(msp->ms_tree, ==, rt);
864
865 /*
866 * Normally one would walk the tree freeing nodes along the way.
867 * Since the nodes are shared with the range trees we can avoid
868 * walking all nodes and just reinitialize the avl tree. The nodes
869 * will be freed by the range tree, so we don't want to free them here.
870 */
871 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare,
872 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node));
873 }
874
875 static range_tree_ops_t metaslab_rt_ops = {
876 metaslab_rt_create,
877 metaslab_rt_destroy,
878 metaslab_rt_add,
879 metaslab_rt_remove,
880 metaslab_rt_vacate
881 };
882
883 /*
884 * ==========================================================================
885 * Metaslab block operations
886 * ==========================================================================
887 */
888
889 /*
890 * Return the maximum contiguous segment within the metaslab.
891 */
892 uint64_t
893 metaslab_block_maxsize(metaslab_t *msp)
894 {
895 avl_tree_t *t = &msp->ms_size_tree;
896 range_seg_t *rs;
897
898 if (t == NULL || (rs = avl_last(t)) == NULL)
899 return (0ULL);
900
901 return (rs->rs_end - rs->rs_start);
902 }
903
904 uint64_t
905 metaslab_block_alloc(metaslab_t *msp, uint64_t size)
906 {
907 uint64_t start;
908 range_tree_t *rt = msp->ms_tree;
909
910 VERIFY(!msp->ms_condensing);
911
912 start = msp->ms_ops->msop_alloc(msp, size);
913 if (start != -1ULL) {
914 vdev_t *vd = msp->ms_group->mg_vd;
915
916 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
917 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
918 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
919 range_tree_remove(rt, start, size);
920 }
921 return (start);
922 }
923
924 /*
925 * ==========================================================================
926 * Common allocator routines
927 * ==========================================================================
928 */
929
930 /*
931 * This is a helper function that can be used by the allocator to find
932 * a suitable block to allocate. This will search the specified AVL
933 * tree looking for a block that matches the specified criteria.
934 */
935 static uint64_t
936 metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
937 uint64_t align)
938 {
939 range_seg_t *rs, rsearch;
940 avl_index_t where;
941
942 rsearch.rs_start = *cursor;
943 rsearch.rs_end = *cursor + size;
944
945 rs = avl_find(t, &rsearch, &where);
946 if (rs == NULL)
947 rs = avl_nearest(t, where, AVL_AFTER);
948
949 while (rs != NULL) {
950 uint64_t offset = P2ROUNDUP(rs->rs_start, align);
951
952 if (offset + size <= rs->rs_end) {
953 *cursor = offset + size;
954 return (offset);
955 }
956 rs = AVL_NEXT(t, rs);
957 }
958
959 /*
960 * If we know we've searched the whole map (*cursor == 0), give up.
961 * Otherwise, reset the cursor to the beginning and try again.
962 */
963 if (*cursor == 0)
964 return (-1ULL);
965
966 *cursor = 0;
967 return (metaslab_block_picker(t, cursor, size, align));
968 }
969
970 /*
971 * ==========================================================================
972 * The first-fit block allocator
973 * ==========================================================================
974 */
975 static uint64_t
976 metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
977 {
978 /*
979 * Find the largest power of 2 block size that evenly divides the
980 * requested size. This is used to try to allocate blocks with similar
981 * alignment from the same area of the metaslab (i.e. same cursor
982 * bucket) but it does not guarantee that other allocation sizes will not
983 * exist in the same region.
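 * As a worked example, a 24K (0x6000) request yields align = 8K (0x2000),
 * the lowest set bit of the size, so 8K, 24K, and 40K requests all advance
 * the same alignment cursor.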
984 */
985 uint64_t align = size & -size;
986 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
987 avl_tree_t *t = &msp->ms_tree->rt_root;
988
989 return (metaslab_block_picker(t, cursor, size, align));
990 }
991
992 static metaslab_ops_t metaslab_ff_ops = {
993 metaslab_ff_alloc
994 };
995
996 /*
997 * ==========================================================================
998 * Dynamic block allocator -
999 * Uses the first fit allocation scheme until space gets low and then
1000 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
1001 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
1002 * ==========================================================================
1003 */
1004 static uint64_t
1005 metaslab_df_alloc(metaslab_t *msp, uint64_t size)
1006 {
1007 /*
1008 * Find the largest power of 2 block size that evenly divides the
1009 * requested size. This is used to try to allocate blocks with similar
1010 * alignment from the same area of the metaslab (i.e. same cursor
1011 * bucket) but it does not guarantee that other allocation sizes will not
1012 * exist in the same region.
1013 */
1014 uint64_t align = size & -size;
1015 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
1016 range_tree_t *rt = msp->ms_tree;
1017 avl_tree_t *t = &rt->rt_root;
1018 uint64_t max_size = metaslab_block_maxsize(msp);
1019 int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
1020
1021 ASSERT(MUTEX_HELD(&msp->ms_lock));
1022 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1023
1024 if (max_size < size)
1025 return (-1ULL);
1026
1027 /*
1028 * If we're running low on space switch to using the size
1029 * sorted AVL tree (best-fit).
1030 */
1031 if (max_size < metaslab_df_alloc_threshold ||
1032 free_pct < metaslab_df_free_pct) {
1033 t = &msp->ms_size_tree;
1034 *cursor = 0;
1035 }
1036
1037 return (metaslab_block_picker(t, cursor, size, 1ULL));
1038 }
1039
1040 static metaslab_ops_t metaslab_df_ops = {
1041 metaslab_df_alloc
1042 };
1043
1044 /*
1045 * ==========================================================================
1046 * Cursor fit block allocator -
1047 * Select the largest region in the metaslab, set the cursor to the beginning
1048 * of the range and the cursor_end to the end of the range. As allocations
1049 * are made advance the cursor. Continue allocating from the cursor until
1050 * the range is exhausted and then find a new range.
1051 * ==========================================================================
1052 */
1053 static uint64_t
1054 metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
1055 {
1056 range_tree_t *rt = msp->ms_tree;
1057 avl_tree_t *t = &msp->ms_size_tree;
1058 uint64_t *cursor = &msp->ms_lbas[0];
1059 uint64_t *cursor_end = &msp->ms_lbas[1];
1060 uint64_t offset = 0;
1061
1062 ASSERT(MUTEX_HELD(&msp->ms_lock));
1063 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
1064
1065 ASSERT3U(*cursor_end, >=, *cursor);
1066
1067 if ((*cursor + size) > *cursor_end) {
1068 range_seg_t *rs;
1069
1070 rs = avl_last(&msp->ms_size_tree);
1071 if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
1072 return (-1ULL);
1073
1074 *cursor = rs->rs_start;
1075 *cursor_end = rs->rs_end;
1076 }
1077
1078 offset = *cursor;
1079 *cursor += size;
1080
1081 return (offset);
1082 }
1083
1084 static metaslab_ops_t metaslab_cf_ops = {
1085 metaslab_cf_alloc
1086 };
1087
1088 /*
1089 * ==========================================================================
1090 * New dynamic fit allocator -
1091 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
1092 * contiguous blocks. If no region is found then just use the largest segment
1093 * that remains.
1094 * ==========================================================================
1095 */
1096
1097 /*
1098 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
1099 * to request from the allocator.
1100 */
1101 uint64_t metaslab_ndf_clump_shift = 4;
1102
1103 static uint64_t
1104 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
1105 {
1106 avl_tree_t *t = &msp->ms_tree->rt_root;
1107 avl_index_t where;
1108 range_seg_t *rs, rsearch;
1109 uint64_t hbit = highbit64(size);
1110 uint64_t *cursor = &msp->ms_lbas[hbit - 1];
1111 uint64_t max_size = metaslab_block_maxsize(msp);
1112
1113 ASSERT(MUTEX_HELD(&msp->ms_lock));
1114 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
1115
1116 if (max_size < size)
1117 return (-1ULL);
1118
1119 rsearch.rs_start = *cursor;
1120 rsearch.rs_end = *cursor + size;
1121
1122 rs = avl_find(t, &rsearch, &where);
1123 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
1124 t = &msp->ms_size_tree;
1125
1126 rsearch.rs_start = 0;
1127 rsearch.rs_end = MIN(max_size,
1128 1ULL << (hbit + metaslab_ndf_clump_shift));
1129 rs = avl_find(t, &rsearch, &where);
1130 if (rs == NULL)
1131 rs = avl_nearest(t, where, AVL_AFTER);
1132 ASSERT(rs != NULL);
1133 }
1134
1135 if ((rs->rs_end - rs->rs_start) >= size) {
1136 *cursor = rs->rs_start + size;
1137 return (rs->rs_start);
1138 }
1139 return (-1ULL);
1140 }
1141
1142 static metaslab_ops_t metaslab_ndf_ops = {
1143 metaslab_ndf_alloc
1144 };
1145
1146 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
1147
1148 /*
1149 * ==========================================================================
1150 * Metaslabs
1151 * ==========================================================================
1152 */
1153
1154 /*
1155 * Wait for any in-progress metaslab loads to complete.
1156 */
1157 void
1158 metaslab_load_wait(metaslab_t *msp)
1159 {
1160 ASSERT(MUTEX_HELD(&msp->ms_lock));
1161
1162 while (msp->ms_loading) {
1163 ASSERT(!msp->ms_loaded);
1164 cv_wait(&msp->ms_load_cv, &msp->ms_lock);
1165 }
1166 }
1167
1168 int
1169 metaslab_load(metaslab_t *msp)
1170 {
1171 int error = 0;
1172
1173 ASSERT(MUTEX_HELD(&msp->ms_lock));
1174 ASSERT(!msp->ms_loaded);
1175 ASSERT(!msp->ms_loading);
1176
1177 msp->ms_loading = B_TRUE;
1178
1179 /*
1180 * If the space map has not been allocated yet, then treat
1181 * all the space in the metaslab as free and add it to the
1182 * ms_tree.
1183 */
1184 if (msp->ms_sm != NULL)
1185 error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
1186 else
1187 range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
1188
1189 msp->ms_loaded = (error == 0);
1190 msp->ms_loading = B_FALSE;
1191
1192 if (msp->ms_loaded) {
1193 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1194 range_tree_walk(msp->ms_defertree[t],
1195 range_tree_remove, msp->ms_tree);
1196 }
1197 }
1198 cv_broadcast(&msp->ms_load_cv);
1199 return (error);
1200 }
1201
1202 void
1203 metaslab_unload(metaslab_t *msp)
1204 {
1205 ASSERT(MUTEX_HELD(&msp->ms_lock));
1206 range_tree_vacate(msp->ms_tree, NULL, NULL);
1207 msp->ms_loaded = B_FALSE;
1208 msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
1209 }
1210
1211 int
1212 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1213 metaslab_t **msp)
1214 {
1215 vdev_t *vd = mg->mg_vd;
1216 objset_t *mos = vd->vdev_spa->spa_meta_objset;
1217 metaslab_t *ms;
1218 int error;
1219
1220 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1221 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1222 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1223 ms->ms_id = id;
1224 ms->ms_start = id << vd->vdev_ms_shift;
1225 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1226
1227 /*
1228 * We only open space map objects that already exist. All others
1229 * will be opened when we finally allocate an object for them.
1230 */
1231 if (object != 0) {
1232 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1233 ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
1234
1235 if (error != 0) {
1236 kmem_free(ms, sizeof (metaslab_t));
1237 return (error);
1238 }
1239
1240 ASSERT(ms->ms_sm != NULL);
1241 }
1242
1243 /*
1244 * We create the main range tree here, but we don't create the
1245 * alloctree and freetree until metaslab_sync_done(). This serves
1246 * two purposes: it allows metaslab_sync_done() to detect the
1247 * addition of new space; and for debugging, it ensures that we'd
1248 * data fault on any attempt to use this metaslab before it's ready.
1249 */
1250 ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
1251 metaslab_group_add(mg, ms);
1252
1253 ms->ms_fragmentation = metaslab_fragmentation(ms);
1254 ms->ms_ops = mg->mg_class->mc_ops;
1255
1256 /*
1257 * If we're opening an existing pool (txg == 0) or creating
1258 * a new one (txg == TXG_INITIAL), all space is available now.
1259 * If we're adding space to an existing pool, the new space
1260 * does not become available until after this txg has synced.
1261 */
1262 if (txg <= TXG_INITIAL)
1263 metaslab_sync_done(ms, 0);
1264
1265 /*
1266 * If metaslab_debug_load is set and we're initializing a metaslab
1267 * that has an allocated space_map object then load its space
1268 * map so that we can verify frees.
1269 */
1270 if (metaslab_debug_load && ms->ms_sm != NULL) {
1271 mutex_enter(&ms->ms_lock);
1272 VERIFY0(metaslab_load(ms));
1273 mutex_exit(&ms->ms_lock);
1274 }
1275
1276 if (txg != 0) {
1277 vdev_dirty(vd, 0, NULL, txg);
1278 vdev_dirty(vd, VDD_METASLAB, ms, txg);
1279 }
1280
1281 *msp = ms;
1282
1283 return (0);
1284 }
1285
1286 void
1287 metaslab_fini(metaslab_t *msp)
1288 {
1289 metaslab_group_t *mg = msp->ms_group;
1290
1291 metaslab_group_remove(mg, msp);
1292
1293 mutex_enter(&msp->ms_lock);
1294
1295 VERIFY(msp->ms_group == NULL);
1296 vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
1297 0, -msp->ms_size);
1298 space_map_close(msp->ms_sm);
1299
1300 metaslab_unload(msp);
1301 range_tree_destroy(msp->ms_tree);
1302
1303 for (int t = 0; t < TXG_SIZE; t++) {
1304 range_tree_destroy(msp->ms_alloctree[t]);
1305 range_tree_destroy(msp->ms_freetree[t]);
1306 }
1307
1308 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1309 range_tree_destroy(msp->ms_defertree[t]);
1310 }
1311
1312 ASSERT0(msp->ms_deferspace);
1313
1314 mutex_exit(&msp->ms_lock);
1315 cv_destroy(&msp->ms_load_cv);
1316 mutex_destroy(&msp->ms_lock);
1317
1318 kmem_free(msp, sizeof (metaslab_t));
1319 }
1320
1321 #define FRAGMENTATION_TABLE_SIZE 17
1322
1323 /*
1324 * This table defines a segment size based fragmentation metric that will
1325 * allow each metaslab to derive its own fragmentation value. This is done
1326 * by calculating the space in each bucket of the spacemap histogram and
1327 * multiplying that by the fragmentation metric in this table. Doing
1328 * this for all buckets and dividing it by the total amount of free
1329 * space in this metaslab (i.e. the total free space in all buckets) gives
1330 * us the fragmentation metric. This means that a high fragmentation metric
1331 * equates to most of the free space being comprised of small segments.
1332 * Conversely, if the metric is low, then most of the free space is in
1333 * large segments. A 10% change in fragmentation equates to approximately
1334 * double the number of segments.
1335 *
1336 * This table defines 0% fragmented space using 16MB segments. Testing has
1337 * shown that segments that are greater than or equal to 16MB do not suffer
1338 * from drastic performance problems. Using this value, we derive the rest
1339 * of the table. Since the fragmentation value is never stored on disk, it
1340 * is possible to change these calculations in the future.
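 *
 * As an illustrative example: a metaslab whose free space is half 512K
 * segments (factor 30 below) and half 4M segments (factor 10 below) would
 * compute a fragmentation metric of (50% * 30) + (50% * 10) = 20.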
1341 */
1342 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
1343 100, /* 512B */
1344 100, /* 1K */
1345 98, /* 2K */
1346 95, /* 4K */
1347 90, /* 8K */
1348 80, /* 16K */
1349 70, /* 32K */
1350 60, /* 64K */
1351 50, /* 128K */
1352 40, /* 256K */
1353 30, /* 512K */
1354 20, /* 1M */
1355 15, /* 2M */
1356 10, /* 4M */
1357 5, /* 8M */
1358 0 /* 16M */
1359 };
1360
1361 /*
1362 * Calculate the metaslab's fragmentation metric. A return value
1363 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
1364 * not support this metric. Otherwise, the return value should be in the
1365 * range [0, 100].
1366 */
1367 static uint64_t
1368 metaslab_fragmentation(metaslab_t *msp)
1369 {
1370 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1371 uint64_t fragmentation = 0;
1372 uint64_t total = 0;
1373 boolean_t feature_enabled = spa_feature_is_enabled(spa,
1374 SPA_FEATURE_SPACEMAP_HISTOGRAM);
1375
1376 if (!feature_enabled)
1377 return (ZFS_FRAG_INVALID);
1378
1379 /*
1380 * A null space map means that the entire metaslab is free
1381 * and thus is not fragmented.
1382 */
1383 if (msp->ms_sm == NULL)
1384 return (0);
1385
1386 /*
1387 * If this metaslab's space_map has not been upgraded, flag it
1388 * so that we upgrade next time we encounter it.
1389 */
1390 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
1391 uint64_t txg = spa_syncing_txg(spa);
1392 vdev_t *vd = msp->ms_group->mg_vd;
1393
1394 if (spa_writeable(spa)) {
1395 msp->ms_condense_wanted = B_TRUE;
1396 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1397 spa_dbgmsg(spa, "txg %llu, requesting force condense: "
1398 "msp %p, vd %p", txg, msp, vd);
1399 }
1400 return (ZFS_FRAG_INVALID);
1401 }
1402
1403 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
1404 uint64_t space = 0;
1405 uint8_t shift = msp->ms_sm->sm_shift;
1406 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
1407 FRAGMENTATION_TABLE_SIZE - 1);
1408
1409 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
1410 continue;
1411
1412 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
1413 total += space;
1414
1415 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
1416 fragmentation += space * zfs_frag_table[idx];
1417 }
1418
1419 if (total > 0)
1420 fragmentation /= total;
1421 ASSERT3U(fragmentation, <=, 100);
1422 return (fragmentation);
1423 }
1424
1425 /*
1426 * Compute a weight -- a selection preference value -- for the given metaslab.
1427 * This is based on the amount of free space, the level of fragmentation,
1428 * the LBA range, and whether the metaslab is loaded.
1429 */
1430 static uint64_t
1431 metaslab_weight(metaslab_t *msp)
1432 {
1433 metaslab_group_t *mg = msp->ms_group;
1434 vdev_t *vd = mg->mg_vd;
1435 uint64_t weight, space;
1436
1437 ASSERT(MUTEX_HELD(&msp->ms_lock));
1438
1439 /*
1440 * This vdev is in the process of being removed so there is nothing
1441 * for us to do here.
1442 */
1443 if (vd->vdev_removing) {
1444 ASSERT0(space_map_allocated(msp->ms_sm));
1445 ASSERT0(vd->vdev_ms_shift);
1446 return (0);
1447 }
1448
1449 /*
1450 * The baseline weight is the metaslab's free space.
1451 */
1452 space = msp->ms_size - space_map_allocated(msp->ms_sm);
1453
1454 msp->ms_fragmentation = metaslab_fragmentation(msp);
1455 if (metaslab_fragmentation_factor_enabled &&
1456 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
1457 /*
1458 * Use the fragmentation information to inversely scale
1459 * down the baseline weight. We need to ensure that we
1460 * don't exclude this metaslab completely when it's 100%
1461 * fragmented. To avoid this we reduce the fragmented value
1462 * by 1.
1463 */
1464 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
1465
1466 /*
1467 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
1468 * this metaslab again. The fragmentation metric may have
1469 * decreased the space to something smaller than
1470 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
1471 * so that we can consume any remaining space.
1472 */
1473 if (space > 0 && space < SPA_MINBLOCKSIZE)
1474 space = SPA_MINBLOCKSIZE;
1475 }
1476 weight = space;
1477
1478 /*
1479 * Modern disks have uniform bit density and constant angular velocity.
1480 * Therefore, the outer recording zones are faster (higher bandwidth)
1481 * than the inner zones by the ratio of outer to inner track diameter,
1482 * which is typically around 2:1. We account for this by assigning
1483 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
1484 * In effect, this means that we'll select the metaslab with the most
1485 * free bandwidth rather than simply the one with the most free space.
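 * As a rough illustration, on a vdev with 200 metaslabs, metaslab 0
 * receives a 2.0x multiplier, metaslab 100 about 1.5x, and metaslab 199
 * just over 1.0x.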
1486 */
1487 if (metaslab_lba_weighting_enabled) {
1488 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
1489 ASSERT(weight >= space && weight <= 2 * space);
1490 }
1491
1492 /*
1493 * If this metaslab is one we're actively using, adjust its
1494 * weight to make it preferable to any inactive metaslab so
1495 * we'll polish it off. If the fragmentation on this metaslab
1496 * has exceeded our threshold, then don't mark it active.
1497 */
1498 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
1499 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
1500 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
1501 }
1502
1503 return (weight);
1504 }
1505
1506 static int
1507 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
1508 {
1509 ASSERT(MUTEX_HELD(&msp->ms_lock));
1510
1511 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
1512 metaslab_load_wait(msp);
1513 if (!msp->ms_loaded) {
1514 int error = metaslab_load(msp);
1515 if (error) {
1516 metaslab_group_sort(msp->ms_group, msp, 0);
1517 return (error);
1518 }
1519 }
1520
1521 metaslab_group_sort(msp->ms_group, msp,
1522 msp->ms_weight | activation_weight);
1523 }
1524 ASSERT(msp->ms_loaded);
1525 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
1526
1527 return (0);
1528 }
1529
1530 static void
1531 metaslab_passivate(metaslab_t *msp, uint64_t size)
1532 {
1533 /*
1534 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
1535 * this metaslab again. In that case, it had better be empty,
1536 * or we would be leaving space on the table.
1537 */
1538 ASSERT(size >= SPA_MINBLOCKSIZE || range_tree_space(msp->ms_tree) == 0);
1539 metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
1540 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
1541 }
1542
1543 static void
1544 metaslab_preload(void *arg)
1545 {
1546 metaslab_t *msp = arg;
1547 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1548
1549 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
1550
1551 mutex_enter(&msp->ms_lock);
1552 metaslab_load_wait(msp);
1553 if (!msp->ms_loaded)
1554 (void) metaslab_load(msp);
1555
1556 /*
1557 * Set the ms_access_txg value so that we don't unload it right away.
1558 */
1559 msp->ms_access_txg = spa_syncing_txg(spa) + metaslab_unload_delay + 1;
1560 mutex_exit(&msp->ms_lock);
1561 }
1562
1563 static void
1564 metaslab_group_preload(metaslab_group_t *mg)
1565 {
1566 spa_t *spa = mg->mg_vd->vdev_spa;
1567 metaslab_t *msp;
1568 avl_tree_t *t = &mg->mg_metaslab_tree;
1569 int m = 0;
1570
1571 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
1572 taskq_wait(mg->mg_taskq);
1573 return;
1574 }
1575
1576 mutex_enter(&mg->mg_lock);
1577 /*
1578 * Load the next potential metaslabs
1579 */
1580 msp = avl_first(t);
1581 while (msp != NULL) {
1582 metaslab_t *msp_next = AVL_NEXT(t, msp);
1583
1584 /*
1585 * We preload only the maximum number of metaslabs specified
1586 * by metaslab_preload_limit. If a metaslab is being forced
1587 * to condense then we preload it too. This will ensure
1588 * that force condensing happens in the next txg.
1589 */
1590 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
1591 msp = msp_next;
1592 continue;
1593 }
1594
1595 /*
1596 * We must drop the metaslab group lock here to preserve
1597 * lock ordering with the ms_lock (when grabbing both
1598 * the mg_lock and the ms_lock, the ms_lock must be taken
1599 * first). As a result, it is possible that the ordering
1600 * of the metaslabs within the avl tree may change before
1601 * we reacquire the lock. The metaslab cannot be removed from
1602 * the tree while we're in syncing context so it is safe to
1603 * drop the mg_lock here. If the metaslabs are reordered
1604 * nothing will break -- we just may end up loading a
1605 * less than optimal one.
1606 */
1607 mutex_exit(&mg->mg_lock);
1608 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
1609 msp, TQ_SLEEP) != NULL);
1610 mutex_enter(&mg->mg_lock);
1611 msp = msp_next;
1612 }
1613 mutex_exit(&mg->mg_lock);
1614 }
1615
1616 /*
1617 * Determine if the space map's on-disk footprint is past our tolerance
1618 * for inefficiency. We would like to use the following criteria to make
1619 * our decision:
1620 *
1621 * 1. The size of the space map object should not dramatically increase as a
1622 * result of writing out the free space range tree.
1623 *
1624 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
1625 * times the size of the free space range tree representation
1626 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
1627 *
1628 * 3. The on-disk size of the space map should actually decrease.
1629 *
1630 * Checking the first condition is tricky since we don't want to walk
1631 * the entire AVL tree calculating the estimated on-disk size. Instead we
1632 * use the size-ordered range tree in the metaslab and calculate the
1633 * size required to write out the largest segment in our free tree. If the
1634 * size required to represent that segment on disk is larger than the space
1635 * map object then we avoid condensing this map.
1636 *
1637 * To determine the second criterion we use a best-case estimate and assume
1638 * each segment can be represented on-disk as a single 64-bit entry. We refer
1639 * to this best-case estimate as the space map's minimal form.
1640 *
1641 * Unfortunately, we cannot compute the on-disk size of the space map in this
1642 * context because we cannot accurately compute the effects of compression, etc.
1643 * Instead, we apply the heuristic described in the block comment for
1644 * zfs_metaslab_condense_block_threshold - we only condense if the space used
1645 * is greater than a threshold number of blocks.
1646 */
1647 static boolean_t
1648 metaslab_should_condense(metaslab_t *msp)
1649 {
1650 space_map_t *sm = msp->ms_sm;
1651 range_seg_t *rs;
1652 uint64_t size, entries, segsz, object_size, optimal_size, record_size;
1653 dmu_object_info_t doi;
1654 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift;
1655
1656 ASSERT(MUTEX_HELD(&msp->ms_lock));
1657 ASSERT(msp->ms_loaded);
1658
1659 /*
1660 * Use the ms_size_tree range tree, which is ordered by size, to
1661 * obtain the largest segment in the free tree. We always condense
1662 * metaslabs that are empty and metaslabs for which a condense
1663 * request has been made.
1664 */
1665 rs = avl_last(&msp->ms_size_tree);
1666 if (rs == NULL || msp->ms_condense_wanted)
1667 return (B_TRUE);
1668
1669 /*
1670 * Calculate the number of 64-bit entries this segment would
1671 * require when written to disk. If this single segment would be
1672 * larger on-disk than the entire current on-disk structure, then
1673 * clearly condensing will increase the on-disk structure size.
1674 */
1675 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
1676 entries = size / (MIN(size, SM_RUN_MAX));
1677 segsz = entries * sizeof (uint64_t);
1678
1679 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
1680 object_size = space_map_length(msp->ms_sm);
1681
1682 dmu_object_info_from_db(sm->sm_dbuf, &doi);
1683 record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
1684
1685 return (segsz <= object_size &&
1686 object_size >= (optimal_size * zfs_condense_pct / 100) &&
1687 object_size > zfs_metaslab_condense_block_threshold * record_size);
1688 }
1689
1690 /*
1691 * Condense the on-disk space map representation to its minimized form.
1692 * The minimized form consists of a small number of allocations followed by
1693 * the entries of the free range tree.
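 * As a rough illustration, a metaslab whose in-core free tree holds 1,000
 * segments condenses to a few ALLOC records (the metaslab minus this txg's
 * frees, deferred frees, and future allocations) followed by ~1,000 FREE
 * records, instead of an ever-growing log of every historical alloc and free.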
1694 */
1695 static void
1696 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
1697 {
1698 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
1699 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK];
1700 range_tree_t *condense_tree;
1701 space_map_t *sm = msp->ms_sm;
1702
1703 ASSERT(MUTEX_HELD(&msp->ms_lock));
1704 ASSERT3U(spa_sync_pass(spa), ==, 1);
1705 ASSERT(msp->ms_loaded);
1706
1707
1708 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, "
1709 "smp size %llu, segments %lu, forcing condense=%s", txg,
1710 msp->ms_id, msp, space_map_length(msp->ms_sm),
1711 avl_numnodes(&msp->ms_tree->rt_root),
1712 msp->ms_condense_wanted ? "TRUE" : "FALSE");
1713
1714 msp->ms_condense_wanted = B_FALSE;
1715
1716 /*
1717 * Create a range tree that is 100% allocated. We remove segments
1718 * that have been freed in this txg, any deferred frees that exist,
1719 * and any allocation in the future. Removing segments should be
1720 * a relatively inexpensive operation since we expect these trees to
1721 * have a small number of nodes.
1722 */
1723 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
1724 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
1725
1726 /*
1727 * Remove what's been freed in this txg from the condense_tree.
1728 * Since we're in sync_pass 1, we know that all the frees from
1729 * this txg are in the freetree.
1730 */
1731 range_tree_walk(freetree, range_tree_remove, condense_tree);
1732
1733 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1734 range_tree_walk(msp->ms_defertree[t],
1735 range_tree_remove, condense_tree);
1736 }
1737
1738 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1739 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
1740 range_tree_remove, condense_tree);
1741 }
1742
1743 /*
1744 * We're about to drop the metaslab's lock thus allowing
1745 * other consumers to change its content. Set the
1746 * metaslab's ms_condensing flag to ensure that
1747 * allocations on this metaslab do not occur while we're
1748 * in the middle of committing it to disk. This is only critical
1749 * for the ms_tree as all other range trees use per txg
1750 * views of their content.
1751 */
1752 msp->ms_condensing = B_TRUE;
1753
1754 mutex_exit(&msp->ms_lock);
1755 space_map_truncate(sm, tx);
1756 mutex_enter(&msp->ms_lock);
1757
1758 /*
1759 * While we would ideally like to create a space_map representation
1760 * that consists only of allocation records, doing so can be
1761 * prohibitively expensive because the in-core free tree can be
1762 * large, and therefore computationally expensive to subtract
1763 * from the condense_tree. Instead we sync out two trees, a cheap
1764 * allocation only tree followed by the in-core free tree. While not
1765 * optimal, this is typically close to optimal, and much cheaper to
1766 * compute.
1767 */
1768 space_map_write(sm, condense_tree, SM_ALLOC, tx);
1769 range_tree_vacate(condense_tree, NULL, NULL);
1770 range_tree_destroy(condense_tree);
1771
1772 space_map_write(sm, msp->ms_tree, SM_FREE, tx);
1773 msp->ms_condensing = B_FALSE;
1774 }
1775
1776 /*
1777 * Write a metaslab to disk in the context of the specified transaction group.
1778 */
1779 void
1780 metaslab_sync(metaslab_t *msp, uint64_t txg)
1781 {
1782 metaslab_group_t *mg = msp->ms_group;
1783 vdev_t *vd = mg->mg_vd;
1784 spa_t *spa = vd->vdev_spa;
1785 objset_t *mos = spa_meta_objset(spa);
1786 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
1787 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK];
1788 range_tree_t **freed_tree =
1789 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1790 dmu_tx_t *tx;
1791 uint64_t object = space_map_object(msp->ms_sm);
1792
1793 ASSERT(!vd->vdev_ishole);
1794
1795 /*
1796 * This metaslab has just been added so there's no work to do now.
1797 */
1798 if (*freetree == NULL) {
1799 ASSERT3P(alloctree, ==, NULL);
1800 return;
1801 }
1802
1803 ASSERT3P(alloctree, !=, NULL);
1804 ASSERT3P(*freetree, !=, NULL);
1805 ASSERT3P(*freed_tree, !=, NULL);
1806
1807 /*
1808 * Normally, we don't want to process a metaslab if there
1809 * are no allocations or frees to perform. However, if the metaslab
1810 * is being forced to condense we need to let it through.
1811 */
1812 if (range_tree_space(alloctree) == 0 &&
1813 range_tree_space(*freetree) == 0 &&
1814 !msp->ms_condense_wanted)
1815 return;
1816
1817 /*
1818 * The only state that can actually be changing concurrently with
1819 * metaslab_sync() is the metaslab's ms_tree. No other thread can
1820 * be modifying this txg's alloctree, freetree, freed_tree, or
1821 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy
1822 * space_map ASSERTs. We drop it whenever we call into the DMU,
1823 * because the DMU can call down to us (e.g. via zio_free()) at
1824 * any time.
1825 */
1826
1827 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
1828
1829 if (msp->ms_sm == NULL) {
1830 uint64_t new_object;
1831
1832 new_object = space_map_alloc(mos, tx);
1833 VERIFY3U(new_object, !=, 0);
1834
1835 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
1836 msp->ms_start, msp->ms_size, vd->vdev_ashift,
1837 &msp->ms_lock));
1838 ASSERT(msp->ms_sm != NULL);
1839 }
1840
1841 mutex_enter(&msp->ms_lock);
1842
1843 /*
1844 * Note: metaslab_condense() clears the space_map's histogram.
1845 * Therefore we must verify and remove this histogram before
1846 * condensing.
1847 */
1848 metaslab_group_histogram_verify(mg);
1849 metaslab_class_histogram_verify(mg->mg_class);
1850 metaslab_group_histogram_remove(mg, msp);
1851
1852 if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
1853 metaslab_should_condense(msp)) {
1854 metaslab_condense(msp, txg, tx);
1855 } else {
1856 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
1857 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx);
1858 }
1859
1860 if (msp->ms_loaded) {
1861 /*
1862 * When the space map is loaded, we have an accurate
1863 * histogram in the range tree. This gives us an opportunity
1864 * to bring the space map's histogram up-to-date so we clear
1865 * it first before updating it.
1866 */
1867 space_map_histogram_clear(msp->ms_sm);
1868 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
1869 } else {
1870 /*
1871 * Since the space map is not loaded we simply update the
1872 * existing histogram with what was freed in this txg. This
1873 * means that the on-disk histogram may not have an accurate
1874 * view of the free space but it's close enough to allow
1875 * us to make allocation decisions.
1876 */
1877 space_map_histogram_add(msp->ms_sm, *freetree, tx);
1878 }
1879 metaslab_group_histogram_add(mg, msp);
1880 metaslab_group_histogram_verify(mg);
1881 metaslab_class_histogram_verify(mg->mg_class);
1882
1883 /*
1884 * For sync pass 1, we avoid traversing this txg's free range tree
1885 * and instead will just swap the pointers for freetree and
1886 * freed_tree. We can safely do this since the freed_tree is
1887 * guaranteed to be empty on the initial pass.
1888 */
1889 if (spa_sync_pass(spa) == 1) {
1890 range_tree_swap(freetree, freed_tree);
1891 } else {
1892 range_tree_vacate(*freetree, range_tree_add, *freed_tree);
1893 }
1894 range_tree_vacate(alloctree, NULL, NULL);
1895
1896 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1897 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1898
1899 mutex_exit(&msp->ms_lock);
1900
1901 if (object != space_map_object(msp->ms_sm)) {
1902 object = space_map_object(msp->ms_sm);
1903 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
1904 msp->ms_id, sizeof (uint64_t), &object, tx);
1905 }
1906 dmu_tx_commit(tx);
1907 }
1908
1909 /*
1910 * Called after a transaction group has completely synced to mark
1911 * all of the metaslab's free space as usable.
1912 */
1913 void
1914 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
1915 {
1916 metaslab_group_t *mg = msp->ms_group;
1917 vdev_t *vd = mg->mg_vd;
1918 range_tree_t **freed_tree;
1919 range_tree_t **defer_tree;
1920 int64_t alloc_delta, defer_delta;
1921
1922 ASSERT(!vd->vdev_ishole);
1923
1924 mutex_enter(&msp->ms_lock);
1925
1926 /*
1927 * If this metaslab is just becoming available, initialize its
1928 * alloctrees, freetrees, and defertree and add its capacity to
1929 * the vdev.
1930 */
1931 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) {
1932 for (int t = 0; t < TXG_SIZE; t++) {
1933 ASSERT(msp->ms_alloctree[t] == NULL);
1934 ASSERT(msp->ms_freetree[t] == NULL);
1935
1936 msp->ms_alloctree[t] = range_tree_create(NULL, msp,
1937 &msp->ms_lock);
1938 msp->ms_freetree[t] = range_tree_create(NULL, msp,
1939 &msp->ms_lock);
1940 }
1941
1942 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
1943 ASSERT(msp->ms_defertree[t] == NULL);
1944
1945 msp->ms_defertree[t] = range_tree_create(NULL, msp,
1946 &msp->ms_lock);
1947 }
1948
1949 vdev_space_update(vd, 0, 0, msp->ms_size);
1950 }
1951
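/*
 * The defer slot chosen for this txg is the one vacated below;
 * frees swapped into it now will return to the allocatable ms_tree
 * when this slot comes around again, TXG_DEFER_SIZE txgs from now.
 */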
1952 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK];
1953 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
1954
1955 alloc_delta = space_map_alloc_delta(msp->ms_sm);
1956 defer_delta = range_tree_space(*freed_tree) -
1957 range_tree_space(*defer_tree);
1958
1959 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
1960
1961 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
1962 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK]));
1963
1964 /*
1965 * If there's a metaslab_load() in progress, wait for it to complete
1966 * so that we have a consistent view of the in-core space map.
1967 */
1968 metaslab_load_wait(msp);
1969
1970 /*
1971 * Move the frees from the defer_tree back to the free
1972 * range tree (if it's loaded). Swap the freed_tree and the
1973 * defer_tree -- this is safe to do because we've just emptied out
1974 * the defer_tree.
1975 */
1976 range_tree_vacate(*defer_tree,
1977 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
1978 range_tree_swap(freed_tree, defer_tree);
1979
1980 space_map_update(msp->ms_sm);
1981
1982 msp->ms_deferspace += defer_delta;
1983 ASSERT3S(msp->ms_deferspace, >=, 0);
1984 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
1985 if (msp->ms_deferspace != 0) {
1986 /*
1987 * Keep syncing this metaslab until all deferred frees
1988 * are back in circulation.
1989 */
1990 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
1991 }
1992
1993 if (msp->ms_loaded && msp->ms_access_txg < txg) {
1994 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
1995 VERIFY0(range_tree_space(
1996 msp->ms_alloctree[(txg + t) & TXG_MASK]));
1997 }
1998
1999 if (!metaslab_debug_unload)
2000 metaslab_unload(msp);
2001 }
2002
2003 metaslab_group_sort(mg, msp, metaslab_weight(msp));
2004 mutex_exit(&msp->ms_lock);
2005 }
2006
2007 void
2008 metaslab_sync_reassess(metaslab_group_t *mg)
2009 {
2010 metaslab_group_alloc_update(mg);
2011 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
2012
2013 /*
2014 * Preload the next potential metaslabs.
2015 */
2016 metaslab_group_preload(mg);
2017 }
2018
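/*
 * Compute the distance, in bytes, between this metaslab and the metaslab
 * containing the given DVA. DVAs on a different vdev are treated as
 * maximally distant (1ULL << 63).
 */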
2019 static uint64_t
2020 metaslab_distance(metaslab_t *msp, dva_t *dva)
2021 {
2022 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
2023 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
2024 uint64_t start = msp->ms_id;
2025
2026 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
2027 return (1ULL << 63);
2028
2029 if (offset < start)
2030 return ((start - offset) << ms_shift);
2031 if (offset > start)
2032 return ((offset - start) << ms_shift);
2033 return (0);
2034 }
2035
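/*
 * Attempt to allocate asize bytes from this metaslab group in the given
 * txg. If a DVA of this block already resides on this vdev, only consider
 * metaslabs that are sufficiently far (based on min_distance) from the
 * previously chosen DVAs. Returns the allocated offset, or -1ULL if no
 * metaslab in the group can satisfy the request.
 */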
2036 static uint64_t
2037 metaslab_group_alloc(metaslab_group_t *mg, uint64_t psize, uint64_t asize,
2038 uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
2039 {
2040 spa_t *spa = mg->mg_vd->vdev_spa;
2041 metaslab_t *msp = NULL;
2042 uint64_t offset = -1ULL;
2043 avl_tree_t *t = &mg->mg_metaslab_tree;
2044 uint64_t activation_weight;
2045 uint64_t target_distance;
2046 int i;
2047
2048 activation_weight = METASLAB_WEIGHT_PRIMARY;
2049 for (i = 0; i < d; i++) {
2050 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
2051 activation_weight = METASLAB_WEIGHT_SECONDARY;
2052 break;
2053 }
2054 }
2055
2056 for (;;) {
2057 boolean_t was_active;
2058
2059 mutex_enter(&mg->mg_lock);
2060 for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
2061 if (msp->ms_weight < asize) {
2062 spa_dbgmsg(spa, "%s: failed to meet weight "
2063 "requirement: vdev %llu, txg %llu, mg %p, "
2064 "msp %p, psize %llu, asize %llu, "
2065 "weight %llu", spa_name(spa),
2066 mg->mg_vd->vdev_id, txg,
2067 mg, msp, psize, asize, msp->ms_weight);
2068 mutex_exit(&mg->mg_lock);
2069 return (-1ULL);
2070 }
2071
2072 /*
2073 * If the selected metaslab is condensing, skip it.
2074 */
2075 if (msp->ms_condensing)
2076 continue;
2077
2078 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
2079 if (activation_weight == METASLAB_WEIGHT_PRIMARY)
2080 break;
2081
2082 target_distance = min_distance +
2083 (space_map_allocated(msp->ms_sm) != 0 ? 0 :
2084 min_distance >> 1);
2085
2086 for (i = 0; i < d; i++)
2087 if (metaslab_distance(msp, &dva[i]) <
2088 target_distance)
2089 break;
2090 if (i == d)
2091 break;
2092 }
2093 mutex_exit(&mg->mg_lock);
2094 if (msp == NULL)
2095 return (-1ULL);
2096
2097 mutex_enter(&msp->ms_lock);
2098
2099 /*
2100 * Ensure that the metaslab we have selected is still
2101 * capable of handling our request. It's possible that
2102 * another thread may have changed the weight while we
2103 * were blocked on the metaslab lock.
2104 */
2105 if (msp->ms_weight < asize || (was_active &&
2106 !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
2107 activation_weight == METASLAB_WEIGHT_PRIMARY)) {
2108 mutex_exit(&msp->ms_lock);
2109 continue;
2110 }
2111
2112 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
2113 activation_weight == METASLAB_WEIGHT_PRIMARY) {
2114 metaslab_passivate(msp,
2115 msp->ms_weight & ~METASLAB_ACTIVE_MASK);
2116 mutex_exit(&msp->ms_lock);
2117 continue;
2118 }
2119
2120 if (metaslab_activate(msp, activation_weight) != 0) {
2121 mutex_exit(&msp->ms_lock);
2122 continue;
2123 }
2124
2125 /*
2126 * If this metaslab is currently condensing then pick again as
2127 * we can't manipulate this metaslab until it's committed
2128 * to disk.
2129 */
2130 if (msp->ms_condensing) {
2131 mutex_exit(&msp->ms_lock);
2132 continue;
2133 }
2134
2135 if ((offset = metaslab_block_alloc(msp, asize)) != -1ULL)
2136 break;
2137
2138 metaslab_passivate(msp, metaslab_block_maxsize(msp));
2139 mutex_exit(&msp->ms_lock);
2140 }
2141
2142 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2143 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
2144
2145 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, asize);
2146 msp->ms_access_txg = txg + metaslab_unload_delay;
2147
2148 mutex_exit(&msp->ms_lock);
2149
2150 return (offset);
2151 }
2152
2153 /*
2154 * Allocate a block for the specified i/o.
2155 */
2156 static int
2157 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
2158 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
2159 {
2160 metaslab_group_t *mg, *rotor;
2161 vdev_t *vd;
2162 int dshift = 3;
2163 int all_zero;
2164 int zio_lock = B_FALSE;
2165 boolean_t allocatable;
2166 uint64_t offset = -1ULL;
2167 uint64_t asize;
2168 uint64_t distance;
2169
2170 ASSERT(!DVA_IS_VALID(&dva[d]));
2171
2172 /*
2173 * For testing, make some blocks above a certain size be gang blocks.
2174 */
2175 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
2176 return (SET_ERROR(ENOSPC));
2177
2178 /*
2179 * Start at the rotor and loop through all mgs until we find something.
2180 * Note that there's no locking on mc_rotor or mc_aliquot because
2181 * nothing actually breaks if we miss a few updates -- we just won't
2182 * allocate quite as evenly. It all balances out over time.
2183 *
2184 * If we are doing ditto or log blocks, try to spread them across
2185 * consecutive vdevs. If we're forced to reuse a vdev before we've
2186 * allocated all of our ditto blocks, then try and spread them out on
2187 * that vdev as much as possible. If it turns out to not be possible,
2188 * gradually lower our standards until anything becomes acceptable.
2189 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
2190 * gives us hope of containing our fault domains to something we're
2191 * able to reason about. Otherwise, any two top-level vdev failures
2192 * will guarantee the loss of data. With consecutive allocation,
2193 * only two adjacent top-level vdev failures will result in data loss.
2194 *
2195 * If we are doing gang blocks (hintdva is non-NULL), try to keep
2196 * ourselves on the same vdev as our gang block header. That
2197 * way, we can hope for locality in vdev_cache, plus it makes our
2198 * fault domains something tractable.
2199 */
2200 if (hintdva) {
2201 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
2202
2203 /*
2204 * It's possible the vdev we're using as the hint no
2205 * longer exists (i.e. removed). Consult the rotor when
2206 * all else fails.
2207 */
2208 if (vd != NULL) {
2209 mg = vd->vdev_mg;
2210
2211 if (flags & METASLAB_HINTBP_AVOID &&
2212 mg->mg_next != NULL)
2213 mg = mg->mg_next;
2214 } else {
2215 mg = mc->mc_rotor;
2216 }
2217 } else if (d != 0) {
2218 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
2219 mg = vd->vdev_mg->mg_next;
2220 } else {
2221 mg = mc->mc_rotor;
2222 }
2223
2224 /*
2225 * If the hint put us into the wrong metaslab class, or into a
2226 * metaslab group that has been passivated, just follow the rotor.
2227 */
2228 if (mg->mg_class != mc || mg->mg_activation_count <= 0)
2229 mg = mc->mc_rotor;
2230
2231 rotor = mg;
2232 top:
2233 all_zero = B_TRUE;
2234 do {
2235 ASSERT(mg->mg_activation_count == 1);
2236
2237 vd = mg->mg_vd;
2238
2239 /*
2240 * Don't allocate from faulted devices.
2241 */
2242 if (zio_lock) {
2243 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
2244 allocatable = vdev_allocatable(vd);
2245 spa_config_exit(spa, SCL_ZIO, FTAG);
2246 } else {
2247 allocatable = vdev_allocatable(vd);
2248 }
2249
2250 /*
2251 * Determine if the selected metaslab group is eligible
2252 * for allocations. If we're ganging or have requested
2253 * an allocation for the smallest gang block size
2254 * then we don't want to avoid allocating to this
2255 * metaslab group. If we're in this condition we should
2256 * try to allocate from any device possible so that we
2257 * don't inadvertently return ENOSPC and suspend the pool
2258 * even though space is still available.
2259 */
2260 if (allocatable && CAN_FASTGANG(flags) &&
2261 psize > SPA_GANGBLOCKSIZE)
2262 allocatable = metaslab_group_allocatable(mg);
2263
2264 if (!allocatable)
2265 goto next;
2266
2267 /*
2268 * Avoid writing single-copy data to a failing vdev
2269 * unless the user instructs us that it is okay.
2270 */
2271 if ((vd->vdev_stat.vs_write_errors > 0 ||
2272 vd->vdev_state < VDEV_STATE_HEALTHY) &&
2273 d == 0 && dshift == 3 && vd->vdev_children == 0) {
2274 all_zero = B_FALSE;
2275 goto next;
2276 }
2277
2278 ASSERT(mg->mg_class == mc);
2279
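/*
 * Ditto copies on the same vdev should be placed at least
 * vdev_asize >> dshift bytes apart. Each unsuccessful pass
 * around the rotor increments dshift, halving the required
 * separation, until it falls below a single metaslab and is
 * treated as zero.
 */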
2280 distance = vd->vdev_asize >> dshift;
2281 if (distance <= (1ULL << vd->vdev_ms_shift))
2282 distance = 0;
2283 else
2284 all_zero = B_FALSE;
2285
2286 asize = vdev_psize_to_asize(vd, psize);
2287 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
2288
2289 offset = metaslab_group_alloc(mg, psize, asize, txg, distance,
2290 dva, d);
2291 if (offset != -1ULL) {
2292 /*
2293 * If we've just selected this metaslab group,
2294 * figure out whether the corresponding vdev is
2295 * over- or under-used relative to the pool,
2296 * and set an allocation bias to even it out.
2297 */
2298 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
2299 vdev_stat_t *vs = &vd->vdev_stat;
2300 int64_t vu, cu;
2301
2302 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1);
2303 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1);
2304
2305 /*
2306 * Calculate how much more or less we should
2307 * try to allocate from this device during
2308 * this iteration around the rotor.
2309 * For example, if a device is 80% full
2310 * and the pool is 20% full then we should
2311 * reduce allocations by 60% on this device.
2312 *
2313 * mg_bias = (20 - 80) * 512K / 100 = -307K
2314 *
2315 * This reduces allocations by 307K for this
2316 * iteration.
2317 */
2318 mg->mg_bias = ((cu - vu) *
2319 (int64_t)mg->mg_aliquot) / 100;
2320 } else if (!metaslab_bias_enabled) {
2321 mg->mg_bias = 0;
2322 }
2323
2324 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
2325 mg->mg_aliquot + mg->mg_bias) {
2326 mc->mc_rotor = mg->mg_next;
2327 mc->mc_aliquot = 0;
2328 }
2329
2330 DVA_SET_VDEV(&dva[d], vd->vdev_id);
2331 DVA_SET_OFFSET(&dva[d], offset);
2332 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
2333 DVA_SET_ASIZE(&dva[d], asize);
2334
2335 return (0);
2336 }
2337 next:
2338 mc->mc_rotor = mg->mg_next;
2339 mc->mc_aliquot = 0;
2340 } while ((mg = mg->mg_next) != rotor);
2341
2342 if (!all_zero) {
2343 dshift++;
2344 ASSERT(dshift < 64);
2345 goto top;
2346 }
2347
2348 if (!allocatable && !zio_lock) {
2349 dshift = 3;
2350 zio_lock = B_TRUE;
2351 goto top;
2352 }
2353
2354 bzero(&dva[d], sizeof (dva_t));
2355
2356 return (SET_ERROR(ENOSPC));
2357 }
2358
2359 /*
2360 * Free the block represented by DVA in the context of the specified
2361 * transaction group.
2362 */
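/*
 * When "now" is B_TRUE the free takes effect immediately: the range is
 * removed from this txg's alloctree and returned to the in-core ms_tree.
 * Otherwise the range is recorded in this txg's freetree and processed
 * during metaslab_sync().
 */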
2363 static void
2364 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
2365 {
2366 uint64_t vdev = DVA_GET_VDEV(dva);
2367 uint64_t offset = DVA_GET_OFFSET(dva);
2368 uint64_t size = DVA_GET_ASIZE(dva);
2369 vdev_t *vd;
2370 metaslab_t *msp;
2371
2372 ASSERT(DVA_IS_VALID(dva));
2373
2374 if (txg > spa_freeze_txg(spa))
2375 return;
2376
2377 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2378 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
2379 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
2380 (u_longlong_t)vdev, (u_longlong_t)offset);
2381 ASSERT(0);
2382 return;
2383 }
2384
2385 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2386
2387 if (DVA_GET_GANG(dva))
2388 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2389
2390 mutex_enter(&msp->ms_lock);
2391
2392 if (now) {
2393 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
2394 offset, size);
2395
2396 VERIFY(!msp->ms_condensing);
2397 VERIFY3U(offset, >=, msp->ms_start);
2398 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
2399 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
2400 msp->ms_size);
2401 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2402 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2403 range_tree_add(msp->ms_tree, offset, size);
2404 } else {
2405 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0)
2406 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2407 range_tree_add(msp->ms_freetree[txg & TXG_MASK],
2408 offset, size);
2409 }
2410
2411 mutex_exit(&msp->ms_lock);
2412 }
2413
2414 /*
2415 * Intent log support: upon opening the pool after a crash, notify the SPA
2416 * of blocks that the intent log has allocated for immediate write, but
2417 * which are still considered free by the SPA because the last transaction
2418 * group didn't commit yet.
2419 */
2420 static int
2421 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
2422 {
2423 uint64_t vdev = DVA_GET_VDEV(dva);
2424 uint64_t offset = DVA_GET_OFFSET(dva);
2425 uint64_t size = DVA_GET_ASIZE(dva);
2426 vdev_t *vd;
2427 metaslab_t *msp;
2428 int error = 0;
2429
2430 ASSERT(DVA_IS_VALID(dva));
2431
2432 if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
2433 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
2434 return (SET_ERROR(ENXIO));
2435
2436 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2437
2438 if (DVA_GET_GANG(dva))
2439 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
2440
2441 mutex_enter(&msp->ms_lock);
2442
2443 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
2444 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
2445
2446 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
2447 error = SET_ERROR(ENOENT);
2448
2449 if (error || txg == 0) { /* txg == 0 indicates dry run */
2450 mutex_exit(&msp->ms_lock);
2451 return (error);
2452 }
2453
2454 VERIFY(!msp->ms_condensing);
2455 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
2456 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
2457 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
2458 range_tree_remove(msp->ms_tree, offset, size);
2459
2460 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
2461 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
2462 vdev_dirty(vd, VDD_METASLAB, msp, txg);
2463 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
2464 }
2465
2466 mutex_exit(&msp->ms_lock);
2467
2468 return (0);
2469 }
2470
2471 int
2472 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
2473 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
2474 {
2475 dva_t *dva = bp->blk_dva;
2476 dva_t *hintdva = hintbp->blk_dva;
2477 int error = 0;
2478
2479 ASSERT(bp->blk_birth == 0);
2480 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
2481
2482 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2483
2484 if (mc->mc_rotor == NULL) { /* no vdevs in this class */
2485 spa_config_exit(spa, SCL_ALLOC, FTAG);
2486 return (SET_ERROR(ENOSPC));
2487 }
2488
2489 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
2490 ASSERT(BP_GET_NDVAS(bp) == 0);
2491 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
2492
2493 for (int d = 0; d < ndvas; d++) {
2494 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
2495 txg, flags);
2496 if (error != 0) {
2497 for (d--; d >= 0; d--) {
2498 metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
2499 bzero(&dva[d], sizeof (dva_t));
2500 }
2501 spa_config_exit(spa, SCL_ALLOC, FTAG);
2502 return (error);
2503 }
2504 }
2505 ASSERT(error == 0);
2506 ASSERT(BP_GET_NDVAS(bp) == ndvas);
2507
2508 spa_config_exit(spa, SCL_ALLOC, FTAG);
2509
2510 BP_SET_BIRTH(bp, txg, txg);
2511
2512 return (0);
2513 }
2514
2515 void
2516 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
2517 {
2518 const dva_t *dva = bp->blk_dva;
2519 int ndvas = BP_GET_NDVAS(bp);
2520
2521 ASSERT(!BP_IS_HOLE(bp));
2522 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
2523
2524 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
2525
2526 for (int d = 0; d < ndvas; d++)
2527 metaslab_free_dva(spa, &dva[d], txg, now);
2528
2529 spa_config_exit(spa, SCL_FREE, FTAG);
2530 }
2531
2532 int
2533 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
2534 {
2535 const dva_t *dva = bp->blk_dva;
2536 int ndvas = BP_GET_NDVAS(bp);
2537 int error = 0;
2538
2539 ASSERT(!BP_IS_HOLE(bp));
2540
2541 if (txg != 0) {
2542 /*
2543 * First do a dry run to make sure all DVAs are claimable,
2544 * so we don't have to unwind from partial failures below.
2545 */
2546 if ((error = metaslab_claim(spa, bp, 0)) != 0)
2547 return (error);
2548 }
2549
2550 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
2551
2552 for (int d = 0; d < ndvas; d++)
2553 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
2554 break;
2555
2556 spa_config_exit(spa, SCL_ALLOC, FTAG);
2557
2558 ASSERT(error == 0 || txg == 0);
2559
2560 return (error);
2561 }
2562
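/*
 * Debug-only sanity check (enabled by ZFS_DEBUG_ZIO_FREE): verify that a
 * block about to be freed is not already present in the metaslab's free,
 * freeing, or deferred range trees, which would indicate a double free.
 */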
2563 void
2564 metaslab_check_free(spa_t *spa, const blkptr_t *bp)
2565 {
2566 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
2567 return;
2568
2569 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
2570 for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
2571 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
2572 vdev_t *vd = vdev_lookup_top(spa, vdev);
2573 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
2574 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
2575 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
2576
2577 if (msp->ms_loaded)
2578 range_tree_verify(msp->ms_tree, offset, size);
2579
2580 for (int j = 0; j < TXG_SIZE; j++)
2581 range_tree_verify(msp->ms_freetree[j], offset, size);
2582 for (int j = 0; j < TXG_DEFER_SIZE; j++)
2583 range_tree_verify(msp->ms_defertree[j], offset, size);
2584 }
2585 spa_config_exit(spa, SCL_VDEV, FTAG);
2586 }
2587