1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or https://opensource.org/licenses/CDDL-1.0. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2019 by Delphix. All rights reserved. 24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 25 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2017, Intel Corporation. 27 */ 28 29 #include <sys/zfs_context.h> 30 #include <sys/dmu.h> 31 #include <sys/dmu_tx.h> 32 #include <sys/space_map.h> 33 #include <sys/metaslab_impl.h> 34 #include <sys/vdev_impl.h> 35 #include <sys/vdev_draid.h> 36 #include <sys/zio.h> 37 #include <sys/spa_impl.h> 38 #include <sys/zfeature.h> 39 #include <sys/vdev_indirect_mapping.h> 40 #include <sys/zap.h> 41 #include <sys/btree.h> 42 43 #define WITH_DF_BLOCK_ALLOCATOR 44 45 #define GANG_ALLOCATION(flags) \ 46 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER)) 47 48 /* 49 * Metaslab granularity, in bytes. This is roughly similar to what would be 50 * referred to as the "stripe size" in traditional RAID arrays. In normal 51 * operation, we will try to write this amount of data to each disk before 52 * moving on to the next top-level vdev. 53 */ 54 static uint64_t metaslab_aliquot = 1024 * 1024; 55 56 /* 57 * For testing, make some blocks above a certain size be gang blocks. 58 */ 59 uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1; 60 61 /* 62 * In pools where the log space map feature is not enabled we touch 63 * multiple metaslabs (and their respective space maps) with each 64 * transaction group. Thus, we benefit from having a small space map 65 * block size since it allows us to issue more I/O operations scattered 66 * around the disk. So a sane default for the space map block size 67 * is 8~16K. 68 */ 69 int zfs_metaslab_sm_blksz_no_log = (1 << 14); 70 71 /* 72 * When the log space map feature is enabled, we accumulate a lot of 73 * changes per metaslab that are flushed once in a while so we benefit 74 * from a bigger block size like 128K for the metaslab space maps. 75 */ 76 int zfs_metaslab_sm_blksz_with_log = (1 << 17); 77 78 /* 79 * The in-core space map representation is more compact than its on-disk form. 80 * The zfs_condense_pct determines how much more compact the in-core 81 * space map representation must be before we compact it on-disk. 82 * Values should be greater than or equal to 100. 83 */ 84 uint_t zfs_condense_pct = 200; 85 86 /* 87 * Condensing a metaslab is not guaranteed to actually reduce the amount of 88 * space used on disk. In particular, a space map uses data in increments of 89 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the 90 * same number of blocks after condensing. Since the goal of condensing is to 91 * reduce the number of IOPs required to read the space map, we only want to 92 * condense when we can be sure we will reduce the number of blocks used by the 93 * space map. Unfortunately, we cannot precisely compute whether or not this is 94 * the case in metaslab_should_condense since we are holding ms_lock. Instead, 95 * we apply the following heuristic: do not condense a spacemap unless the 96 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold 97 * blocks. 98 */ 99 static const int zfs_metaslab_condense_block_threshold = 4; 100 101 /* 102 * The zfs_mg_noalloc_threshold defines which metaslab groups should 103 * be eligible for allocation. The value is defined as a percentage of 104 * free space. Metaslab groups that have more free space than 105 * zfs_mg_noalloc_threshold are always eligible for allocations. Once 106 * a metaslab group's free space is less than or equal to the 107 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that 108 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold. 109 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all 110 * groups are allowed to accept allocations. Gang blocks are always 111 * eligible to allocate on any metaslab group. The default value of 0 means 112 * no metaslab group will be excluded based on this criterion. 113 */ 114 static uint_t zfs_mg_noalloc_threshold = 0; 115 116 /* 117 * Metaslab groups are considered eligible for allocations if their 118 * fragmentation metric (measured as a percentage) is less than or 119 * equal to zfs_mg_fragmentation_threshold. If a metaslab group 120 * exceeds this threshold then it will be skipped unless all metaslab 121 * groups within the metaslab class have also crossed this threshold. 122 * 123 * This tunable was introduced to avoid edge cases where we continue 124 * allocating from very fragmented disks in our pool while other, less 125 * fragmented disks, exists. On the other hand, if all disks in the 126 * pool are uniformly approaching the threshold, the threshold can 127 * be a speed bump in performance, where we keep switching the disks 128 * that we allocate from (e.g. we allocate some segments from disk A 129 * making it bypassing the threshold while freeing segments from disk 130 * B getting its fragmentation below the threshold). 131 * 132 * Empirically, we've seen that our vdev selection for allocations is 133 * good enough that fragmentation increases uniformly across all vdevs 134 * the majority of the time. Thus we set the threshold percentage high 135 * enough to avoid hitting the speed bump on pools that are being pushed 136 * to the edge. 137 */ 138 static uint_t zfs_mg_fragmentation_threshold = 95; 139 140 /* 141 * Allow metaslabs to keep their active state as long as their fragmentation 142 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An 143 * active metaslab that exceeds this threshold will no longer keep its active 144 * status allowing better metaslabs to be selected. 145 */ 146 static uint_t zfs_metaslab_fragmentation_threshold = 70; 147 148 /* 149 * When set will load all metaslabs when pool is first opened. 150 */ 151 int metaslab_debug_load = B_FALSE; 152 153 /* 154 * When set will prevent metaslabs from being unloaded. 155 */ 156 static int metaslab_debug_unload = B_FALSE; 157 158 /* 159 * Minimum size which forces the dynamic allocator to change 160 * it's allocation strategy. Once the space map cannot satisfy 161 * an allocation of this size then it switches to using more 162 * aggressive strategy (i.e search by size rather than offset). 163 */ 164 uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE; 165 166 /* 167 * The minimum free space, in percent, which must be available 168 * in a space map to continue allocations in a first-fit fashion. 169 * Once the space map's free space drops below this level we dynamically 170 * switch to using best-fit allocations. 171 */ 172 uint_t metaslab_df_free_pct = 4; 173 174 /* 175 * Maximum distance to search forward from the last offset. Without this 176 * limit, fragmented pools can see >100,000 iterations and 177 * metaslab_block_picker() becomes the performance limiting factor on 178 * high-performance storage. 179 * 180 * With the default setting of 16MB, we typically see less than 500 181 * iterations, even with very fragmented, ashift=9 pools. The maximum number 182 * of iterations possible is: 183 * metaslab_df_max_search / (2 * (1<<ashift)) 184 * With the default setting of 16MB this is 16*1024 (with ashift=9) or 185 * 2048 (with ashift=12). 186 */ 187 static uint_t metaslab_df_max_search = 16 * 1024 * 1024; 188 189 /* 190 * Forces the metaslab_block_picker function to search for at least this many 191 * segments forwards until giving up on finding a segment that the allocation 192 * will fit into. 193 */ 194 static const uint32_t metaslab_min_search_count = 100; 195 196 /* 197 * If we are not searching forward (due to metaslab_df_max_search, 198 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable 199 * controls what segment is used. If it is set, we will use the largest free 200 * segment. If it is not set, we will use a segment of exactly the requested 201 * size (or larger). 202 */ 203 static int metaslab_df_use_largest_segment = B_FALSE; 204 205 /* 206 * Percentage of all cpus that can be used by the metaslab taskq. 207 */ 208 int metaslab_load_pct = 50; 209 210 /* 211 * These tunables control how long a metaslab will remain loaded after the 212 * last allocation from it. A metaslab can't be unloaded until at least 213 * metaslab_unload_delay TXG's and metaslab_unload_delay_ms milliseconds 214 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be 215 * unloaded sooner. These settings are intended to be generous -- to keep 216 * metaslabs loaded for a long time, reducing the rate of metaslab loading. 217 */ 218 static uint_t metaslab_unload_delay = 32; 219 static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */ 220 221 /* 222 * Max number of metaslabs per group to preload. 223 */ 224 uint_t metaslab_preload_limit = 10; 225 226 /* 227 * Enable/disable preloading of metaslab. 228 */ 229 static int metaslab_preload_enabled = B_TRUE; 230 231 /* 232 * Enable/disable fragmentation weighting on metaslabs. 233 */ 234 static int metaslab_fragmentation_factor_enabled = B_TRUE; 235 236 /* 237 * Enable/disable lba weighting (i.e. outer tracks are given preference). 238 */ 239 static int metaslab_lba_weighting_enabled = B_TRUE; 240 241 /* 242 * Enable/disable metaslab group biasing. 243 */ 244 static int metaslab_bias_enabled = B_TRUE; 245 246 /* 247 * Enable/disable remapping of indirect DVAs to their concrete vdevs. 248 */ 249 static const boolean_t zfs_remap_blkptr_enable = B_TRUE; 250 251 /* 252 * Enable/disable segment-based metaslab selection. 253 */ 254 static int zfs_metaslab_segment_weight_enabled = B_TRUE; 255 256 /* 257 * When using segment-based metaslab selection, we will continue 258 * allocating from the active metaslab until we have exhausted 259 * zfs_metaslab_switch_threshold of its buckets. 260 */ 261 static int zfs_metaslab_switch_threshold = 2; 262 263 /* 264 * Internal switch to enable/disable the metaslab allocation tracing 265 * facility. 266 */ 267 static const boolean_t metaslab_trace_enabled = B_FALSE; 268 269 /* 270 * Maximum entries that the metaslab allocation tracing facility will keep 271 * in a given list when running in non-debug mode. We limit the number 272 * of entries in non-debug mode to prevent us from using up too much memory. 273 * The limit should be sufficiently large that we don't expect any allocation 274 * to every exceed this value. In debug mode, the system will panic if this 275 * limit is ever reached allowing for further investigation. 276 */ 277 static const uint64_t metaslab_trace_max_entries = 5000; 278 279 /* 280 * Maximum number of metaslabs per group that can be disabled 281 * simultaneously. 282 */ 283 static const int max_disabled_ms = 3; 284 285 /* 286 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded. 287 * To avoid 64-bit overflow, don't set above UINT32_MAX. 288 */ 289 static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */ 290 291 /* 292 * Maximum percentage of memory to use on storing loaded metaslabs. If loading 293 * a metaslab would take it over this percentage, the oldest selected metaslab 294 * is automatically unloaded. 295 */ 296 static uint_t zfs_metaslab_mem_limit = 25; 297 298 /* 299 * Force the per-metaslab range trees to use 64-bit integers to store 300 * segments. Used for debugging purposes. 301 */ 302 static const boolean_t zfs_metaslab_force_large_segs = B_FALSE; 303 304 /* 305 * By default we only store segments over a certain size in the size-sorted 306 * metaslab trees (ms_allocatable_by_size and 307 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and 308 * improves load and unload times at the cost of causing us to use slightly 309 * larger segments than we would otherwise in some cases. 310 */ 311 static const uint32_t metaslab_by_size_min_shift = 14; 312 313 /* 314 * If not set, we will first try normal allocation. If that fails then 315 * we will do a gang allocation. If that fails then we will do a "try hard" 316 * gang allocation. If that fails then we will have a multi-layer gang 317 * block. 318 * 319 * If set, we will first try normal allocation. If that fails then 320 * we will do a "try hard" allocation. If that fails we will do a gang 321 * allocation. If that fails we will do a "try hard" gang allocation. If 322 * that fails then we will have a multi-layer gang block. 323 */ 324 static int zfs_metaslab_try_hard_before_gang = B_FALSE; 325 326 /* 327 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries 328 * metaslabs. This improves performance, especially when there are many 329 * metaslabs per vdev and the allocation can't actually be satisfied (so we 330 * would otherwise iterate all the metaslabs). If there is a metaslab with a 331 * worse weight but it can actually satisfy the allocation, we won't find it 332 * until trying hard. This may happen if the worse metaslab is not loaded 333 * (and the true weight is better than we have calculated), or due to weight 334 * bucketization. E.g. we are looking for a 60K segment, and the best 335 * metaslabs all have free segments in the 32-63K bucket, but the best 336 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a 337 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this 338 * bucket, and therefore a lower weight). 339 */ 340 static uint_t zfs_metaslab_find_max_tries = 100; 341 342 static uint64_t metaslab_weight(metaslab_t *, boolean_t); 343 static void metaslab_set_fragmentation(metaslab_t *, boolean_t); 344 static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t); 345 static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t); 346 347 static void metaslab_passivate(metaslab_t *msp, uint64_t weight); 348 static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp); 349 static void metaslab_flush_update(metaslab_t *, dmu_tx_t *); 350 static unsigned int metaslab_idx_func(multilist_t *, void *); 351 static void metaslab_evict(metaslab_t *, uint64_t); 352 static void metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg); 353 kmem_cache_t *metaslab_alloc_trace_cache; 354 355 typedef struct metaslab_stats { 356 kstat_named_t metaslabstat_trace_over_limit; 357 kstat_named_t metaslabstat_reload_tree; 358 kstat_named_t metaslabstat_too_many_tries; 359 kstat_named_t metaslabstat_try_hard; 360 } metaslab_stats_t; 361 362 static metaslab_stats_t metaslab_stats = { 363 { "trace_over_limit", KSTAT_DATA_UINT64 }, 364 { "reload_tree", KSTAT_DATA_UINT64 }, 365 { "too_many_tries", KSTAT_DATA_UINT64 }, 366 { "try_hard", KSTAT_DATA_UINT64 }, 367 }; 368 369 #define METASLABSTAT_BUMP(stat) \ 370 atomic_inc_64(&metaslab_stats.stat.value.ui64); 371 372 373 static kstat_t *metaslab_ksp; 374 375 void 376 metaslab_stat_init(void) 377 { 378 ASSERT(metaslab_alloc_trace_cache == NULL); 379 metaslab_alloc_trace_cache = kmem_cache_create( 380 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t), 381 0, NULL, NULL, NULL, NULL, NULL, 0); 382 metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats", 383 "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) / 384 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 385 if (metaslab_ksp != NULL) { 386 metaslab_ksp->ks_data = &metaslab_stats; 387 kstat_install(metaslab_ksp); 388 } 389 } 390 391 void 392 metaslab_stat_fini(void) 393 { 394 if (metaslab_ksp != NULL) { 395 kstat_delete(metaslab_ksp); 396 metaslab_ksp = NULL; 397 } 398 399 kmem_cache_destroy(metaslab_alloc_trace_cache); 400 metaslab_alloc_trace_cache = NULL; 401 } 402 403 /* 404 * ========================================================================== 405 * Metaslab classes 406 * ========================================================================== 407 */ 408 metaslab_class_t * 409 metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops) 410 { 411 metaslab_class_t *mc; 412 413 mc = kmem_zalloc(offsetof(metaslab_class_t, 414 mc_allocator[spa->spa_alloc_count]), KM_SLEEP); 415 416 mc->mc_spa = spa; 417 mc->mc_ops = ops; 418 mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL); 419 multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t), 420 offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func); 421 for (int i = 0; i < spa->spa_alloc_count; i++) { 422 metaslab_class_allocator_t *mca = &mc->mc_allocator[i]; 423 mca->mca_rotor = NULL; 424 zfs_refcount_create_tracked(&mca->mca_alloc_slots); 425 } 426 427 return (mc); 428 } 429 430 void 431 metaslab_class_destroy(metaslab_class_t *mc) 432 { 433 spa_t *spa = mc->mc_spa; 434 435 ASSERT(mc->mc_alloc == 0); 436 ASSERT(mc->mc_deferred == 0); 437 ASSERT(mc->mc_space == 0); 438 ASSERT(mc->mc_dspace == 0); 439 440 for (int i = 0; i < spa->spa_alloc_count; i++) { 441 metaslab_class_allocator_t *mca = &mc->mc_allocator[i]; 442 ASSERT(mca->mca_rotor == NULL); 443 zfs_refcount_destroy(&mca->mca_alloc_slots); 444 } 445 mutex_destroy(&mc->mc_lock); 446 multilist_destroy(&mc->mc_metaslab_txg_list); 447 kmem_free(mc, offsetof(metaslab_class_t, 448 mc_allocator[spa->spa_alloc_count])); 449 } 450 451 int 452 metaslab_class_validate(metaslab_class_t *mc) 453 { 454 metaslab_group_t *mg; 455 vdev_t *vd; 456 457 /* 458 * Must hold one of the spa_config locks. 459 */ 460 ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) || 461 spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER)); 462 463 if ((mg = mc->mc_allocator[0].mca_rotor) == NULL) 464 return (0); 465 466 do { 467 vd = mg->mg_vd; 468 ASSERT(vd->vdev_mg != NULL); 469 ASSERT3P(vd->vdev_top, ==, vd); 470 ASSERT3P(mg->mg_class, ==, mc); 471 ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops); 472 } while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor); 473 474 return (0); 475 } 476 477 static void 478 metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta, 479 int64_t defer_delta, int64_t space_delta, int64_t dspace_delta) 480 { 481 atomic_add_64(&mc->mc_alloc, alloc_delta); 482 atomic_add_64(&mc->mc_deferred, defer_delta); 483 atomic_add_64(&mc->mc_space, space_delta); 484 atomic_add_64(&mc->mc_dspace, dspace_delta); 485 } 486 487 uint64_t 488 metaslab_class_get_alloc(metaslab_class_t *mc) 489 { 490 return (mc->mc_alloc); 491 } 492 493 uint64_t 494 metaslab_class_get_deferred(metaslab_class_t *mc) 495 { 496 return (mc->mc_deferred); 497 } 498 499 uint64_t 500 metaslab_class_get_space(metaslab_class_t *mc) 501 { 502 return (mc->mc_space); 503 } 504 505 uint64_t 506 metaslab_class_get_dspace(metaslab_class_t *mc) 507 { 508 return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space); 509 } 510 511 void 512 metaslab_class_histogram_verify(metaslab_class_t *mc) 513 { 514 spa_t *spa = mc->mc_spa; 515 vdev_t *rvd = spa->spa_root_vdev; 516 uint64_t *mc_hist; 517 int i; 518 519 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) 520 return; 521 522 mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, 523 KM_SLEEP); 524 525 mutex_enter(&mc->mc_lock); 526 for (int c = 0; c < rvd->vdev_children; c++) { 527 vdev_t *tvd = rvd->vdev_child[c]; 528 metaslab_group_t *mg = vdev_get_mg(tvd, mc); 529 530 /* 531 * Skip any holes, uninitialized top-levels, or 532 * vdevs that are not in this metalab class. 533 */ 534 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 || 535 mg->mg_class != mc) { 536 continue; 537 } 538 539 IMPLY(mg == mg->mg_vd->vdev_log_mg, 540 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa)); 541 542 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) 543 mc_hist[i] += mg->mg_histogram[i]; 544 } 545 546 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 547 VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]); 548 } 549 550 mutex_exit(&mc->mc_lock); 551 kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE); 552 } 553 554 /* 555 * Calculate the metaslab class's fragmentation metric. The metric 556 * is weighted based on the space contribution of each metaslab group. 557 * The return value will be a number between 0 and 100 (inclusive), or 558 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the 559 * zfs_frag_table for more information about the metric. 560 */ 561 uint64_t 562 metaslab_class_fragmentation(metaslab_class_t *mc) 563 { 564 vdev_t *rvd = mc->mc_spa->spa_root_vdev; 565 uint64_t fragmentation = 0; 566 567 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); 568 569 for (int c = 0; c < rvd->vdev_children; c++) { 570 vdev_t *tvd = rvd->vdev_child[c]; 571 metaslab_group_t *mg = tvd->vdev_mg; 572 573 /* 574 * Skip any holes, uninitialized top-levels, 575 * or vdevs that are not in this metalab class. 576 */ 577 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 || 578 mg->mg_class != mc) { 579 continue; 580 } 581 582 /* 583 * If a metaslab group does not contain a fragmentation 584 * metric then just bail out. 585 */ 586 if (mg->mg_fragmentation == ZFS_FRAG_INVALID) { 587 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 588 return (ZFS_FRAG_INVALID); 589 } 590 591 /* 592 * Determine how much this metaslab_group is contributing 593 * to the overall pool fragmentation metric. 594 */ 595 fragmentation += mg->mg_fragmentation * 596 metaslab_group_get_space(mg); 597 } 598 fragmentation /= metaslab_class_get_space(mc); 599 600 ASSERT3U(fragmentation, <=, 100); 601 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 602 return (fragmentation); 603 } 604 605 /* 606 * Calculate the amount of expandable space that is available in 607 * this metaslab class. If a device is expanded then its expandable 608 * space will be the amount of allocatable space that is currently not 609 * part of this metaslab class. 610 */ 611 uint64_t 612 metaslab_class_expandable_space(metaslab_class_t *mc) 613 { 614 vdev_t *rvd = mc->mc_spa->spa_root_vdev; 615 uint64_t space = 0; 616 617 spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER); 618 for (int c = 0; c < rvd->vdev_children; c++) { 619 vdev_t *tvd = rvd->vdev_child[c]; 620 metaslab_group_t *mg = tvd->vdev_mg; 621 622 if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 || 623 mg->mg_class != mc) { 624 continue; 625 } 626 627 /* 628 * Calculate if we have enough space to add additional 629 * metaslabs. We report the expandable space in terms 630 * of the metaslab size since that's the unit of expansion. 631 */ 632 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize, 633 1ULL << tvd->vdev_ms_shift); 634 } 635 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 636 return (space); 637 } 638 639 void 640 metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg) 641 { 642 multilist_t *ml = &mc->mc_metaslab_txg_list; 643 for (int i = 0; i < multilist_get_num_sublists(ml); i++) { 644 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 645 metaslab_t *msp = multilist_sublist_head(mls); 646 multilist_sublist_unlock(mls); 647 while (msp != NULL) { 648 mutex_enter(&msp->ms_lock); 649 650 /* 651 * If the metaslab has been removed from the list 652 * (which could happen if we were at the memory limit 653 * and it was evicted during this loop), then we can't 654 * proceed and we should restart the sublist. 655 */ 656 if (!multilist_link_active(&msp->ms_class_txg_node)) { 657 mutex_exit(&msp->ms_lock); 658 i--; 659 break; 660 } 661 mls = multilist_sublist_lock(ml, i); 662 metaslab_t *next_msp = multilist_sublist_next(mls, msp); 663 multilist_sublist_unlock(mls); 664 if (txg > 665 msp->ms_selected_txg + metaslab_unload_delay && 666 gethrtime() > msp->ms_selected_time + 667 (uint64_t)MSEC2NSEC(metaslab_unload_delay_ms)) { 668 metaslab_evict(msp, txg); 669 } else { 670 /* 671 * Once we've hit a metaslab selected too 672 * recently to evict, we're done evicting for 673 * now. 674 */ 675 mutex_exit(&msp->ms_lock); 676 break; 677 } 678 mutex_exit(&msp->ms_lock); 679 msp = next_msp; 680 } 681 } 682 } 683 684 static int 685 metaslab_compare(const void *x1, const void *x2) 686 { 687 const metaslab_t *m1 = (const metaslab_t *)x1; 688 const metaslab_t *m2 = (const metaslab_t *)x2; 689 690 int sort1 = 0; 691 int sort2 = 0; 692 if (m1->ms_allocator != -1 && m1->ms_primary) 693 sort1 = 1; 694 else if (m1->ms_allocator != -1 && !m1->ms_primary) 695 sort1 = 2; 696 if (m2->ms_allocator != -1 && m2->ms_primary) 697 sort2 = 1; 698 else if (m2->ms_allocator != -1 && !m2->ms_primary) 699 sort2 = 2; 700 701 /* 702 * Sort inactive metaslabs first, then primaries, then secondaries. When 703 * selecting a metaslab to allocate from, an allocator first tries its 704 * primary, then secondary active metaslab. If it doesn't have active 705 * metaslabs, or can't allocate from them, it searches for an inactive 706 * metaslab to activate. If it can't find a suitable one, it will steal 707 * a primary or secondary metaslab from another allocator. 708 */ 709 if (sort1 < sort2) 710 return (-1); 711 if (sort1 > sort2) 712 return (1); 713 714 int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight); 715 if (likely(cmp)) 716 return (cmp); 717 718 IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2); 719 720 return (TREE_CMP(m1->ms_start, m2->ms_start)); 721 } 722 723 /* 724 * ========================================================================== 725 * Metaslab groups 726 * ========================================================================== 727 */ 728 /* 729 * Update the allocatable flag and the metaslab group's capacity. 730 * The allocatable flag is set to true if the capacity is below 731 * the zfs_mg_noalloc_threshold or has a fragmentation value that is 732 * greater than zfs_mg_fragmentation_threshold. If a metaslab group 733 * transitions from allocatable to non-allocatable or vice versa then the 734 * metaslab group's class is updated to reflect the transition. 735 */ 736 static void 737 metaslab_group_alloc_update(metaslab_group_t *mg) 738 { 739 vdev_t *vd = mg->mg_vd; 740 metaslab_class_t *mc = mg->mg_class; 741 vdev_stat_t *vs = &vd->vdev_stat; 742 boolean_t was_allocatable; 743 boolean_t was_initialized; 744 745 ASSERT(vd == vd->vdev_top); 746 ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==, 747 SCL_ALLOC); 748 749 mutex_enter(&mg->mg_lock); 750 was_allocatable = mg->mg_allocatable; 751 was_initialized = mg->mg_initialized; 752 753 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) / 754 (vs->vs_space + 1); 755 756 mutex_enter(&mc->mc_lock); 757 758 /* 759 * If the metaslab group was just added then it won't 760 * have any space until we finish syncing out this txg. 761 * At that point we will consider it initialized and available 762 * for allocations. We also don't consider non-activated 763 * metaslab groups (e.g. vdevs that are in the middle of being removed) 764 * to be initialized, because they can't be used for allocation. 765 */ 766 mg->mg_initialized = metaslab_group_initialized(mg); 767 if (!was_initialized && mg->mg_initialized) { 768 mc->mc_groups++; 769 } else if (was_initialized && !mg->mg_initialized) { 770 ASSERT3U(mc->mc_groups, >, 0); 771 mc->mc_groups--; 772 } 773 if (mg->mg_initialized) 774 mg->mg_no_free_space = B_FALSE; 775 776 /* 777 * A metaslab group is considered allocatable if it has plenty 778 * of free space or is not heavily fragmented. We only take 779 * fragmentation into account if the metaslab group has a valid 780 * fragmentation metric (i.e. a value between 0 and 100). 781 */ 782 mg->mg_allocatable = (mg->mg_activation_count > 0 && 783 mg->mg_free_capacity > zfs_mg_noalloc_threshold && 784 (mg->mg_fragmentation == ZFS_FRAG_INVALID || 785 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)); 786 787 /* 788 * The mc_alloc_groups maintains a count of the number of 789 * groups in this metaslab class that are still above the 790 * zfs_mg_noalloc_threshold. This is used by the allocating 791 * threads to determine if they should avoid allocations to 792 * a given group. The allocator will avoid allocations to a group 793 * if that group has reached or is below the zfs_mg_noalloc_threshold 794 * and there are still other groups that are above the threshold. 795 * When a group transitions from allocatable to non-allocatable or 796 * vice versa we update the metaslab class to reflect that change. 797 * When the mc_alloc_groups value drops to 0 that means that all 798 * groups have reached the zfs_mg_noalloc_threshold making all groups 799 * eligible for allocations. This effectively means that all devices 800 * are balanced again. 801 */ 802 if (was_allocatable && !mg->mg_allocatable) 803 mc->mc_alloc_groups--; 804 else if (!was_allocatable && mg->mg_allocatable) 805 mc->mc_alloc_groups++; 806 mutex_exit(&mc->mc_lock); 807 808 mutex_exit(&mg->mg_lock); 809 } 810 811 int 812 metaslab_sort_by_flushed(const void *va, const void *vb) 813 { 814 const metaslab_t *a = va; 815 const metaslab_t *b = vb; 816 817 int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg); 818 if (likely(cmp)) 819 return (cmp); 820 821 uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id; 822 uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id; 823 cmp = TREE_CMP(a_vdev_id, b_vdev_id); 824 if (cmp) 825 return (cmp); 826 827 return (TREE_CMP(a->ms_id, b->ms_id)); 828 } 829 830 metaslab_group_t * 831 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators) 832 { 833 metaslab_group_t *mg; 834 835 mg = kmem_zalloc(offsetof(metaslab_group_t, 836 mg_allocator[allocators]), KM_SLEEP); 837 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL); 838 mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL); 839 cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL); 840 avl_create(&mg->mg_metaslab_tree, metaslab_compare, 841 sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node)); 842 mg->mg_vd = vd; 843 mg->mg_class = mc; 844 mg->mg_activation_count = 0; 845 mg->mg_initialized = B_FALSE; 846 mg->mg_no_free_space = B_TRUE; 847 mg->mg_allocators = allocators; 848 849 for (int i = 0; i < allocators; i++) { 850 metaslab_group_allocator_t *mga = &mg->mg_allocator[i]; 851 zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth); 852 } 853 854 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct, 855 maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC); 856 857 return (mg); 858 } 859 860 void 861 metaslab_group_destroy(metaslab_group_t *mg) 862 { 863 ASSERT(mg->mg_prev == NULL); 864 ASSERT(mg->mg_next == NULL); 865 /* 866 * We may have gone below zero with the activation count 867 * either because we never activated in the first place or 868 * because we're done, and possibly removing the vdev. 869 */ 870 ASSERT(mg->mg_activation_count <= 0); 871 872 taskq_destroy(mg->mg_taskq); 873 avl_destroy(&mg->mg_metaslab_tree); 874 mutex_destroy(&mg->mg_lock); 875 mutex_destroy(&mg->mg_ms_disabled_lock); 876 cv_destroy(&mg->mg_ms_disabled_cv); 877 878 for (int i = 0; i < mg->mg_allocators; i++) { 879 metaslab_group_allocator_t *mga = &mg->mg_allocator[i]; 880 zfs_refcount_destroy(&mga->mga_alloc_queue_depth); 881 } 882 kmem_free(mg, offsetof(metaslab_group_t, 883 mg_allocator[mg->mg_allocators])); 884 } 885 886 void 887 metaslab_group_activate(metaslab_group_t *mg) 888 { 889 metaslab_class_t *mc = mg->mg_class; 890 spa_t *spa = mc->mc_spa; 891 metaslab_group_t *mgprev, *mgnext; 892 893 ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0); 894 895 ASSERT(mg->mg_prev == NULL); 896 ASSERT(mg->mg_next == NULL); 897 ASSERT(mg->mg_activation_count <= 0); 898 899 if (++mg->mg_activation_count <= 0) 900 return; 901 902 mg->mg_aliquot = metaslab_aliquot * MAX(1, 903 vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd)); 904 metaslab_group_alloc_update(mg); 905 906 if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) { 907 mg->mg_prev = mg; 908 mg->mg_next = mg; 909 } else { 910 mgnext = mgprev->mg_next; 911 mg->mg_prev = mgprev; 912 mg->mg_next = mgnext; 913 mgprev->mg_next = mg; 914 mgnext->mg_prev = mg; 915 } 916 for (int i = 0; i < spa->spa_alloc_count; i++) { 917 mc->mc_allocator[i].mca_rotor = mg; 918 mg = mg->mg_next; 919 } 920 } 921 922 /* 923 * Passivate a metaslab group and remove it from the allocation rotor. 924 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating 925 * a metaslab group. This function will momentarily drop spa_config_locks 926 * that are lower than the SCL_ALLOC lock (see comment below). 927 */ 928 void 929 metaslab_group_passivate(metaslab_group_t *mg) 930 { 931 metaslab_class_t *mc = mg->mg_class; 932 spa_t *spa = mc->mc_spa; 933 metaslab_group_t *mgprev, *mgnext; 934 int locks = spa_config_held(spa, SCL_ALL, RW_WRITER); 935 936 ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==, 937 (SCL_ALLOC | SCL_ZIO)); 938 939 if (--mg->mg_activation_count != 0) { 940 for (int i = 0; i < spa->spa_alloc_count; i++) 941 ASSERT(mc->mc_allocator[i].mca_rotor != mg); 942 ASSERT(mg->mg_prev == NULL); 943 ASSERT(mg->mg_next == NULL); 944 ASSERT(mg->mg_activation_count < 0); 945 return; 946 } 947 948 /* 949 * The spa_config_lock is an array of rwlocks, ordered as 950 * follows (from highest to lowest): 951 * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC > 952 * SCL_ZIO > SCL_FREE > SCL_VDEV 953 * (For more information about the spa_config_lock see spa_misc.c) 954 * The higher the lock, the broader its coverage. When we passivate 955 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO 956 * config locks. However, the metaslab group's taskq might be trying 957 * to preload metaslabs so we must drop the SCL_ZIO lock and any 958 * lower locks to allow the I/O to complete. At a minimum, 959 * we continue to hold the SCL_ALLOC lock, which prevents any future 960 * allocations from taking place and any changes to the vdev tree. 961 */ 962 spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa); 963 taskq_wait_outstanding(mg->mg_taskq, 0); 964 spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER); 965 metaslab_group_alloc_update(mg); 966 for (int i = 0; i < mg->mg_allocators; i++) { 967 metaslab_group_allocator_t *mga = &mg->mg_allocator[i]; 968 metaslab_t *msp = mga->mga_primary; 969 if (msp != NULL) { 970 mutex_enter(&msp->ms_lock); 971 metaslab_passivate(msp, 972 metaslab_weight_from_range_tree(msp)); 973 mutex_exit(&msp->ms_lock); 974 } 975 msp = mga->mga_secondary; 976 if (msp != NULL) { 977 mutex_enter(&msp->ms_lock); 978 metaslab_passivate(msp, 979 metaslab_weight_from_range_tree(msp)); 980 mutex_exit(&msp->ms_lock); 981 } 982 } 983 984 mgprev = mg->mg_prev; 985 mgnext = mg->mg_next; 986 987 if (mg == mgnext) { 988 mgnext = NULL; 989 } else { 990 mgprev->mg_next = mgnext; 991 mgnext->mg_prev = mgprev; 992 } 993 for (int i = 0; i < spa->spa_alloc_count; i++) { 994 if (mc->mc_allocator[i].mca_rotor == mg) 995 mc->mc_allocator[i].mca_rotor = mgnext; 996 } 997 998 mg->mg_prev = NULL; 999 mg->mg_next = NULL; 1000 } 1001 1002 boolean_t 1003 metaslab_group_initialized(metaslab_group_t *mg) 1004 { 1005 vdev_t *vd = mg->mg_vd; 1006 vdev_stat_t *vs = &vd->vdev_stat; 1007 1008 return (vs->vs_space != 0 && mg->mg_activation_count > 0); 1009 } 1010 1011 uint64_t 1012 metaslab_group_get_space(metaslab_group_t *mg) 1013 { 1014 /* 1015 * Note that the number of nodes in mg_metaslab_tree may be one less 1016 * than vdev_ms_count, due to the embedded log metaslab. 1017 */ 1018 mutex_enter(&mg->mg_lock); 1019 uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree); 1020 mutex_exit(&mg->mg_lock); 1021 return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count); 1022 } 1023 1024 void 1025 metaslab_group_histogram_verify(metaslab_group_t *mg) 1026 { 1027 uint64_t *mg_hist; 1028 avl_tree_t *t = &mg->mg_metaslab_tree; 1029 uint64_t ashift = mg->mg_vd->vdev_ashift; 1030 1031 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) 1032 return; 1033 1034 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, 1035 KM_SLEEP); 1036 1037 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=, 1038 SPACE_MAP_HISTOGRAM_SIZE + ashift); 1039 1040 mutex_enter(&mg->mg_lock); 1041 for (metaslab_t *msp = avl_first(t); 1042 msp != NULL; msp = AVL_NEXT(t, msp)) { 1043 VERIFY3P(msp->ms_group, ==, mg); 1044 /* skip if not active */ 1045 if (msp->ms_sm == NULL) 1046 continue; 1047 1048 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1049 mg_hist[i + ashift] += 1050 msp->ms_sm->sm_phys->smp_histogram[i]; 1051 } 1052 } 1053 1054 for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++) 1055 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]); 1056 1057 mutex_exit(&mg->mg_lock); 1058 1059 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE); 1060 } 1061 1062 static void 1063 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp) 1064 { 1065 metaslab_class_t *mc = mg->mg_class; 1066 uint64_t ashift = mg->mg_vd->vdev_ashift; 1067 1068 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1069 if (msp->ms_sm == NULL) 1070 return; 1071 1072 mutex_enter(&mg->mg_lock); 1073 mutex_enter(&mc->mc_lock); 1074 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1075 IMPLY(mg == mg->mg_vd->vdev_log_mg, 1076 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa)); 1077 mg->mg_histogram[i + ashift] += 1078 msp->ms_sm->sm_phys->smp_histogram[i]; 1079 mc->mc_histogram[i + ashift] += 1080 msp->ms_sm->sm_phys->smp_histogram[i]; 1081 } 1082 mutex_exit(&mc->mc_lock); 1083 mutex_exit(&mg->mg_lock); 1084 } 1085 1086 void 1087 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp) 1088 { 1089 metaslab_class_t *mc = mg->mg_class; 1090 uint64_t ashift = mg->mg_vd->vdev_ashift; 1091 1092 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1093 if (msp->ms_sm == NULL) 1094 return; 1095 1096 mutex_enter(&mg->mg_lock); 1097 mutex_enter(&mc->mc_lock); 1098 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1099 ASSERT3U(mg->mg_histogram[i + ashift], >=, 1100 msp->ms_sm->sm_phys->smp_histogram[i]); 1101 ASSERT3U(mc->mc_histogram[i + ashift], >=, 1102 msp->ms_sm->sm_phys->smp_histogram[i]); 1103 IMPLY(mg == mg->mg_vd->vdev_log_mg, 1104 mc == spa_embedded_log_class(mg->mg_vd->vdev_spa)); 1105 1106 mg->mg_histogram[i + ashift] -= 1107 msp->ms_sm->sm_phys->smp_histogram[i]; 1108 mc->mc_histogram[i + ashift] -= 1109 msp->ms_sm->sm_phys->smp_histogram[i]; 1110 } 1111 mutex_exit(&mc->mc_lock); 1112 mutex_exit(&mg->mg_lock); 1113 } 1114 1115 static void 1116 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) 1117 { 1118 ASSERT(msp->ms_group == NULL); 1119 mutex_enter(&mg->mg_lock); 1120 msp->ms_group = mg; 1121 msp->ms_weight = 0; 1122 avl_add(&mg->mg_metaslab_tree, msp); 1123 mutex_exit(&mg->mg_lock); 1124 1125 mutex_enter(&msp->ms_lock); 1126 metaslab_group_histogram_add(mg, msp); 1127 mutex_exit(&msp->ms_lock); 1128 } 1129 1130 static void 1131 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) 1132 { 1133 mutex_enter(&msp->ms_lock); 1134 metaslab_group_histogram_remove(mg, msp); 1135 mutex_exit(&msp->ms_lock); 1136 1137 mutex_enter(&mg->mg_lock); 1138 ASSERT(msp->ms_group == mg); 1139 avl_remove(&mg->mg_metaslab_tree, msp); 1140 1141 metaslab_class_t *mc = msp->ms_group->mg_class; 1142 multilist_sublist_t *mls = 1143 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 1144 if (multilist_link_active(&msp->ms_class_txg_node)) 1145 multilist_sublist_remove(mls, msp); 1146 multilist_sublist_unlock(mls); 1147 1148 msp->ms_group = NULL; 1149 mutex_exit(&mg->mg_lock); 1150 } 1151 1152 static void 1153 metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1154 { 1155 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1156 ASSERT(MUTEX_HELD(&mg->mg_lock)); 1157 ASSERT(msp->ms_group == mg); 1158 1159 avl_remove(&mg->mg_metaslab_tree, msp); 1160 msp->ms_weight = weight; 1161 avl_add(&mg->mg_metaslab_tree, msp); 1162 1163 } 1164 1165 static void 1166 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 1167 { 1168 /* 1169 * Although in principle the weight can be any value, in 1170 * practice we do not use values in the range [1, 511]. 1171 */ 1172 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0); 1173 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1174 1175 mutex_enter(&mg->mg_lock); 1176 metaslab_group_sort_impl(mg, msp, weight); 1177 mutex_exit(&mg->mg_lock); 1178 } 1179 1180 /* 1181 * Calculate the fragmentation for a given metaslab group. We can use 1182 * a simple average here since all metaslabs within the group must have 1183 * the same size. The return value will be a value between 0 and 100 1184 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this 1185 * group have a fragmentation metric. 1186 */ 1187 uint64_t 1188 metaslab_group_fragmentation(metaslab_group_t *mg) 1189 { 1190 vdev_t *vd = mg->mg_vd; 1191 uint64_t fragmentation = 0; 1192 uint64_t valid_ms = 0; 1193 1194 for (int m = 0; m < vd->vdev_ms_count; m++) { 1195 metaslab_t *msp = vd->vdev_ms[m]; 1196 1197 if (msp->ms_fragmentation == ZFS_FRAG_INVALID) 1198 continue; 1199 if (msp->ms_group != mg) 1200 continue; 1201 1202 valid_ms++; 1203 fragmentation += msp->ms_fragmentation; 1204 } 1205 1206 if (valid_ms <= mg->mg_vd->vdev_ms_count / 2) 1207 return (ZFS_FRAG_INVALID); 1208 1209 fragmentation /= valid_ms; 1210 ASSERT3U(fragmentation, <=, 100); 1211 return (fragmentation); 1212 } 1213 1214 /* 1215 * Determine if a given metaslab group should skip allocations. A metaslab 1216 * group should avoid allocations if its free capacity is less than the 1217 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than 1218 * zfs_mg_fragmentation_threshold and there is at least one metaslab group 1219 * that can still handle allocations. If the allocation throttle is enabled 1220 * then we skip allocations to devices that have reached their maximum 1221 * allocation queue depth unless the selected metaslab group is the only 1222 * eligible group remaining. 1223 */ 1224 static boolean_t 1225 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, 1226 int flags, uint64_t psize, int allocator, int d) 1227 { 1228 spa_t *spa = mg->mg_vd->vdev_spa; 1229 metaslab_class_t *mc = mg->mg_class; 1230 1231 /* 1232 * We can only consider skipping this metaslab group if it's 1233 * in the normal metaslab class and there are other metaslab 1234 * groups to select from. Otherwise, we always consider it eligible 1235 * for allocations. 1236 */ 1237 if ((mc != spa_normal_class(spa) && 1238 mc != spa_special_class(spa) && 1239 mc != spa_dedup_class(spa)) || 1240 mc->mc_groups <= 1) 1241 return (B_TRUE); 1242 1243 /* 1244 * If the metaslab group's mg_allocatable flag is set (see comments 1245 * in metaslab_group_alloc_update() for more information) and 1246 * the allocation throttle is disabled then allow allocations to this 1247 * device. However, if the allocation throttle is enabled then 1248 * check if we have reached our allocation limit (mga_alloc_queue_depth) 1249 * to determine if we should allow allocations to this metaslab group. 1250 * If all metaslab groups are no longer considered allocatable 1251 * (mc_alloc_groups == 0) or we're trying to allocate the smallest 1252 * gang block size then we allow allocations on this metaslab group 1253 * regardless of the mg_allocatable or throttle settings. 1254 */ 1255 if (mg->mg_allocatable) { 1256 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 1257 int64_t qdepth; 1258 uint64_t qmax = mga->mga_cur_max_alloc_queue_depth; 1259 1260 if (!mc->mc_alloc_throttle_enabled) 1261 return (B_TRUE); 1262 1263 /* 1264 * If this metaslab group does not have any free space, then 1265 * there is no point in looking further. 1266 */ 1267 if (mg->mg_no_free_space) 1268 return (B_FALSE); 1269 1270 /* 1271 * Some allocations (e.g., those coming from device removal 1272 * where the * allocations are not even counted in the 1273 * metaslab * allocation queues) are allowed to bypass 1274 * the throttle. 1275 */ 1276 if (flags & METASLAB_DONT_THROTTLE) 1277 return (B_TRUE); 1278 1279 /* 1280 * Relax allocation throttling for ditto blocks. Due to 1281 * random imbalances in allocation it tends to push copies 1282 * to one vdev, that looks a bit better at the moment. 1283 */ 1284 qmax = qmax * (4 + d) / 4; 1285 1286 qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth); 1287 1288 /* 1289 * If this metaslab group is below its qmax or it's 1290 * the only allocatable metasable group, then attempt 1291 * to allocate from it. 1292 */ 1293 if (qdepth < qmax || mc->mc_alloc_groups == 1) 1294 return (B_TRUE); 1295 ASSERT3U(mc->mc_alloc_groups, >, 1); 1296 1297 /* 1298 * Since this metaslab group is at or over its qmax, we 1299 * need to determine if there are metaslab groups after this 1300 * one that might be able to handle this allocation. This is 1301 * racy since we can't hold the locks for all metaslab 1302 * groups at the same time when we make this check. 1303 */ 1304 for (metaslab_group_t *mgp = mg->mg_next; 1305 mgp != rotor; mgp = mgp->mg_next) { 1306 metaslab_group_allocator_t *mgap = 1307 &mgp->mg_allocator[allocator]; 1308 qmax = mgap->mga_cur_max_alloc_queue_depth; 1309 qmax = qmax * (4 + d) / 4; 1310 qdepth = 1311 zfs_refcount_count(&mgap->mga_alloc_queue_depth); 1312 1313 /* 1314 * If there is another metaslab group that 1315 * might be able to handle the allocation, then 1316 * we return false so that we skip this group. 1317 */ 1318 if (qdepth < qmax && !mgp->mg_no_free_space) 1319 return (B_FALSE); 1320 } 1321 1322 /* 1323 * We didn't find another group to handle the allocation 1324 * so we can't skip this metaslab group even though 1325 * we are at or over our qmax. 1326 */ 1327 return (B_TRUE); 1328 1329 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { 1330 return (B_TRUE); 1331 } 1332 return (B_FALSE); 1333 } 1334 1335 /* 1336 * ========================================================================== 1337 * Range tree callbacks 1338 * ========================================================================== 1339 */ 1340 1341 /* 1342 * Comparison function for the private size-ordered tree using 32-bit 1343 * ranges. Tree is sorted by size, larger sizes at the end of the tree. 1344 */ 1345 __attribute__((always_inline)) inline 1346 static int 1347 metaslab_rangesize32_compare(const void *x1, const void *x2) 1348 { 1349 const range_seg32_t *r1 = x1; 1350 const range_seg32_t *r2 = x2; 1351 1352 uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1353 uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1354 1355 int cmp = TREE_CMP(rs_size1, rs_size2); 1356 1357 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); 1358 } 1359 1360 /* 1361 * Comparison function for the private size-ordered tree using 64-bit 1362 * ranges. Tree is sorted by size, larger sizes at the end of the tree. 1363 */ 1364 __attribute__((always_inline)) inline 1365 static int 1366 metaslab_rangesize64_compare(const void *x1, const void *x2) 1367 { 1368 const range_seg64_t *r1 = x1; 1369 const range_seg64_t *r2 = x2; 1370 1371 uint64_t rs_size1 = r1->rs_end - r1->rs_start; 1372 uint64_t rs_size2 = r2->rs_end - r2->rs_start; 1373 1374 int cmp = TREE_CMP(rs_size1, rs_size2); 1375 1376 return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start)); 1377 } 1378 1379 typedef struct metaslab_rt_arg { 1380 zfs_btree_t *mra_bt; 1381 uint32_t mra_floor_shift; 1382 } metaslab_rt_arg_t; 1383 1384 struct mssa_arg { 1385 range_tree_t *rt; 1386 metaslab_rt_arg_t *mra; 1387 }; 1388 1389 static void 1390 metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size) 1391 { 1392 struct mssa_arg *mssap = arg; 1393 range_tree_t *rt = mssap->rt; 1394 metaslab_rt_arg_t *mrap = mssap->mra; 1395 range_seg_max_t seg = {0}; 1396 rs_set_start(&seg, rt, start); 1397 rs_set_end(&seg, rt, start + size); 1398 metaslab_rt_add(rt, &seg, mrap); 1399 } 1400 1401 static void 1402 metaslab_size_tree_full_load(range_tree_t *rt) 1403 { 1404 metaslab_rt_arg_t *mrap = rt->rt_arg; 1405 METASLABSTAT_BUMP(metaslabstat_reload_tree); 1406 ASSERT0(zfs_btree_numnodes(mrap->mra_bt)); 1407 mrap->mra_floor_shift = 0; 1408 struct mssa_arg arg = {0}; 1409 arg.rt = rt; 1410 arg.mra = mrap; 1411 range_tree_walk(rt, metaslab_size_sorted_add, &arg); 1412 } 1413 1414 1415 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf, 1416 range_seg32_t, metaslab_rangesize32_compare) 1417 1418 ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf, 1419 range_seg64_t, metaslab_rangesize64_compare) 1420 1421 /* 1422 * Create any block allocator specific components. The current allocators 1423 * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 1424 */ 1425 static void 1426 metaslab_rt_create(range_tree_t *rt, void *arg) 1427 { 1428 metaslab_rt_arg_t *mrap = arg; 1429 zfs_btree_t *size_tree = mrap->mra_bt; 1430 1431 size_t size; 1432 int (*compare) (const void *, const void *); 1433 bt_find_in_buf_f bt_find; 1434 switch (rt->rt_type) { 1435 case RANGE_SEG32: 1436 size = sizeof (range_seg32_t); 1437 compare = metaslab_rangesize32_compare; 1438 bt_find = metaslab_rt_find_rangesize32_in_buf; 1439 break; 1440 case RANGE_SEG64: 1441 size = sizeof (range_seg64_t); 1442 compare = metaslab_rangesize64_compare; 1443 bt_find = metaslab_rt_find_rangesize64_in_buf; 1444 break; 1445 default: 1446 panic("Invalid range seg type %d", rt->rt_type); 1447 } 1448 zfs_btree_create(size_tree, compare, bt_find, size); 1449 mrap->mra_floor_shift = metaslab_by_size_min_shift; 1450 } 1451 1452 static void 1453 metaslab_rt_destroy(range_tree_t *rt, void *arg) 1454 { 1455 (void) rt; 1456 metaslab_rt_arg_t *mrap = arg; 1457 zfs_btree_t *size_tree = mrap->mra_bt; 1458 1459 zfs_btree_destroy(size_tree); 1460 kmem_free(mrap, sizeof (*mrap)); 1461 } 1462 1463 static void 1464 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 1465 { 1466 metaslab_rt_arg_t *mrap = arg; 1467 zfs_btree_t *size_tree = mrap->mra_bt; 1468 1469 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < 1470 (1ULL << mrap->mra_floor_shift)) 1471 return; 1472 1473 zfs_btree_add(size_tree, rs); 1474 } 1475 1476 static void 1477 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 1478 { 1479 metaslab_rt_arg_t *mrap = arg; 1480 zfs_btree_t *size_tree = mrap->mra_bt; 1481 1482 if (rs_get_end(rs, rt) - rs_get_start(rs, rt) < (1ULL << 1483 mrap->mra_floor_shift)) 1484 return; 1485 1486 zfs_btree_remove(size_tree, rs); 1487 } 1488 1489 static void 1490 metaslab_rt_vacate(range_tree_t *rt, void *arg) 1491 { 1492 metaslab_rt_arg_t *mrap = arg; 1493 zfs_btree_t *size_tree = mrap->mra_bt; 1494 zfs_btree_clear(size_tree); 1495 zfs_btree_destroy(size_tree); 1496 1497 metaslab_rt_create(rt, arg); 1498 } 1499 1500 static const range_tree_ops_t metaslab_rt_ops = { 1501 .rtop_create = metaslab_rt_create, 1502 .rtop_destroy = metaslab_rt_destroy, 1503 .rtop_add = metaslab_rt_add, 1504 .rtop_remove = metaslab_rt_remove, 1505 .rtop_vacate = metaslab_rt_vacate 1506 }; 1507 1508 /* 1509 * ========================================================================== 1510 * Common allocator routines 1511 * ========================================================================== 1512 */ 1513 1514 /* 1515 * Return the maximum contiguous segment within the metaslab. 1516 */ 1517 uint64_t 1518 metaslab_largest_allocatable(metaslab_t *msp) 1519 { 1520 zfs_btree_t *t = &msp->ms_allocatable_by_size; 1521 range_seg_t *rs; 1522 1523 if (t == NULL) 1524 return (0); 1525 if (zfs_btree_numnodes(t) == 0) 1526 metaslab_size_tree_full_load(msp->ms_allocatable); 1527 1528 rs = zfs_btree_last(t, NULL); 1529 if (rs == NULL) 1530 return (0); 1531 1532 return (rs_get_end(rs, msp->ms_allocatable) - rs_get_start(rs, 1533 msp->ms_allocatable)); 1534 } 1535 1536 /* 1537 * Return the maximum contiguous segment within the unflushed frees of this 1538 * metaslab. 1539 */ 1540 static uint64_t 1541 metaslab_largest_unflushed_free(metaslab_t *msp) 1542 { 1543 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1544 1545 if (msp->ms_unflushed_frees == NULL) 1546 return (0); 1547 1548 if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0) 1549 metaslab_size_tree_full_load(msp->ms_unflushed_frees); 1550 range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size, 1551 NULL); 1552 if (rs == NULL) 1553 return (0); 1554 1555 /* 1556 * When a range is freed from the metaslab, that range is added to 1557 * both the unflushed frees and the deferred frees. While the block 1558 * will eventually be usable, if the metaslab were loaded the range 1559 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE 1560 * txgs had passed. As a result, when attempting to estimate an upper 1561 * bound for the largest currently-usable free segment in the 1562 * metaslab, we need to not consider any ranges currently in the defer 1563 * trees. This algorithm approximates the largest available chunk in 1564 * the largest range in the unflushed_frees tree by taking the first 1565 * chunk. While this may be a poor estimate, it should only remain so 1566 * briefly and should eventually self-correct as frees are no longer 1567 * deferred. Similar logic applies to the ms_freed tree. See 1568 * metaslab_load() for more details. 1569 * 1570 * There are two primary sources of inaccuracy in this estimate. Both 1571 * are tolerated for performance reasons. The first source is that we 1572 * only check the largest segment for overlaps. Smaller segments may 1573 * have more favorable overlaps with the other trees, resulting in 1574 * larger usable chunks. Second, we only look at the first chunk in 1575 * the largest segment; there may be other usable chunks in the 1576 * largest segment, but we ignore them. 1577 */ 1578 uint64_t rstart = rs_get_start(rs, msp->ms_unflushed_frees); 1579 uint64_t rsize = rs_get_end(rs, msp->ms_unflushed_frees) - rstart; 1580 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1581 uint64_t start = 0; 1582 uint64_t size = 0; 1583 boolean_t found = range_tree_find_in(msp->ms_defer[t], rstart, 1584 rsize, &start, &size); 1585 if (found) { 1586 if (rstart == start) 1587 return (0); 1588 rsize = start - rstart; 1589 } 1590 } 1591 1592 uint64_t start = 0; 1593 uint64_t size = 0; 1594 boolean_t found = range_tree_find_in(msp->ms_freed, rstart, 1595 rsize, &start, &size); 1596 if (found) 1597 rsize = start - rstart; 1598 1599 return (rsize); 1600 } 1601 1602 static range_seg_t * 1603 metaslab_block_find(zfs_btree_t *t, range_tree_t *rt, uint64_t start, 1604 uint64_t size, zfs_btree_index_t *where) 1605 { 1606 range_seg_t *rs; 1607 range_seg_max_t rsearch; 1608 1609 rs_set_start(&rsearch, rt, start); 1610 rs_set_end(&rsearch, rt, start + size); 1611 1612 rs = zfs_btree_find(t, &rsearch, where); 1613 if (rs == NULL) { 1614 rs = zfs_btree_next(t, where, where); 1615 } 1616 1617 return (rs); 1618 } 1619 1620 #if defined(WITH_DF_BLOCK_ALLOCATOR) || \ 1621 defined(WITH_CF_BLOCK_ALLOCATOR) 1622 1623 /* 1624 * This is a helper function that can be used by the allocator to find a 1625 * suitable block to allocate. This will search the specified B-tree looking 1626 * for a block that matches the specified criteria. 1627 */ 1628 static uint64_t 1629 metaslab_block_picker(range_tree_t *rt, uint64_t *cursor, uint64_t size, 1630 uint64_t max_search) 1631 { 1632 if (*cursor == 0) 1633 *cursor = rt->rt_start; 1634 zfs_btree_t *bt = &rt->rt_root; 1635 zfs_btree_index_t where; 1636 range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size, &where); 1637 uint64_t first_found; 1638 int count_searched = 0; 1639 1640 if (rs != NULL) 1641 first_found = rs_get_start(rs, rt); 1642 1643 while (rs != NULL && (rs_get_start(rs, rt) - first_found <= 1644 max_search || count_searched < metaslab_min_search_count)) { 1645 uint64_t offset = rs_get_start(rs, rt); 1646 if (offset + size <= rs_get_end(rs, rt)) { 1647 *cursor = offset + size; 1648 return (offset); 1649 } 1650 rs = zfs_btree_next(bt, &where, &where); 1651 count_searched++; 1652 } 1653 1654 *cursor = 0; 1655 return (-1ULL); 1656 } 1657 #endif /* WITH_DF/CF_BLOCK_ALLOCATOR */ 1658 1659 #if defined(WITH_DF_BLOCK_ALLOCATOR) 1660 /* 1661 * ========================================================================== 1662 * Dynamic Fit (df) block allocator 1663 * 1664 * Search for a free chunk of at least this size, starting from the last 1665 * offset (for this alignment of block) looking for up to 1666 * metaslab_df_max_search bytes (16MB). If a large enough free chunk is not 1667 * found within 16MB, then return a free chunk of exactly the requested size (or 1668 * larger). 1669 * 1670 * If it seems like searching from the last offset will be unproductive, skip 1671 * that and just return a free chunk of exactly the requested size (or larger). 1672 * This is based on metaslab_df_alloc_threshold and metaslab_df_free_pct. This 1673 * mechanism is probably not very useful and may be removed in the future. 1674 * 1675 * The behavior when not searching can be changed to return the largest free 1676 * chunk, instead of a free chunk of exactly the requested size, by setting 1677 * metaslab_df_use_largest_segment. 1678 * ========================================================================== 1679 */ 1680 static uint64_t 1681 metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1682 { 1683 /* 1684 * Find the largest power of 2 block size that evenly divides the 1685 * requested size. This is used to try to allocate blocks with similar 1686 * alignment from the same area of the metaslab (i.e. same cursor 1687 * bucket) but it does not guarantee that other allocations sizes 1688 * may exist in the same region. 1689 */ 1690 uint64_t align = size & -size; 1691 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1692 range_tree_t *rt = msp->ms_allocatable; 1693 uint_t free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1694 uint64_t offset; 1695 1696 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1697 1698 /* 1699 * If we're running low on space, find a segment based on size, 1700 * rather than iterating based on offset. 1701 */ 1702 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || 1703 free_pct < metaslab_df_free_pct) { 1704 offset = -1; 1705 } else { 1706 offset = metaslab_block_picker(rt, 1707 cursor, size, metaslab_df_max_search); 1708 } 1709 1710 if (offset == -1) { 1711 range_seg_t *rs; 1712 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) 1713 metaslab_size_tree_full_load(msp->ms_allocatable); 1714 1715 if (metaslab_df_use_largest_segment) { 1716 /* use largest free segment */ 1717 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); 1718 } else { 1719 zfs_btree_index_t where; 1720 /* use segment of this size, or next largest */ 1721 rs = metaslab_block_find(&msp->ms_allocatable_by_size, 1722 rt, msp->ms_start, size, &where); 1723 } 1724 if (rs != NULL && rs_get_start(rs, rt) + size <= rs_get_end(rs, 1725 rt)) { 1726 offset = rs_get_start(rs, rt); 1727 *cursor = offset + size; 1728 } 1729 } 1730 1731 return (offset); 1732 } 1733 1734 const metaslab_ops_t zfs_metaslab_ops = { 1735 metaslab_df_alloc 1736 }; 1737 #endif /* WITH_DF_BLOCK_ALLOCATOR */ 1738 1739 #if defined(WITH_CF_BLOCK_ALLOCATOR) 1740 /* 1741 * ========================================================================== 1742 * Cursor fit block allocator - 1743 * Select the largest region in the metaslab, set the cursor to the beginning 1744 * of the range and the cursor_end to the end of the range. As allocations 1745 * are made advance the cursor. Continue allocating from the cursor until 1746 * the range is exhausted and then find a new range. 1747 * ========================================================================== 1748 */ 1749 static uint64_t 1750 metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1751 { 1752 range_tree_t *rt = msp->ms_allocatable; 1753 zfs_btree_t *t = &msp->ms_allocatable_by_size; 1754 uint64_t *cursor = &msp->ms_lbas[0]; 1755 uint64_t *cursor_end = &msp->ms_lbas[1]; 1756 uint64_t offset = 0; 1757 1758 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1759 1760 ASSERT3U(*cursor_end, >=, *cursor); 1761 1762 if ((*cursor + size) > *cursor_end) { 1763 range_seg_t *rs; 1764 1765 if (zfs_btree_numnodes(t) == 0) 1766 metaslab_size_tree_full_load(msp->ms_allocatable); 1767 rs = zfs_btree_last(t, NULL); 1768 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < 1769 size) 1770 return (-1ULL); 1771 1772 *cursor = rs_get_start(rs, rt); 1773 *cursor_end = rs_get_end(rs, rt); 1774 } 1775 1776 offset = *cursor; 1777 *cursor += size; 1778 1779 return (offset); 1780 } 1781 1782 const metaslab_ops_t zfs_metaslab_ops = { 1783 metaslab_cf_alloc 1784 }; 1785 #endif /* WITH_CF_BLOCK_ALLOCATOR */ 1786 1787 #if defined(WITH_NDF_BLOCK_ALLOCATOR) 1788 /* 1789 * ========================================================================== 1790 * New dynamic fit allocator - 1791 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1792 * contiguous blocks. If no region is found then just use the largest segment 1793 * that remains. 1794 * ========================================================================== 1795 */ 1796 1797 /* 1798 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1799 * to request from the allocator. 1800 */ 1801 uint64_t metaslab_ndf_clump_shift = 4; 1802 1803 static uint64_t 1804 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1805 { 1806 zfs_btree_t *t = &msp->ms_allocatable->rt_root; 1807 range_tree_t *rt = msp->ms_allocatable; 1808 zfs_btree_index_t where; 1809 range_seg_t *rs; 1810 range_seg_max_t rsearch; 1811 uint64_t hbit = highbit64(size); 1812 uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1813 uint64_t max_size = metaslab_largest_allocatable(msp); 1814 1815 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1816 1817 if (max_size < size) 1818 return (-1ULL); 1819 1820 rs_set_start(&rsearch, rt, *cursor); 1821 rs_set_end(&rsearch, rt, *cursor + size); 1822 1823 rs = zfs_btree_find(t, &rsearch, &where); 1824 if (rs == NULL || (rs_get_end(rs, rt) - rs_get_start(rs, rt)) < size) { 1825 t = &msp->ms_allocatable_by_size; 1826 1827 rs_set_start(&rsearch, rt, 0); 1828 rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + 1829 metaslab_ndf_clump_shift))); 1830 1831 rs = zfs_btree_find(t, &rsearch, &where); 1832 if (rs == NULL) 1833 rs = zfs_btree_next(t, &where, &where); 1834 ASSERT(rs != NULL); 1835 } 1836 1837 if ((rs_get_end(rs, rt) - rs_get_start(rs, rt)) >= size) { 1838 *cursor = rs_get_start(rs, rt) + size; 1839 return (rs_get_start(rs, rt)); 1840 } 1841 return (-1ULL); 1842 } 1843 1844 const metaslab_ops_t zfs_metaslab_ops = { 1845 metaslab_ndf_alloc 1846 }; 1847 #endif /* WITH_NDF_BLOCK_ALLOCATOR */ 1848 1849 1850 /* 1851 * ========================================================================== 1852 * Metaslabs 1853 * ========================================================================== 1854 */ 1855 1856 /* 1857 * Wait for any in-progress metaslab loads to complete. 1858 */ 1859 static void 1860 metaslab_load_wait(metaslab_t *msp) 1861 { 1862 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1863 1864 while (msp->ms_loading) { 1865 ASSERT(!msp->ms_loaded); 1866 cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1867 } 1868 } 1869 1870 /* 1871 * Wait for any in-progress flushing to complete. 1872 */ 1873 static void 1874 metaslab_flush_wait(metaslab_t *msp) 1875 { 1876 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1877 1878 while (msp->ms_flushing) 1879 cv_wait(&msp->ms_flush_cv, &msp->ms_lock); 1880 } 1881 1882 static unsigned int 1883 metaslab_idx_func(multilist_t *ml, void *arg) 1884 { 1885 metaslab_t *msp = arg; 1886 1887 /* 1888 * ms_id values are allocated sequentially, so full 64bit 1889 * division would be a waste of time, so limit it to 32 bits. 1890 */ 1891 return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml)); 1892 } 1893 1894 uint64_t 1895 metaslab_allocated_space(metaslab_t *msp) 1896 { 1897 return (msp->ms_allocated_space); 1898 } 1899 1900 /* 1901 * Verify that the space accounting on disk matches the in-core range_trees. 1902 */ 1903 static void 1904 metaslab_verify_space(metaslab_t *msp, uint64_t txg) 1905 { 1906 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1907 uint64_t allocating = 0; 1908 uint64_t sm_free_space, msp_free_space; 1909 1910 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1911 ASSERT(!msp->ms_condensing); 1912 1913 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 1914 return; 1915 1916 /* 1917 * We can only verify the metaslab space when we're called 1918 * from syncing context with a loaded metaslab that has an 1919 * allocated space map. Calling this in non-syncing context 1920 * does not provide a consistent view of the metaslab since 1921 * we're performing allocations in the future. 1922 */ 1923 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 1924 !msp->ms_loaded) 1925 return; 1926 1927 /* 1928 * Even though the smp_alloc field can get negative, 1929 * when it comes to a metaslab's space map, that should 1930 * never be the case. 1931 */ 1932 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); 1933 1934 ASSERT3U(space_map_allocated(msp->ms_sm), >=, 1935 range_tree_space(msp->ms_unflushed_frees)); 1936 1937 ASSERT3U(metaslab_allocated_space(msp), ==, 1938 space_map_allocated(msp->ms_sm) + 1939 range_tree_space(msp->ms_unflushed_allocs) - 1940 range_tree_space(msp->ms_unflushed_frees)); 1941 1942 sm_free_space = msp->ms_size - metaslab_allocated_space(msp); 1943 1944 /* 1945 * Account for future allocations since we would have 1946 * already deducted that space from the ms_allocatable. 1947 */ 1948 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 1949 allocating += 1950 range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]); 1951 } 1952 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, 1953 msp->ms_allocating_total); 1954 1955 ASSERT3U(msp->ms_deferspace, ==, 1956 range_tree_space(msp->ms_defer[0]) + 1957 range_tree_space(msp->ms_defer[1])); 1958 1959 msp_free_space = range_tree_space(msp->ms_allocatable) + allocating + 1960 msp->ms_deferspace + range_tree_space(msp->ms_freed); 1961 1962 VERIFY3U(sm_free_space, ==, msp_free_space); 1963 } 1964 1965 static void 1966 metaslab_aux_histograms_clear(metaslab_t *msp) 1967 { 1968 /* 1969 * Auxiliary histograms are only cleared when resetting them, 1970 * which can only happen while the metaslab is loaded. 1971 */ 1972 ASSERT(msp->ms_loaded); 1973 1974 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 1975 for (int t = 0; t < TXG_DEFER_SIZE; t++) 1976 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t])); 1977 } 1978 1979 static void 1980 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, 1981 range_tree_t *rt) 1982 { 1983 /* 1984 * This is modeled after space_map_histogram_add(), so refer to that 1985 * function for implementation details. We want this to work like 1986 * the space map histogram, and not the range tree histogram, as we 1987 * are essentially constructing a delta that will be later subtracted 1988 * from the space map histogram. 1989 */ 1990 int idx = 0; 1991 for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) { 1992 ASSERT3U(i, >=, idx + shift); 1993 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); 1994 1995 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { 1996 ASSERT3U(idx + shift, ==, i); 1997 idx++; 1998 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); 1999 } 2000 } 2001 } 2002 2003 /* 2004 * Called at every sync pass that the metaslab gets synced. 2005 * 2006 * The reason is that we want our auxiliary histograms to be updated 2007 * wherever the metaslab's space map histogram is updated. This way 2008 * we stay consistent on which parts of the metaslab space map's 2009 * histogram are currently not available for allocations (e.g because 2010 * they are in the defer, freed, and freeing trees). 2011 */ 2012 static void 2013 metaslab_aux_histograms_update(metaslab_t *msp) 2014 { 2015 space_map_t *sm = msp->ms_sm; 2016 ASSERT(sm != NULL); 2017 2018 /* 2019 * This is similar to the metaslab's space map histogram updates 2020 * that take place in metaslab_sync(). The only difference is that 2021 * we only care about segments that haven't made it into the 2022 * ms_allocatable tree yet. 2023 */ 2024 if (msp->ms_loaded) { 2025 metaslab_aux_histograms_clear(msp); 2026 2027 metaslab_aux_histogram_add(msp->ms_synchist, 2028 sm->sm_shift, msp->ms_freed); 2029 2030 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2031 metaslab_aux_histogram_add(msp->ms_deferhist[t], 2032 sm->sm_shift, msp->ms_defer[t]); 2033 } 2034 } 2035 2036 metaslab_aux_histogram_add(msp->ms_synchist, 2037 sm->sm_shift, msp->ms_freeing); 2038 } 2039 2040 /* 2041 * Called every time we are done syncing (writing to) the metaslab, 2042 * i.e. at the end of each sync pass. 2043 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] 2044 */ 2045 static void 2046 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) 2047 { 2048 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2049 space_map_t *sm = msp->ms_sm; 2050 2051 if (sm == NULL) { 2052 /* 2053 * We came here from metaslab_init() when creating/opening a 2054 * pool, looking at a metaslab that hasn't had any allocations 2055 * yet. 2056 */ 2057 return; 2058 } 2059 2060 /* 2061 * This is similar to the actions that we take for the ms_freed 2062 * and ms_defer trees in metaslab_sync_done(). 2063 */ 2064 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; 2065 if (defer_allowed) { 2066 memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist, 2067 sizeof (msp->ms_synchist)); 2068 } else { 2069 memset(msp->ms_deferhist[hist_index], 0, 2070 sizeof (msp->ms_deferhist[hist_index])); 2071 } 2072 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2073 } 2074 2075 /* 2076 * Ensure that the metaslab's weight and fragmentation are consistent 2077 * with the contents of the histogram (either the range tree's histogram 2078 * or the space map's depending whether the metaslab is loaded). 2079 */ 2080 static void 2081 metaslab_verify_weight_and_frag(metaslab_t *msp) 2082 { 2083 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2084 2085 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 2086 return; 2087 2088 /* 2089 * We can end up here from vdev_remove_complete(), in which case we 2090 * cannot do these assertions because we hold spa config locks and 2091 * thus we are not allowed to read from the DMU. 2092 * 2093 * We check if the metaslab group has been removed and if that's 2094 * the case we return immediately as that would mean that we are 2095 * here from the aforementioned code path. 2096 */ 2097 if (msp->ms_group == NULL) 2098 return; 2099 2100 /* 2101 * Devices being removed always return a weight of 0 and leave 2102 * fragmentation and ms_max_size as is - there is nothing for 2103 * us to verify here. 2104 */ 2105 vdev_t *vd = msp->ms_group->mg_vd; 2106 if (vd->vdev_removing) 2107 return; 2108 2109 /* 2110 * If the metaslab is dirty it probably means that we've done 2111 * some allocations or frees that have changed our histograms 2112 * and thus the weight. 2113 */ 2114 for (int t = 0; t < TXG_SIZE; t++) { 2115 if (txg_list_member(&vd->vdev_ms_list, msp, t)) 2116 return; 2117 } 2118 2119 /* 2120 * This verification checks that our in-memory state is consistent 2121 * with what's on disk. If the pool is read-only then there aren't 2122 * any changes and we just have the initially-loaded state. 2123 */ 2124 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) 2125 return; 2126 2127 /* some extra verification for in-core tree if you can */ 2128 if (msp->ms_loaded) { 2129 range_tree_stat_verify(msp->ms_allocatable); 2130 VERIFY(space_map_histogram_verify(msp->ms_sm, 2131 msp->ms_allocatable)); 2132 } 2133 2134 uint64_t weight = msp->ms_weight; 2135 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2136 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); 2137 uint64_t frag = msp->ms_fragmentation; 2138 uint64_t max_segsize = msp->ms_max_size; 2139 2140 msp->ms_weight = 0; 2141 msp->ms_fragmentation = 0; 2142 2143 /* 2144 * This function is used for verification purposes and thus should 2145 * not introduce any side-effects/mutations on the system's state. 2146 * 2147 * Regardless of whether metaslab_weight() thinks this metaslab 2148 * should be active or not, we want to ensure that the actual weight 2149 * (and therefore the value of ms_weight) would be the same if it 2150 * was to be recalculated at this point. 2151 * 2152 * In addition we set the nodirty flag so metaslab_weight() does 2153 * not dirty the metaslab for future TXGs (e.g. when trying to 2154 * force condensing to upgrade the metaslab spacemaps). 2155 */ 2156 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; 2157 2158 VERIFY3U(max_segsize, ==, msp->ms_max_size); 2159 2160 /* 2161 * If the weight type changed then there is no point in doing 2162 * verification. Revert fields to their original values. 2163 */ 2164 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) || 2165 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) { 2166 msp->ms_fragmentation = frag; 2167 msp->ms_weight = weight; 2168 return; 2169 } 2170 2171 VERIFY3U(msp->ms_fragmentation, ==, frag); 2172 VERIFY3U(msp->ms_weight, ==, weight); 2173 } 2174 2175 /* 2176 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from 2177 * this class that was used longest ago, and attempt to unload it. We don't 2178 * want to spend too much time in this loop to prevent performance 2179 * degradation, and we expect that most of the time this operation will 2180 * succeed. Between that and the normal unloading processing during txg sync, 2181 * we expect this to keep the metaslab memory usage under control. 2182 */ 2183 static void 2184 metaslab_potentially_evict(metaslab_class_t *mc) 2185 { 2186 #ifdef _KERNEL 2187 uint64_t allmem = arc_all_memory(); 2188 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2189 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache); 2190 uint_t tries = 0; 2191 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size && 2192 tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2; 2193 tries++) { 2194 unsigned int idx = multilist_get_random_index( 2195 &mc->mc_metaslab_txg_list); 2196 multilist_sublist_t *mls = 2197 multilist_sublist_lock(&mc->mc_metaslab_txg_list, idx); 2198 metaslab_t *msp = multilist_sublist_head(mls); 2199 multilist_sublist_unlock(mls); 2200 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 < 2201 inuse * size) { 2202 VERIFY3P(mls, ==, multilist_sublist_lock( 2203 &mc->mc_metaslab_txg_list, idx)); 2204 ASSERT3U(idx, ==, 2205 metaslab_idx_func(&mc->mc_metaslab_txg_list, msp)); 2206 2207 if (!multilist_link_active(&msp->ms_class_txg_node)) { 2208 multilist_sublist_unlock(mls); 2209 break; 2210 } 2211 metaslab_t *next_msp = multilist_sublist_next(mls, msp); 2212 multilist_sublist_unlock(mls); 2213 /* 2214 * If the metaslab is currently loading there are two 2215 * cases. If it's the metaslab we're evicting, we 2216 * can't continue on or we'll panic when we attempt to 2217 * recursively lock the mutex. If it's another 2218 * metaslab that's loading, it can be safely skipped, 2219 * since we know it's very new and therefore not a 2220 * good eviction candidate. We check later once the 2221 * lock is held that the metaslab is fully loaded 2222 * before actually unloading it. 2223 */ 2224 if (msp->ms_loading) { 2225 msp = next_msp; 2226 inuse = 2227 spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2228 continue; 2229 } 2230 /* 2231 * We can't unload metaslabs with no spacemap because 2232 * they're not ready to be unloaded yet. We can't 2233 * unload metaslabs with outstanding allocations 2234 * because doing so could cause the metaslab's weight 2235 * to decrease while it's unloaded, which violates an 2236 * invariant that we use to prevent unnecessary 2237 * loading. We also don't unload metaslabs that are 2238 * currently active because they are high-weight 2239 * metaslabs that are likely to be used in the near 2240 * future. 2241 */ 2242 mutex_enter(&msp->ms_lock); 2243 if (msp->ms_allocator == -1 && msp->ms_sm != NULL && 2244 msp->ms_allocating_total == 0) { 2245 metaslab_unload(msp); 2246 } 2247 mutex_exit(&msp->ms_lock); 2248 msp = next_msp; 2249 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2250 } 2251 } 2252 #else 2253 (void) mc, (void) zfs_metaslab_mem_limit; 2254 #endif 2255 } 2256 2257 static int 2258 metaslab_load_impl(metaslab_t *msp) 2259 { 2260 int error = 0; 2261 2262 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2263 ASSERT(msp->ms_loading); 2264 ASSERT(!msp->ms_condensing); 2265 2266 /* 2267 * We temporarily drop the lock to unblock other operations while we 2268 * are reading the space map. Therefore, metaslab_sync() and 2269 * metaslab_sync_done() can run at the same time as we do. 2270 * 2271 * If we are using the log space maps, metaslab_sync() can't write to 2272 * the metaslab's space map while we are loading as we only write to 2273 * it when we are flushing the metaslab, and that can't happen while 2274 * we are loading it. 2275 * 2276 * If we are not using log space maps though, metaslab_sync() can 2277 * append to the space map while we are loading. Therefore we load 2278 * only entries that existed when we started the load. Additionally, 2279 * metaslab_sync_done() has to wait for the load to complete because 2280 * there are potential races like metaslab_load() loading parts of the 2281 * space map that are currently being appended by metaslab_sync(). If 2282 * we didn't, the ms_allocatable would have entries that 2283 * metaslab_sync_done() would try to re-add later. 2284 * 2285 * That's why before dropping the lock we remember the synced length 2286 * of the metaslab and read up to that point of the space map, 2287 * ignoring entries appended by metaslab_sync() that happen after we 2288 * drop the lock. 2289 */ 2290 uint64_t length = msp->ms_synced_length; 2291 mutex_exit(&msp->ms_lock); 2292 2293 hrtime_t load_start = gethrtime(); 2294 metaslab_rt_arg_t *mrap; 2295 if (msp->ms_allocatable->rt_arg == NULL) { 2296 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2297 } else { 2298 mrap = msp->ms_allocatable->rt_arg; 2299 msp->ms_allocatable->rt_ops = NULL; 2300 msp->ms_allocatable->rt_arg = NULL; 2301 } 2302 mrap->mra_bt = &msp->ms_allocatable_by_size; 2303 mrap->mra_floor_shift = metaslab_by_size_min_shift; 2304 2305 if (msp->ms_sm != NULL) { 2306 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, 2307 SM_FREE, length); 2308 2309 /* Now, populate the size-sorted tree. */ 2310 metaslab_rt_create(msp->ms_allocatable, mrap); 2311 msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2312 msp->ms_allocatable->rt_arg = mrap; 2313 2314 struct mssa_arg arg = {0}; 2315 arg.rt = msp->ms_allocatable; 2316 arg.mra = mrap; 2317 range_tree_walk(msp->ms_allocatable, metaslab_size_sorted_add, 2318 &arg); 2319 } else { 2320 /* 2321 * Add the size-sorted tree first, since we don't need to load 2322 * the metaslab from the spacemap. 2323 */ 2324 metaslab_rt_create(msp->ms_allocatable, mrap); 2325 msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2326 msp->ms_allocatable->rt_arg = mrap; 2327 /* 2328 * The space map has not been allocated yet, so treat 2329 * all the space in the metaslab as free and add it to the 2330 * ms_allocatable tree. 2331 */ 2332 range_tree_add(msp->ms_allocatable, 2333 msp->ms_start, msp->ms_size); 2334 2335 if (msp->ms_new) { 2336 /* 2337 * If the ms_sm doesn't exist, this means that this 2338 * metaslab hasn't gone through metaslab_sync() and 2339 * thus has never been dirtied. So we shouldn't 2340 * expect any unflushed allocs or frees from previous 2341 * TXGs. 2342 */ 2343 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 2344 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 2345 } 2346 } 2347 2348 /* 2349 * We need to grab the ms_sync_lock to prevent metaslab_sync() from 2350 * changing the ms_sm (or log_sm) and the metaslab's range trees 2351 * while we are about to use them and populate the ms_allocatable. 2352 * The ms_lock is insufficient for this because metaslab_sync() doesn't 2353 * hold the ms_lock while writing the ms_checkpointing tree to disk. 2354 */ 2355 mutex_enter(&msp->ms_sync_lock); 2356 mutex_enter(&msp->ms_lock); 2357 2358 ASSERT(!msp->ms_condensing); 2359 ASSERT(!msp->ms_flushing); 2360 2361 if (error != 0) { 2362 mutex_exit(&msp->ms_sync_lock); 2363 return (error); 2364 } 2365 2366 ASSERT3P(msp->ms_group, !=, NULL); 2367 msp->ms_loaded = B_TRUE; 2368 2369 /* 2370 * Apply all the unflushed changes to ms_allocatable right 2371 * away so any manipulations we do below have a clear view 2372 * of what is allocated and what is free. 2373 */ 2374 range_tree_walk(msp->ms_unflushed_allocs, 2375 range_tree_remove, msp->ms_allocatable); 2376 range_tree_walk(msp->ms_unflushed_frees, 2377 range_tree_add, msp->ms_allocatable); 2378 2379 ASSERT3P(msp->ms_group, !=, NULL); 2380 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2381 if (spa_syncing_log_sm(spa) != NULL) { 2382 ASSERT(spa_feature_is_enabled(spa, 2383 SPA_FEATURE_LOG_SPACEMAP)); 2384 2385 /* 2386 * If we use a log space map we add all the segments 2387 * that are in ms_unflushed_frees so they are available 2388 * for allocation. 2389 * 2390 * ms_allocatable needs to contain all free segments 2391 * that are ready for allocations (thus not segments 2392 * from ms_freeing, ms_freed, and the ms_defer trees). 2393 * But if we grab the lock in this code path at a sync 2394 * pass later that 1, then it also contains the 2395 * segments of ms_freed (they were added to it earlier 2396 * in this path through ms_unflushed_frees). So we 2397 * need to remove all the segments that exist in 2398 * ms_freed from ms_allocatable as they will be added 2399 * later in metaslab_sync_done(). 2400 * 2401 * When there's no log space map, the ms_allocatable 2402 * correctly doesn't contain any segments that exist 2403 * in ms_freed [see ms_synced_length]. 2404 */ 2405 range_tree_walk(msp->ms_freed, 2406 range_tree_remove, msp->ms_allocatable); 2407 } 2408 2409 /* 2410 * If we are not using the log space map, ms_allocatable 2411 * contains the segments that exist in the ms_defer trees 2412 * [see ms_synced_length]. Thus we need to remove them 2413 * from ms_allocatable as they will be added again in 2414 * metaslab_sync_done(). 2415 * 2416 * If we are using the log space map, ms_allocatable still 2417 * contains the segments that exist in the ms_defer trees. 2418 * Not because it read them through the ms_sm though. But 2419 * because these segments are part of ms_unflushed_frees 2420 * whose segments we add to ms_allocatable earlier in this 2421 * code path. 2422 */ 2423 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2424 range_tree_walk(msp->ms_defer[t], 2425 range_tree_remove, msp->ms_allocatable); 2426 } 2427 2428 /* 2429 * Call metaslab_recalculate_weight_and_sort() now that the 2430 * metaslab is loaded so we get the metaslab's real weight. 2431 * 2432 * Unless this metaslab was created with older software and 2433 * has not yet been converted to use segment-based weight, we 2434 * expect the new weight to be better or equal to the weight 2435 * that the metaslab had while it was not loaded. This is 2436 * because the old weight does not take into account the 2437 * consolidation of adjacent segments between TXGs. [see 2438 * comment for ms_synchist and ms_deferhist[] for more info] 2439 */ 2440 uint64_t weight = msp->ms_weight; 2441 uint64_t max_size = msp->ms_max_size; 2442 metaslab_recalculate_weight_and_sort(msp); 2443 if (!WEIGHT_IS_SPACEBASED(weight)) 2444 ASSERT3U(weight, <=, msp->ms_weight); 2445 msp->ms_max_size = metaslab_largest_allocatable(msp); 2446 ASSERT3U(max_size, <=, msp->ms_max_size); 2447 hrtime_t load_end = gethrtime(); 2448 msp->ms_load_time = load_end; 2449 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " 2450 "ms_id %llu, smp_length %llu, " 2451 "unflushed_allocs %llu, unflushed_frees %llu, " 2452 "freed %llu, defer %llu + %llu, unloaded time %llu ms, " 2453 "loading_time %lld ms, ms_max_size %llu, " 2454 "max size error %lld, " 2455 "old_weight %llx, new_weight %llx", 2456 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 2457 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 2458 (u_longlong_t)msp->ms_id, 2459 (u_longlong_t)space_map_length(msp->ms_sm), 2460 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 2461 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 2462 (u_longlong_t)range_tree_space(msp->ms_freed), 2463 (u_longlong_t)range_tree_space(msp->ms_defer[0]), 2464 (u_longlong_t)range_tree_space(msp->ms_defer[1]), 2465 (longlong_t)((load_start - msp->ms_unload_time) / 1000000), 2466 (longlong_t)((load_end - load_start) / 1000000), 2467 (u_longlong_t)msp->ms_max_size, 2468 (u_longlong_t)msp->ms_max_size - max_size, 2469 (u_longlong_t)weight, (u_longlong_t)msp->ms_weight); 2470 2471 metaslab_verify_space(msp, spa_syncing_txg(spa)); 2472 mutex_exit(&msp->ms_sync_lock); 2473 return (0); 2474 } 2475 2476 int 2477 metaslab_load(metaslab_t *msp) 2478 { 2479 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2480 2481 /* 2482 * There may be another thread loading the same metaslab, if that's 2483 * the case just wait until the other thread is done and return. 2484 */ 2485 metaslab_load_wait(msp); 2486 if (msp->ms_loaded) 2487 return (0); 2488 VERIFY(!msp->ms_loading); 2489 ASSERT(!msp->ms_condensing); 2490 2491 /* 2492 * We set the loading flag BEFORE potentially dropping the lock to 2493 * wait for an ongoing flush (see ms_flushing below). This way other 2494 * threads know that there is already a thread that is loading this 2495 * metaslab. 2496 */ 2497 msp->ms_loading = B_TRUE; 2498 2499 /* 2500 * Wait for any in-progress flushing to finish as we drop the ms_lock 2501 * both here (during space_map_load()) and in metaslab_flush() (when 2502 * we flush our changes to the ms_sm). 2503 */ 2504 if (msp->ms_flushing) 2505 metaslab_flush_wait(msp); 2506 2507 /* 2508 * In the possibility that we were waiting for the metaslab to be 2509 * flushed (where we temporarily dropped the ms_lock), ensure that 2510 * no one else loaded the metaslab somehow. 2511 */ 2512 ASSERT(!msp->ms_loaded); 2513 2514 /* 2515 * If we're loading a metaslab in the normal class, consider evicting 2516 * another one to keep our memory usage under the limit defined by the 2517 * zfs_metaslab_mem_limit tunable. 2518 */ 2519 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == 2520 msp->ms_group->mg_class) { 2521 metaslab_potentially_evict(msp->ms_group->mg_class); 2522 } 2523 2524 int error = metaslab_load_impl(msp); 2525 2526 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2527 msp->ms_loading = B_FALSE; 2528 cv_broadcast(&msp->ms_load_cv); 2529 2530 return (error); 2531 } 2532 2533 void 2534 metaslab_unload(metaslab_t *msp) 2535 { 2536 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2537 2538 /* 2539 * This can happen if a metaslab is selected for eviction (in 2540 * metaslab_potentially_evict) and then unloaded during spa_sync (via 2541 * metaslab_class_evict_old). 2542 */ 2543 if (!msp->ms_loaded) 2544 return; 2545 2546 range_tree_vacate(msp->ms_allocatable, NULL, NULL); 2547 msp->ms_loaded = B_FALSE; 2548 msp->ms_unload_time = gethrtime(); 2549 2550 msp->ms_activation_weight = 0; 2551 msp->ms_weight &= ~METASLAB_ACTIVE_MASK; 2552 2553 if (msp->ms_group != NULL) { 2554 metaslab_class_t *mc = msp->ms_group->mg_class; 2555 multilist_sublist_t *mls = 2556 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2557 if (multilist_link_active(&msp->ms_class_txg_node)) 2558 multilist_sublist_remove(mls, msp); 2559 multilist_sublist_unlock(mls); 2560 2561 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2562 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, " 2563 "ms_id %llu, weight %llx, " 2564 "selected txg %llu (%llu ms ago), alloc_txg %llu, " 2565 "loaded %llu ms ago, max_size %llu", 2566 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 2567 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 2568 (u_longlong_t)msp->ms_id, 2569 (u_longlong_t)msp->ms_weight, 2570 (u_longlong_t)msp->ms_selected_txg, 2571 (u_longlong_t)(msp->ms_unload_time - 2572 msp->ms_selected_time) / 1000 / 1000, 2573 (u_longlong_t)msp->ms_alloc_txg, 2574 (u_longlong_t)(msp->ms_unload_time - 2575 msp->ms_load_time) / 1000 / 1000, 2576 (u_longlong_t)msp->ms_max_size); 2577 } 2578 2579 /* 2580 * We explicitly recalculate the metaslab's weight based on its space 2581 * map (as it is now not loaded). We want unload metaslabs to always 2582 * have their weights calculated from the space map histograms, while 2583 * loaded ones have it calculated from their in-core range tree 2584 * [see metaslab_load()]. This way, the weight reflects the information 2585 * available in-core, whether it is loaded or not. 2586 * 2587 * If ms_group == NULL means that we came here from metaslab_fini(), 2588 * at which point it doesn't make sense for us to do the recalculation 2589 * and the sorting. 2590 */ 2591 if (msp->ms_group != NULL) 2592 metaslab_recalculate_weight_and_sort(msp); 2593 } 2594 2595 /* 2596 * We want to optimize the memory use of the per-metaslab range 2597 * trees. To do this, we store the segments in the range trees in 2598 * units of sectors, zero-indexing from the start of the metaslab. If 2599 * the vdev_ms_shift - the vdev_ashift is less than 32, we can store 2600 * the ranges using two uint32_ts, rather than two uint64_ts. 2601 */ 2602 range_seg_type_t 2603 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, 2604 uint64_t *start, uint64_t *shift) 2605 { 2606 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && 2607 !zfs_metaslab_force_large_segs) { 2608 *shift = vdev->vdev_ashift; 2609 *start = msp->ms_start; 2610 return (RANGE_SEG32); 2611 } else { 2612 *shift = 0; 2613 *start = 0; 2614 return (RANGE_SEG64); 2615 } 2616 } 2617 2618 void 2619 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) 2620 { 2621 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2622 metaslab_class_t *mc = msp->ms_group->mg_class; 2623 multilist_sublist_t *mls = 2624 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2625 if (multilist_link_active(&msp->ms_class_txg_node)) 2626 multilist_sublist_remove(mls, msp); 2627 msp->ms_selected_txg = txg; 2628 msp->ms_selected_time = gethrtime(); 2629 multilist_sublist_insert_tail(mls, msp); 2630 multilist_sublist_unlock(mls); 2631 } 2632 2633 void 2634 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, 2635 int64_t defer_delta, int64_t space_delta) 2636 { 2637 vdev_space_update(vd, alloc_delta, defer_delta, space_delta); 2638 2639 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); 2640 ASSERT(vd->vdev_ms_count != 0); 2641 2642 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, 2643 vdev_deflated_space(vd, space_delta)); 2644 } 2645 2646 int 2647 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, 2648 uint64_t txg, metaslab_t **msp) 2649 { 2650 vdev_t *vd = mg->mg_vd; 2651 spa_t *spa = vd->vdev_spa; 2652 objset_t *mos = spa->spa_meta_objset; 2653 metaslab_t *ms; 2654 int error; 2655 2656 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 2657 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 2658 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); 2659 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 2660 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); 2661 multilist_link_init(&ms->ms_class_txg_node); 2662 2663 ms->ms_id = id; 2664 ms->ms_start = id << vd->vdev_ms_shift; 2665 ms->ms_size = 1ULL << vd->vdev_ms_shift; 2666 ms->ms_allocator = -1; 2667 ms->ms_new = B_TRUE; 2668 2669 vdev_ops_t *ops = vd->vdev_ops; 2670 if (ops->vdev_op_metaslab_init != NULL) 2671 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size); 2672 2673 /* 2674 * We only open space map objects that already exist. All others 2675 * will be opened when we finally allocate an object for it. For 2676 * readonly pools there is no need to open the space map object. 2677 * 2678 * Note: 2679 * When called from vdev_expand(), we can't call into the DMU as 2680 * we are holding the spa_config_lock as a writer and we would 2681 * deadlock [see relevant comment in vdev_metaslab_init()]. in 2682 * that case, the object parameter is zero though, so we won't 2683 * call into the DMU. 2684 */ 2685 if (object != 0 && !(spa->spa_mode == SPA_MODE_READ && 2686 !spa->spa_read_spacemaps)) { 2687 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, 2688 ms->ms_size, vd->vdev_ashift); 2689 2690 if (error != 0) { 2691 kmem_free(ms, sizeof (metaslab_t)); 2692 return (error); 2693 } 2694 2695 ASSERT(ms->ms_sm != NULL); 2696 ms->ms_allocated_space = space_map_allocated(ms->ms_sm); 2697 } 2698 2699 uint64_t shift, start; 2700 range_seg_type_t type = 2701 metaslab_calculate_range_tree_type(vd, ms, &start, &shift); 2702 2703 ms->ms_allocatable = range_tree_create(NULL, type, NULL, start, shift); 2704 for (int t = 0; t < TXG_SIZE; t++) { 2705 ms->ms_allocating[t] = range_tree_create(NULL, type, 2706 NULL, start, shift); 2707 } 2708 ms->ms_freeing = range_tree_create(NULL, type, NULL, start, shift); 2709 ms->ms_freed = range_tree_create(NULL, type, NULL, start, shift); 2710 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2711 ms->ms_defer[t] = range_tree_create(NULL, type, NULL, 2712 start, shift); 2713 } 2714 ms->ms_checkpointing = 2715 range_tree_create(NULL, type, NULL, start, shift); 2716 ms->ms_unflushed_allocs = 2717 range_tree_create(NULL, type, NULL, start, shift); 2718 2719 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2720 mrap->mra_bt = &ms->ms_unflushed_frees_by_size; 2721 mrap->mra_floor_shift = metaslab_by_size_min_shift; 2722 ms->ms_unflushed_frees = range_tree_create(&metaslab_rt_ops, 2723 type, mrap, start, shift); 2724 2725 ms->ms_trim = range_tree_create(NULL, type, NULL, start, shift); 2726 2727 metaslab_group_add(mg, ms); 2728 metaslab_set_fragmentation(ms, B_FALSE); 2729 2730 /* 2731 * If we're opening an existing pool (txg == 0) or creating 2732 * a new one (txg == TXG_INITIAL), all space is available now. 2733 * If we're adding space to an existing pool, the new space 2734 * does not become available until after this txg has synced. 2735 * The metaslab's weight will also be initialized when we sync 2736 * out this txg. This ensures that we don't attempt to allocate 2737 * from it before we have initialized it completely. 2738 */ 2739 if (txg <= TXG_INITIAL) { 2740 metaslab_sync_done(ms, 0); 2741 metaslab_space_update(vd, mg->mg_class, 2742 metaslab_allocated_space(ms), 0, 0); 2743 } 2744 2745 if (txg != 0) { 2746 vdev_dirty(vd, 0, NULL, txg); 2747 vdev_dirty(vd, VDD_METASLAB, ms, txg); 2748 } 2749 2750 *msp = ms; 2751 2752 return (0); 2753 } 2754 2755 static void 2756 metaslab_fini_flush_data(metaslab_t *msp) 2757 { 2758 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2759 2760 if (metaslab_unflushed_txg(msp) == 0) { 2761 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), 2762 ==, NULL); 2763 return; 2764 } 2765 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 2766 2767 mutex_enter(&spa->spa_flushed_ms_lock); 2768 avl_remove(&spa->spa_metaslabs_by_flushed, msp); 2769 mutex_exit(&spa->spa_flushed_ms_lock); 2770 2771 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2772 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp), 2773 metaslab_unflushed_dirty(msp)); 2774 } 2775 2776 uint64_t 2777 metaslab_unflushed_changes_memused(metaslab_t *ms) 2778 { 2779 return ((range_tree_numsegs(ms->ms_unflushed_allocs) + 2780 range_tree_numsegs(ms->ms_unflushed_frees)) * 2781 ms->ms_unflushed_allocs->rt_root.bt_elem_size); 2782 } 2783 2784 void 2785 metaslab_fini(metaslab_t *msp) 2786 { 2787 metaslab_group_t *mg = msp->ms_group; 2788 vdev_t *vd = mg->mg_vd; 2789 spa_t *spa = vd->vdev_spa; 2790 2791 metaslab_fini_flush_data(msp); 2792 2793 metaslab_group_remove(mg, msp); 2794 2795 mutex_enter(&msp->ms_lock); 2796 VERIFY(msp->ms_group == NULL); 2797 2798 /* 2799 * If this metaslab hasn't been through metaslab_sync_done() yet its 2800 * space hasn't been accounted for in its vdev and doesn't need to be 2801 * subtracted. 2802 */ 2803 if (!msp->ms_new) { 2804 metaslab_space_update(vd, mg->mg_class, 2805 -metaslab_allocated_space(msp), 0, -msp->ms_size); 2806 2807 } 2808 space_map_close(msp->ms_sm); 2809 msp->ms_sm = NULL; 2810 2811 metaslab_unload(msp); 2812 2813 range_tree_destroy(msp->ms_allocatable); 2814 range_tree_destroy(msp->ms_freeing); 2815 range_tree_destroy(msp->ms_freed); 2816 2817 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 2818 metaslab_unflushed_changes_memused(msp)); 2819 spa->spa_unflushed_stats.sus_memused -= 2820 metaslab_unflushed_changes_memused(msp); 2821 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 2822 range_tree_destroy(msp->ms_unflushed_allocs); 2823 range_tree_destroy(msp->ms_checkpointing); 2824 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 2825 range_tree_destroy(msp->ms_unflushed_frees); 2826 2827 for (int t = 0; t < TXG_SIZE; t++) { 2828 range_tree_destroy(msp->ms_allocating[t]); 2829 } 2830 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2831 range_tree_destroy(msp->ms_defer[t]); 2832 } 2833 ASSERT0(msp->ms_deferspace); 2834 2835 for (int t = 0; t < TXG_SIZE; t++) 2836 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 2837 2838 range_tree_vacate(msp->ms_trim, NULL, NULL); 2839 range_tree_destroy(msp->ms_trim); 2840 2841 mutex_exit(&msp->ms_lock); 2842 cv_destroy(&msp->ms_load_cv); 2843 cv_destroy(&msp->ms_flush_cv); 2844 mutex_destroy(&msp->ms_lock); 2845 mutex_destroy(&msp->ms_sync_lock); 2846 ASSERT3U(msp->ms_allocator, ==, -1); 2847 2848 kmem_free(msp, sizeof (metaslab_t)); 2849 } 2850 2851 #define FRAGMENTATION_TABLE_SIZE 17 2852 2853 /* 2854 * This table defines a segment size based fragmentation metric that will 2855 * allow each metaslab to derive its own fragmentation value. This is done 2856 * by calculating the space in each bucket of the spacemap histogram and 2857 * multiplying that by the fragmentation metric in this table. Doing 2858 * this for all buckets and dividing it by the total amount of free 2859 * space in this metaslab (i.e. the total free space in all buckets) gives 2860 * us the fragmentation metric. This means that a high fragmentation metric 2861 * equates to most of the free space being comprised of small segments. 2862 * Conversely, if the metric is low, then most of the free space is in 2863 * large segments. A 10% change in fragmentation equates to approximately 2864 * double the number of segments. 2865 * 2866 * This table defines 0% fragmented space using 16MB segments. Testing has 2867 * shown that segments that are greater than or equal to 16MB do not suffer 2868 * from drastic performance problems. Using this value, we derive the rest 2869 * of the table. Since the fragmentation value is never stored on disk, it 2870 * is possible to change these calculations in the future. 2871 */ 2872 static const int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 2873 100, /* 512B */ 2874 100, /* 1K */ 2875 98, /* 2K */ 2876 95, /* 4K */ 2877 90, /* 8K */ 2878 80, /* 16K */ 2879 70, /* 32K */ 2880 60, /* 64K */ 2881 50, /* 128K */ 2882 40, /* 256K */ 2883 30, /* 512K */ 2884 20, /* 1M */ 2885 15, /* 2M */ 2886 10, /* 4M */ 2887 5, /* 8M */ 2888 0 /* 16M */ 2889 }; 2890 2891 /* 2892 * Calculate the metaslab's fragmentation metric and set ms_fragmentation. 2893 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not 2894 * been upgraded and does not support this metric. Otherwise, the return 2895 * value should be in the range [0, 100]. 2896 */ 2897 static void 2898 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty) 2899 { 2900 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2901 uint64_t fragmentation = 0; 2902 uint64_t total = 0; 2903 boolean_t feature_enabled = spa_feature_is_enabled(spa, 2904 SPA_FEATURE_SPACEMAP_HISTOGRAM); 2905 2906 if (!feature_enabled) { 2907 msp->ms_fragmentation = ZFS_FRAG_INVALID; 2908 return; 2909 } 2910 2911 /* 2912 * A null space map means that the entire metaslab is free 2913 * and thus is not fragmented. 2914 */ 2915 if (msp->ms_sm == NULL) { 2916 msp->ms_fragmentation = 0; 2917 return; 2918 } 2919 2920 /* 2921 * If this metaslab's space map has not been upgraded, flag it 2922 * so that we upgrade next time we encounter it. 2923 */ 2924 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 2925 uint64_t txg = spa_syncing_txg(spa); 2926 vdev_t *vd = msp->ms_group->mg_vd; 2927 2928 /* 2929 * If we've reached the final dirty txg, then we must 2930 * be shutting down the pool. We don't want to dirty 2931 * any data past this point so skip setting the condense 2932 * flag. We can retry this action the next time the pool 2933 * is imported. We also skip marking this metaslab for 2934 * condensing if the caller has explicitly set nodirty. 2935 */ 2936 if (!nodirty && 2937 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 2938 msp->ms_condense_wanted = B_TRUE; 2939 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2940 zfs_dbgmsg("txg %llu, requesting force condense: " 2941 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg, 2942 (u_longlong_t)msp->ms_id, 2943 (u_longlong_t)vd->vdev_id); 2944 } 2945 msp->ms_fragmentation = ZFS_FRAG_INVALID; 2946 return; 2947 } 2948 2949 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2950 uint64_t space = 0; 2951 uint8_t shift = msp->ms_sm->sm_shift; 2952 2953 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 2954 FRAGMENTATION_TABLE_SIZE - 1); 2955 2956 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 2957 continue; 2958 2959 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 2960 total += space; 2961 2962 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 2963 fragmentation += space * zfs_frag_table[idx]; 2964 } 2965 2966 if (total > 0) 2967 fragmentation /= total; 2968 ASSERT3U(fragmentation, <=, 100); 2969 2970 msp->ms_fragmentation = fragmentation; 2971 } 2972 2973 /* 2974 * Compute a weight -- a selection preference value -- for the given metaslab. 2975 * This is based on the amount of free space, the level of fragmentation, 2976 * the LBA range, and whether the metaslab is loaded. 2977 */ 2978 static uint64_t 2979 metaslab_space_weight(metaslab_t *msp) 2980 { 2981 metaslab_group_t *mg = msp->ms_group; 2982 vdev_t *vd = mg->mg_vd; 2983 uint64_t weight, space; 2984 2985 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2986 2987 /* 2988 * The baseline weight is the metaslab's free space. 2989 */ 2990 space = msp->ms_size - metaslab_allocated_space(msp); 2991 2992 if (metaslab_fragmentation_factor_enabled && 2993 msp->ms_fragmentation != ZFS_FRAG_INVALID) { 2994 /* 2995 * Use the fragmentation information to inversely scale 2996 * down the baseline weight. We need to ensure that we 2997 * don't exclude this metaslab completely when it's 100% 2998 * fragmented. To avoid this we reduce the fragmented value 2999 * by 1. 3000 */ 3001 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 3002 3003 /* 3004 * If space < SPA_MINBLOCKSIZE, then we will not allocate from 3005 * this metaslab again. The fragmentation metric may have 3006 * decreased the space to something smaller than 3007 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 3008 * so that we can consume any remaining space. 3009 */ 3010 if (space > 0 && space < SPA_MINBLOCKSIZE) 3011 space = SPA_MINBLOCKSIZE; 3012 } 3013 weight = space; 3014 3015 /* 3016 * Modern disks have uniform bit density and constant angular velocity. 3017 * Therefore, the outer recording zones are faster (higher bandwidth) 3018 * than the inner zones by the ratio of outer to inner track diameter, 3019 * which is typically around 2:1. We account for this by assigning 3020 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). 3021 * In effect, this means that we'll select the metaslab with the most 3022 * free bandwidth rather than simply the one with the most free space. 3023 */ 3024 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { 3025 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; 3026 ASSERT(weight >= space && weight <= 2 * space); 3027 } 3028 3029 /* 3030 * If this metaslab is one we're actively using, adjust its 3031 * weight to make it preferable to any inactive metaslab so 3032 * we'll polish it off. If the fragmentation on this metaslab 3033 * has exceed our threshold, then don't mark it active. 3034 */ 3035 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && 3036 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { 3037 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); 3038 } 3039 3040 WEIGHT_SET_SPACEBASED(weight); 3041 return (weight); 3042 } 3043 3044 /* 3045 * Return the weight of the specified metaslab, according to the segment-based 3046 * weighting algorithm. The metaslab must be loaded. This function can 3047 * be called within a sync pass since it relies only on the metaslab's 3048 * range tree which is always accurate when the metaslab is loaded. 3049 */ 3050 static uint64_t 3051 metaslab_weight_from_range_tree(metaslab_t *msp) 3052 { 3053 uint64_t weight = 0; 3054 uint32_t segments = 0; 3055 3056 ASSERT(msp->ms_loaded); 3057 3058 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; 3059 i--) { 3060 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; 3061 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3062 3063 segments <<= 1; 3064 segments += msp->ms_allocatable->rt_histogram[i]; 3065 3066 /* 3067 * The range tree provides more precision than the space map 3068 * and must be downgraded so that all values fit within the 3069 * space map's histogram. This allows us to compare loaded 3070 * vs. unloaded metaslabs to determine which metaslab is 3071 * considered "best". 3072 */ 3073 if (i > max_idx) 3074 continue; 3075 3076 if (segments != 0) { 3077 WEIGHT_SET_COUNT(weight, segments); 3078 WEIGHT_SET_INDEX(weight, i); 3079 WEIGHT_SET_ACTIVE(weight, 0); 3080 break; 3081 } 3082 } 3083 return (weight); 3084 } 3085 3086 /* 3087 * Calculate the weight based on the on-disk histogram. Should be applied 3088 * only to unloaded metaslabs (i.e no incoming allocations) in-order to 3089 * give results consistent with the on-disk state 3090 */ 3091 static uint64_t 3092 metaslab_weight_from_spacemap(metaslab_t *msp) 3093 { 3094 space_map_t *sm = msp->ms_sm; 3095 ASSERT(!msp->ms_loaded); 3096 ASSERT(sm != NULL); 3097 ASSERT3U(space_map_object(sm), !=, 0); 3098 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3099 3100 /* 3101 * Create a joint histogram from all the segments that have made 3102 * it to the metaslab's space map histogram, that are not yet 3103 * available for allocation because they are still in the freeing 3104 * pipeline (e.g. freeing, freed, and defer trees). Then subtract 3105 * these segments from the space map's histogram to get a more 3106 * accurate weight. 3107 */ 3108 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 3109 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 3110 deferspace_histogram[i] += msp->ms_synchist[i]; 3111 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3112 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3113 deferspace_histogram[i] += msp->ms_deferhist[t][i]; 3114 } 3115 } 3116 3117 uint64_t weight = 0; 3118 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 3119 ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 3120 deferspace_histogram[i]); 3121 uint64_t count = 3122 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 3123 if (count != 0) { 3124 WEIGHT_SET_COUNT(weight, count); 3125 WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 3126 WEIGHT_SET_ACTIVE(weight, 0); 3127 break; 3128 } 3129 } 3130 return (weight); 3131 } 3132 3133 /* 3134 * Compute a segment-based weight for the specified metaslab. The weight 3135 * is determined by highest bucket in the histogram. The information 3136 * for the highest bucket is encoded into the weight value. 3137 */ 3138 static uint64_t 3139 metaslab_segment_weight(metaslab_t *msp) 3140 { 3141 metaslab_group_t *mg = msp->ms_group; 3142 uint64_t weight = 0; 3143 uint8_t shift = mg->mg_vd->vdev_ashift; 3144 3145 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3146 3147 /* 3148 * The metaslab is completely free. 3149 */ 3150 if (metaslab_allocated_space(msp) == 0) { 3151 int idx = highbit64(msp->ms_size) - 1; 3152 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3153 3154 if (idx < max_idx) { 3155 WEIGHT_SET_COUNT(weight, 1ULL); 3156 WEIGHT_SET_INDEX(weight, idx); 3157 } else { 3158 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 3159 WEIGHT_SET_INDEX(weight, max_idx); 3160 } 3161 WEIGHT_SET_ACTIVE(weight, 0); 3162 ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 3163 return (weight); 3164 } 3165 3166 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3167 3168 /* 3169 * If the metaslab is fully allocated then just make the weight 0. 3170 */ 3171 if (metaslab_allocated_space(msp) == msp->ms_size) 3172 return (0); 3173 /* 3174 * If the metaslab is already loaded, then use the range tree to 3175 * determine the weight. Otherwise, we rely on the space map information 3176 * to generate the weight. 3177 */ 3178 if (msp->ms_loaded) { 3179 weight = metaslab_weight_from_range_tree(msp); 3180 } else { 3181 weight = metaslab_weight_from_spacemap(msp); 3182 } 3183 3184 /* 3185 * If the metaslab was active the last time we calculated its weight 3186 * then keep it active. We want to consume the entire region that 3187 * is associated with this weight. 3188 */ 3189 if (msp->ms_activation_weight != 0 && weight != 0) 3190 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 3191 return (weight); 3192 } 3193 3194 /* 3195 * Determine if we should attempt to allocate from this metaslab. If the 3196 * metaslab is loaded, then we can determine if the desired allocation 3197 * can be satisfied by looking at the size of the maximum free segment 3198 * on that metaslab. Otherwise, we make our decision based on the metaslab's 3199 * weight. For segment-based weighting we can determine the maximum 3200 * allocation based on the index encoded in its value. For space-based 3201 * weights we rely on the entire weight (excluding the weight-type bit). 3202 */ 3203 static boolean_t 3204 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) 3205 { 3206 /* 3207 * If the metaslab is loaded, ms_max_size is definitive and we can use 3208 * the fast check. If it's not, the ms_max_size is a lower bound (once 3209 * set), and we should use the fast check as long as we're not in 3210 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec 3211 * seconds since the metaslab was unloaded. 3212 */ 3213 if (msp->ms_loaded || 3214 (msp->ms_max_size != 0 && !try_hard && gethrtime() < 3215 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) 3216 return (msp->ms_max_size >= asize); 3217 3218 boolean_t should_allocate; 3219 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3220 /* 3221 * The metaslab segment weight indicates segments in the 3222 * range [2^i, 2^(i+1)), where i is the index in the weight. 3223 * Since the asize might be in the middle of the range, we 3224 * should attempt the allocation if asize < 2^(i+1). 3225 */ 3226 should_allocate = (asize < 3227 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 3228 } else { 3229 should_allocate = (asize <= 3230 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 3231 } 3232 3233 return (should_allocate); 3234 } 3235 3236 static uint64_t 3237 metaslab_weight(metaslab_t *msp, boolean_t nodirty) 3238 { 3239 vdev_t *vd = msp->ms_group->mg_vd; 3240 spa_t *spa = vd->vdev_spa; 3241 uint64_t weight; 3242 3243 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3244 3245 metaslab_set_fragmentation(msp, nodirty); 3246 3247 /* 3248 * Update the maximum size. If the metaslab is loaded, this will 3249 * ensure that we get an accurate maximum size if newly freed space 3250 * has been added back into the free tree. If the metaslab is 3251 * unloaded, we check if there's a larger free segment in the 3252 * unflushed frees. This is a lower bound on the largest allocatable 3253 * segment size. Coalescing of adjacent entries may reveal larger 3254 * allocatable segments, but we aren't aware of those until loading 3255 * the space map into a range tree. 3256 */ 3257 if (msp->ms_loaded) { 3258 msp->ms_max_size = metaslab_largest_allocatable(msp); 3259 } else { 3260 msp->ms_max_size = MAX(msp->ms_max_size, 3261 metaslab_largest_unflushed_free(msp)); 3262 } 3263 3264 /* 3265 * Segment-based weighting requires space map histogram support. 3266 */ 3267 if (zfs_metaslab_segment_weight_enabled && 3268 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 3269 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 3270 sizeof (space_map_phys_t))) { 3271 weight = metaslab_segment_weight(msp); 3272 } else { 3273 weight = metaslab_space_weight(msp); 3274 } 3275 return (weight); 3276 } 3277 3278 void 3279 metaslab_recalculate_weight_and_sort(metaslab_t *msp) 3280 { 3281 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3282 3283 /* note: we preserve the mask (e.g. indication of primary, etc..) */ 3284 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 3285 metaslab_group_sort(msp->ms_group, msp, 3286 metaslab_weight(msp, B_FALSE) | was_active); 3287 } 3288 3289 static int 3290 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3291 int allocator, uint64_t activation_weight) 3292 { 3293 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 3294 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3295 3296 /* 3297 * If we're activating for the claim code, we don't want to actually 3298 * set the metaslab up for a specific allocator. 3299 */ 3300 if (activation_weight == METASLAB_WEIGHT_CLAIM) { 3301 ASSERT0(msp->ms_activation_weight); 3302 msp->ms_activation_weight = msp->ms_weight; 3303 metaslab_group_sort(mg, msp, msp->ms_weight | 3304 activation_weight); 3305 return (0); 3306 } 3307 3308 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ? 3309 &mga->mga_primary : &mga->mga_secondary); 3310 3311 mutex_enter(&mg->mg_lock); 3312 if (*mspp != NULL) { 3313 mutex_exit(&mg->mg_lock); 3314 return (EEXIST); 3315 } 3316 3317 *mspp = msp; 3318 ASSERT3S(msp->ms_allocator, ==, -1); 3319 msp->ms_allocator = allocator; 3320 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); 3321 3322 ASSERT0(msp->ms_activation_weight); 3323 msp->ms_activation_weight = msp->ms_weight; 3324 metaslab_group_sort_impl(mg, msp, 3325 msp->ms_weight | activation_weight); 3326 mutex_exit(&mg->mg_lock); 3327 3328 return (0); 3329 } 3330 3331 static int 3332 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) 3333 { 3334 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3335 3336 /* 3337 * The current metaslab is already activated for us so there 3338 * is nothing to do. Already activated though, doesn't mean 3339 * that this metaslab is activated for our allocator nor our 3340 * requested activation weight. The metaslab could have started 3341 * as an active one for our allocator but changed allocators 3342 * while we were waiting to grab its ms_lock or we stole it 3343 * [see find_valid_metaslab()]. This means that there is a 3344 * possibility of passivating a metaslab of another allocator 3345 * or from a different activation mask, from this thread. 3346 */ 3347 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3348 ASSERT(msp->ms_loaded); 3349 return (0); 3350 } 3351 3352 int error = metaslab_load(msp); 3353 if (error != 0) { 3354 metaslab_group_sort(msp->ms_group, msp, 0); 3355 return (error); 3356 } 3357 3358 /* 3359 * When entering metaslab_load() we may have dropped the 3360 * ms_lock because we were loading this metaslab, or we 3361 * were waiting for another thread to load it for us. In 3362 * that scenario, we recheck the weight of the metaslab 3363 * to see if it was activated by another thread. 3364 * 3365 * If the metaslab was activated for another allocator or 3366 * it was activated with a different activation weight (e.g. 3367 * we wanted to make it a primary but it was activated as 3368 * secondary) we return error (EBUSY). 3369 * 3370 * If the metaslab was activated for the same allocator 3371 * and requested activation mask, skip activating it. 3372 */ 3373 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 3374 if (msp->ms_allocator != allocator) 3375 return (EBUSY); 3376 3377 if ((msp->ms_weight & activation_weight) == 0) 3378 return (SET_ERROR(EBUSY)); 3379 3380 EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY), 3381 msp->ms_primary); 3382 return (0); 3383 } 3384 3385 /* 3386 * If the metaslab has literally 0 space, it will have weight 0. In 3387 * that case, don't bother activating it. This can happen if the 3388 * metaslab had space during find_valid_metaslab, but another thread 3389 * loaded it and used all that space while we were waiting to grab the 3390 * lock. 3391 */ 3392 if (msp->ms_weight == 0) { 3393 ASSERT0(range_tree_space(msp->ms_allocatable)); 3394 return (SET_ERROR(ENOSPC)); 3395 } 3396 3397 if ((error = metaslab_activate_allocator(msp->ms_group, msp, 3398 allocator, activation_weight)) != 0) { 3399 return (error); 3400 } 3401 3402 ASSERT(msp->ms_loaded); 3403 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 3404 3405 return (0); 3406 } 3407 3408 static void 3409 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3410 uint64_t weight) 3411 { 3412 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3413 ASSERT(msp->ms_loaded); 3414 3415 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 3416 metaslab_group_sort(mg, msp, weight); 3417 return; 3418 } 3419 3420 mutex_enter(&mg->mg_lock); 3421 ASSERT3P(msp->ms_group, ==, mg); 3422 ASSERT3S(0, <=, msp->ms_allocator); 3423 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); 3424 3425 metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator]; 3426 if (msp->ms_primary) { 3427 ASSERT3P(mga->mga_primary, ==, msp); 3428 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 3429 mga->mga_primary = NULL; 3430 } else { 3431 ASSERT3P(mga->mga_secondary, ==, msp); 3432 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 3433 mga->mga_secondary = NULL; 3434 } 3435 msp->ms_allocator = -1; 3436 metaslab_group_sort_impl(mg, msp, weight); 3437 mutex_exit(&mg->mg_lock); 3438 } 3439 3440 static void 3441 metaslab_passivate(metaslab_t *msp, uint64_t weight) 3442 { 3443 uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE; 3444 3445 /* 3446 * If size < SPA_MINBLOCKSIZE, then we will not allocate from 3447 * this metaslab again. In that case, it had better be empty, 3448 * or we would be leaving space on the table. 3449 */ 3450 ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) || 3451 size >= SPA_MINBLOCKSIZE || 3452 range_tree_space(msp->ms_allocatable) == 0); 3453 ASSERT0(weight & METASLAB_ACTIVE_MASK); 3454 3455 ASSERT(msp->ms_activation_weight != 0); 3456 msp->ms_activation_weight = 0; 3457 metaslab_passivate_allocator(msp->ms_group, msp, weight); 3458 ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK); 3459 } 3460 3461 /* 3462 * Segment-based metaslabs are activated once and remain active until 3463 * we either fail an allocation attempt (similar to space-based metaslabs) 3464 * or have exhausted the free space in zfs_metaslab_switch_threshold 3465 * buckets since the metaslab was activated. This function checks to see 3466 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the 3467 * metaslab and passivates it proactively. This will allow us to select a 3468 * metaslab with a larger contiguous region, if any, remaining within this 3469 * metaslab group. If we're in sync pass > 1, then we continue using this 3470 * metaslab so that we don't dirty more block and cause more sync passes. 3471 */ 3472 static void 3473 metaslab_segment_may_passivate(metaslab_t *msp) 3474 { 3475 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3476 3477 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) 3478 return; 3479 3480 /* 3481 * Since we are in the middle of a sync pass, the most accurate 3482 * information that is accessible to us is the in-core range tree 3483 * histogram; calculate the new weight based on that information. 3484 */ 3485 uint64_t weight = metaslab_weight_from_range_tree(msp); 3486 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); 3487 int current_idx = WEIGHT_GET_INDEX(weight); 3488 3489 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) 3490 metaslab_passivate(msp, weight); 3491 } 3492 3493 static void 3494 metaslab_preload(void *arg) 3495 { 3496 metaslab_t *msp = arg; 3497 metaslab_class_t *mc = msp->ms_group->mg_class; 3498 spa_t *spa = mc->mc_spa; 3499 fstrans_cookie_t cookie = spl_fstrans_mark(); 3500 3501 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 3502 3503 mutex_enter(&msp->ms_lock); 3504 (void) metaslab_load(msp); 3505 metaslab_set_selected_txg(msp, spa_syncing_txg(spa)); 3506 mutex_exit(&msp->ms_lock); 3507 spl_fstrans_unmark(cookie); 3508 } 3509 3510 static void 3511 metaslab_group_preload(metaslab_group_t *mg) 3512 { 3513 spa_t *spa = mg->mg_vd->vdev_spa; 3514 metaslab_t *msp; 3515 avl_tree_t *t = &mg->mg_metaslab_tree; 3516 int m = 0; 3517 3518 if (spa_shutting_down(spa) || !metaslab_preload_enabled) { 3519 taskq_wait_outstanding(mg->mg_taskq, 0); 3520 return; 3521 } 3522 3523 mutex_enter(&mg->mg_lock); 3524 3525 /* 3526 * Load the next potential metaslabs 3527 */ 3528 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { 3529 ASSERT3P(msp->ms_group, ==, mg); 3530 3531 /* 3532 * We preload only the maximum number of metaslabs specified 3533 * by metaslab_preload_limit. If a metaslab is being forced 3534 * to condense then we preload it too. This will ensure 3535 * that force condensing happens in the next txg. 3536 */ 3537 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 3538 continue; 3539 } 3540 3541 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, 3542 msp, TQ_SLEEP) != TASKQID_INVALID); 3543 } 3544 mutex_exit(&mg->mg_lock); 3545 } 3546 3547 /* 3548 * Determine if the space map's on-disk footprint is past our tolerance for 3549 * inefficiency. We would like to use the following criteria to make our 3550 * decision: 3551 * 3552 * 1. Do not condense if the size of the space map object would dramatically 3553 * increase as a result of writing out the free space range tree. 3554 * 3555 * 2. Condense if the on on-disk space map representation is at least 3556 * zfs_condense_pct/100 times the size of the optimal representation 3557 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, optimal = 1.1MB). 3558 * 3559 * 3. Do not condense if the on-disk size of the space map does not actually 3560 * decrease. 3561 * 3562 * Unfortunately, we cannot compute the on-disk size of the space map in this 3563 * context because we cannot accurately compute the effects of compression, etc. 3564 * Instead, we apply the heuristic described in the block comment for 3565 * zfs_metaslab_condense_block_threshold - we only condense if the space used 3566 * is greater than a threshold number of blocks. 3567 */ 3568 static boolean_t 3569 metaslab_should_condense(metaslab_t *msp) 3570 { 3571 space_map_t *sm = msp->ms_sm; 3572 vdev_t *vd = msp->ms_group->mg_vd; 3573 uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift; 3574 3575 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3576 ASSERT(msp->ms_loaded); 3577 ASSERT(sm != NULL); 3578 ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1); 3579 3580 /* 3581 * We always condense metaslabs that are empty and metaslabs for 3582 * which a condense request has been made. 3583 */ 3584 if (range_tree_numsegs(msp->ms_allocatable) == 0 || 3585 msp->ms_condense_wanted) 3586 return (B_TRUE); 3587 3588 uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize); 3589 uint64_t object_size = space_map_length(sm); 3590 uint64_t optimal_size = space_map_estimate_optimal_size(sm, 3591 msp->ms_allocatable, SM_NO_VDEVID); 3592 3593 return (object_size >= (optimal_size * zfs_condense_pct / 100) && 3594 object_size > zfs_metaslab_condense_block_threshold * record_size); 3595 } 3596 3597 /* 3598 * Condense the on-disk space map representation to its minimized form. 3599 * The minimized form consists of a small number of allocations followed 3600 * by the entries of the free range tree (ms_allocatable). The condensed 3601 * spacemap contains all the entries of previous TXGs (including those in 3602 * the pool-wide log spacemaps; thus this is effectively a superset of 3603 * metaslab_flush()), but this TXG's entries still need to be written. 3604 */ 3605 static void 3606 metaslab_condense(metaslab_t *msp, dmu_tx_t *tx) 3607 { 3608 range_tree_t *condense_tree; 3609 space_map_t *sm = msp->ms_sm; 3610 uint64_t txg = dmu_tx_get_txg(tx); 3611 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3612 3613 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3614 ASSERT(msp->ms_loaded); 3615 ASSERT(msp->ms_sm != NULL); 3616 3617 /* 3618 * In order to condense the space map, we need to change it so it 3619 * only describes which segments are currently allocated and free. 3620 * 3621 * All the current free space resides in the ms_allocatable, all 3622 * the ms_defer trees, and all the ms_allocating trees. We ignore 3623 * ms_freed because it is empty because we're in sync pass 1. We 3624 * ignore ms_freeing because these changes are not yet reflected 3625 * in the spacemap (they will be written later this txg). 3626 * 3627 * So to truncate the space map to represent all the entries of 3628 * previous TXGs we do the following: 3629 * 3630 * 1] We create a range tree (condense tree) that is 100% empty. 3631 * 2] We add to it all segments found in the ms_defer trees 3632 * as those segments are marked as free in the original space 3633 * map. We do the same with the ms_allocating trees for the same 3634 * reason. Adding these segments should be a relatively 3635 * inexpensive operation since we expect these trees to have a 3636 * small number of nodes. 3637 * 3] We vacate any unflushed allocs, since they are not frees we 3638 * need to add to the condense tree. Then we vacate any 3639 * unflushed frees as they should already be part of ms_allocatable. 3640 * 4] At this point, we would ideally like to add all segments 3641 * in the ms_allocatable tree from the condense tree. This way 3642 * we would write all the entries of the condense tree as the 3643 * condensed space map, which would only contain freed 3644 * segments with everything else assumed to be allocated. 3645 * 3646 * Doing so can be prohibitively expensive as ms_allocatable can 3647 * be large, and therefore computationally expensive to add to 3648 * the condense_tree. Instead we first sync out an entry marking 3649 * everything as allocated, then the condense_tree and then the 3650 * ms_allocatable, in the condensed space map. While this is not 3651 * optimal, it is typically close to optimal and more importantly 3652 * much cheaper to compute. 3653 * 3654 * 5] Finally, as both of the unflushed trees were written to our 3655 * new and condensed metaslab space map, we basically flushed 3656 * all the unflushed changes to disk, thus we call 3657 * metaslab_flush_update(). 3658 */ 3659 ASSERT3U(spa_sync_pass(spa), ==, 1); 3660 ASSERT(range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */ 3661 3662 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, " 3663 "spa %s, smp size %llu, segments %llu, forcing condense=%s", 3664 (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp, 3665 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 3666 spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm), 3667 (u_longlong_t)range_tree_numsegs(msp->ms_allocatable), 3668 msp->ms_condense_wanted ? "TRUE" : "FALSE"); 3669 3670 msp->ms_condense_wanted = B_FALSE; 3671 3672 range_seg_type_t type; 3673 uint64_t shift, start; 3674 type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp, 3675 &start, &shift); 3676 3677 condense_tree = range_tree_create(NULL, type, NULL, start, shift); 3678 3679 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3680 range_tree_walk(msp->ms_defer[t], 3681 range_tree_add, condense_tree); 3682 } 3683 3684 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 3685 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK], 3686 range_tree_add, condense_tree); 3687 } 3688 3689 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 3690 metaslab_unflushed_changes_memused(msp)); 3691 spa->spa_unflushed_stats.sus_memused -= 3692 metaslab_unflushed_changes_memused(msp); 3693 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 3694 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 3695 3696 /* 3697 * We're about to drop the metaslab's lock thus allowing other 3698 * consumers to change it's content. Set the metaslab's ms_condensing 3699 * flag to ensure that allocations on this metaslab do not occur 3700 * while we're in the middle of committing it to disk. This is only 3701 * critical for ms_allocatable as all other range trees use per TXG 3702 * views of their content. 3703 */ 3704 msp->ms_condensing = B_TRUE; 3705 3706 mutex_exit(&msp->ms_lock); 3707 uint64_t object = space_map_object(msp->ms_sm); 3708 space_map_truncate(sm, 3709 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? 3710 zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx); 3711 3712 /* 3713 * space_map_truncate() may have reallocated the spacemap object. 3714 * If so, update the vdev_ms_array. 3715 */ 3716 if (space_map_object(msp->ms_sm) != object) { 3717 object = space_map_object(msp->ms_sm); 3718 dmu_write(spa->spa_meta_objset, 3719 msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) * 3720 msp->ms_id, sizeof (uint64_t), &object, tx); 3721 } 3722 3723 /* 3724 * Note: 3725 * When the log space map feature is enabled, each space map will 3726 * always have ALLOCS followed by FREES for each sync pass. This is 3727 * typically true even when the log space map feature is disabled, 3728 * except from the case where a metaslab goes through metaslab_sync() 3729 * and gets condensed. In that case the metaslab's space map will have 3730 * ALLOCS followed by FREES (due to condensing) followed by ALLOCS 3731 * followed by FREES (due to space_map_write() in metaslab_sync()) for 3732 * sync pass 1. 3733 */ 3734 range_tree_t *tmp_tree = range_tree_create(NULL, type, NULL, start, 3735 shift); 3736 range_tree_add(tmp_tree, msp->ms_start, msp->ms_size); 3737 space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx); 3738 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx); 3739 space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx); 3740 3741 range_tree_vacate(condense_tree, NULL, NULL); 3742 range_tree_destroy(condense_tree); 3743 range_tree_vacate(tmp_tree, NULL, NULL); 3744 range_tree_destroy(tmp_tree); 3745 mutex_enter(&msp->ms_lock); 3746 3747 msp->ms_condensing = B_FALSE; 3748 metaslab_flush_update(msp, tx); 3749 } 3750 3751 static void 3752 metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx) 3753 { 3754 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3755 ASSERT(spa_syncing_log_sm(spa) != NULL); 3756 ASSERT(msp->ms_sm != NULL); 3757 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3758 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3759 3760 mutex_enter(&spa->spa_flushed_ms_lock); 3761 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3762 metaslab_set_unflushed_dirty(msp, B_TRUE); 3763 avl_add(&spa->spa_metaslabs_by_flushed, msp); 3764 mutex_exit(&spa->spa_flushed_ms_lock); 3765 3766 spa_log_sm_increment_current_mscount(spa); 3767 spa_log_summary_add_flushed_metaslab(spa, B_TRUE); 3768 } 3769 3770 void 3771 metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty) 3772 { 3773 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3774 ASSERT(spa_syncing_log_sm(spa) != NULL); 3775 ASSERT(msp->ms_sm != NULL); 3776 ASSERT(metaslab_unflushed_txg(msp) != 0); 3777 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp); 3778 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 3779 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 3780 3781 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa)); 3782 3783 /* update metaslab's position in our flushing tree */ 3784 uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp); 3785 boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp); 3786 mutex_enter(&spa->spa_flushed_ms_lock); 3787 avl_remove(&spa->spa_metaslabs_by_flushed, msp); 3788 metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx); 3789 metaslab_set_unflushed_dirty(msp, dirty); 3790 avl_add(&spa->spa_metaslabs_by_flushed, msp); 3791 mutex_exit(&spa->spa_flushed_ms_lock); 3792 3793 /* update metaslab counts of spa_log_sm_t nodes */ 3794 spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg); 3795 spa_log_sm_increment_current_mscount(spa); 3796 3797 /* update log space map summary */ 3798 spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg, 3799 ms_prev_flushed_dirty); 3800 spa_log_summary_add_flushed_metaslab(spa, dirty); 3801 3802 /* cleanup obsolete logs if any */ 3803 spa_cleanup_old_sm_logs(spa, tx); 3804 } 3805 3806 /* 3807 * Called when the metaslab has been flushed (its own spacemap now reflects 3808 * all the contents of the pool-wide spacemap log). Updates the metaslab's 3809 * metadata and any pool-wide related log space map data (e.g. summary, 3810 * obsolete logs, etc..) to reflect that. 3811 */ 3812 static void 3813 metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx) 3814 { 3815 metaslab_group_t *mg = msp->ms_group; 3816 spa_t *spa = mg->mg_vd->vdev_spa; 3817 3818 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3819 3820 ASSERT3U(spa_sync_pass(spa), ==, 1); 3821 3822 /* 3823 * Just because a metaslab got flushed, that doesn't mean that 3824 * it will pass through metaslab_sync_done(). Thus, make sure to 3825 * update ms_synced_length here in case it doesn't. 3826 */ 3827 msp->ms_synced_length = space_map_length(msp->ms_sm); 3828 3829 /* 3830 * We may end up here from metaslab_condense() without the 3831 * feature being active. In that case this is a no-op. 3832 */ 3833 if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) || 3834 metaslab_unflushed_txg(msp) == 0) 3835 return; 3836 3837 metaslab_unflushed_bump(msp, tx, B_FALSE); 3838 } 3839 3840 boolean_t 3841 metaslab_flush(metaslab_t *msp, dmu_tx_t *tx) 3842 { 3843 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 3844 3845 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3846 ASSERT3U(spa_sync_pass(spa), ==, 1); 3847 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 3848 3849 ASSERT(msp->ms_sm != NULL); 3850 ASSERT(metaslab_unflushed_txg(msp) != 0); 3851 ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL); 3852 3853 /* 3854 * There is nothing wrong with flushing the same metaslab twice, as 3855 * this codepath should work on that case. However, the current 3856 * flushing scheme makes sure to avoid this situation as we would be 3857 * making all these calls without having anything meaningful to write 3858 * to disk. We assert this behavior here. 3859 */ 3860 ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx)); 3861 3862 /* 3863 * We can not flush while loading, because then we would 3864 * not load the ms_unflushed_{allocs,frees}. 3865 */ 3866 if (msp->ms_loading) 3867 return (B_FALSE); 3868 3869 metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3870 metaslab_verify_weight_and_frag(msp); 3871 3872 /* 3873 * Metaslab condensing is effectively flushing. Therefore if the 3874 * metaslab can be condensed we can just condense it instead of 3875 * flushing it. 3876 * 3877 * Note that metaslab_condense() does call metaslab_flush_update() 3878 * so we can just return immediately after condensing. We also 3879 * don't need to care about setting ms_flushing or broadcasting 3880 * ms_flush_cv, even if we temporarily drop the ms_lock in 3881 * metaslab_condense(), as the metaslab is already loaded. 3882 */ 3883 if (msp->ms_loaded && metaslab_should_condense(msp)) { 3884 metaslab_group_t *mg = msp->ms_group; 3885 3886 /* 3887 * For all histogram operations below refer to the 3888 * comments of metaslab_sync() where we follow a 3889 * similar procedure. 3890 */ 3891 metaslab_group_histogram_verify(mg); 3892 metaslab_class_histogram_verify(mg->mg_class); 3893 metaslab_group_histogram_remove(mg, msp); 3894 3895 metaslab_condense(msp, tx); 3896 3897 space_map_histogram_clear(msp->ms_sm); 3898 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 3899 ASSERT(range_tree_is_empty(msp->ms_freed)); 3900 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3901 space_map_histogram_add(msp->ms_sm, 3902 msp->ms_defer[t], tx); 3903 } 3904 metaslab_aux_histograms_update(msp); 3905 3906 metaslab_group_histogram_add(mg, msp); 3907 metaslab_group_histogram_verify(mg); 3908 metaslab_class_histogram_verify(mg->mg_class); 3909 3910 metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3911 3912 /* 3913 * Since we recreated the histogram (and potentially 3914 * the ms_sm too while condensing) ensure that the 3915 * weight is updated too because we are not guaranteed 3916 * that this metaslab is dirty and will go through 3917 * metaslab_sync_done(). 3918 */ 3919 metaslab_recalculate_weight_and_sort(msp); 3920 return (B_TRUE); 3921 } 3922 3923 msp->ms_flushing = B_TRUE; 3924 uint64_t sm_len_before = space_map_length(msp->ms_sm); 3925 3926 mutex_exit(&msp->ms_lock); 3927 space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC, 3928 SM_NO_VDEVID, tx); 3929 space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE, 3930 SM_NO_VDEVID, tx); 3931 mutex_enter(&msp->ms_lock); 3932 3933 uint64_t sm_len_after = space_map_length(msp->ms_sm); 3934 if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) { 3935 zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, " 3936 "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, " 3937 "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx), 3938 spa_name(spa), 3939 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 3940 (u_longlong_t)msp->ms_id, 3941 (u_longlong_t)range_tree_space(msp->ms_unflushed_allocs), 3942 (u_longlong_t)range_tree_space(msp->ms_unflushed_frees), 3943 (u_longlong_t)(sm_len_after - sm_len_before)); 3944 } 3945 3946 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 3947 metaslab_unflushed_changes_memused(msp)); 3948 spa->spa_unflushed_stats.sus_memused -= 3949 metaslab_unflushed_changes_memused(msp); 3950 range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 3951 range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 3952 3953 metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3954 metaslab_verify_weight_and_frag(msp); 3955 3956 metaslab_flush_update(msp, tx); 3957 3958 metaslab_verify_space(msp, dmu_tx_get_txg(tx)); 3959 metaslab_verify_weight_and_frag(msp); 3960 3961 msp->ms_flushing = B_FALSE; 3962 cv_broadcast(&msp->ms_flush_cv); 3963 return (B_TRUE); 3964 } 3965 3966 /* 3967 * Write a metaslab to disk in the context of the specified transaction group. 3968 */ 3969 void 3970 metaslab_sync(metaslab_t *msp, uint64_t txg) 3971 { 3972 metaslab_group_t *mg = msp->ms_group; 3973 vdev_t *vd = mg->mg_vd; 3974 spa_t *spa = vd->vdev_spa; 3975 objset_t *mos = spa_meta_objset(spa); 3976 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK]; 3977 dmu_tx_t *tx; 3978 3979 ASSERT(!vd->vdev_ishole); 3980 3981 /* 3982 * This metaslab has just been added so there's no work to do now. 3983 */ 3984 if (msp->ms_new) { 3985 ASSERT0(range_tree_space(alloctree)); 3986 ASSERT0(range_tree_space(msp->ms_freeing)); 3987 ASSERT0(range_tree_space(msp->ms_freed)); 3988 ASSERT0(range_tree_space(msp->ms_checkpointing)); 3989 ASSERT0(range_tree_space(msp->ms_trim)); 3990 return; 3991 } 3992 3993 /* 3994 * Normally, we don't want to process a metaslab if there are no 3995 * allocations or frees to perform. However, if the metaslab is being 3996 * forced to condense, it's loaded and we're not beyond the final 3997 * dirty txg, we need to let it through. Not condensing beyond the 3998 * final dirty txg prevents an issue where metaslabs that need to be 3999 * condensed but were loaded for other reasons could cause a panic 4000 * here. By only checking the txg in that branch of the conditional, 4001 * we preserve the utility of the VERIFY statements in all other 4002 * cases. 4003 */ 4004 if (range_tree_is_empty(alloctree) && 4005 range_tree_is_empty(msp->ms_freeing) && 4006 range_tree_is_empty(msp->ms_checkpointing) && 4007 !(msp->ms_loaded && msp->ms_condense_wanted && 4008 txg <= spa_final_dirty_txg(spa))) 4009 return; 4010 4011 4012 VERIFY3U(txg, <=, spa_final_dirty_txg(spa)); 4013 4014 /* 4015 * The only state that can actually be changing concurrently 4016 * with metaslab_sync() is the metaslab's ms_allocatable. No 4017 * other thread can be modifying this txg's alloc, freeing, 4018 * freed, or space_map_phys_t. We drop ms_lock whenever we 4019 * could call into the DMU, because the DMU can call down to 4020 * us (e.g. via zio_free()) at any time. 4021 * 4022 * The spa_vdev_remove_thread() can be reading metaslab state 4023 * concurrently, and it is locked out by the ms_sync_lock. 4024 * Note that the ms_lock is insufficient for this, because it 4025 * is dropped by space_map_write(). 4026 */ 4027 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 4028 4029 /* 4030 * Generate a log space map if one doesn't exist already. 4031 */ 4032 spa_generate_syncing_log_sm(spa, tx); 4033 4034 if (msp->ms_sm == NULL) { 4035 uint64_t new_object = space_map_alloc(mos, 4036 spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ? 4037 zfs_metaslab_sm_blksz_with_log : 4038 zfs_metaslab_sm_blksz_no_log, tx); 4039 VERIFY3U(new_object, !=, 0); 4040 4041 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 4042 msp->ms_id, sizeof (uint64_t), &new_object, tx); 4043 4044 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 4045 msp->ms_start, msp->ms_size, vd->vdev_ashift)); 4046 ASSERT(msp->ms_sm != NULL); 4047 4048 ASSERT(range_tree_is_empty(msp->ms_unflushed_allocs)); 4049 ASSERT(range_tree_is_empty(msp->ms_unflushed_frees)); 4050 ASSERT0(metaslab_allocated_space(msp)); 4051 } 4052 4053 if (!range_tree_is_empty(msp->ms_checkpointing) && 4054 vd->vdev_checkpoint_sm == NULL) { 4055 ASSERT(spa_has_checkpoint(spa)); 4056 4057 uint64_t new_object = space_map_alloc(mos, 4058 zfs_vdev_standard_sm_blksz, tx); 4059 VERIFY3U(new_object, !=, 0); 4060 4061 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, 4062 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); 4063 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4064 4065 /* 4066 * We save the space map object as an entry in vdev_top_zap 4067 * so it can be retrieved when the pool is reopened after an 4068 * export or through zdb. 4069 */ 4070 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, 4071 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 4072 sizeof (new_object), 1, &new_object, tx)); 4073 } 4074 4075 mutex_enter(&msp->ms_sync_lock); 4076 mutex_enter(&msp->ms_lock); 4077 4078 /* 4079 * Note: metaslab_condense() clears the space map's histogram. 4080 * Therefore we must verify and remove this histogram before 4081 * condensing. 4082 */ 4083 metaslab_group_histogram_verify(mg); 4084 metaslab_class_histogram_verify(mg->mg_class); 4085 metaslab_group_histogram_remove(mg, msp); 4086 4087 if (spa->spa_sync_pass == 1 && msp->ms_loaded && 4088 metaslab_should_condense(msp)) 4089 metaslab_condense(msp, tx); 4090 4091 /* 4092 * We'll be going to disk to sync our space accounting, thus we 4093 * drop the ms_lock during that time so allocations coming from 4094 * open-context (ZIL) for future TXGs do not block. 4095 */ 4096 mutex_exit(&msp->ms_lock); 4097 space_map_t *log_sm = spa_syncing_log_sm(spa); 4098 if (log_sm != NULL) { 4099 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4100 if (metaslab_unflushed_txg(msp) == 0) 4101 metaslab_unflushed_add(msp, tx); 4102 else if (!metaslab_unflushed_dirty(msp)) 4103 metaslab_unflushed_bump(msp, tx, B_TRUE); 4104 4105 space_map_write(log_sm, alloctree, SM_ALLOC, 4106 vd->vdev_id, tx); 4107 space_map_write(log_sm, msp->ms_freeing, SM_FREE, 4108 vd->vdev_id, tx); 4109 mutex_enter(&msp->ms_lock); 4110 4111 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 4112 metaslab_unflushed_changes_memused(msp)); 4113 spa->spa_unflushed_stats.sus_memused -= 4114 metaslab_unflushed_changes_memused(msp); 4115 range_tree_remove_xor_add(alloctree, 4116 msp->ms_unflushed_frees, msp->ms_unflushed_allocs); 4117 range_tree_remove_xor_add(msp->ms_freeing, 4118 msp->ms_unflushed_allocs, msp->ms_unflushed_frees); 4119 spa->spa_unflushed_stats.sus_memused += 4120 metaslab_unflushed_changes_memused(msp); 4121 } else { 4122 ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP)); 4123 4124 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, 4125 SM_NO_VDEVID, tx); 4126 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, 4127 SM_NO_VDEVID, tx); 4128 mutex_enter(&msp->ms_lock); 4129 } 4130 4131 msp->ms_allocated_space += range_tree_space(alloctree); 4132 ASSERT3U(msp->ms_allocated_space, >=, 4133 range_tree_space(msp->ms_freeing)); 4134 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); 4135 4136 if (!range_tree_is_empty(msp->ms_checkpointing)) { 4137 ASSERT(spa_has_checkpoint(spa)); 4138 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 4139 4140 /* 4141 * Since we are doing writes to disk and the ms_checkpointing 4142 * tree won't be changing during that time, we drop the 4143 * ms_lock while writing to the checkpoint space map, for the 4144 * same reason mentioned above. 4145 */ 4146 mutex_exit(&msp->ms_lock); 4147 space_map_write(vd->vdev_checkpoint_sm, 4148 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); 4149 mutex_enter(&msp->ms_lock); 4150 4151 spa->spa_checkpoint_info.sci_dspace += 4152 range_tree_space(msp->ms_checkpointing); 4153 vd->vdev_stat.vs_checkpoint_space += 4154 range_tree_space(msp->ms_checkpointing); 4155 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, 4156 -space_map_allocated(vd->vdev_checkpoint_sm)); 4157 4158 range_tree_vacate(msp->ms_checkpointing, NULL, NULL); 4159 } 4160 4161 if (msp->ms_loaded) { 4162 /* 4163 * When the space map is loaded, we have an accurate 4164 * histogram in the range tree. This gives us an opportunity 4165 * to bring the space map's histogram up-to-date so we clear 4166 * it first before updating it. 4167 */ 4168 space_map_histogram_clear(msp->ms_sm); 4169 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 4170 4171 /* 4172 * Since we've cleared the histogram we need to add back 4173 * any free space that has already been processed, plus 4174 * any deferred space. This allows the on-disk histogram 4175 * to accurately reflect all free space even if some space 4176 * is not yet available for allocation (i.e. deferred). 4177 */ 4178 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); 4179 4180 /* 4181 * Add back any deferred free space that has not been 4182 * added back into the in-core free tree yet. This will 4183 * ensure that we don't end up with a space map histogram 4184 * that is completely empty unless the metaslab is fully 4185 * allocated. 4186 */ 4187 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 4188 space_map_histogram_add(msp->ms_sm, 4189 msp->ms_defer[t], tx); 4190 } 4191 } 4192 4193 /* 4194 * Always add the free space from this sync pass to the space 4195 * map histogram. We want to make sure that the on-disk histogram 4196 * accounts for all free space. If the space map is not loaded, 4197 * then we will lose some accuracy but will correct it the next 4198 * time we load the space map. 4199 */ 4200 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); 4201 metaslab_aux_histograms_update(msp); 4202 4203 metaslab_group_histogram_add(mg, msp); 4204 metaslab_group_histogram_verify(mg); 4205 metaslab_class_histogram_verify(mg->mg_class); 4206 4207 /* 4208 * For sync pass 1, we avoid traversing this txg's free range tree 4209 * and instead will just swap the pointers for freeing and freed. 4210 * We can safely do this since the freed_tree is guaranteed to be 4211 * empty on the initial pass. 4212 * 4213 * Keep in mind that even if we are currently using a log spacemap 4214 * we want current frees to end up in the ms_allocatable (but not 4215 * get appended to the ms_sm) so their ranges can be reused as usual. 4216 */ 4217 if (spa_sync_pass(spa) == 1) { 4218 range_tree_swap(&msp->ms_freeing, &msp->ms_freed); 4219 ASSERT0(msp->ms_allocated_this_txg); 4220 } else { 4221 range_tree_vacate(msp->ms_freeing, 4222 range_tree_add, msp->ms_freed); 4223 } 4224 msp->ms_allocated_this_txg += range_tree_space(alloctree); 4225 range_tree_vacate(alloctree, NULL, NULL); 4226 4227 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 4228 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) 4229 & TXG_MASK])); 4230 ASSERT0(range_tree_space(msp->ms_freeing)); 4231 ASSERT0(range_tree_space(msp->ms_checkpointing)); 4232 4233 mutex_exit(&msp->ms_lock); 4234 4235 /* 4236 * Verify that the space map object ID has been recorded in the 4237 * vdev_ms_array. 4238 */ 4239 uint64_t object; 4240 VERIFY0(dmu_read(mos, vd->vdev_ms_array, 4241 msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0)); 4242 VERIFY3U(object, ==, space_map_object(msp->ms_sm)); 4243 4244 mutex_exit(&msp->ms_sync_lock); 4245 dmu_tx_commit(tx); 4246 } 4247 4248 static void 4249 metaslab_evict(metaslab_t *msp, uint64_t txg) 4250 { 4251 if (!msp->ms_loaded || msp->ms_disabled != 0) 4252 return; 4253 4254 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 4255 VERIFY0(range_tree_space( 4256 msp->ms_allocating[(txg + t) & TXG_MASK])); 4257 } 4258 if (msp->ms_allocator != -1) 4259 metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK); 4260 4261 if (!metaslab_debug_unload) 4262 metaslab_unload(msp); 4263 } 4264 4265 /* 4266 * Called after a transaction group has completely synced to mark 4267 * all of the metaslab's free space as usable. 4268 */ 4269 void 4270 metaslab_sync_done(metaslab_t *msp, uint64_t txg) 4271 { 4272 metaslab_group_t *mg = msp->ms_group; 4273 vdev_t *vd = mg->mg_vd; 4274 spa_t *spa = vd->vdev_spa; 4275 range_tree_t **defer_tree; 4276 int64_t alloc_delta, defer_delta; 4277 boolean_t defer_allowed = B_TRUE; 4278 4279 ASSERT(!vd->vdev_ishole); 4280 4281 mutex_enter(&msp->ms_lock); 4282 4283 if (msp->ms_new) { 4284 /* this is a new metaslab, add its capacity to the vdev */ 4285 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); 4286 4287 /* there should be no allocations nor frees at this point */ 4288 VERIFY0(msp->ms_allocated_this_txg); 4289 VERIFY0(range_tree_space(msp->ms_freed)); 4290 } 4291 4292 ASSERT0(range_tree_space(msp->ms_freeing)); 4293 ASSERT0(range_tree_space(msp->ms_checkpointing)); 4294 4295 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; 4296 4297 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 4298 metaslab_class_get_alloc(spa_normal_class(spa)); 4299 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { 4300 defer_allowed = B_FALSE; 4301 } 4302 4303 defer_delta = 0; 4304 alloc_delta = msp->ms_allocated_this_txg - 4305 range_tree_space(msp->ms_freed); 4306 4307 if (defer_allowed) { 4308 defer_delta = range_tree_space(msp->ms_freed) - 4309 range_tree_space(*defer_tree); 4310 } else { 4311 defer_delta -= range_tree_space(*defer_tree); 4312 } 4313 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, 4314 defer_delta, 0); 4315 4316 if (spa_syncing_log_sm(spa) == NULL) { 4317 /* 4318 * If there's a metaslab_load() in progress and we don't have 4319 * a log space map, it means that we probably wrote to the 4320 * metaslab's space map. If this is the case, we need to 4321 * make sure that we wait for the load to complete so that we 4322 * have a consistent view at the in-core side of the metaslab. 4323 */ 4324 metaslab_load_wait(msp); 4325 } else { 4326 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 4327 } 4328 4329 /* 4330 * When auto-trimming is enabled, free ranges which are added to 4331 * ms_allocatable are also be added to ms_trim. The ms_trim tree is 4332 * periodically consumed by the vdev_autotrim_thread() which issues 4333 * trims for all ranges and then vacates the tree. The ms_trim tree 4334 * can be discarded at any time with the sole consequence of recent 4335 * frees not being trimmed. 4336 */ 4337 if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) { 4338 range_tree_walk(*defer_tree, range_tree_add, msp->ms_trim); 4339 if (!defer_allowed) { 4340 range_tree_walk(msp->ms_freed, range_tree_add, 4341 msp->ms_trim); 4342 } 4343 } else { 4344 range_tree_vacate(msp->ms_trim, NULL, NULL); 4345 } 4346 4347 /* 4348 * Move the frees from the defer_tree back to the free 4349 * range tree (if it's loaded). Swap the freed_tree and 4350 * the defer_tree -- this is safe to do because we've 4351 * just emptied out the defer_tree. 4352 */ 4353 range_tree_vacate(*defer_tree, 4354 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable); 4355 if (defer_allowed) { 4356 range_tree_swap(&msp->ms_freed, defer_tree); 4357 } else { 4358 range_tree_vacate(msp->ms_freed, 4359 msp->ms_loaded ? range_tree_add : NULL, 4360 msp->ms_allocatable); 4361 } 4362 4363 msp->ms_synced_length = space_map_length(msp->ms_sm); 4364 4365 msp->ms_deferspace += defer_delta; 4366 ASSERT3S(msp->ms_deferspace, >=, 0); 4367 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); 4368 if (msp->ms_deferspace != 0) { 4369 /* 4370 * Keep syncing this metaslab until all deferred frees 4371 * are back in circulation. 4372 */ 4373 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 4374 } 4375 metaslab_aux_histograms_update_done(msp, defer_allowed); 4376 4377 if (msp->ms_new) { 4378 msp->ms_new = B_FALSE; 4379 mutex_enter(&mg->mg_lock); 4380 mg->mg_ms_ready++; 4381 mutex_exit(&mg->mg_lock); 4382 } 4383 4384 /* 4385 * Re-sort metaslab within its group now that we've adjusted 4386 * its allocatable space. 4387 */ 4388 metaslab_recalculate_weight_and_sort(msp); 4389 4390 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 4391 ASSERT0(range_tree_space(msp->ms_freeing)); 4392 ASSERT0(range_tree_space(msp->ms_freed)); 4393 ASSERT0(range_tree_space(msp->ms_checkpointing)); 4394 msp->ms_allocating_total -= msp->ms_allocated_this_txg; 4395 msp->ms_allocated_this_txg = 0; 4396 mutex_exit(&msp->ms_lock); 4397 } 4398 4399 void 4400 metaslab_sync_reassess(metaslab_group_t *mg) 4401 { 4402 spa_t *spa = mg->mg_class->mc_spa; 4403 4404 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 4405 metaslab_group_alloc_update(mg); 4406 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 4407 4408 /* 4409 * Preload the next potential metaslabs but only on active 4410 * metaslab groups. We can get into a state where the metaslab 4411 * is no longer active since we dirty metaslabs as we remove a 4412 * a device, thus potentially making the metaslab group eligible 4413 * for preloading. 4414 */ 4415 if (mg->mg_activation_count > 0) { 4416 metaslab_group_preload(mg); 4417 } 4418 spa_config_exit(spa, SCL_ALLOC, FTAG); 4419 } 4420 4421 /* 4422 * When writing a ditto block (i.e. more than one DVA for a given BP) on 4423 * the same vdev as an existing DVA of this BP, then try to allocate it 4424 * on a different metaslab than existing DVAs (i.e. a unique metaslab). 4425 */ 4426 static boolean_t 4427 metaslab_is_unique(metaslab_t *msp, dva_t *dva) 4428 { 4429 uint64_t dva_ms_id; 4430 4431 if (DVA_GET_ASIZE(dva) == 0) 4432 return (B_TRUE); 4433 4434 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) 4435 return (B_TRUE); 4436 4437 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift; 4438 4439 return (msp->ms_id != dva_ms_id); 4440 } 4441 4442 /* 4443 * ========================================================================== 4444 * Metaslab allocation tracing facility 4445 * ========================================================================== 4446 */ 4447 4448 /* 4449 * Add an allocation trace element to the allocation tracing list. 4450 */ 4451 static void 4452 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, 4453 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset, 4454 int allocator) 4455 { 4456 metaslab_alloc_trace_t *mat; 4457 4458 if (!metaslab_trace_enabled) 4459 return; 4460 4461 /* 4462 * When the tracing list reaches its maximum we remove 4463 * the second element in the list before adding a new one. 4464 * By removing the second element we preserve the original 4465 * entry as a clue to what allocations steps have already been 4466 * performed. 4467 */ 4468 if (zal->zal_size == metaslab_trace_max_entries) { 4469 metaslab_alloc_trace_t *mat_next; 4470 #ifdef ZFS_DEBUG 4471 panic("too many entries in allocation list"); 4472 #endif 4473 METASLABSTAT_BUMP(metaslabstat_trace_over_limit); 4474 zal->zal_size--; 4475 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); 4476 list_remove(&zal->zal_list, mat_next); 4477 kmem_cache_free(metaslab_alloc_trace_cache, mat_next); 4478 } 4479 4480 mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); 4481 list_link_init(&mat->mat_list_node); 4482 mat->mat_mg = mg; 4483 mat->mat_msp = msp; 4484 mat->mat_size = psize; 4485 mat->mat_dva_id = dva_id; 4486 mat->mat_offset = offset; 4487 mat->mat_weight = 0; 4488 mat->mat_allocator = allocator; 4489 4490 if (msp != NULL) 4491 mat->mat_weight = msp->ms_weight; 4492 4493 /* 4494 * The list is part of the zio so locking is not required. Only 4495 * a single thread will perform allocations for a given zio. 4496 */ 4497 list_insert_tail(&zal->zal_list, mat); 4498 zal->zal_size++; 4499 4500 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); 4501 } 4502 4503 void 4504 metaslab_trace_init(zio_alloc_list_t *zal) 4505 { 4506 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), 4507 offsetof(metaslab_alloc_trace_t, mat_list_node)); 4508 zal->zal_size = 0; 4509 } 4510 4511 void 4512 metaslab_trace_fini(zio_alloc_list_t *zal) 4513 { 4514 metaslab_alloc_trace_t *mat; 4515 4516 while ((mat = list_remove_head(&zal->zal_list)) != NULL) 4517 kmem_cache_free(metaslab_alloc_trace_cache, mat); 4518 list_destroy(&zal->zal_list); 4519 zal->zal_size = 0; 4520 } 4521 4522 /* 4523 * ========================================================================== 4524 * Metaslab block operations 4525 * ========================================================================== 4526 */ 4527 4528 static void 4529 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag, 4530 int flags, int allocator) 4531 { 4532 if (!(flags & METASLAB_ASYNC_ALLOC) || 4533 (flags & METASLAB_DONT_THROTTLE)) 4534 return; 4535 4536 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4537 if (!mg->mg_class->mc_alloc_throttle_enabled) 4538 return; 4539 4540 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4541 (void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag); 4542 } 4543 4544 static void 4545 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator) 4546 { 4547 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4548 metaslab_class_allocator_t *mca = 4549 &mg->mg_class->mc_allocator[allocator]; 4550 uint64_t max = mg->mg_max_alloc_queue_depth; 4551 uint64_t cur = mga->mga_cur_max_alloc_queue_depth; 4552 while (cur < max) { 4553 if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth, 4554 cur, cur + 1) == cur) { 4555 atomic_inc_64(&mca->mca_alloc_max_slots); 4556 return; 4557 } 4558 cur = mga->mga_cur_max_alloc_queue_depth; 4559 } 4560 } 4561 4562 void 4563 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag, 4564 int flags, int allocator, boolean_t io_complete) 4565 { 4566 if (!(flags & METASLAB_ASYNC_ALLOC) || 4567 (flags & METASLAB_DONT_THROTTLE)) 4568 return; 4569 4570 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4571 if (!mg->mg_class->mc_alloc_throttle_enabled) 4572 return; 4573 4574 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4575 (void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag); 4576 if (io_complete) 4577 metaslab_group_increment_qdepth(mg, allocator); 4578 } 4579 4580 void 4581 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag, 4582 int allocator) 4583 { 4584 #ifdef ZFS_DEBUG 4585 const dva_t *dva = bp->blk_dva; 4586 int ndvas = BP_GET_NDVAS(bp); 4587 4588 for (int d = 0; d < ndvas; d++) { 4589 uint64_t vdev = DVA_GET_VDEV(&dva[d]); 4590 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 4591 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4592 VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag)); 4593 } 4594 #endif 4595 } 4596 4597 static uint64_t 4598 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) 4599 { 4600 uint64_t start; 4601 range_tree_t *rt = msp->ms_allocatable; 4602 metaslab_class_t *mc = msp->ms_group->mg_class; 4603 4604 ASSERT(MUTEX_HELD(&msp->ms_lock)); 4605 VERIFY(!msp->ms_condensing); 4606 VERIFY0(msp->ms_disabled); 4607 4608 start = mc->mc_ops->msop_alloc(msp, size); 4609 if (start != -1ULL) { 4610 metaslab_group_t *mg = msp->ms_group; 4611 vdev_t *vd = mg->mg_vd; 4612 4613 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 4614 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4615 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 4616 range_tree_remove(rt, start, size); 4617 range_tree_clear(msp->ms_trim, start, size); 4618 4619 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 4620 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 4621 4622 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size); 4623 msp->ms_allocating_total += size; 4624 4625 /* Track the last successful allocation */ 4626 msp->ms_alloc_txg = txg; 4627 metaslab_verify_space(msp, txg); 4628 } 4629 4630 /* 4631 * Now that we've attempted the allocation we need to update the 4632 * metaslab's maximum block size since it may have changed. 4633 */ 4634 msp->ms_max_size = metaslab_largest_allocatable(msp); 4635 return (start); 4636 } 4637 4638 /* 4639 * Find the metaslab with the highest weight that is less than what we've 4640 * already tried. In the common case, this means that we will examine each 4641 * metaslab at most once. Note that concurrent callers could reorder metaslabs 4642 * by activation/passivation once we have dropped the mg_lock. If a metaslab is 4643 * activated by another thread, and we fail to allocate from the metaslab we 4644 * have selected, we may not try the newly-activated metaslab, and instead 4645 * activate another metaslab. This is not optimal, but generally does not cause 4646 * any problems (a possible exception being if every metaslab is completely full 4647 * except for the newly-activated metaslab which we fail to examine). 4648 */ 4649 static metaslab_t * 4650 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, 4651 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, 4652 boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search, 4653 boolean_t *was_active) 4654 { 4655 avl_index_t idx; 4656 avl_tree_t *t = &mg->mg_metaslab_tree; 4657 metaslab_t *msp = avl_find(t, search, &idx); 4658 if (msp == NULL) 4659 msp = avl_nearest(t, idx, AVL_AFTER); 4660 4661 uint_t tries = 0; 4662 for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 4663 int i; 4664 4665 if (!try_hard && tries > zfs_metaslab_find_max_tries) { 4666 METASLABSTAT_BUMP(metaslabstat_too_many_tries); 4667 return (NULL); 4668 } 4669 tries++; 4670 4671 if (!metaslab_should_allocate(msp, asize, try_hard)) { 4672 metaslab_trace_add(zal, mg, msp, asize, d, 4673 TRACE_TOO_SMALL, allocator); 4674 continue; 4675 } 4676 4677 /* 4678 * If the selected metaslab is condensing or disabled, 4679 * skip it. 4680 */ 4681 if (msp->ms_condensing || msp->ms_disabled > 0) 4682 continue; 4683 4684 *was_active = msp->ms_allocator != -1; 4685 /* 4686 * If we're activating as primary, this is our first allocation 4687 * from this disk, so we don't need to check how close we are. 4688 * If the metaslab under consideration was already active, 4689 * we're getting desperate enough to steal another allocator's 4690 * metaslab, so we still don't care about distances. 4691 */ 4692 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) 4693 break; 4694 4695 for (i = 0; i < d; i++) { 4696 if (want_unique && 4697 !metaslab_is_unique(msp, &dva[i])) 4698 break; /* try another metaslab */ 4699 } 4700 if (i == d) 4701 break; 4702 } 4703 4704 if (msp != NULL) { 4705 search->ms_weight = msp->ms_weight; 4706 search->ms_start = msp->ms_start + 1; 4707 search->ms_allocator = msp->ms_allocator; 4708 search->ms_primary = msp->ms_primary; 4709 } 4710 return (msp); 4711 } 4712 4713 static void 4714 metaslab_active_mask_verify(metaslab_t *msp) 4715 { 4716 ASSERT(MUTEX_HELD(&msp->ms_lock)); 4717 4718 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 4719 return; 4720 4721 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) 4722 return; 4723 4724 if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) { 4725 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4726 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4727 VERIFY3S(msp->ms_allocator, !=, -1); 4728 VERIFY(msp->ms_primary); 4729 return; 4730 } 4731 4732 if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) { 4733 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4734 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM); 4735 VERIFY3S(msp->ms_allocator, !=, -1); 4736 VERIFY(!msp->ms_primary); 4737 return; 4738 } 4739 4740 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 4741 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 4742 VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 4743 VERIFY3S(msp->ms_allocator, ==, -1); 4744 return; 4745 } 4746 } 4747 4748 static uint64_t 4749 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 4750 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 4751 int allocator, boolean_t try_hard) 4752 { 4753 metaslab_t *msp = NULL; 4754 uint64_t offset = -1ULL; 4755 4756 uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY; 4757 for (int i = 0; i < d; i++) { 4758 if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4759 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4760 activation_weight = METASLAB_WEIGHT_SECONDARY; 4761 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4762 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 4763 activation_weight = METASLAB_WEIGHT_CLAIM; 4764 break; 4765 } 4766 } 4767 4768 /* 4769 * If we don't have enough metaslabs active to fill the entire array, we 4770 * just use the 0th slot. 4771 */ 4772 if (mg->mg_ms_ready < mg->mg_allocators * 3) 4773 allocator = 0; 4774 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 4775 4776 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); 4777 4778 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 4779 search->ms_weight = UINT64_MAX; 4780 search->ms_start = 0; 4781 /* 4782 * At the end of the metaslab tree are the already-active metaslabs, 4783 * first the primaries, then the secondaries. When we resume searching 4784 * through the tree, we need to consider ms_allocator and ms_primary so 4785 * we start in the location right after where we left off, and don't 4786 * accidentally loop forever considering the same metaslabs. 4787 */ 4788 search->ms_allocator = -1; 4789 search->ms_primary = B_TRUE; 4790 for (;;) { 4791 boolean_t was_active = B_FALSE; 4792 4793 mutex_enter(&mg->mg_lock); 4794 4795 if (activation_weight == METASLAB_WEIGHT_PRIMARY && 4796 mga->mga_primary != NULL) { 4797 msp = mga->mga_primary; 4798 4799 /* 4800 * Even though we don't hold the ms_lock for the 4801 * primary metaslab, those fields should not 4802 * change while we hold the mg_lock. Thus it is 4803 * safe to make assertions on them. 4804 */ 4805 ASSERT(msp->ms_primary); 4806 ASSERT3S(msp->ms_allocator, ==, allocator); 4807 ASSERT(msp->ms_loaded); 4808 4809 was_active = B_TRUE; 4810 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4811 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 4812 mga->mga_secondary != NULL) { 4813 msp = mga->mga_secondary; 4814 4815 /* 4816 * See comment above about the similar assertions 4817 * for the primary metaslab. 4818 */ 4819 ASSERT(!msp->ms_primary); 4820 ASSERT3S(msp->ms_allocator, ==, allocator); 4821 ASSERT(msp->ms_loaded); 4822 4823 was_active = B_TRUE; 4824 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 4825 } else { 4826 msp = find_valid_metaslab(mg, activation_weight, dva, d, 4827 want_unique, asize, allocator, try_hard, zal, 4828 search, &was_active); 4829 } 4830 4831 mutex_exit(&mg->mg_lock); 4832 if (msp == NULL) { 4833 kmem_free(search, sizeof (*search)); 4834 return (-1ULL); 4835 } 4836 mutex_enter(&msp->ms_lock); 4837 4838 metaslab_active_mask_verify(msp); 4839 4840 /* 4841 * This code is disabled out because of issues with 4842 * tracepoints in non-gpl kernel modules. 4843 */ 4844 #if 0 4845 DTRACE_PROBE3(ms__activation__attempt, 4846 metaslab_t *, msp, uint64_t, activation_weight, 4847 boolean_t, was_active); 4848 #endif 4849 4850 /* 4851 * Ensure that the metaslab we have selected is still 4852 * capable of handling our request. It's possible that 4853 * another thread may have changed the weight while we 4854 * were blocked on the metaslab lock. We check the 4855 * active status first to see if we need to set_selected_txg 4856 * a new metaslab. 4857 */ 4858 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { 4859 ASSERT3S(msp->ms_allocator, ==, -1); 4860 mutex_exit(&msp->ms_lock); 4861 continue; 4862 } 4863 4864 /* 4865 * If the metaslab was activated for another allocator 4866 * while we were waiting in the ms_lock above, or it's 4867 * a primary and we're seeking a secondary (or vice versa), 4868 * we go back and select a new metaslab. 4869 */ 4870 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) && 4871 (msp->ms_allocator != -1) && 4872 (msp->ms_allocator != allocator || ((activation_weight == 4873 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) { 4874 ASSERT(msp->ms_loaded); 4875 ASSERT((msp->ms_weight & METASLAB_WEIGHT_CLAIM) || 4876 msp->ms_allocator != -1); 4877 mutex_exit(&msp->ms_lock); 4878 continue; 4879 } 4880 4881 /* 4882 * This metaslab was used for claiming regions allocated 4883 * by the ZIL during pool import. Once these regions are 4884 * claimed we don't need to keep the CLAIM bit set 4885 * anymore. Passivate this metaslab to zero its activation 4886 * mask. 4887 */ 4888 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && 4889 activation_weight != METASLAB_WEIGHT_CLAIM) { 4890 ASSERT(msp->ms_loaded); 4891 ASSERT3S(msp->ms_allocator, ==, -1); 4892 metaslab_passivate(msp, msp->ms_weight & 4893 ~METASLAB_WEIGHT_CLAIM); 4894 mutex_exit(&msp->ms_lock); 4895 continue; 4896 } 4897 4898 metaslab_set_selected_txg(msp, txg); 4899 4900 int activation_error = 4901 metaslab_activate(msp, allocator, activation_weight); 4902 metaslab_active_mask_verify(msp); 4903 4904 /* 4905 * If the metaslab was activated by another thread for 4906 * another allocator or activation_weight (EBUSY), or it 4907 * failed because another metaslab was assigned as primary 4908 * for this allocator (EEXIST) we continue using this 4909 * metaslab for our allocation, rather than going on to a 4910 * worse metaslab (we waited for that metaslab to be loaded 4911 * after all). 4912 * 4913 * If the activation failed due to an I/O error or ENOSPC we 4914 * skip to the next metaslab. 4915 */ 4916 boolean_t activated; 4917 if (activation_error == 0) { 4918 activated = B_TRUE; 4919 } else if (activation_error == EBUSY || 4920 activation_error == EEXIST) { 4921 activated = B_FALSE; 4922 } else { 4923 mutex_exit(&msp->ms_lock); 4924 continue; 4925 } 4926 ASSERT(msp->ms_loaded); 4927 4928 /* 4929 * Now that we have the lock, recheck to see if we should 4930 * continue to use this metaslab for this allocation. The 4931 * the metaslab is now loaded so metaslab_should_allocate() 4932 * can accurately determine if the allocation attempt should 4933 * proceed. 4934 */ 4935 if (!metaslab_should_allocate(msp, asize, try_hard)) { 4936 /* Passivate this metaslab and select a new one. */ 4937 metaslab_trace_add(zal, mg, msp, asize, d, 4938 TRACE_TOO_SMALL, allocator); 4939 goto next; 4940 } 4941 4942 /* 4943 * If this metaslab is currently condensing then pick again 4944 * as we can't manipulate this metaslab until it's committed 4945 * to disk. If this metaslab is being initialized, we shouldn't 4946 * allocate from it since the allocated region might be 4947 * overwritten after allocation. 4948 */ 4949 if (msp->ms_condensing) { 4950 metaslab_trace_add(zal, mg, msp, asize, d, 4951 TRACE_CONDENSING, allocator); 4952 if (activated) { 4953 metaslab_passivate(msp, msp->ms_weight & 4954 ~METASLAB_ACTIVE_MASK); 4955 } 4956 mutex_exit(&msp->ms_lock); 4957 continue; 4958 } else if (msp->ms_disabled > 0) { 4959 metaslab_trace_add(zal, mg, msp, asize, d, 4960 TRACE_DISABLED, allocator); 4961 if (activated) { 4962 metaslab_passivate(msp, msp->ms_weight & 4963 ~METASLAB_ACTIVE_MASK); 4964 } 4965 mutex_exit(&msp->ms_lock); 4966 continue; 4967 } 4968 4969 offset = metaslab_block_alloc(msp, asize, txg); 4970 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); 4971 4972 if (offset != -1ULL) { 4973 /* Proactively passivate the metaslab, if needed */ 4974 if (activated) 4975 metaslab_segment_may_passivate(msp); 4976 break; 4977 } 4978 next: 4979 ASSERT(msp->ms_loaded); 4980 4981 /* 4982 * This code is disabled out because of issues with 4983 * tracepoints in non-gpl kernel modules. 4984 */ 4985 #if 0 4986 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp, 4987 uint64_t, asize); 4988 #endif 4989 4990 /* 4991 * We were unable to allocate from this metaslab so determine 4992 * a new weight for this metaslab. Now that we have loaded 4993 * the metaslab we can provide a better hint to the metaslab 4994 * selector. 4995 * 4996 * For space-based metaslabs, we use the maximum block size. 4997 * This information is only available when the metaslab 4998 * is loaded and is more accurate than the generic free 4999 * space weight that was calculated by metaslab_weight(). 5000 * This information allows us to quickly compare the maximum 5001 * available allocation in the metaslab to the allocation 5002 * size being requested. 5003 * 5004 * For segment-based metaslabs, determine the new weight 5005 * based on the highest bucket in the range tree. We 5006 * explicitly use the loaded segment weight (i.e. the range 5007 * tree histogram) since it contains the space that is 5008 * currently available for allocation and is accurate 5009 * even within a sync pass. 5010 */ 5011 uint64_t weight; 5012 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 5013 weight = metaslab_largest_allocatable(msp); 5014 WEIGHT_SET_SPACEBASED(weight); 5015 } else { 5016 weight = metaslab_weight_from_range_tree(msp); 5017 } 5018 5019 if (activated) { 5020 metaslab_passivate(msp, weight); 5021 } else { 5022 /* 5023 * For the case where we use the metaslab that is 5024 * active for another allocator we want to make 5025 * sure that we retain the activation mask. 5026 * 5027 * Note that we could attempt to use something like 5028 * metaslab_recalculate_weight_and_sort() that 5029 * retains the activation mask here. That function 5030 * uses metaslab_weight() to set the weight though 5031 * which is not as accurate as the calculations 5032 * above. 5033 */ 5034 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; 5035 metaslab_group_sort(mg, msp, weight); 5036 } 5037 metaslab_active_mask_verify(msp); 5038 5039 /* 5040 * We have just failed an allocation attempt, check 5041 * that metaslab_should_allocate() agrees. Otherwise, 5042 * we may end up in an infinite loop retrying the same 5043 * metaslab. 5044 */ 5045 ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); 5046 5047 mutex_exit(&msp->ms_lock); 5048 } 5049 mutex_exit(&msp->ms_lock); 5050 kmem_free(search, sizeof (*search)); 5051 return (offset); 5052 } 5053 5054 static uint64_t 5055 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 5056 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 5057 int allocator, boolean_t try_hard) 5058 { 5059 uint64_t offset; 5060 ASSERT(mg->mg_initialized); 5061 5062 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 5063 dva, d, allocator, try_hard); 5064 5065 mutex_enter(&mg->mg_lock); 5066 if (offset == -1ULL) { 5067 mg->mg_failed_allocations++; 5068 metaslab_trace_add(zal, mg, NULL, asize, d, 5069 TRACE_GROUP_FAILURE, allocator); 5070 if (asize == SPA_GANGBLOCKSIZE) { 5071 /* 5072 * This metaslab group was unable to allocate 5073 * the minimum gang block size so it must be out of 5074 * space. We must notify the allocation throttle 5075 * to start skipping allocation attempts to this 5076 * metaslab group until more space becomes available. 5077 * Note: this failure cannot be caused by the 5078 * allocation throttle since the allocation throttle 5079 * is only responsible for skipping devices and 5080 * not failing block allocations. 5081 */ 5082 mg->mg_no_free_space = B_TRUE; 5083 } 5084 } 5085 mg->mg_allocations++; 5086 mutex_exit(&mg->mg_lock); 5087 return (offset); 5088 } 5089 5090 /* 5091 * Allocate a block for the specified i/o. 5092 */ 5093 int 5094 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 5095 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 5096 zio_alloc_list_t *zal, int allocator) 5097 { 5098 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5099 metaslab_group_t *mg, *fast_mg, *rotor; 5100 vdev_t *vd; 5101 boolean_t try_hard = B_FALSE; 5102 5103 ASSERT(!DVA_IS_VALID(&dva[d])); 5104 5105 /* 5106 * For testing, make some blocks above a certain size be gang blocks. 5107 * This will result in more split blocks when using device removal, 5108 * and a large number of split blocks coupled with ztest-induced 5109 * damage can result in extremely long reconstruction times. This 5110 * will also test spilling from special to normal. 5111 */ 5112 if (psize >= metaslab_force_ganging && (random_in_range(100) < 3)) { 5113 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 5114 allocator); 5115 return (SET_ERROR(ENOSPC)); 5116 } 5117 5118 /* 5119 * Start at the rotor and loop through all mgs until we find something. 5120 * Note that there's no locking on mca_rotor or mca_aliquot because 5121 * nothing actually breaks if we miss a few updates -- we just won't 5122 * allocate quite as evenly. It all balances out over time. 5123 * 5124 * If we are doing ditto or log blocks, try to spread them across 5125 * consecutive vdevs. If we're forced to reuse a vdev before we've 5126 * allocated all of our ditto blocks, then try and spread them out on 5127 * that vdev as much as possible. If it turns out to not be possible, 5128 * gradually lower our standards until anything becomes acceptable. 5129 * Also, allocating on consecutive vdevs (as opposed to random vdevs) 5130 * gives us hope of containing our fault domains to something we're 5131 * able to reason about. Otherwise, any two top-level vdev failures 5132 * will guarantee the loss of data. With consecutive allocation, 5133 * only two adjacent top-level vdev failures will result in data loss. 5134 * 5135 * If we are doing gang blocks (hintdva is non-NULL), try to keep 5136 * ourselves on the same vdev as our gang block header. That 5137 * way, we can hope for locality in vdev_cache, plus it makes our 5138 * fault domains something tractable. 5139 */ 5140 if (hintdva) { 5141 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 5142 5143 /* 5144 * It's possible the vdev we're using as the hint no 5145 * longer exists or its mg has been closed (e.g. by 5146 * device removal). Consult the rotor when 5147 * all else fails. 5148 */ 5149 if (vd != NULL && vd->vdev_mg != NULL) { 5150 mg = vdev_get_mg(vd, mc); 5151 5152 if (flags & METASLAB_HINTBP_AVOID) 5153 mg = mg->mg_next; 5154 } else { 5155 mg = mca->mca_rotor; 5156 } 5157 } else if (d != 0) { 5158 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 5159 mg = vd->vdev_mg->mg_next; 5160 } else if (flags & METASLAB_FASTWRITE) { 5161 mg = fast_mg = mca->mca_rotor; 5162 5163 do { 5164 if (fast_mg->mg_vd->vdev_pending_fastwrite < 5165 mg->mg_vd->vdev_pending_fastwrite) 5166 mg = fast_mg; 5167 } while ((fast_mg = fast_mg->mg_next) != mca->mca_rotor); 5168 5169 } else { 5170 ASSERT(mca->mca_rotor != NULL); 5171 mg = mca->mca_rotor; 5172 } 5173 5174 /* 5175 * If the hint put us into the wrong metaslab class, or into a 5176 * metaslab group that has been passivated, just follow the rotor. 5177 */ 5178 if (mg->mg_class != mc || mg->mg_activation_count <= 0) 5179 mg = mca->mca_rotor; 5180 5181 rotor = mg; 5182 top: 5183 do { 5184 boolean_t allocatable; 5185 5186 ASSERT(mg->mg_activation_count == 1); 5187 vd = mg->mg_vd; 5188 5189 /* 5190 * Don't allocate from faulted devices. 5191 */ 5192 if (try_hard) { 5193 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 5194 allocatable = vdev_allocatable(vd); 5195 spa_config_exit(spa, SCL_ZIO, FTAG); 5196 } else { 5197 allocatable = vdev_allocatable(vd); 5198 } 5199 5200 /* 5201 * Determine if the selected metaslab group is eligible 5202 * for allocations. If we're ganging then don't allow 5203 * this metaslab group to skip allocations since that would 5204 * inadvertently return ENOSPC and suspend the pool 5205 * even though space is still available. 5206 */ 5207 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 5208 allocatable = metaslab_group_allocatable(mg, rotor, 5209 flags, psize, allocator, d); 5210 } 5211 5212 if (!allocatable) { 5213 metaslab_trace_add(zal, mg, NULL, psize, d, 5214 TRACE_NOT_ALLOCATABLE, allocator); 5215 goto next; 5216 } 5217 5218 ASSERT(mg->mg_initialized); 5219 5220 /* 5221 * Avoid writing single-copy data to an unhealthy, 5222 * non-redundant vdev, unless we've already tried all 5223 * other vdevs. 5224 */ 5225 if (vd->vdev_state < VDEV_STATE_HEALTHY && 5226 d == 0 && !try_hard && vd->vdev_children == 0) { 5227 metaslab_trace_add(zal, mg, NULL, psize, d, 5228 TRACE_VDEV_ERROR, allocator); 5229 goto next; 5230 } 5231 5232 ASSERT(mg->mg_class == mc); 5233 5234 uint64_t asize = vdev_psize_to_asize(vd, psize); 5235 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 5236 5237 /* 5238 * If we don't need to try hard, then require that the 5239 * block be on a different metaslab from any other DVAs 5240 * in this BP (unique=true). If we are trying hard, then 5241 * allow any metaslab to be used (unique=false). 5242 */ 5243 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 5244 !try_hard, dva, d, allocator, try_hard); 5245 5246 if (offset != -1ULL) { 5247 /* 5248 * If we've just selected this metaslab group, 5249 * figure out whether the corresponding vdev is 5250 * over- or under-used relative to the pool, 5251 * and set an allocation bias to even it out. 5252 * 5253 * Bias is also used to compensate for unequally 5254 * sized vdevs so that space is allocated fairly. 5255 */ 5256 if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { 5257 vdev_stat_t *vs = &vd->vdev_stat; 5258 int64_t vs_free = vs->vs_space - vs->vs_alloc; 5259 int64_t mc_free = mc->mc_space - mc->mc_alloc; 5260 int64_t ratio; 5261 5262 /* 5263 * Calculate how much more or less we should 5264 * try to allocate from this device during 5265 * this iteration around the rotor. 5266 * 5267 * This basically introduces a zero-centered 5268 * bias towards the devices with the most 5269 * free space, while compensating for vdev 5270 * size differences. 5271 * 5272 * Examples: 5273 * vdev V1 = 16M/128M 5274 * vdev V2 = 16M/128M 5275 * ratio(V1) = 100% ratio(V2) = 100% 5276 * 5277 * vdev V1 = 16M/128M 5278 * vdev V2 = 64M/128M 5279 * ratio(V1) = 127% ratio(V2) = 72% 5280 * 5281 * vdev V1 = 16M/128M 5282 * vdev V2 = 64M/512M 5283 * ratio(V1) = 40% ratio(V2) = 160% 5284 */ 5285 ratio = (vs_free * mc->mc_alloc_groups * 100) / 5286 (mc_free + 1); 5287 mg->mg_bias = ((ratio - 100) * 5288 (int64_t)mg->mg_aliquot) / 100; 5289 } else if (!metaslab_bias_enabled) { 5290 mg->mg_bias = 0; 5291 } 5292 5293 if ((flags & METASLAB_FASTWRITE) || 5294 atomic_add_64_nv(&mca->mca_aliquot, asize) >= 5295 mg->mg_aliquot + mg->mg_bias) { 5296 mca->mca_rotor = mg->mg_next; 5297 mca->mca_aliquot = 0; 5298 } 5299 5300 DVA_SET_VDEV(&dva[d], vd->vdev_id); 5301 DVA_SET_OFFSET(&dva[d], offset); 5302 DVA_SET_GANG(&dva[d], 5303 ((flags & METASLAB_GANG_HEADER) ? 1 : 0)); 5304 DVA_SET_ASIZE(&dva[d], asize); 5305 5306 if (flags & METASLAB_FASTWRITE) { 5307 atomic_add_64(&vd->vdev_pending_fastwrite, 5308 psize); 5309 } 5310 5311 return (0); 5312 } 5313 next: 5314 mca->mca_rotor = mg->mg_next; 5315 mca->mca_aliquot = 0; 5316 } while ((mg = mg->mg_next) != rotor); 5317 5318 /* 5319 * If we haven't tried hard, perhaps do so now. 5320 */ 5321 if (!try_hard && (zfs_metaslab_try_hard_before_gang || 5322 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || 5323 psize <= 1 << spa->spa_min_ashift)) { 5324 METASLABSTAT_BUMP(metaslabstat_try_hard); 5325 try_hard = B_TRUE; 5326 goto top; 5327 } 5328 5329 memset(&dva[d], 0, sizeof (dva_t)); 5330 5331 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 5332 return (SET_ERROR(ENOSPC)); 5333 } 5334 5335 void 5336 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 5337 boolean_t checkpoint) 5338 { 5339 metaslab_t *msp; 5340 spa_t *spa = vd->vdev_spa; 5341 5342 ASSERT(vdev_is_concrete(vd)); 5343 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5344 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5345 5346 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5347 5348 VERIFY(!msp->ms_condensing); 5349 VERIFY3U(offset, >=, msp->ms_start); 5350 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 5351 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5352 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 5353 5354 metaslab_check_free_impl(vd, offset, asize); 5355 5356 mutex_enter(&msp->ms_lock); 5357 if (range_tree_is_empty(msp->ms_freeing) && 5358 range_tree_is_empty(msp->ms_checkpointing)) { 5359 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 5360 } 5361 5362 if (checkpoint) { 5363 ASSERT(spa_has_checkpoint(spa)); 5364 range_tree_add(msp->ms_checkpointing, offset, asize); 5365 } else { 5366 range_tree_add(msp->ms_freeing, offset, asize); 5367 } 5368 mutex_exit(&msp->ms_lock); 5369 } 5370 5371 void 5372 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5373 uint64_t size, void *arg) 5374 { 5375 (void) inner_offset; 5376 boolean_t *checkpoint = arg; 5377 5378 ASSERT3P(checkpoint, !=, NULL); 5379 5380 if (vd->vdev_ops->vdev_op_remap != NULL) 5381 vdev_indirect_mark_obsolete(vd, offset, size); 5382 else 5383 metaslab_free_impl(vd, offset, size, *checkpoint); 5384 } 5385 5386 static void 5387 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 5388 boolean_t checkpoint) 5389 { 5390 spa_t *spa = vd->vdev_spa; 5391 5392 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5393 5394 if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 5395 return; 5396 5397 if (spa->spa_vdev_removal != NULL && 5398 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 5399 vdev_is_concrete(vd)) { 5400 /* 5401 * Note: we check if the vdev is concrete because when 5402 * we complete the removal, we first change the vdev to be 5403 * an indirect vdev (in open context), and then (in syncing 5404 * context) clear spa_vdev_removal. 5405 */ 5406 free_from_removing_vdev(vd, offset, size); 5407 } else if (vd->vdev_ops->vdev_op_remap != NULL) { 5408 vdev_indirect_mark_obsolete(vd, offset, size); 5409 vd->vdev_ops->vdev_op_remap(vd, offset, size, 5410 metaslab_free_impl_cb, &checkpoint); 5411 } else { 5412 metaslab_free_concrete(vd, offset, size, checkpoint); 5413 } 5414 } 5415 5416 typedef struct remap_blkptr_cb_arg { 5417 blkptr_t *rbca_bp; 5418 spa_remap_cb_t rbca_cb; 5419 vdev_t *rbca_remap_vd; 5420 uint64_t rbca_remap_offset; 5421 void *rbca_cb_arg; 5422 } remap_blkptr_cb_arg_t; 5423 5424 static void 5425 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5426 uint64_t size, void *arg) 5427 { 5428 remap_blkptr_cb_arg_t *rbca = arg; 5429 blkptr_t *bp = rbca->rbca_bp; 5430 5431 /* We can not remap split blocks. */ 5432 if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 5433 return; 5434 ASSERT0(inner_offset); 5435 5436 if (rbca->rbca_cb != NULL) { 5437 /* 5438 * At this point we know that we are not handling split 5439 * blocks and we invoke the callback on the previous 5440 * vdev which must be indirect. 5441 */ 5442 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 5443 5444 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 5445 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 5446 5447 /* set up remap_blkptr_cb_arg for the next call */ 5448 rbca->rbca_remap_vd = vd; 5449 rbca->rbca_remap_offset = offset; 5450 } 5451 5452 /* 5453 * The phys birth time is that of dva[0]. This ensures that we know 5454 * when each dva was written, so that resilver can determine which 5455 * blocks need to be scrubbed (i.e. those written during the time 5456 * the vdev was offline). It also ensures that the key used in 5457 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If 5458 * we didn't change the phys_birth, a lookup in the ARC for a 5459 * remapped BP could find the data that was previously stored at 5460 * this vdev + offset. 5461 */ 5462 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, 5463 DVA_GET_VDEV(&bp->blk_dva[0])); 5464 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; 5465 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, 5466 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); 5467 5468 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); 5469 DVA_SET_OFFSET(&bp->blk_dva[0], offset); 5470 } 5471 5472 /* 5473 * If the block pointer contains any indirect DVAs, modify them to refer to 5474 * concrete DVAs. Note that this will sometimes not be possible, leaving 5475 * the indirect DVA in place. This happens if the indirect DVA spans multiple 5476 * segments in the mapping (i.e. it is a "split block"). 5477 * 5478 * If the BP was remapped, calls the callback on the original dva (note the 5479 * callback can be called multiple times if the original indirect DVA refers 5480 * to another indirect DVA, etc). 5481 * 5482 * Returns TRUE if the BP was remapped. 5483 */ 5484 boolean_t 5485 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) 5486 { 5487 remap_blkptr_cb_arg_t rbca; 5488 5489 if (!zfs_remap_blkptr_enable) 5490 return (B_FALSE); 5491 5492 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) 5493 return (B_FALSE); 5494 5495 /* 5496 * Dedup BP's can not be remapped, because ddt_phys_select() depends 5497 * on DVA[0] being the same in the BP as in the DDT (dedup table). 5498 */ 5499 if (BP_GET_DEDUP(bp)) 5500 return (B_FALSE); 5501 5502 /* 5503 * Gang blocks can not be remapped, because 5504 * zio_checksum_gang_verifier() depends on the DVA[0] that's in 5505 * the BP used to read the gang block header (GBH) being the same 5506 * as the DVA[0] that we allocated for the GBH. 5507 */ 5508 if (BP_IS_GANG(bp)) 5509 return (B_FALSE); 5510 5511 /* 5512 * Embedded BP's have no DVA to remap. 5513 */ 5514 if (BP_GET_NDVAS(bp) < 1) 5515 return (B_FALSE); 5516 5517 /* 5518 * Note: we only remap dva[0]. If we remapped other dvas, we 5519 * would no longer know what their phys birth txg is. 5520 */ 5521 dva_t *dva = &bp->blk_dva[0]; 5522 5523 uint64_t offset = DVA_GET_OFFSET(dva); 5524 uint64_t size = DVA_GET_ASIZE(dva); 5525 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 5526 5527 if (vd->vdev_ops->vdev_op_remap == NULL) 5528 return (B_FALSE); 5529 5530 rbca.rbca_bp = bp; 5531 rbca.rbca_cb = callback; 5532 rbca.rbca_remap_vd = vd; 5533 rbca.rbca_remap_offset = offset; 5534 rbca.rbca_cb_arg = arg; 5535 5536 /* 5537 * remap_blkptr_cb() will be called in order for each level of 5538 * indirection, until a concrete vdev is reached or a split block is 5539 * encountered. old_vd and old_offset are updated within the callback 5540 * as we go from the one indirect vdev to the next one (either concrete 5541 * or indirect again) in that order. 5542 */ 5543 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); 5544 5545 /* Check if the DVA wasn't remapped because it is a split block */ 5546 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) 5547 return (B_FALSE); 5548 5549 return (B_TRUE); 5550 } 5551 5552 /* 5553 * Undo the allocation of a DVA which happened in the given transaction group. 5554 */ 5555 void 5556 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5557 { 5558 metaslab_t *msp; 5559 vdev_t *vd; 5560 uint64_t vdev = DVA_GET_VDEV(dva); 5561 uint64_t offset = DVA_GET_OFFSET(dva); 5562 uint64_t size = DVA_GET_ASIZE(dva); 5563 5564 ASSERT(DVA_IS_VALID(dva)); 5565 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5566 5567 if (txg > spa_freeze_txg(spa)) 5568 return; 5569 5570 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || 5571 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 5572 zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu", 5573 (u_longlong_t)vdev, (u_longlong_t)offset, 5574 (u_longlong_t)size); 5575 return; 5576 } 5577 5578 ASSERT(!vd->vdev_removing); 5579 ASSERT(vdev_is_concrete(vd)); 5580 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 5581 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 5582 5583 if (DVA_GET_GANG(dva)) 5584 size = vdev_gang_header_asize(vd); 5585 5586 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5587 5588 mutex_enter(&msp->ms_lock); 5589 range_tree_remove(msp->ms_allocating[txg & TXG_MASK], 5590 offset, size); 5591 msp->ms_allocating_total -= size; 5592 5593 VERIFY(!msp->ms_condensing); 5594 VERIFY3U(offset, >=, msp->ms_start); 5595 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 5596 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=, 5597 msp->ms_size); 5598 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5599 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5600 range_tree_add(msp->ms_allocatable, offset, size); 5601 mutex_exit(&msp->ms_lock); 5602 } 5603 5604 /* 5605 * Free the block represented by the given DVA. 5606 */ 5607 void 5608 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) 5609 { 5610 uint64_t vdev = DVA_GET_VDEV(dva); 5611 uint64_t offset = DVA_GET_OFFSET(dva); 5612 uint64_t size = DVA_GET_ASIZE(dva); 5613 vdev_t *vd = vdev_lookup_top(spa, vdev); 5614 5615 ASSERT(DVA_IS_VALID(dva)); 5616 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5617 5618 if (DVA_GET_GANG(dva)) { 5619 size = vdev_gang_header_asize(vd); 5620 } 5621 5622 metaslab_free_impl(vd, offset, size, checkpoint); 5623 } 5624 5625 /* 5626 * Reserve some allocation slots. The reservation system must be called 5627 * before we call into the allocator. If there aren't any available slots 5628 * then the I/O will be throttled until an I/O completes and its slots are 5629 * freed up. The function returns true if it was successful in placing 5630 * the reservation. 5631 */ 5632 boolean_t 5633 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, 5634 zio_t *zio, int flags) 5635 { 5636 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5637 uint64_t max = mca->mca_alloc_max_slots; 5638 5639 ASSERT(mc->mc_alloc_throttle_enabled); 5640 if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) || 5641 zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) { 5642 /* 5643 * The potential race between _count() and _add() is covered 5644 * by the allocator lock in most cases, or irrelevant due to 5645 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE set in others. 5646 * But even if we assume some other non-existing scenario, the 5647 * worst that can happen is few more I/Os get to allocation 5648 * earlier, that is not a problem. 5649 * 5650 * We reserve the slots individually so that we can unreserve 5651 * them individually when an I/O completes. 5652 */ 5653 zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio); 5654 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 5655 return (B_TRUE); 5656 } 5657 return (B_FALSE); 5658 } 5659 5660 void 5661 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 5662 int allocator, zio_t *zio) 5663 { 5664 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5665 5666 ASSERT(mc->mc_alloc_throttle_enabled); 5667 zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio); 5668 } 5669 5670 static int 5671 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 5672 uint64_t txg) 5673 { 5674 metaslab_t *msp; 5675 spa_t *spa = vd->vdev_spa; 5676 int error = 0; 5677 5678 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 5679 return (SET_ERROR(ENXIO)); 5680 5681 ASSERT3P(vd->vdev_ms, !=, NULL); 5682 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5683 5684 mutex_enter(&msp->ms_lock); 5685 5686 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { 5687 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 5688 if (error == EBUSY) { 5689 ASSERT(msp->ms_loaded); 5690 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 5691 error = 0; 5692 } 5693 } 5694 5695 if (error == 0 && 5696 !range_tree_contains(msp->ms_allocatable, offset, size)) 5697 error = SET_ERROR(ENOENT); 5698 5699 if (error || txg == 0) { /* txg == 0 indicates dry run */ 5700 mutex_exit(&msp->ms_lock); 5701 return (error); 5702 } 5703 5704 VERIFY(!msp->ms_condensing); 5705 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5706 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5707 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, 5708 msp->ms_size); 5709 range_tree_remove(msp->ms_allocatable, offset, size); 5710 range_tree_clear(msp->ms_trim, offset, size); 5711 5712 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ 5713 metaslab_class_t *mc = msp->ms_group->mg_class; 5714 multilist_sublist_t *mls = 5715 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 5716 if (!multilist_link_active(&msp->ms_class_txg_node)) { 5717 msp->ms_selected_txg = txg; 5718 multilist_sublist_insert_head(mls, msp); 5719 } 5720 multilist_sublist_unlock(mls); 5721 5722 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 5723 vdev_dirty(vd, VDD_METASLAB, msp, txg); 5724 range_tree_add(msp->ms_allocating[txg & TXG_MASK], 5725 offset, size); 5726 msp->ms_allocating_total += size; 5727 } 5728 5729 mutex_exit(&msp->ms_lock); 5730 5731 return (0); 5732 } 5733 5734 typedef struct metaslab_claim_cb_arg_t { 5735 uint64_t mcca_txg; 5736 int mcca_error; 5737 } metaslab_claim_cb_arg_t; 5738 5739 static void 5740 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5741 uint64_t size, void *arg) 5742 { 5743 (void) inner_offset; 5744 metaslab_claim_cb_arg_t *mcca_arg = arg; 5745 5746 if (mcca_arg->mcca_error == 0) { 5747 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 5748 size, mcca_arg->mcca_txg); 5749 } 5750 } 5751 5752 int 5753 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 5754 { 5755 if (vd->vdev_ops->vdev_op_remap != NULL) { 5756 metaslab_claim_cb_arg_t arg; 5757 5758 /* 5759 * Only zdb(8) can claim on indirect vdevs. This is used 5760 * to detect leaks of mapped space (that are not accounted 5761 * for in the obsolete counts, spacemap, or bpobj). 5762 */ 5763 ASSERT(!spa_writeable(vd->vdev_spa)); 5764 arg.mcca_error = 0; 5765 arg.mcca_txg = txg; 5766 5767 vd->vdev_ops->vdev_op_remap(vd, offset, size, 5768 metaslab_claim_impl_cb, &arg); 5769 5770 if (arg.mcca_error == 0) { 5771 arg.mcca_error = metaslab_claim_concrete(vd, 5772 offset, size, txg); 5773 } 5774 return (arg.mcca_error); 5775 } else { 5776 return (metaslab_claim_concrete(vd, offset, size, txg)); 5777 } 5778 } 5779 5780 /* 5781 * Intent log support: upon opening the pool after a crash, notify the SPA 5782 * of blocks that the intent log has allocated for immediate write, but 5783 * which are still considered free by the SPA because the last transaction 5784 * group didn't commit yet. 5785 */ 5786 static int 5787 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5788 { 5789 uint64_t vdev = DVA_GET_VDEV(dva); 5790 uint64_t offset = DVA_GET_OFFSET(dva); 5791 uint64_t size = DVA_GET_ASIZE(dva); 5792 vdev_t *vd; 5793 5794 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 5795 return (SET_ERROR(ENXIO)); 5796 } 5797 5798 ASSERT(DVA_IS_VALID(dva)); 5799 5800 if (DVA_GET_GANG(dva)) 5801 size = vdev_gang_header_asize(vd); 5802 5803 return (metaslab_claim_impl(vd, offset, size, txg)); 5804 } 5805 5806 int 5807 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 5808 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 5809 zio_alloc_list_t *zal, zio_t *zio, int allocator) 5810 { 5811 dva_t *dva = bp->blk_dva; 5812 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; 5813 int error = 0; 5814 5815 ASSERT(bp->blk_birth == 0); 5816 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 5817 5818 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5819 5820 if (mc->mc_allocator[allocator].mca_rotor == NULL) { 5821 /* no vdevs in this class */ 5822 spa_config_exit(spa, SCL_ALLOC, FTAG); 5823 return (SET_ERROR(ENOSPC)); 5824 } 5825 5826 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 5827 ASSERT(BP_GET_NDVAS(bp) == 0); 5828 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 5829 ASSERT3P(zal, !=, NULL); 5830 5831 for (int d = 0; d < ndvas; d++) { 5832 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 5833 txg, flags, zal, allocator); 5834 if (error != 0) { 5835 for (d--; d >= 0; d--) { 5836 metaslab_unalloc_dva(spa, &dva[d], txg); 5837 metaslab_group_alloc_decrement(spa, 5838 DVA_GET_VDEV(&dva[d]), zio, flags, 5839 allocator, B_FALSE); 5840 memset(&dva[d], 0, sizeof (dva_t)); 5841 } 5842 spa_config_exit(spa, SCL_ALLOC, FTAG); 5843 return (error); 5844 } else { 5845 /* 5846 * Update the metaslab group's queue depth 5847 * based on the newly allocated dva. 5848 */ 5849 metaslab_group_alloc_increment(spa, 5850 DVA_GET_VDEV(&dva[d]), zio, flags, allocator); 5851 } 5852 } 5853 ASSERT(error == 0); 5854 ASSERT(BP_GET_NDVAS(bp) == ndvas); 5855 5856 spa_config_exit(spa, SCL_ALLOC, FTAG); 5857 5858 BP_SET_BIRTH(bp, txg, 0); 5859 5860 return (0); 5861 } 5862 5863 void 5864 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 5865 { 5866 const dva_t *dva = bp->blk_dva; 5867 int ndvas = BP_GET_NDVAS(bp); 5868 5869 ASSERT(!BP_IS_HOLE(bp)); 5870 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); 5871 5872 /* 5873 * If we have a checkpoint for the pool we need to make sure that 5874 * the blocks that we free that are part of the checkpoint won't be 5875 * reused until the checkpoint is discarded or we revert to it. 5876 * 5877 * The checkpoint flag is passed down the metaslab_free code path 5878 * and is set whenever we want to add a block to the checkpoint's 5879 * accounting. That is, we "checkpoint" blocks that existed at the 5880 * time the checkpoint was created and are therefore referenced by 5881 * the checkpointed uberblock. 5882 * 5883 * Note that, we don't checkpoint any blocks if the current 5884 * syncing txg <= spa_checkpoint_txg. We want these frees to sync 5885 * normally as they will be referenced by the checkpointed uberblock. 5886 */ 5887 boolean_t checkpoint = B_FALSE; 5888 if (bp->blk_birth <= spa->spa_checkpoint_txg && 5889 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { 5890 /* 5891 * At this point, if the block is part of the checkpoint 5892 * there is no way it was created in the current txg. 5893 */ 5894 ASSERT(!now); 5895 ASSERT3U(spa_syncing_txg(spa), ==, txg); 5896 checkpoint = B_TRUE; 5897 } 5898 5899 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 5900 5901 for (int d = 0; d < ndvas; d++) { 5902 if (now) { 5903 metaslab_unalloc_dva(spa, &dva[d], txg); 5904 } else { 5905 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 5906 metaslab_free_dva(spa, &dva[d], checkpoint); 5907 } 5908 } 5909 5910 spa_config_exit(spa, SCL_FREE, FTAG); 5911 } 5912 5913 int 5914 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 5915 { 5916 const dva_t *dva = bp->blk_dva; 5917 int ndvas = BP_GET_NDVAS(bp); 5918 int error = 0; 5919 5920 ASSERT(!BP_IS_HOLE(bp)); 5921 5922 if (txg != 0) { 5923 /* 5924 * First do a dry run to make sure all DVAs are claimable, 5925 * so we don't have to unwind from partial failures below. 5926 */ 5927 if ((error = metaslab_claim(spa, bp, 0)) != 0) 5928 return (error); 5929 } 5930 5931 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5932 5933 for (int d = 0; d < ndvas; d++) { 5934 error = metaslab_claim_dva(spa, &dva[d], txg); 5935 if (error != 0) 5936 break; 5937 } 5938 5939 spa_config_exit(spa, SCL_ALLOC, FTAG); 5940 5941 ASSERT(error == 0 || txg == 0); 5942 5943 return (error); 5944 } 5945 5946 void 5947 metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp) 5948 { 5949 const dva_t *dva = bp->blk_dva; 5950 int ndvas = BP_GET_NDVAS(bp); 5951 uint64_t psize = BP_GET_PSIZE(bp); 5952 int d; 5953 vdev_t *vd; 5954 5955 ASSERT(!BP_IS_HOLE(bp)); 5956 ASSERT(!BP_IS_EMBEDDED(bp)); 5957 ASSERT(psize > 0); 5958 5959 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 5960 5961 for (d = 0; d < ndvas; d++) { 5962 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) 5963 continue; 5964 atomic_add_64(&vd->vdev_pending_fastwrite, psize); 5965 } 5966 5967 spa_config_exit(spa, SCL_VDEV, FTAG); 5968 } 5969 5970 void 5971 metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp) 5972 { 5973 const dva_t *dva = bp->blk_dva; 5974 int ndvas = BP_GET_NDVAS(bp); 5975 uint64_t psize = BP_GET_PSIZE(bp); 5976 int d; 5977 vdev_t *vd; 5978 5979 ASSERT(!BP_IS_HOLE(bp)); 5980 ASSERT(!BP_IS_EMBEDDED(bp)); 5981 ASSERT(psize > 0); 5982 5983 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 5984 5985 for (d = 0; d < ndvas; d++) { 5986 if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL) 5987 continue; 5988 ASSERT3U(vd->vdev_pending_fastwrite, >=, psize); 5989 atomic_sub_64(&vd->vdev_pending_fastwrite, psize); 5990 } 5991 5992 spa_config_exit(spa, SCL_VDEV, FTAG); 5993 } 5994 5995 static void 5996 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, 5997 uint64_t size, void *arg) 5998 { 5999 (void) inner, (void) arg; 6000 6001 if (vd->vdev_ops == &vdev_indirect_ops) 6002 return; 6003 6004 metaslab_check_free_impl(vd, offset, size); 6005 } 6006 6007 static void 6008 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) 6009 { 6010 metaslab_t *msp; 6011 spa_t *spa __maybe_unused = vd->vdev_spa; 6012 6013 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6014 return; 6015 6016 if (vd->vdev_ops->vdev_op_remap != NULL) { 6017 vd->vdev_ops->vdev_op_remap(vd, offset, size, 6018 metaslab_check_free_impl_cb, NULL); 6019 return; 6020 } 6021 6022 ASSERT(vdev_is_concrete(vd)); 6023 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 6024 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 6025 6026 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 6027 6028 mutex_enter(&msp->ms_lock); 6029 if (msp->ms_loaded) { 6030 range_tree_verify_not_present(msp->ms_allocatable, 6031 offset, size); 6032 } 6033 6034 /* 6035 * Check all segments that currently exist in the freeing pipeline. 6036 * 6037 * It would intuitively make sense to also check the current allocating 6038 * tree since metaslab_unalloc_dva() exists for extents that are 6039 * allocated and freed in the same sync pass within the same txg. 6040 * Unfortunately there are places (e.g. the ZIL) where we allocate a 6041 * segment but then we free part of it within the same txg 6042 * [see zil_sync()]. Thus, we don't call range_tree_verify() in the 6043 * current allocating tree. 6044 */ 6045 range_tree_verify_not_present(msp->ms_freeing, offset, size); 6046 range_tree_verify_not_present(msp->ms_checkpointing, offset, size); 6047 range_tree_verify_not_present(msp->ms_freed, offset, size); 6048 for (int j = 0; j < TXG_DEFER_SIZE; j++) 6049 range_tree_verify_not_present(msp->ms_defer[j], offset, size); 6050 range_tree_verify_not_present(msp->ms_trim, offset, size); 6051 mutex_exit(&msp->ms_lock); 6052 } 6053 6054 void 6055 metaslab_check_free(spa_t *spa, const blkptr_t *bp) 6056 { 6057 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6058 return; 6059 6060 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6061 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 6062 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 6063 vdev_t *vd = vdev_lookup_top(spa, vdev); 6064 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 6065 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 6066 6067 if (DVA_GET_GANG(&bp->blk_dva[i])) 6068 size = vdev_gang_header_asize(vd); 6069 6070 ASSERT3P(vd, !=, NULL); 6071 6072 metaslab_check_free_impl(vd, offset, size); 6073 } 6074 spa_config_exit(spa, SCL_VDEV, FTAG); 6075 } 6076 6077 static void 6078 metaslab_group_disable_wait(metaslab_group_t *mg) 6079 { 6080 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6081 while (mg->mg_disabled_updating) { 6082 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6083 } 6084 } 6085 6086 static void 6087 metaslab_group_disabled_increment(metaslab_group_t *mg) 6088 { 6089 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6090 ASSERT(mg->mg_disabled_updating); 6091 6092 while (mg->mg_ms_disabled >= max_disabled_ms) { 6093 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6094 } 6095 mg->mg_ms_disabled++; 6096 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms); 6097 } 6098 6099 /* 6100 * Mark the metaslab as disabled to prevent any allocations on this metaslab. 6101 * We must also track how many metaslabs are currently disabled within a 6102 * metaslab group and limit them to prevent allocation failures from 6103 * occurring because all metaslabs are disabled. 6104 */ 6105 void 6106 metaslab_disable(metaslab_t *msp) 6107 { 6108 ASSERT(!MUTEX_HELD(&msp->ms_lock)); 6109 metaslab_group_t *mg = msp->ms_group; 6110 6111 mutex_enter(&mg->mg_ms_disabled_lock); 6112 6113 /* 6114 * To keep an accurate count of how many threads have disabled 6115 * a specific metaslab group, we only allow one thread to mark 6116 * the metaslab group at a time. This ensures that the value of 6117 * ms_disabled will be accurate when we decide to mark a metaslab 6118 * group as disabled. To do this we force all other threads 6119 * to wait till the metaslab's mg_disabled_updating flag is no 6120 * longer set. 6121 */ 6122 metaslab_group_disable_wait(mg); 6123 mg->mg_disabled_updating = B_TRUE; 6124 if (msp->ms_disabled == 0) { 6125 metaslab_group_disabled_increment(mg); 6126 } 6127 mutex_enter(&msp->ms_lock); 6128 msp->ms_disabled++; 6129 mutex_exit(&msp->ms_lock); 6130 6131 mg->mg_disabled_updating = B_FALSE; 6132 cv_broadcast(&mg->mg_ms_disabled_cv); 6133 mutex_exit(&mg->mg_ms_disabled_lock); 6134 } 6135 6136 void 6137 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload) 6138 { 6139 metaslab_group_t *mg = msp->ms_group; 6140 spa_t *spa = mg->mg_vd->vdev_spa; 6141 6142 /* 6143 * Wait for the outstanding IO to be synced to prevent newly 6144 * allocated blocks from being overwritten. This used by 6145 * initialize and TRIM which are modifying unallocated space. 6146 */ 6147 if (sync) 6148 txg_wait_synced(spa_get_dsl(spa), 0); 6149 6150 mutex_enter(&mg->mg_ms_disabled_lock); 6151 mutex_enter(&msp->ms_lock); 6152 if (--msp->ms_disabled == 0) { 6153 mg->mg_ms_disabled--; 6154 cv_broadcast(&mg->mg_ms_disabled_cv); 6155 if (unload) 6156 metaslab_unload(msp); 6157 } 6158 mutex_exit(&msp->ms_lock); 6159 mutex_exit(&mg->mg_ms_disabled_lock); 6160 } 6161 6162 void 6163 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty) 6164 { 6165 ms->ms_unflushed_dirty = dirty; 6166 } 6167 6168 static void 6169 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) 6170 { 6171 vdev_t *vd = ms->ms_group->mg_vd; 6172 spa_t *spa = vd->vdev_spa; 6173 objset_t *mos = spa_meta_objset(spa); 6174 6175 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 6176 6177 metaslab_unflushed_phys_t entry = { 6178 .msp_unflushed_txg = metaslab_unflushed_txg(ms), 6179 }; 6180 uint64_t entry_size = sizeof (entry); 6181 uint64_t entry_offset = ms->ms_id * entry_size; 6182 6183 uint64_t object = 0; 6184 int err = zap_lookup(mos, vd->vdev_top_zap, 6185 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6186 &object); 6187 if (err == ENOENT) { 6188 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, 6189 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); 6190 VERIFY0(zap_add(mos, vd->vdev_top_zap, 6191 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6192 &object, tx)); 6193 } else { 6194 VERIFY0(err); 6195 } 6196 6197 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, 6198 &entry, tx); 6199 } 6200 6201 void 6202 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) 6203 { 6204 ms->ms_unflushed_txg = txg; 6205 metaslab_update_ondisk_flush_data(ms, tx); 6206 } 6207 6208 boolean_t 6209 metaslab_unflushed_dirty(metaslab_t *ms) 6210 { 6211 return (ms->ms_unflushed_dirty); 6212 } 6213 6214 uint64_t 6215 metaslab_unflushed_txg(metaslab_t *ms) 6216 { 6217 return (ms->ms_unflushed_txg); 6218 } 6219 6220 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW, 6221 "Allocation granularity (a.k.a. stripe size)"); 6222 6223 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, 6224 "Load all metaslabs when pool is first opened"); 6225 6226 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, 6227 "Prevent metaslabs from being unloaded"); 6228 6229 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, 6230 "Preload potential metaslabs during reassessment"); 6231 6232 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW, 6233 "Delay in txgs after metaslab was last used before unloading"); 6234 6235 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW, 6236 "Delay in milliseconds after metaslab was last used before unloading"); 6237 6238 /* BEGIN CSTYLED */ 6239 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW, 6240 "Percentage of metaslab group size that should be free to make it " 6241 "eligible for allocation"); 6242 6243 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW, 6244 "Percentage of metaslab group size that should be considered eligible " 6245 "for allocations unless all metaslab groups within the metaslab class " 6246 "have also crossed this threshold"); 6247 6248 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, 6249 ZMOD_RW, 6250 "Use the fragmentation metric to prefer less fragmented metaslabs"); 6251 /* END CSTYLED */ 6252 6253 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT, 6254 ZMOD_RW, "Fragmentation for metaslab to allow allocation"); 6255 6256 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, 6257 "Prefer metaslabs with lower LBAs"); 6258 6259 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, 6260 "Enable metaslab group biasing"); 6261 6262 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, 6263 ZMOD_RW, "Enable segment-based metaslab selection"); 6264 6265 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, 6266 "Segment-based metaslab selection maximum buckets before switching"); 6267 6268 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW, 6269 "Blocks larger than this size are forced to be gang blocks"); 6270 6271 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW, 6272 "Max distance (bytes) to search forward before using size tree"); 6273 6274 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, 6275 "When looking in size tree, use largest segment instead of exact fit"); 6276 6277 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64, 6278 ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); 6279 6280 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW, 6281 "Percentage of memory that can be used to store metaslab range trees"); 6282 6283 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, 6284 ZMOD_RW, "Try hard to allocate before ganging"); 6285 6286 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW, 6287 "Normally only consider this many of the best metaslabs in each vdev"); 6288