/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>
#include <sys/btree.h>

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

/*
 * Metaslab granularity, in bytes. This is roughly similar to what would be
 * referred to as the "stripe size" in traditional RAID arrays. In normal
 * operation, we will try to write this amount of data to each disk before
 * moving on to the next top-level vdev.
 */
static uint64_t metaslab_aliquot = 1024 * 1024;

/*
 * For testing, make some blocks above a certain size be gang blocks.
 */
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;

/*
 * Of blocks of size >= metaslab_force_ganging, actually gang them this often.
 */
uint_t metaslab_force_ganging_pct = 3;

/*
 * In pools where the log space map feature is not enabled we touch
 * multiple metaslabs (and their respective space maps) with each
 * transaction group. Thus, we benefit from having a small space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk. So a sane default for the space map block size
 * is 8-16K.
 */
int zfs_metaslab_sm_blksz_no_log = (1 << 14);

/*
 * When the log space map feature is enabled, we accumulate a lot of
 * changes per metaslab that are flushed once in a while so we benefit
 * from a bigger block size like 128K for the metaslab space maps.
 */
int zfs_metaslab_sm_blksz_with_log = (1 << 17);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
uint_t zfs_condense_pct = 200;
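/*
 * Illustrative reading of the tunable above (not an additional
 * constraint enforced anywhere): with the default zfs_condense_pct of
 * 200, an in-core representation that could be written out optimally
 * in about 1MB of space map entries only becomes a candidate for
 * condensing once the existing on-disk space map has grown to at
 * least 2MB, i.e. 200% of the optimal size.
 */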
/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksz), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
static const int zfs_metaslab_condense_block_threshold = 4;

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
static uint_t zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or
 * equal to zfs_mg_fragmentation_threshold. If a metaslab group
 * exceeds this threshold then it will be skipped unless all metaslab
 * groups within the metaslab class have also crossed this threshold.
 *
 * This tunable was introduced to avoid edge cases where we continue
 * allocating from very fragmented disks in our pool while other, less
 * fragmented disks, exist. On the other hand, if all disks in the
 * pool are uniformly approaching the threshold, the threshold can
 * be a speed bump in performance, where we keep switching the disks
 * that we allocate from (e.g. we allocate some segments from disk A,
 * pushing it above the threshold, while freeing segments from disk
 * B brings its fragmentation below the threshold).
 *
 * Empirically, we've seen that our vdev selection for allocations is
 * good enough that fragmentation increases uniformly across all vdevs
 * the majority of the time. Thus we set the threshold percentage high
 * enough to avoid hitting the speed bump on pools that are being pushed
 * to the edge.
 */
static uint_t zfs_mg_fragmentation_threshold = 95;
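/*
 * Illustrative example of how the two thresholds above interact: with
 * a hypothetical zfs_mg_noalloc_threshold of 30 (the default is 0) and
 * the default zfs_mg_fragmentation_threshold of 95, a group with 25%
 * free space or a fragmentation metric of 96 would be skipped, but
 * only while some other group in the class still satisfies both
 * limits; once every group has crossed a threshold, all of them accept
 * allocations again.
 */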
/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
static uint_t zfs_metaslab_fragmentation_threshold = 77;

/*
 * When set will load all metaslabs when the pool is first opened.
 */
int metaslab_debug_load = B_FALSE;

/*
 * When set will prevent metaslabs from being unloaded.
 */
static int metaslab_debug_unload = B_FALSE;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
uint_t metaslab_df_free_pct = 4;

/*
 * Maximum distance to search forward from the last offset. Without this
 * limit, fragmented pools can see >100,000 iterations and
 * metaslab_block_picker() becomes the performance limiting factor on
 * high-performance storage.
 *
 * With the default setting of 16MB, we typically see less than 500
 * iterations, even with very fragmented, ashift=9 pools. The maximum number
 * of iterations possible is:
 *	metaslab_df_max_search / (2 * (1<<ashift))
 * With the default setting of 16MB this is 16*1024 (with ashift=9) or
 * 2048 (with ashift=12).
 */
static uint_t metaslab_df_max_search = 16 * 1024 * 1024;

/*
 * Forces the metaslab_block_picker function to search for at least this many
 * segments forwards until giving up on finding a segment that the allocation
 * will fit into.
 */
static const uint32_t metaslab_min_search_count = 100;

/*
 * If we are not searching forward (due to metaslab_df_max_search,
 * metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable
 * controls what segment is used. If it is set, we will use the largest free
 * segment. If it is not set, we will use a segment of exactly the requested
 * size (or larger).
 */
static int metaslab_df_use_largest_segment = B_FALSE;

/*
 * These tunables control how long a metaslab will remain loaded after the
 * last allocation from it. A metaslab can't be unloaded until at least
 * metaslab_unload_delay TXGs and metaslab_unload_delay_ms milliseconds
 * have elapsed. However, zfs_metaslab_mem_limit may cause it to be
 * unloaded sooner. These settings are intended to be generous -- to keep
 * metaslabs loaded for a long time, reducing the rate of metaslab loading.
 */
static uint_t metaslab_unload_delay = 32;
static uint_t metaslab_unload_delay_ms = 10 * 60 * 1000; /* ten minutes */
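/*
 * To make the interaction of the two delays above concrete
 * (illustrative numbers): a metaslab last selected in TXG 100 at time
 * T is not evicted until the pool has advanced past TXG 132 *and* ten
 * minutes have elapsed since T, since metaslab_class_evict_old()
 * requires both conditions to hold. zfs_metaslab_mem_limit remains
 * the one exception that can unload it earlier.
 */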
/*
 * Max number of metaslabs per group to preload.
 */
uint_t metaslab_preload_limit = 10;

/*
 * Enable/disable preloading of metaslabs.
 */
static int metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
static int metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
static int metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
static int metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
static const boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
static int zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
static int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
static const boolean_t metaslab_trace_enabled = B_FALSE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
static const uint64_t metaslab_trace_max_entries = 5000;

/*
 * Maximum number of metaslabs per group that can be disabled
 * simultaneously.
 */
static const int max_disabled_ms = 3;

/*
 * Time (in seconds) to respect ms_max_size when the metaslab is not loaded.
 * To avoid 64-bit overflow, don't set above UINT32_MAX.
 */
static uint64_t zfs_metaslab_max_size_cache_sec = 1 * 60 * 60; /* 1 hour */

/*
 * Maximum percentage of memory to use on storing loaded metaslabs. If loading
 * a metaslab would take it over this percentage, the oldest selected metaslab
 * is automatically unloaded.
 */
static uint_t zfs_metaslab_mem_limit = 25;

/*
 * Force the per-metaslab range trees to use 64-bit integers to store
 * segments. Used for debugging purposes.
 */
static const boolean_t zfs_metaslab_force_large_segs = B_FALSE;

/*
 * By default we only store segments over a certain size in the size-sorted
 * metaslab trees (ms_allocatable_by_size and
 * ms_unflushed_frees_by_size). This dramatically reduces memory usage and
 * improves load and unload times at the cost of causing us to use slightly
 * larger segments than we would otherwise in some cases.
 */
static const uint32_t metaslab_by_size_min_shift = 14;
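/*
 * With the default metaslab_by_size_min_shift of 14, only segments of
 * at least 1 << 14 = 16K are tracked in the size-sorted trees; smaller
 * free segments remain visible in the offset-sorted tree and can be
 * brought into a size-based lookup by metaslab_size_tree_full_load()
 * below, which rebuilds the size tree with a floor of zero.
 */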
/*
 * If not set, we will first try normal allocation. If that fails then
 * we will do a gang allocation. If that fails then we will do a "try hard"
 * gang allocation. If that fails then we will have a multi-layer gang
 * block.
 *
 * If set, we will first try normal allocation. If that fails then
 * we will do a "try hard" allocation. If that fails we will do a gang
 * allocation. If that fails we will do a "try hard" gang allocation. If
 * that fails then we will have a multi-layer gang block.
 */
static int zfs_metaslab_try_hard_before_gang = B_FALSE;

/*
 * When not trying hard, we only consider the best zfs_metaslab_find_max_tries
 * metaslabs. This improves performance, especially when there are many
 * metaslabs per vdev and the allocation can't actually be satisfied (so we
 * would otherwise iterate all the metaslabs). If there is a metaslab with a
 * worse weight but it can actually satisfy the allocation, we won't find it
 * until trying hard. This may happen if the worse metaslab is not loaded
 * (and the true weight is better than we have calculated), or due to weight
 * bucketization. E.g. we are looking for a 60K segment, and the best
 * metaslabs all have free segments in the 32-63K bucket, but the best
 * zfs_metaslab_find_max_tries metaslabs have ms_max_size <60KB, and a
 * subsequent metaslab has ms_max_size >60KB (but fewer segments in this
 * bucket, and therefore a lower weight).
 */
static uint_t zfs_metaslab_find_max_tries = 100;

static uint64_t metaslab_weight(metaslab_t *, boolean_t);
static void metaslab_set_fragmentation(metaslab_t *, boolean_t);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);

static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);
static void metaslab_flush_update(metaslab_t *, dmu_tx_t *);
static unsigned int metaslab_idx_func(multilist_t *, void *);
static void metaslab_evict(metaslab_t *, uint64_t);
static void metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs,
    void *arg);
kmem_cache_t *metaslab_alloc_trace_cache;

typedef struct metaslab_stats {
	kstat_named_t metaslabstat_trace_over_limit;
	kstat_named_t metaslabstat_reload_tree;
	kstat_named_t metaslabstat_too_many_tries;
	kstat_named_t metaslabstat_try_hard;
} metaslab_stats_t;

static metaslab_stats_t metaslab_stats = {
	{ "trace_over_limit",	KSTAT_DATA_UINT64 },
	{ "reload_tree",	KSTAT_DATA_UINT64 },
	{ "too_many_tries",	KSTAT_DATA_UINT64 },
	{ "try_hard",	KSTAT_DATA_UINT64 },
};

#define	METASLABSTAT_BUMP(stat) \
	atomic_inc_64(&metaslab_stats.stat.value.ui64);


static kstat_t *metaslab_ksp;

void
metaslab_stat_init(void)
{
	ASSERT(metaslab_alloc_trace_cache == NULL);
	metaslab_alloc_trace_cache = kmem_cache_create(
	    "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
	metaslab_ksp = kstat_create("zfs", 0, "metaslab_stats",
	    "misc", KSTAT_TYPE_NAMED, sizeof (metaslab_stats) /
	    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
	if (metaslab_ksp != NULL) {
		metaslab_ksp->ks_data = &metaslab_stats;
		kstat_install(metaslab_ksp);
	}
}

void
metaslab_stat_fini(void)
{
	if (metaslab_ksp != NULL) {
		kstat_delete(metaslab_ksp);
		metaslab_ksp = NULL;
	}

	kmem_cache_destroy(metaslab_alloc_trace_cache);
	metaslab_alloc_trace_cache = NULL;
}

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, const metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	multilist_create(&mc->mc_metaslab_txg_list, sizeof (metaslab_t),
	    offsetof(metaslab_t, ms_class_txg_node), metaslab_idx_func);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		mca->mca_rotor = NULL;
		zfs_refcount_create_tracked(&mca->mca_alloc_slots);
	}

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;

	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		metaslab_class_allocator_t *mca = &mc->mc_allocator[i];
		ASSERT(mca->mca_rotor == NULL);
		zfs_refcount_destroy(&mca->mca_alloc_slots);
	}
	mutex_destroy(&mc->mc_lock);
	multilist_destroy(&mc->mc_metaslab_txg_list);
	kmem_free(mc, offsetof(metaslab_class_t,
	    mc_allocator[spa->spa_alloc_count]));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_allocator[0].mca_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_allocator[0].mca_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	mutex_enter(&mc->mc_lock);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = vdev_get_mg(tvd, mc);

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) {
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
	}

	mutex_exit(&mc->mc_lock);
	kmem_free(mc_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}
/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}
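/*
 * Worked example of the weighting above (illustrative numbers): a
 * class with a 1TB group at fragmentation 20 and a 3TB group at
 * fragmentation 60 yields (20 * 1 + 60 * 3) / 4 = 50, so each group
 * influences the class metric in proportion to the space it
 * contributes.
 */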
/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 */
		space += P2ALIGN_TYPED(tvd->vdev_max_asize - tvd->vdev_asize,
		    1ULL << tvd->vdev_ms_shift, uint64_t);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

void
metaslab_class_evict_old(metaslab_class_t *mc, uint64_t txg)
{
	multilist_t *ml = &mc->mc_metaslab_txg_list;
	hrtime_t now = gethrtime();
	for (int i = 0; i < multilist_get_num_sublists(ml); i++) {
		multilist_sublist_t *mls = multilist_sublist_lock_idx(ml, i);
		metaslab_t *msp = multilist_sublist_head(mls);
		multilist_sublist_unlock(mls);
		while (msp != NULL) {
			mutex_enter(&msp->ms_lock);

			/*
			 * If the metaslab has been removed from the list
			 * (which could happen if we were at the memory limit
			 * and it was evicted during this loop), then we can't
			 * proceed and we should restart the sublist.
			 */
			if (!multilist_link_active(&msp->ms_class_txg_node)) {
				mutex_exit(&msp->ms_lock);
				i--;
				break;
			}
			mls = multilist_sublist_lock_idx(ml, i);
			metaslab_t *next_msp = multilist_sublist_next(mls, msp);
			multilist_sublist_unlock(mls);
			if (txg >
			    msp->ms_selected_txg + metaslab_unload_delay &&
			    now > msp->ms_selected_time +
			    MSEC2NSEC(metaslab_unload_delay_ms) &&
			    (msp->ms_allocator == -1 ||
			    !metaslab_preload_enabled)) {
				metaslab_evict(msp, txg);
			} else {
				/*
				 * Once we've hit a metaslab selected too
				 * recently to evict, we're done evicting for
				 * now.
				 */
				mutex_exit(&msp->ms_lock);
				break;
			}
			mutex_exit(&msp->ms_lock);
			msp = next_msp;
		}
	}
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = TREE_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(TREE_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (TREE_CMP(m1->ms_start, m2->ms_start));
}
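/*
 * For instance, given the ordering rules above, an inactive metaslab
 * with weight 10 sorts before a primary with weight 50, which sorts
 * before a secondary with weight 90 (the weights here are illustrative
 * values); within the same activation state, higher weight sorts
 * first, and ms_start breaks exact weight ties.
 */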
/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the capacity is below
 * the zfs_mg_noalloc_threshold or has a fragmentation value that is
 * greater than zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being
	 * removed) to be initialized, because they can't be used for
	 * allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * mc_alloc_groups maintains a count of the groups in this
	 * metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}
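/*
 * Worked example for the capacity computation above (illustrative
 * numbers): a group whose vdev reports 1TB of space with 250GB
 * allocated computes a free capacity of roughly 75 percent, the +1 in
 * the divisor merely guarding against division by zero. With the
 * default zfs_mg_noalloc_threshold of 0, the group remains allocatable
 * until its free capacity rounds down to 0.
 */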
int
metaslab_sort_by_flushed(const void *va, const void *vb)
{
	const metaslab_t *a = va;
	const metaslab_t *b = vb;

	int cmp = TREE_CMP(a->ms_unflushed_txg, b->ms_unflushed_txg);
	if (likely(cmp))
		return (cmp);

	uint64_t a_vdev_id = a->ms_group->mg_vd->vdev_id;
	uint64_t b_vdev_id = b->ms_group->mg_vd->vdev_id;
	cmp = TREE_CMP(a_vdev_id, b_vdev_id);
	if (cmp)
		return (cmp);

	return (TREE_CMP(a->ms_id, b->ms_id));
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(offsetof(metaslab_group_t,
	    mg_allocator[allocators]), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_disabled_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_disabled_cv, NULL, CV_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	for (int i = 0; i < allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_create_tracked(&mga->mga_alloc_queue_depth);
	}

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_disabled_lock);
	cv_destroy(&mg->mg_ms_disabled_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		zfs_refcount_destroy(&mga->mga_alloc_queue_depth);
	}
	kmem_free(mg, offsetof(metaslab_group_t,
	    mg_allocator[mg->mg_allocators]));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1,
	    vdev_get_ndisks(mg->mg_vd) - vdev_get_nparity(mg->mg_vd));
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_allocator[0].mca_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mc->mc_allocator[i].mca_rotor = mg;
		mg = mg->mg_next;
	}
}
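/*
 * A note on the rotor setup at the end of metaslab_group_activate()
 * above: the final loop deliberately hands each allocator a different
 * starting position in the circular group list (allocator 0 starts at
 * the newly inserted group, allocator 1 at its successor, and so on),
 * staggering the allocators so they do not all begin writing to the
 * same vdev.
 */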
/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		for (int i = 0; i < spa->spa_alloc_count; i++)
			ASSERT(mc->mc_allocator[i].mca_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait_outstanding(spa->spa_metaslab_taskq, 0);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[i];
		metaslab_t *msp = mga->mga_primary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mga->mga_secondary;
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mgnext = NULL;
	} else {
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		if (mc->mc_allocator[i].mca_rotor == mg)
			mc->mc_allocator[i].mca_rotor = mgnext;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}
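/*
 * A note on the mask arithmetic in the function above, assuming the
 * usual one-hot SCL_* bit layout in spa.h (highest lock in the lowest
 * bit): (SCL_ZIO - 1) has the bits of every lock ordered above SCL_ZIO
 * set, so locks & ~(SCL_ZIO - 1) selects the held locks from SCL_ZIO
 * downward. That is how spa_config_exit()/spa_config_enter() drop and
 * reacquire only the SCL_ZIO-and-lower locks while SCL_ALLOC and the
 * higher locks stay held throughout.
 */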
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	/*
	 * Note that the number of nodes in mg_metaslab_tree may be one less
	 * than vdev_ms_count, due to the embedded log metaslab.
	 */
	mutex_enter(&mg->mg_lock);
	uint64_t ms_count = avl_numnodes(&mg->mg_metaslab_tree);
	mutex_exit(&mg->mg_lock);
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(ZFS_RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	mutex_enter(&mg->mg_lock);
	for (metaslab_t *msp = avl_first(t);
	    msp != NULL; msp = AVL_NEXT(t, msp)) {
		VERIFY3P(msp->ms_group, ==, mg);
		/* skip if not active */
		if (msp->ms_sm == NULL)
			continue;

		for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
		}
	}

	for (int i = 0; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	mutex_exit(&mg->mg_lock);

	kmem_free(mg_hist, sizeof (uint64_t) * ZFS_RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	mutex_enter(&mc->mc_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		IMPLY(mg == mg->mg_vd->vdev_log_mg,
		    mc == spa_embedded_log_class(mg->mg_vd->vdev_spa));

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mc->mc_lock);
	mutex_exit(&mg->mg_lock);
}
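/*
 * To illustrate the index arithmetic in the histogram updates above:
 * bucket i of a metaslab's space map histogram counts segments of
 * roughly 2^(i + ashift) bytes, while mg_histogram/mc_histogram are
 * indexed by absolute power of two. With ashift = 12, space map
 * bucket 0 (4K segments) therefore feeds mg_histogram[12], bucket 1
 * feeds mg_histogram[13], and so on.
 */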
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);

	metaslab_class_t *mc = msp->ms_group->mg_class;
	multilist_sublist_t *mls =
	    multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp);
	if (multilist_link_active(&msp->ms_class_txg_node))
		multilist_sublist_remove(mls, msp);
	multilist_sublist_unlock(mls);

	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);

	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. Weight metaslabs
 * by the amount of free space. The return value will be between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
 * group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0, total_ms = 0;
	uint64_t free, total_free = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_group != mg)
			continue;
		total_ms++;
		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;

		valid_ms++;
		free = (msp->ms_size - metaslab_allocated_space(msp)) /
		    SPA_MINBLOCKSIZE;	/* To prevent overflows. */
		total_free += free;
		fragmentation += msp->ms_fragmentation * free;
	}

	if (valid_ms < (total_ms + 1) / 2 || total_free == 0)
		return (ZFS_FRAG_INVALID);

	fragmentation /= total_free;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}
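/*
 * Worked example for the weighting above (illustrative numbers): two
 * metaslabs with 10 and 30 units of free space and fragmentation
 * metrics of 40 and 80 yield (40 * 10 + 80 * 30) / 40 = 70, so
 * emptier metaslabs influence the group metric in proportion to the
 * free space they contribute.
 */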
/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    int flags, uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mga_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
		int64_t qdepth;
		uint64_t qmax = mga->mga_cur_max_alloc_queue_depth;

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Some allocations (e.g., those coming from device removal
		 * where the allocations are not even counted in the
		 * metaslab allocation queues) are allowed to bypass
		 * the throttle.
		 */
		if (flags & METASLAB_DONT_THROTTLE)
			return (B_TRUE);

		/*
		 * Relax allocation throttling for ditto blocks. Due to
		 * random imbalances in allocation it tends to push copies
		 * to one vdev, that looks a bit better at the moment.
		 */
		qmax = qmax * (4 + d) / 4;
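		/*
		 * For example (illustrative numbers): for the third
		 * copy of a ditto block, d == 2, so a base queue depth
		 * limit of 1000 is relaxed to 1000 * 6 / 4 = 1500
		 * before the depth check below.
		 */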

		qdepth = zfs_refcount_count(&mga->mga_alloc_queue_depth);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (metaslab_group_t *mgp = mg->mg_next;
		    mgp != rotor; mgp = mgp->mg_next) {
			metaslab_group_allocator_t *mgap =
			    &mgp->mg_allocator[allocator];
			qmax = mgap->mga_cur_max_alloc_queue_depth;
			qmax = qmax * (4 + d) / 4;
			qdepth =
			    zfs_refcount_count(&mgap->mga_alloc_queue_depth);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);

	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree using 32-bit
 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
 */
__attribute__((always_inline)) inline
static int
metaslab_rangesize32_compare(const void *x1, const void *x2)
{
	const zfs_range_seg32_t *r1 = x1;
	const zfs_range_seg32_t *r2 = x2;

	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = TREE_CMP(rs_size1, rs_size2);

	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}

/*
 * Comparison function for the private size-ordered tree using 64-bit
 * ranges. Tree is sorted by size, larger sizes at the end of the tree.
 */
__attribute__((always_inline)) inline
static int
metaslab_rangesize64_compare(const void *x1, const void *x2)
{
	const zfs_range_seg64_t *r1 = x1;
	const zfs_range_seg64_t *r2 = x2;

	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = TREE_CMP(rs_size1, rs_size2);

	return (cmp + !cmp * TREE_CMP(r1->rs_start, r2->rs_start));
}
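/*
 * A note on the return expression in the comparators above: TREE_CMP()
 * evaluates to -1, 0, or 1, so (cmp + !cmp * TREE_CMP(rs_start...))
 * sorts primarily by segment size and falls back to the start offset
 * only on a size tie (when cmp == 0 and !cmp == 1), keeping
 * equal-sized segments in a deterministic order.
 */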
typedef struct metaslab_rt_arg {
	zfs_btree_t *mra_bt;
	uint32_t mra_floor_shift;
} metaslab_rt_arg_t;

struct mssa_arg {
	zfs_range_tree_t *rt;
	metaslab_rt_arg_t *mra;
};

static void
metaslab_size_sorted_add(void *arg, uint64_t start, uint64_t size)
{
	struct mssa_arg *mssap = arg;
	zfs_range_tree_t *rt = mssap->rt;
	metaslab_rt_arg_t *mrap = mssap->mra;
	zfs_range_seg_max_t seg = {0};
	zfs_rs_set_start(&seg, rt, start);
	zfs_rs_set_end(&seg, rt, start + size);
	metaslab_rt_add(rt, &seg, mrap);
}

static void
metaslab_size_tree_full_load(zfs_range_tree_t *rt)
{
	metaslab_rt_arg_t *mrap = rt->rt_arg;
	METASLABSTAT_BUMP(metaslabstat_reload_tree);
	ASSERT0(zfs_btree_numnodes(mrap->mra_bt));
	mrap->mra_floor_shift = 0;
	struct mssa_arg arg = {0};
	arg.rt = rt;
	arg.mra = mrap;
	zfs_range_tree_walk(rt, metaslab_size_sorted_add, &arg);
}


ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize32_in_buf,
    zfs_range_seg32_t, metaslab_rangesize32_compare)

ZFS_BTREE_FIND_IN_BUF_FUNC(metaslab_rt_find_rangesize64_in_buf,
    zfs_range_seg64_t, metaslab_rangesize64_compare)

/*
 * Create any block allocator specific components. The current allocators
 * rely on using both a size-ordered zfs_range_tree_t and an array of
 * uint64_t's.
 */
static void
metaslab_rt_create(zfs_range_tree_t *rt, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	size_t size;
	int (*compare) (const void *, const void *);
	bt_find_in_buf_f bt_find;
	switch (rt->rt_type) {
	case ZFS_RANGE_SEG32:
		size = sizeof (zfs_range_seg32_t);
		compare = metaslab_rangesize32_compare;
		bt_find = metaslab_rt_find_rangesize32_in_buf;
		break;
	case ZFS_RANGE_SEG64:
		size = sizeof (zfs_range_seg64_t);
		compare = metaslab_rangesize64_compare;
		bt_find = metaslab_rt_find_rangesize64_in_buf;
		break;
	default:
		panic("Invalid range seg type %d", rt->rt_type);
	}
	zfs_btree_create(size_tree, compare, bt_find, size);
	mrap->mra_floor_shift = metaslab_by_size_min_shift;
}

static void
metaslab_rt_destroy(zfs_range_tree_t *rt, void *arg)
{
	(void) rt;
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	zfs_btree_destroy(size_tree);
	kmem_free(mrap, sizeof (*mrap));
}

static void
metaslab_rt_add(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) <
	    (1ULL << mrap->mra_floor_shift))
		return;

	zfs_btree_add(size_tree, rs);
}

static void
metaslab_rt_remove(zfs_range_tree_t *rt, zfs_range_seg_t *rs, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;

	if (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt) < (1ULL <<
	    mrap->mra_floor_shift))
		return;

	zfs_btree_remove(size_tree, rs);
}

static void
metaslab_rt_vacate(zfs_range_tree_t *rt, void *arg)
{
	metaslab_rt_arg_t *mrap = arg;
	zfs_btree_t *size_tree = mrap->mra_bt;
	zfs_btree_clear(size_tree);
	zfs_btree_destroy(size_tree);

	metaslab_rt_create(rt, arg);
}

static const zfs_range_tree_ops_t metaslab_rt_ops = {
	.rtop_create = metaslab_rt_create,
	.rtop_destroy = metaslab_rt_destroy,
	.rtop_add = metaslab_rt_add,
	.rtop_remove = metaslab_rt_remove,
	.rtop_vacate = metaslab_rt_vacate
};

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_largest_allocatable(metaslab_t *msp)
{
	zfs_btree_t *t = &msp->ms_allocatable_by_size;
	zfs_range_seg_t *rs;

	if (t == NULL)
		return (0);
	if (zfs_btree_numnodes(t) == 0)
		metaslab_size_tree_full_load(msp->ms_allocatable);

	rs = zfs_btree_last(t, NULL);
	if (rs == NULL)
		return (0);

	return (zfs_rs_get_end(rs, msp->ms_allocatable) - zfs_rs_get_start(rs,
	    msp->ms_allocatable));
}
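/*
 * Note that an empty size tree is ambiguous here: it may mean the
 * metaslab has no free segments at all, or simply that every free
 * segment falls below the metaslab_by_size_min_shift floor.
 * metaslab_largest_allocatable() resolves the ambiguity by rebuilding
 * the tree with a floor of zero via metaslab_size_tree_full_load()
 * before consulting it.
 */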
/*
 * Return the maximum contiguous segment within the unflushed frees of this
 * metaslab.
 */
static uint64_t
metaslab_largest_unflushed_free(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if (msp->ms_unflushed_frees == NULL)
		return (0);

	if (zfs_btree_numnodes(&msp->ms_unflushed_frees_by_size) == 0)
		metaslab_size_tree_full_load(msp->ms_unflushed_frees);
	zfs_range_seg_t *rs = zfs_btree_last(&msp->ms_unflushed_frees_by_size,
	    NULL);
	if (rs == NULL)
		return (0);

	/*
	 * When a range is freed from the metaslab, that range is added to
	 * both the unflushed frees and the deferred frees. While the block
	 * will eventually be usable, if the metaslab were loaded the range
	 * would not be added to the ms_allocatable tree until TXG_DEFER_SIZE
	 * txgs had passed. As a result, when attempting to estimate an upper
	 * bound for the largest currently-usable free segment in the
	 * metaslab, we need to not consider any ranges currently in the defer
	 * trees. This algorithm approximates the largest available chunk in
	 * the largest range in the unflushed_frees tree by taking the first
	 * chunk. While this may be a poor estimate, it should only remain so
	 * briefly and should eventually self-correct as frees are no longer
	 * deferred. Similar logic applies to the ms_freed tree. See
	 * metaslab_load() for more details.
	 *
	 * There are two primary sources of inaccuracy in this estimate. Both
	 * are tolerated for performance reasons. The first source is that we
	 * only check the largest segment for overlaps. Smaller segments may
	 * have more favorable overlaps with the other trees, resulting in
	 * larger usable chunks. Second, we only look at the first chunk in
	 * the largest segment; there may be other usable chunks in the
	 * largest segment, but we ignore them.
	 */
	uint64_t rstart = zfs_rs_get_start(rs, msp->ms_unflushed_frees);
	uint64_t rsize = zfs_rs_get_end(rs, msp->ms_unflushed_frees) - rstart;
	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		uint64_t start = 0;
		uint64_t size = 0;
		boolean_t found = zfs_range_tree_find_in(msp->ms_defer[t],
		    rstart, rsize, &start, &size);
		if (found) {
			if (rstart == start)
				return (0);
			rsize = start - rstart;
		}
	}

	uint64_t start = 0;
	uint64_t size = 0;
	boolean_t found = zfs_range_tree_find_in(msp->ms_freed, rstart,
	    rsize, &start, &size);
	if (found)
		rsize = start - rstart;

	return (rsize);
}
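/*
 * Clipping example for the loop above (hypothetical offsets): if the
 * largest unflushed-free segment spans [100M, 110M) and a defer tree
 * holds [104M, 106M), the estimate is clipped to the leading chunk
 * [100M, 104M), i.e. 4M. If a deferred or freed range starts exactly
 * at 100M, the function conservatively reports 0.
 */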
static zfs_range_seg_t *
metaslab_block_find(zfs_btree_t *t, zfs_range_tree_t *rt, uint64_t start,
    uint64_t size, zfs_btree_index_t *where)
{
	zfs_range_seg_t *rs;
	zfs_range_seg_max_t rsearch;

	zfs_rs_set_start(&rsearch, rt, start);
	zfs_rs_set_end(&rsearch, rt, start + size);

	rs = zfs_btree_find(t, &rsearch, where);
	if (rs == NULL) {
		rs = zfs_btree_next(t, where, where);
	}

	return (rs);
}

/*
 * This is a helper function that can be used by the allocator to find a
 * suitable block to allocate. This will search the specified B-tree looking
 * for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(zfs_range_tree_t *rt, uint64_t *cursor, uint64_t size,
    uint64_t max_search)
{
	if (*cursor == 0)
		*cursor = rt->rt_start;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;
	zfs_range_seg_t *rs = metaslab_block_find(bt, rt, *cursor, size,
	    &where);
	uint64_t first_found;
	int count_searched = 0;

	if (rs != NULL)
		first_found = zfs_rs_get_start(rs, rt);

	while (rs != NULL && (zfs_rs_get_start(rs, rt) - first_found <=
	    max_search || count_searched < metaslab_min_search_count)) {
		uint64_t offset = zfs_rs_get_start(rs, rt);
		if (offset + size <= zfs_rs_get_end(rs, rt)) {
			*cursor = offset + size;
			return (offset);
		}
		rs = zfs_btree_next(bt, &where, &where);
		count_searched++;
	}

	*cursor = 0;
	return (-1ULL);
}
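/*
 * Example walk through the picker above (hypothetical segments): with
 * the cursor at the start of the tree and free segments [0, 4K),
 * [8K, 12K), and [16K, 32K), a 16K request skips the first two
 * segments (too small), lands on [16K, 32K), returns offset 16K, and
 * advances the cursor to 32K. Only a failed search resets the cursor
 * to 0 and returns -1ULL.
 */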
1740 * 1741 * The behavior when not searching can be changed to return the largest free 1742 * chunk, instead of a free chunk of exactly the requested size, by setting 1743 * metaslab_df_use_largest_segment. 1744 * ========================================================================== 1745 */ 1746 static uint64_t 1747 metaslab_df_alloc(metaslab_t *msp, uint64_t size) 1748 { 1749 /* 1750 * Find the largest power of 2 block size that evenly divides the 1751 * requested size. This is used to try to allocate blocks with similar 1752 * alignment from the same area of the metaslab (i.e. same cursor 1753 * bucket) but it does not guarantee that other allocations sizes 1754 * may exist in the same region. 1755 */ 1756 uint64_t align = size & -size; 1757 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1758 zfs_range_tree_t *rt = msp->ms_allocatable; 1759 uint_t free_pct = zfs_range_tree_space(rt) * 100 / msp->ms_size; 1760 uint64_t offset; 1761 1762 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1763 1764 /* 1765 * If we're running low on space, find a segment based on size, 1766 * rather than iterating based on offset. 1767 */ 1768 if (metaslab_largest_allocatable(msp) < metaslab_df_alloc_threshold || 1769 free_pct < metaslab_df_free_pct) { 1770 offset = -1; 1771 } else { 1772 offset = metaslab_block_picker(rt, 1773 cursor, size, metaslab_df_max_search); 1774 } 1775 1776 if (offset == -1) { 1777 zfs_range_seg_t *rs; 1778 if (zfs_btree_numnodes(&msp->ms_allocatable_by_size) == 0) 1779 metaslab_size_tree_full_load(msp->ms_allocatable); 1780 1781 if (metaslab_df_use_largest_segment) { 1782 /* use largest free segment */ 1783 rs = zfs_btree_last(&msp->ms_allocatable_by_size, NULL); 1784 } else { 1785 zfs_btree_index_t where; 1786 /* use segment of this size, or next largest */ 1787 rs = metaslab_block_find(&msp->ms_allocatable_by_size, 1788 rt, msp->ms_start, size, &where); 1789 } 1790 if (rs != NULL && zfs_rs_get_start(rs, rt) + size <= 1791 zfs_rs_get_end(rs, rt)) { 1792 offset = zfs_rs_get_start(rs, rt); 1793 *cursor = offset + size; 1794 } 1795 } 1796 1797 return (offset); 1798 } 1799 1800 /* 1801 * ========================================================================== 1802 * Cursor fit block allocator - 1803 * Select the largest region in the metaslab, set the cursor to the beginning 1804 * of the range and the cursor_end to the end of the range. As allocations 1805 * are made advance the cursor. Continue allocating from the cursor until 1806 * the range is exhausted and then find a new range. 
1807 * ========================================================================== 1808 */ 1809 static uint64_t 1810 metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1811 { 1812 zfs_range_tree_t *rt = msp->ms_allocatable; 1813 zfs_btree_t *t = &msp->ms_allocatable_by_size; 1814 uint64_t *cursor = &msp->ms_lbas[0]; 1815 uint64_t *cursor_end = &msp->ms_lbas[1]; 1816 uint64_t offset = 0; 1817 1818 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1819 1820 ASSERT3U(*cursor_end, >=, *cursor); 1821 1822 if ((*cursor + size) > *cursor_end) { 1823 zfs_range_seg_t *rs; 1824 1825 if (zfs_btree_numnodes(t) == 0) 1826 metaslab_size_tree_full_load(msp->ms_allocatable); 1827 rs = zfs_btree_last(t, NULL); 1828 if (rs == NULL || (zfs_rs_get_end(rs, rt) - 1829 zfs_rs_get_start(rs, rt)) < size) 1830 return (-1ULL); 1831 1832 *cursor = zfs_rs_get_start(rs, rt); 1833 *cursor_end = zfs_rs_get_end(rs, rt); 1834 } 1835 1836 offset = *cursor; 1837 *cursor += size; 1838 1839 return (offset); 1840 } 1841 1842 /* 1843 * ========================================================================== 1844 * New dynamic fit allocator - 1845 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1846 * contiguous blocks. If no region is found then just use the largest segment 1847 * that remains. 1848 * ========================================================================== 1849 */ 1850 1851 /* 1852 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1853 * to request from the allocator. 1854 */ 1855 uint64_t metaslab_ndf_clump_shift = 4; 1856 1857 static uint64_t 1858 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1859 { 1860 zfs_btree_t *t = &msp->ms_allocatable->rt_root; 1861 zfs_range_tree_t *rt = msp->ms_allocatable; 1862 zfs_btree_index_t where; 1863 zfs_range_seg_t *rs; 1864 zfs_range_seg_max_t rsearch; 1865 uint64_t hbit = highbit64(size); 1866 uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1867 uint64_t max_size = metaslab_largest_allocatable(msp); 1868 1869 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1870 1871 if (max_size < size) 1872 return (-1ULL); 1873 1874 zfs_rs_set_start(&rsearch, rt, *cursor); 1875 zfs_rs_set_end(&rsearch, rt, *cursor + size); 1876 1877 rs = zfs_btree_find(t, &rsearch, &where); 1878 if (rs == NULL || (zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) < 1879 size) { 1880 t = &msp->ms_allocatable_by_size; 1881 1882 zfs_rs_set_start(&rsearch, rt, 0); 1883 zfs_rs_set_end(&rsearch, rt, MIN(max_size, 1ULL << (hbit + 1884 metaslab_ndf_clump_shift))); 1885 1886 rs = zfs_btree_find(t, &rsearch, &where); 1887 if (rs == NULL) 1888 rs = zfs_btree_next(t, &where, &where); 1889 ASSERT(rs != NULL); 1890 } 1891 1892 if ((zfs_rs_get_end(rs, rt) - zfs_rs_get_start(rs, rt)) >= size) { 1893 *cursor = zfs_rs_get_start(rs, rt) + size; 1894 return (zfs_rs_get_start(rs, rt)); 1895 } 1896 return (-1ULL); 1897 } 1898 1899 /* 1900 * ========================================================================== 1901 * Metaslabs 1902 * ========================================================================== 1903 */ 1904 1905 /* 1906 * Wait for any in-progress metaslab loads to complete. 1907 */ 1908 static void 1909 metaslab_load_wait(metaslab_t *msp) 1910 { 1911 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1912 1913 while (msp->ms_loading) { 1914 ASSERT(!msp->ms_loaded); 1915 cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1916 } 1917 } 1918 1919 /* 1920 * Wait for any in-progress flushing to complete. 
1921 */ 1922 static void 1923 metaslab_flush_wait(metaslab_t *msp) 1924 { 1925 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1926 1927 while (msp->ms_flushing) 1928 cv_wait(&msp->ms_flush_cv, &msp->ms_lock); 1929 } 1930 1931 static unsigned int 1932 metaslab_idx_func(multilist_t *ml, void *arg) 1933 { 1934 metaslab_t *msp = arg; 1935 1936 /* 1937 * ms_id values are allocated sequentially, so full 64bit 1938 * division would be a waste of time, so limit it to 32 bits. 1939 */ 1940 return ((unsigned int)msp->ms_id % multilist_get_num_sublists(ml)); 1941 } 1942 1943 uint64_t 1944 metaslab_allocated_space(metaslab_t *msp) 1945 { 1946 return (msp->ms_allocated_space); 1947 } 1948 1949 /* 1950 * Verify that the space accounting on disk matches the in-core range_trees. 1951 */ 1952 static void 1953 metaslab_verify_space(metaslab_t *msp, uint64_t txg) 1954 { 1955 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1956 uint64_t allocating = 0; 1957 uint64_t sm_free_space, msp_free_space; 1958 1959 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1960 ASSERT(!msp->ms_condensing); 1961 1962 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 1963 return; 1964 1965 /* 1966 * We can only verify the metaslab space when we're called 1967 * from syncing context with a loaded metaslab that has an 1968 * allocated space map. Calling this in non-syncing context 1969 * does not provide a consistent view of the metaslab since 1970 * we're performing allocations in the future. 1971 */ 1972 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 1973 !msp->ms_loaded) 1974 return; 1975 1976 /* 1977 * Even though the smp_alloc field can get negative, 1978 * when it comes to a metaslab's space map, that should 1979 * never be the case. 1980 */ 1981 ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0); 1982 1983 ASSERT3U(space_map_allocated(msp->ms_sm), >=, 1984 zfs_range_tree_space(msp->ms_unflushed_frees)); 1985 1986 ASSERT3U(metaslab_allocated_space(msp), ==, 1987 space_map_allocated(msp->ms_sm) + 1988 zfs_range_tree_space(msp->ms_unflushed_allocs) - 1989 zfs_range_tree_space(msp->ms_unflushed_frees)); 1990 1991 sm_free_space = msp->ms_size - metaslab_allocated_space(msp); 1992 1993 /* 1994 * Account for future allocations since we would have 1995 * already deducted that space from the ms_allocatable. 1996 */ 1997 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 1998 allocating += 1999 zfs_range_tree_space(msp->ms_allocating[(txg + t) & 2000 TXG_MASK]); 2001 } 2002 ASSERT3U(allocating + msp->ms_allocated_this_txg, ==, 2003 msp->ms_allocating_total); 2004 2005 ASSERT3U(msp->ms_deferspace, ==, 2006 zfs_range_tree_space(msp->ms_defer[0]) + 2007 zfs_range_tree_space(msp->ms_defer[1])); 2008 2009 msp_free_space = zfs_range_tree_space(msp->ms_allocatable) + 2010 allocating + msp->ms_deferspace + 2011 zfs_range_tree_space(msp->ms_freed); 2012 2013 VERIFY3U(sm_free_space, ==, msp_free_space); 2014 } 2015 2016 static void 2017 metaslab_aux_histograms_clear(metaslab_t *msp) 2018 { 2019 /* 2020 * Auxiliary histograms are only cleared when resetting them, 2021 * which can only happen while the metaslab is loaded. 
2022 */ 2023 ASSERT(msp->ms_loaded); 2024 2025 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2026 for (int t = 0; t < TXG_DEFER_SIZE; t++) 2027 memset(msp->ms_deferhist[t], 0, sizeof (msp->ms_deferhist[t])); 2028 } 2029 2030 static void 2031 metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift, 2032 zfs_range_tree_t *rt) 2033 { 2034 /* 2035 * This is modeled after space_map_histogram_add(), so refer to that 2036 * function for implementation details. We want this to work like 2037 * the space map histogram, and not the range tree histogram, as we 2038 * are essentially constructing a delta that will be later subtracted 2039 * from the space map histogram. 2040 */ 2041 int idx = 0; 2042 for (int i = shift; i < ZFS_RANGE_TREE_HISTOGRAM_SIZE; i++) { 2043 ASSERT3U(i, >=, idx + shift); 2044 histogram[idx] += rt->rt_histogram[i] << (i - idx - shift); 2045 2046 if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) { 2047 ASSERT3U(idx + shift, ==, i); 2048 idx++; 2049 ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE); 2050 } 2051 } 2052 } 2053 2054 /* 2055 * Called at every sync pass that the metaslab gets synced. 2056 * 2057 * The reason is that we want our auxiliary histograms to be updated 2058 * wherever the metaslab's space map histogram is updated. This way 2059 * we stay consistent on which parts of the metaslab space map's 2060 * histogram are currently not available for allocations (e.g because 2061 * they are in the defer, freed, and freeing trees). 2062 */ 2063 static void 2064 metaslab_aux_histograms_update(metaslab_t *msp) 2065 { 2066 space_map_t *sm = msp->ms_sm; 2067 ASSERT(sm != NULL); 2068 2069 /* 2070 * This is similar to the metaslab's space map histogram updates 2071 * that take place in metaslab_sync(). The only difference is that 2072 * we only care about segments that haven't made it into the 2073 * ms_allocatable tree yet. 2074 */ 2075 if (msp->ms_loaded) { 2076 metaslab_aux_histograms_clear(msp); 2077 2078 metaslab_aux_histogram_add(msp->ms_synchist, 2079 sm->sm_shift, msp->ms_freed); 2080 2081 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2082 metaslab_aux_histogram_add(msp->ms_deferhist[t], 2083 sm->sm_shift, msp->ms_defer[t]); 2084 } 2085 } 2086 2087 metaslab_aux_histogram_add(msp->ms_synchist, 2088 sm->sm_shift, msp->ms_freeing); 2089 } 2090 2091 /* 2092 * Called every time we are done syncing (writing to) the metaslab, 2093 * i.e. at the end of each sync pass. 2094 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist] 2095 */ 2096 static void 2097 metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed) 2098 { 2099 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2100 space_map_t *sm = msp->ms_sm; 2101 2102 if (sm == NULL) { 2103 /* 2104 * We came here from metaslab_init() when creating/opening a 2105 * pool, looking at a metaslab that hasn't had any allocations 2106 * yet. 2107 */ 2108 return; 2109 } 2110 2111 /* 2112 * This is similar to the actions that we take for the ms_freed 2113 * and ms_defer trees in metaslab_sync_done(). 
2114 */ 2115 uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE; 2116 if (defer_allowed) { 2117 memcpy(msp->ms_deferhist[hist_index], msp->ms_synchist, 2118 sizeof (msp->ms_synchist)); 2119 } else { 2120 memset(msp->ms_deferhist[hist_index], 0, 2121 sizeof (msp->ms_deferhist[hist_index])); 2122 } 2123 memset(msp->ms_synchist, 0, sizeof (msp->ms_synchist)); 2124 } 2125 2126 /* 2127 * Ensure that the metaslab's weight and fragmentation are consistent 2128 * with the contents of the histogram (either the range tree's histogram 2129 * or the space map's depending whether the metaslab is loaded). 2130 */ 2131 static void 2132 metaslab_verify_weight_and_frag(metaslab_t *msp) 2133 { 2134 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2135 2136 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 2137 return; 2138 2139 /* 2140 * We can end up here from vdev_remove_complete(), in which case we 2141 * cannot do these assertions because we hold spa config locks and 2142 * thus we are not allowed to read from the DMU. 2143 * 2144 * We check if the metaslab group has been removed and if that's 2145 * the case we return immediately as that would mean that we are 2146 * here from the aforementioned code path. 2147 */ 2148 if (msp->ms_group == NULL) 2149 return; 2150 2151 /* 2152 * Devices being removed always return a weight of 0 and leave 2153 * fragmentation and ms_max_size as is - there is nothing for 2154 * us to verify here. 2155 */ 2156 vdev_t *vd = msp->ms_group->mg_vd; 2157 if (vd->vdev_removing) 2158 return; 2159 2160 /* 2161 * If the metaslab is dirty it probably means that we've done 2162 * some allocations or frees that have changed our histograms 2163 * and thus the weight. 2164 */ 2165 for (int t = 0; t < TXG_SIZE; t++) { 2166 if (txg_list_member(&vd->vdev_ms_list, msp, t)) 2167 return; 2168 } 2169 2170 /* 2171 * This verification checks that our in-memory state is consistent 2172 * with what's on disk. If the pool is read-only then there aren't 2173 * any changes and we just have the initially-loaded state. 2174 */ 2175 if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa)) 2176 return; 2177 2178 /* some extra verification for in-core tree if you can */ 2179 if (msp->ms_loaded) { 2180 zfs_range_tree_stat_verify(msp->ms_allocatable); 2181 VERIFY(space_map_histogram_verify(msp->ms_sm, 2182 msp->ms_allocatable)); 2183 } 2184 2185 uint64_t weight = msp->ms_weight; 2186 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2187 boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight); 2188 uint64_t frag = msp->ms_fragmentation; 2189 uint64_t max_segsize = msp->ms_max_size; 2190 2191 msp->ms_weight = 0; 2192 msp->ms_fragmentation = 0; 2193 2194 /* 2195 * This function is used for verification purposes and thus should 2196 * not introduce any side-effects/mutations on the system's state. 2197 * 2198 * Regardless of whether metaslab_weight() thinks this metaslab 2199 * should be active or not, we want to ensure that the actual weight 2200 * (and therefore the value of ms_weight) would be the same if it 2201 * was to be recalculated at this point. 2202 * 2203 * In addition we set the nodirty flag so metaslab_weight() does 2204 * not dirty the metaslab for future TXGs (e.g. when trying to 2205 * force condensing to upgrade the metaslab spacemaps). 2206 */ 2207 msp->ms_weight = metaslab_weight(msp, B_TRUE) | was_active; 2208 2209 VERIFY3U(max_segsize, ==, msp->ms_max_size); 2210 2211 /* 2212 * If the weight type changed then there is no point in doing 2213 * verification. 
Revert fields to their original values.
2214 */
2215 if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
2216 (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
2217 msp->ms_fragmentation = frag;
2218 msp->ms_weight = weight;
2219 return;
2220 }
2221
2222 VERIFY3U(msp->ms_fragmentation, ==, frag);
2223 VERIFY3U(msp->ms_weight, ==, weight);
2224 }
2225
2226 /*
2227 * If we're over the zfs_metaslab_mem_limit, select the loaded metaslab from
2228 * this class that was used longest ago, and attempt to unload it. To avoid
2229 * degrading performance we don't want to spend too much time in this
2230 * loop, and we expect that most of the time this operation will
2231 * succeed. Between that and the normal unloading processing during txg sync,
2232 * we expect this to keep the metaslab memory usage under control.
2233 */
2234 static void
2235 metaslab_potentially_evict(metaslab_class_t *mc)
2236 {
2237 #ifdef _KERNEL
2238 uint64_t allmem = arc_all_memory();
2239 uint64_t inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2240 uint64_t size = spl_kmem_cache_entry_size(zfs_btree_leaf_cache);
2241 uint_t tries = 0;
2242 for (; allmem * zfs_metaslab_mem_limit / 100 < inuse * size &&
2243 tries < multilist_get_num_sublists(&mc->mc_metaslab_txg_list) * 2;
2244 tries++) {
2245 unsigned int idx = multilist_get_random_index(
2246 &mc->mc_metaslab_txg_list);
2247 multilist_sublist_t *mls =
2248 multilist_sublist_lock_idx(&mc->mc_metaslab_txg_list, idx);
2249 metaslab_t *msp = multilist_sublist_head(mls);
2250 multilist_sublist_unlock(mls);
2251 while (msp != NULL && allmem * zfs_metaslab_mem_limit / 100 <
2252 inuse * size) {
2253 VERIFY3P(mls, ==, multilist_sublist_lock_idx(
2254 &mc->mc_metaslab_txg_list, idx));
2255 ASSERT3U(idx, ==,
2256 metaslab_idx_func(&mc->mc_metaslab_txg_list, msp));
2257
2258 if (!multilist_link_active(&msp->ms_class_txg_node)) {
2259 multilist_sublist_unlock(mls);
2260 break;
2261 }
2262 metaslab_t *next_msp = multilist_sublist_next(mls, msp);
2263 multilist_sublist_unlock(mls);
2264 /*
2265 * If the metaslab is currently loading there are two
2266 * cases. If it's the metaslab we're evicting, we
2267 * can't continue on or we'll panic when we attempt to
2268 * recursively lock the mutex. If it's another
2269 * metaslab that's loading, it can be safely skipped,
2270 * since we know it's very new and therefore not a
2271 * good eviction candidate. We check later once the
2272 * lock is held that the metaslab is fully loaded
2273 * before actually unloading it.
2274 */
2275 if (msp->ms_loading) {
2276 msp = next_msp;
2277 inuse =
2278 spl_kmem_cache_inuse(zfs_btree_leaf_cache);
2279 continue;
2280 }
2281 /*
2282 * We can't unload metaslabs with no spacemap because
2283 * they're not ready to be unloaded yet. We can't
2284 * unload metaslabs with outstanding allocations
2285 * because doing so could cause the metaslab's weight
2286 * to decrease while it's unloaded, which violates an
2287 * invariant that we use to prevent unnecessary
2288 * loading. We also don't unload metaslabs that are
2289 * currently active because they are high-weight
2290 * metaslabs that are likely to be used in the near
2291 * future.
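 * Concretely, the check below only unloads a metaslab when
 * ms_allocator == -1 (not active), ms_sm != NULL (it has a space map),
 * and ms_allocating_total == 0 (no outstanding allocations).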
2292 */ 2293 mutex_enter(&msp->ms_lock); 2294 if (msp->ms_allocator == -1 && msp->ms_sm != NULL && 2295 msp->ms_allocating_total == 0) { 2296 metaslab_unload(msp); 2297 } 2298 mutex_exit(&msp->ms_lock); 2299 msp = next_msp; 2300 inuse = spl_kmem_cache_inuse(zfs_btree_leaf_cache); 2301 } 2302 } 2303 #else 2304 (void) mc, (void) zfs_metaslab_mem_limit; 2305 #endif 2306 } 2307 2308 static int 2309 metaslab_load_impl(metaslab_t *msp) 2310 { 2311 int error = 0; 2312 2313 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2314 ASSERT(msp->ms_loading); 2315 ASSERT(!msp->ms_condensing); 2316 2317 /* 2318 * We temporarily drop the lock to unblock other operations while we 2319 * are reading the space map. Therefore, metaslab_sync() and 2320 * metaslab_sync_done() can run at the same time as we do. 2321 * 2322 * If we are using the log space maps, metaslab_sync() can't write to 2323 * the metaslab's space map while we are loading as we only write to 2324 * it when we are flushing the metaslab, and that can't happen while 2325 * we are loading it. 2326 * 2327 * If we are not using log space maps though, metaslab_sync() can 2328 * append to the space map while we are loading. Therefore we load 2329 * only entries that existed when we started the load. Additionally, 2330 * metaslab_sync_done() has to wait for the load to complete because 2331 * there are potential races like metaslab_load() loading parts of the 2332 * space map that are currently being appended by metaslab_sync(). If 2333 * we didn't, the ms_allocatable would have entries that 2334 * metaslab_sync_done() would try to re-add later. 2335 * 2336 * That's why before dropping the lock we remember the synced length 2337 * of the metaslab and read up to that point of the space map, 2338 * ignoring entries appended by metaslab_sync() that happen after we 2339 * drop the lock. 2340 */ 2341 uint64_t length = msp->ms_synced_length; 2342 mutex_exit(&msp->ms_lock); 2343 2344 hrtime_t load_start = gethrtime(); 2345 metaslab_rt_arg_t *mrap; 2346 if (msp->ms_allocatable->rt_arg == NULL) { 2347 mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2348 } else { 2349 mrap = msp->ms_allocatable->rt_arg; 2350 msp->ms_allocatable->rt_ops = NULL; 2351 msp->ms_allocatable->rt_arg = NULL; 2352 } 2353 mrap->mra_bt = &msp->ms_allocatable_by_size; 2354 mrap->mra_floor_shift = metaslab_by_size_min_shift; 2355 2356 if (msp->ms_sm != NULL) { 2357 error = space_map_load_length(msp->ms_sm, msp->ms_allocatable, 2358 SM_FREE, length); 2359 2360 /* Now, populate the size-sorted tree. */ 2361 metaslab_rt_create(msp->ms_allocatable, mrap); 2362 msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2363 msp->ms_allocatable->rt_arg = mrap; 2364 2365 struct mssa_arg arg = {0}; 2366 arg.rt = msp->ms_allocatable; 2367 arg.mra = mrap; 2368 zfs_range_tree_walk(msp->ms_allocatable, 2369 metaslab_size_sorted_add, &arg); 2370 } else { 2371 /* 2372 * Add the size-sorted tree first, since we don't need to load 2373 * the metaslab from the spacemap. 2374 */ 2375 metaslab_rt_create(msp->ms_allocatable, mrap); 2376 msp->ms_allocatable->rt_ops = &metaslab_rt_ops; 2377 msp->ms_allocatable->rt_arg = mrap; 2378 /* 2379 * The space map has not been allocated yet, so treat 2380 * all the space in the metaslab as free and add it to the 2381 * ms_allocatable tree. 
2382 */ 2383 zfs_range_tree_add(msp->ms_allocatable, 2384 msp->ms_start, msp->ms_size); 2385 2386 if (msp->ms_new) { 2387 /* 2388 * If the ms_sm doesn't exist, this means that this 2389 * metaslab hasn't gone through metaslab_sync() and 2390 * thus has never been dirtied. So we shouldn't 2391 * expect any unflushed allocs or frees from previous 2392 * TXGs. 2393 */ 2394 ASSERT(zfs_range_tree_is_empty( 2395 msp->ms_unflushed_allocs)); 2396 ASSERT(zfs_range_tree_is_empty( 2397 msp->ms_unflushed_frees)); 2398 } 2399 } 2400 2401 /* 2402 * We need to grab the ms_sync_lock to prevent metaslab_sync() from 2403 * changing the ms_sm (or log_sm) and the metaslab's range trees 2404 * while we are about to use them and populate the ms_allocatable. 2405 * The ms_lock is insufficient for this because metaslab_sync() doesn't 2406 * hold the ms_lock while writing the ms_checkpointing tree to disk. 2407 */ 2408 mutex_enter(&msp->ms_sync_lock); 2409 mutex_enter(&msp->ms_lock); 2410 2411 ASSERT(!msp->ms_condensing); 2412 ASSERT(!msp->ms_flushing); 2413 2414 if (error != 0) { 2415 mutex_exit(&msp->ms_sync_lock); 2416 return (error); 2417 } 2418 2419 ASSERT3P(msp->ms_group, !=, NULL); 2420 msp->ms_loaded = B_TRUE; 2421 2422 /* 2423 * Apply all the unflushed changes to ms_allocatable right 2424 * away so any manipulations we do below have a clear view 2425 * of what is allocated and what is free. 2426 */ 2427 zfs_range_tree_walk(msp->ms_unflushed_allocs, 2428 zfs_range_tree_remove, msp->ms_allocatable); 2429 zfs_range_tree_walk(msp->ms_unflushed_frees, 2430 zfs_range_tree_add, msp->ms_allocatable); 2431 2432 ASSERT3P(msp->ms_group, !=, NULL); 2433 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2434 if (spa_syncing_log_sm(spa) != NULL) { 2435 ASSERT(spa_feature_is_enabled(spa, 2436 SPA_FEATURE_LOG_SPACEMAP)); 2437 2438 /* 2439 * If we use a log space map we add all the segments 2440 * that are in ms_unflushed_frees so they are available 2441 * for allocation. 2442 * 2443 * ms_allocatable needs to contain all free segments 2444 * that are ready for allocations (thus not segments 2445 * from ms_freeing, ms_freed, and the ms_defer trees). 2446 * But if we grab the lock in this code path at a sync 2447 * pass later that 1, then it also contains the 2448 * segments of ms_freed (they were added to it earlier 2449 * in this path through ms_unflushed_frees). So we 2450 * need to remove all the segments that exist in 2451 * ms_freed from ms_allocatable as they will be added 2452 * later in metaslab_sync_done(). 2453 * 2454 * When there's no log space map, the ms_allocatable 2455 * correctly doesn't contain any segments that exist 2456 * in ms_freed [see ms_synced_length]. 2457 */ 2458 zfs_range_tree_walk(msp->ms_freed, 2459 zfs_range_tree_remove, msp->ms_allocatable); 2460 } 2461 2462 /* 2463 * If we are not using the log space map, ms_allocatable 2464 * contains the segments that exist in the ms_defer trees 2465 * [see ms_synced_length]. Thus we need to remove them 2466 * from ms_allocatable as they will be added again in 2467 * metaslab_sync_done(). 2468 * 2469 * If we are using the log space map, ms_allocatable still 2470 * contains the segments that exist in the ms_defer trees. 2471 * Not because it read them through the ms_sm though. But 2472 * because these segments are part of ms_unflushed_frees 2473 * whose segments we add to ms_allocatable earlier in this 2474 * code path. 
2475 */ 2476 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2477 zfs_range_tree_walk(msp->ms_defer[t], 2478 zfs_range_tree_remove, msp->ms_allocatable); 2479 } 2480 2481 /* 2482 * Call metaslab_recalculate_weight_and_sort() now that the 2483 * metaslab is loaded so we get the metaslab's real weight. 2484 * 2485 * Unless this metaslab was created with older software and 2486 * has not yet been converted to use segment-based weight, we 2487 * expect the new weight to be better or equal to the weight 2488 * that the metaslab had while it was not loaded. This is 2489 * because the old weight does not take into account the 2490 * consolidation of adjacent segments between TXGs. [see 2491 * comment for ms_synchist and ms_deferhist[] for more info] 2492 */ 2493 uint64_t weight = msp->ms_weight; 2494 uint64_t max_size = msp->ms_max_size; 2495 metaslab_recalculate_weight_and_sort(msp); 2496 if (!WEIGHT_IS_SPACEBASED(weight)) 2497 ASSERT3U(weight, <=, msp->ms_weight); 2498 msp->ms_max_size = metaslab_largest_allocatable(msp); 2499 ASSERT3U(max_size, <=, msp->ms_max_size); 2500 hrtime_t load_end = gethrtime(); 2501 msp->ms_load_time = load_end; 2502 zfs_dbgmsg("metaslab_load: txg %llu, spa %s, vdev_id %llu, " 2503 "ms_id %llu, smp_length %llu, " 2504 "unflushed_allocs %llu, unflushed_frees %llu, " 2505 "freed %llu, defer %llu + %llu, unloaded time %llu ms, " 2506 "loading_time %lld ms, ms_max_size %llu, " 2507 "max size error %lld, " 2508 "old_weight %llx, new_weight %llx", 2509 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 2510 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 2511 (u_longlong_t)msp->ms_id, 2512 (u_longlong_t)space_map_length(msp->ms_sm), 2513 (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_allocs), 2514 (u_longlong_t)zfs_range_tree_space(msp->ms_unflushed_frees), 2515 (u_longlong_t)zfs_range_tree_space(msp->ms_freed), 2516 (u_longlong_t)zfs_range_tree_space(msp->ms_defer[0]), 2517 (u_longlong_t)zfs_range_tree_space(msp->ms_defer[1]), 2518 (longlong_t)((load_start - msp->ms_unload_time) / 1000000), 2519 (longlong_t)((load_end - load_start) / 1000000), 2520 (u_longlong_t)msp->ms_max_size, 2521 (u_longlong_t)msp->ms_max_size - max_size, 2522 (u_longlong_t)weight, (u_longlong_t)msp->ms_weight); 2523 2524 metaslab_verify_space(msp, spa_syncing_txg(spa)); 2525 mutex_exit(&msp->ms_sync_lock); 2526 return (0); 2527 } 2528 2529 int 2530 metaslab_load(metaslab_t *msp) 2531 { 2532 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2533 2534 /* 2535 * There may be another thread loading the same metaslab, if that's 2536 * the case just wait until the other thread is done and return. 2537 */ 2538 metaslab_load_wait(msp); 2539 if (msp->ms_loaded) 2540 return (0); 2541 VERIFY(!msp->ms_loading); 2542 ASSERT(!msp->ms_condensing); 2543 2544 /* 2545 * We set the loading flag BEFORE potentially dropping the lock to 2546 * wait for an ongoing flush (see ms_flushing below). This way other 2547 * threads know that there is already a thread that is loading this 2548 * metaslab. 2549 */ 2550 msp->ms_loading = B_TRUE; 2551 2552 /* 2553 * Wait for any in-progress flushing to finish as we drop the ms_lock 2554 * both here (during space_map_load()) and in metaslab_flush() (when 2555 * we flush our changes to the ms_sm). 2556 */ 2557 if (msp->ms_flushing) 2558 metaslab_flush_wait(msp); 2559 2560 /* 2561 * In the possibility that we were waiting for the metaslab to be 2562 * flushed (where we temporarily dropped the ms_lock), ensure that 2563 * no one else loaded the metaslab somehow. 
2564 */ 2565 ASSERT(!msp->ms_loaded); 2566 2567 /* 2568 * If we're loading a metaslab in the normal class, consider evicting 2569 * another one to keep our memory usage under the limit defined by the 2570 * zfs_metaslab_mem_limit tunable. 2571 */ 2572 if (spa_normal_class(msp->ms_group->mg_class->mc_spa) == 2573 msp->ms_group->mg_class) { 2574 metaslab_potentially_evict(msp->ms_group->mg_class); 2575 } 2576 2577 int error = metaslab_load_impl(msp); 2578 2579 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2580 msp->ms_loading = B_FALSE; 2581 cv_broadcast(&msp->ms_load_cv); 2582 2583 return (error); 2584 } 2585 2586 void 2587 metaslab_unload(metaslab_t *msp) 2588 { 2589 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2590 2591 /* 2592 * This can happen if a metaslab is selected for eviction (in 2593 * metaslab_potentially_evict) and then unloaded during spa_sync (via 2594 * metaslab_class_evict_old). 2595 */ 2596 if (!msp->ms_loaded) 2597 return; 2598 2599 zfs_range_tree_vacate(msp->ms_allocatable, NULL, NULL); 2600 msp->ms_loaded = B_FALSE; 2601 msp->ms_unload_time = gethrtime(); 2602 2603 msp->ms_activation_weight = 0; 2604 msp->ms_weight &= ~METASLAB_ACTIVE_MASK; 2605 2606 if (msp->ms_group != NULL) { 2607 metaslab_class_t *mc = msp->ms_group->mg_class; 2608 multilist_sublist_t *mls = 2609 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2610 if (multilist_link_active(&msp->ms_class_txg_node)) 2611 multilist_sublist_remove(mls, msp); 2612 multilist_sublist_unlock(mls); 2613 2614 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2615 zfs_dbgmsg("metaslab_unload: txg %llu, spa %s, vdev_id %llu, " 2616 "ms_id %llu, weight %llx, " 2617 "selected txg %llu (%llu ms ago), alloc_txg %llu, " 2618 "loaded %llu ms ago, max_size %llu", 2619 (u_longlong_t)spa_syncing_txg(spa), spa_name(spa), 2620 (u_longlong_t)msp->ms_group->mg_vd->vdev_id, 2621 (u_longlong_t)msp->ms_id, 2622 (u_longlong_t)msp->ms_weight, 2623 (u_longlong_t)msp->ms_selected_txg, 2624 (u_longlong_t)(msp->ms_unload_time - 2625 msp->ms_selected_time) / 1000 / 1000, 2626 (u_longlong_t)msp->ms_alloc_txg, 2627 (u_longlong_t)(msp->ms_unload_time - 2628 msp->ms_load_time) / 1000 / 1000, 2629 (u_longlong_t)msp->ms_max_size); 2630 } 2631 2632 /* 2633 * We explicitly recalculate the metaslab's weight based on its space 2634 * map (as it is now not loaded). We want unload metaslabs to always 2635 * have their weights calculated from the space map histograms, while 2636 * loaded ones have it calculated from their in-core range tree 2637 * [see metaslab_load()]. This way, the weight reflects the information 2638 * available in-core, whether it is loaded or not. 2639 * 2640 * If ms_group == NULL means that we came here from metaslab_fini(), 2641 * at which point it doesn't make sense for us to do the recalculation 2642 * and the sorting. 2643 */ 2644 if (msp->ms_group != NULL) 2645 metaslab_recalculate_weight_and_sort(msp); 2646 } 2647 2648 /* 2649 * We want to optimize the memory use of the per-metaslab range 2650 * trees. To do this, we store the segments in the range trees in 2651 * units of sectors, zero-indexing from the start of the metaslab. If 2652 * the vdev_ms_shift - the vdev_ashift is less than 32, we can store 2653 * the ranges using two uint32_ts, rather than two uint64_ts. 
2654 */ 2655 zfs_range_seg_type_t 2656 metaslab_calculate_range_tree_type(vdev_t *vdev, metaslab_t *msp, 2657 uint64_t *start, uint64_t *shift) 2658 { 2659 if (vdev->vdev_ms_shift - vdev->vdev_ashift < 32 && 2660 !zfs_metaslab_force_large_segs) { 2661 *shift = vdev->vdev_ashift; 2662 *start = msp->ms_start; 2663 return (ZFS_RANGE_SEG32); 2664 } else { 2665 *shift = 0; 2666 *start = 0; 2667 return (ZFS_RANGE_SEG64); 2668 } 2669 } 2670 2671 void 2672 metaslab_set_selected_txg(metaslab_t *msp, uint64_t txg) 2673 { 2674 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2675 metaslab_class_t *mc = msp->ms_group->mg_class; 2676 multilist_sublist_t *mls = 2677 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 2678 if (multilist_link_active(&msp->ms_class_txg_node)) 2679 multilist_sublist_remove(mls, msp); 2680 msp->ms_selected_txg = txg; 2681 msp->ms_selected_time = gethrtime(); 2682 multilist_sublist_insert_tail(mls, msp); 2683 multilist_sublist_unlock(mls); 2684 } 2685 2686 void 2687 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta, 2688 int64_t defer_delta, int64_t space_delta) 2689 { 2690 vdev_space_update(vd, alloc_delta, defer_delta, space_delta); 2691 2692 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent); 2693 ASSERT(vd->vdev_ms_count != 0); 2694 2695 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta, 2696 vdev_deflated_space(vd, space_delta)); 2697 } 2698 2699 int 2700 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, 2701 uint64_t txg, metaslab_t **msp) 2702 { 2703 vdev_t *vd = mg->mg_vd; 2704 spa_t *spa = vd->vdev_spa; 2705 objset_t *mos = spa->spa_meta_objset; 2706 metaslab_t *ms; 2707 int error; 2708 2709 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP); 2710 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL); 2711 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL); 2712 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL); 2713 cv_init(&ms->ms_flush_cv, NULL, CV_DEFAULT, NULL); 2714 multilist_link_init(&ms->ms_class_txg_node); 2715 2716 ms->ms_id = id; 2717 ms->ms_start = id << vd->vdev_ms_shift; 2718 ms->ms_size = 1ULL << vd->vdev_ms_shift; 2719 ms->ms_allocator = -1; 2720 ms->ms_new = B_TRUE; 2721 2722 vdev_ops_t *ops = vd->vdev_ops; 2723 if (ops->vdev_op_metaslab_init != NULL) 2724 ops->vdev_op_metaslab_init(vd, &ms->ms_start, &ms->ms_size); 2725 2726 /* 2727 * We only open space map objects that already exist. All others 2728 * will be opened when we finally allocate an object for it. For 2729 * readonly pools there is no need to open the space map object. 2730 * 2731 * Note: 2732 * When called from vdev_expand(), we can't call into the DMU as 2733 * we are holding the spa_config_lock as a writer and we would 2734 * deadlock [see relevant comment in vdev_metaslab_init()]. in 2735 * that case, the object parameter is zero though, so we won't 2736 * call into the DMU. 
2737 */ 2738 if (object != 0 && !(spa->spa_mode == SPA_MODE_READ && 2739 !spa->spa_read_spacemaps)) { 2740 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start, 2741 ms->ms_size, vd->vdev_ashift); 2742 2743 if (error != 0) { 2744 kmem_free(ms, sizeof (metaslab_t)); 2745 return (error); 2746 } 2747 2748 ASSERT(ms->ms_sm != NULL); 2749 ms->ms_allocated_space = space_map_allocated(ms->ms_sm); 2750 } 2751 2752 uint64_t shift, start; 2753 zfs_range_seg_type_t type = 2754 metaslab_calculate_range_tree_type(vd, ms, &start, &shift); 2755 2756 ms->ms_allocatable = zfs_range_tree_create(NULL, type, NULL, start, 2757 shift); 2758 for (int t = 0; t < TXG_SIZE; t++) { 2759 ms->ms_allocating[t] = zfs_range_tree_create(NULL, type, 2760 NULL, start, shift); 2761 } 2762 ms->ms_freeing = zfs_range_tree_create(NULL, type, NULL, start, shift); 2763 ms->ms_freed = zfs_range_tree_create(NULL, type, NULL, start, shift); 2764 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2765 ms->ms_defer[t] = zfs_range_tree_create(NULL, type, NULL, 2766 start, shift); 2767 } 2768 ms->ms_checkpointing = 2769 zfs_range_tree_create(NULL, type, NULL, start, shift); 2770 ms->ms_unflushed_allocs = 2771 zfs_range_tree_create(NULL, type, NULL, start, shift); 2772 2773 metaslab_rt_arg_t *mrap = kmem_zalloc(sizeof (*mrap), KM_SLEEP); 2774 mrap->mra_bt = &ms->ms_unflushed_frees_by_size; 2775 mrap->mra_floor_shift = metaslab_by_size_min_shift; 2776 ms->ms_unflushed_frees = zfs_range_tree_create(&metaslab_rt_ops, 2777 type, mrap, start, shift); 2778 2779 ms->ms_trim = zfs_range_tree_create(NULL, type, NULL, start, shift); 2780 2781 metaslab_group_add(mg, ms); 2782 metaslab_set_fragmentation(ms, B_FALSE); 2783 2784 /* 2785 * If we're opening an existing pool (txg == 0) or creating 2786 * a new one (txg == TXG_INITIAL), all space is available now. 2787 * If we're adding space to an existing pool, the new space 2788 * does not become available until after this txg has synced. 2789 * The metaslab's weight will also be initialized when we sync 2790 * out this txg. This ensures that we don't attempt to allocate 2791 * from it before we have initialized it completely. 
2792 */ 2793 if (txg <= TXG_INITIAL) { 2794 metaslab_sync_done(ms, 0); 2795 metaslab_space_update(vd, mg->mg_class, 2796 metaslab_allocated_space(ms), 0, 0); 2797 } 2798 2799 if (txg != 0) { 2800 vdev_dirty(vd, 0, NULL, txg); 2801 vdev_dirty(vd, VDD_METASLAB, ms, txg); 2802 } 2803 2804 *msp = ms; 2805 2806 return (0); 2807 } 2808 2809 static void 2810 metaslab_fini_flush_data(metaslab_t *msp) 2811 { 2812 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2813 2814 if (metaslab_unflushed_txg(msp) == 0) { 2815 ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), 2816 ==, NULL); 2817 return; 2818 } 2819 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 2820 2821 mutex_enter(&spa->spa_flushed_ms_lock); 2822 avl_remove(&spa->spa_metaslabs_by_flushed, msp); 2823 mutex_exit(&spa->spa_flushed_ms_lock); 2824 2825 spa_log_sm_decrement_mscount(spa, metaslab_unflushed_txg(msp)); 2826 spa_log_summary_decrement_mscount(spa, metaslab_unflushed_txg(msp), 2827 metaslab_unflushed_dirty(msp)); 2828 } 2829 2830 uint64_t 2831 metaslab_unflushed_changes_memused(metaslab_t *ms) 2832 { 2833 return ((zfs_range_tree_numsegs(ms->ms_unflushed_allocs) + 2834 zfs_range_tree_numsegs(ms->ms_unflushed_frees)) * 2835 ms->ms_unflushed_allocs->rt_root.bt_elem_size); 2836 } 2837 2838 void 2839 metaslab_fini(metaslab_t *msp) 2840 { 2841 metaslab_group_t *mg = msp->ms_group; 2842 vdev_t *vd = mg->mg_vd; 2843 spa_t *spa = vd->vdev_spa; 2844 2845 metaslab_fini_flush_data(msp); 2846 2847 metaslab_group_remove(mg, msp); 2848 2849 mutex_enter(&msp->ms_lock); 2850 VERIFY(msp->ms_group == NULL); 2851 2852 /* 2853 * If this metaslab hasn't been through metaslab_sync_done() yet its 2854 * space hasn't been accounted for in its vdev and doesn't need to be 2855 * subtracted. 2856 */ 2857 if (!msp->ms_new) { 2858 metaslab_space_update(vd, mg->mg_class, 2859 -metaslab_allocated_space(msp), 0, -msp->ms_size); 2860 2861 } 2862 space_map_close(msp->ms_sm); 2863 msp->ms_sm = NULL; 2864 2865 metaslab_unload(msp); 2866 2867 zfs_range_tree_destroy(msp->ms_allocatable); 2868 zfs_range_tree_destroy(msp->ms_freeing); 2869 zfs_range_tree_destroy(msp->ms_freed); 2870 2871 ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=, 2872 metaslab_unflushed_changes_memused(msp)); 2873 spa->spa_unflushed_stats.sus_memused -= 2874 metaslab_unflushed_changes_memused(msp); 2875 zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL); 2876 zfs_range_tree_destroy(msp->ms_unflushed_allocs); 2877 zfs_range_tree_destroy(msp->ms_checkpointing); 2878 zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL); 2879 zfs_range_tree_destroy(msp->ms_unflushed_frees); 2880 2881 for (int t = 0; t < TXG_SIZE; t++) { 2882 zfs_range_tree_destroy(msp->ms_allocating[t]); 2883 } 2884 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2885 zfs_range_tree_destroy(msp->ms_defer[t]); 2886 } 2887 ASSERT0(msp->ms_deferspace); 2888 2889 for (int t = 0; t < TXG_SIZE; t++) 2890 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 2891 2892 zfs_range_tree_vacate(msp->ms_trim, NULL, NULL); 2893 zfs_range_tree_destroy(msp->ms_trim); 2894 2895 mutex_exit(&msp->ms_lock); 2896 cv_destroy(&msp->ms_load_cv); 2897 cv_destroy(&msp->ms_flush_cv); 2898 mutex_destroy(&msp->ms_lock); 2899 mutex_destroy(&msp->ms_sync_lock); 2900 ASSERT3U(msp->ms_allocator, ==, -1); 2901 2902 kmem_free(msp, sizeof (metaslab_t)); 2903 } 2904 2905 /* 2906 * This table defines a segment size based fragmentation metric that will 2907 * allow each metaslab to derive its own fragmentation value. 
This is done
2908 * by calculating the space in each bucket of the spacemap histogram and
2909 * multiplying that by the fragmentation metric in this table. Doing
2910 * this for all buckets and dividing it by the total amount of free
2911 * space in this metaslab (i.e. the total free space in all buckets) gives
2912 * us the fragmentation metric. This means that a high fragmentation metric
2913 * equates to most of the free space being composed of small segments.
2914 * Conversely, if the metric is low, then most of the free space is in
2915 * large segments.
2916 *
2917 * This table defines 0% fragmented space using 512M segments. Using this value,
2918 * we derive the rest of the table. This table originally went up to 16MB, but
2919 * with larger recordsizes, larger ashifts, and use of raidz3, allocations can
2920 * now be significantly larger than was previously possible.
2921 * Since the fragmentation value is never stored on disk, it is possible to
2922 * change these calculations in the future.
2923 */
2924 static const int zfs_frag_table[] = {
2925 100, /* 512B */
2926 99, /* 1K */
2927 97, /* 2K */
2928 93, /* 4K */
2929 88, /* 8K */
2930 83, /* 16K */
2931 77, /* 32K */
2932 71, /* 64K */
2933 64, /* 128K */
2934 57, /* 256K */
2935 50, /* 512K */
2936 43, /* 1M */
2937 36, /* 2M */
2938 29, /* 4M */
2939 23, /* 8M */
2940 17, /* 16M */
2941 12, /* 32M */
2942 7, /* 64M */
2943 3, /* 128M */
2944 1, /* 256M */
2945 0, /* 512M */
2946 };
2947 #define FRAGMENTATION_TABLE_SIZE \
2948 (sizeof (zfs_frag_table)/(sizeof (zfs_frag_table[0])))
2949
2950 /*
2951 * Calculate the metaslab's fragmentation metric and set ms_fragmentation.
2952 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not
2953 * been upgraded and does not support this metric. Otherwise, the value
2954 * set here will be in the range [0, 100].
2955 */
2956 static void
2957 metaslab_set_fragmentation(metaslab_t *msp, boolean_t nodirty)
2958 {
2959 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2960 uint64_t fragmentation = 0;
2961 uint64_t total = 0;
2962 boolean_t feature_enabled = spa_feature_is_enabled(spa,
2963 SPA_FEATURE_SPACEMAP_HISTOGRAM);
2964
2965 if (!feature_enabled) {
2966 msp->ms_fragmentation = ZFS_FRAG_INVALID;
2967 return;
2968 }
2969
2970 /*
2971 * A null space map means that the entire metaslab is free
2972 * and thus is not fragmented.
2973 */
2974 if (msp->ms_sm == NULL) {
2975 msp->ms_fragmentation = 0;
2976 return;
2977 }
2978
2979 /*
2980 * If this metaslab's space map has not been upgraded, flag it
2981 * so that we upgrade next time we encounter it.
2982 */
2983 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
2984 uint64_t txg = spa_syncing_txg(spa);
2985 vdev_t *vd = msp->ms_group->mg_vd;
2986
2987 /*
2988 * If we've reached the final dirty txg, then we must
2989 * be shutting down the pool. We don't want to dirty
2990 * any data past this point so skip setting the condense
2991 * flag. We can retry this action the next time the pool
2992 * is imported. We also skip marking this metaslab for
2993 * condensing if the caller has explicitly set nodirty.
2994 */ 2995 if (!nodirty && 2996 spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 2997 msp->ms_condense_wanted = B_TRUE; 2998 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2999 zfs_dbgmsg("txg %llu, requesting force condense: " 3000 "ms_id %llu, vdev_id %llu", (u_longlong_t)txg, 3001 (u_longlong_t)msp->ms_id, 3002 (u_longlong_t)vd->vdev_id); 3003 } 3004 msp->ms_fragmentation = ZFS_FRAG_INVALID; 3005 return; 3006 } 3007 3008 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3009 uint64_t space = 0; 3010 uint8_t shift = msp->ms_sm->sm_shift; 3011 3012 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 3013 FRAGMENTATION_TABLE_SIZE - 1); 3014 3015 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 3016 continue; 3017 3018 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 3019 total += space; 3020 3021 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 3022 fragmentation += space * zfs_frag_table[idx]; 3023 } 3024 3025 if (total > 0) 3026 fragmentation /= total; 3027 ASSERT3U(fragmentation, <=, 100); 3028 3029 msp->ms_fragmentation = fragmentation; 3030 } 3031 3032 /* 3033 * Compute a weight -- a selection preference value -- for the given metaslab. 3034 * This is based on the amount of free space, the level of fragmentation, 3035 * the LBA range, and whether the metaslab is loaded. 3036 */ 3037 static uint64_t 3038 metaslab_space_weight(metaslab_t *msp) 3039 { 3040 metaslab_group_t *mg = msp->ms_group; 3041 vdev_t *vd = mg->mg_vd; 3042 uint64_t weight, space; 3043 3044 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3045 3046 /* 3047 * The baseline weight is the metaslab's free space. 3048 */ 3049 space = msp->ms_size - metaslab_allocated_space(msp); 3050 3051 if (metaslab_fragmentation_factor_enabled && 3052 msp->ms_fragmentation != ZFS_FRAG_INVALID) { 3053 /* 3054 * Use the fragmentation information to inversely scale 3055 * down the baseline weight. We need to ensure that we 3056 * don't exclude this metaslab completely when it's 100% 3057 * fragmented. To avoid this we reduce the fragmented value 3058 * by 1. 3059 */ 3060 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 3061 3062 /* 3063 * If space < SPA_MINBLOCKSIZE, then we will not allocate from 3064 * this metaslab again. The fragmentation metric may have 3065 * decreased the space to something smaller than 3066 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 3067 * so that we can consume any remaining space. 3068 */ 3069 if (space > 0 && space < SPA_MINBLOCKSIZE) 3070 space = SPA_MINBLOCKSIZE; 3071 } 3072 weight = space; 3073 3074 /* 3075 * Modern disks have uniform bit density and constant angular velocity. 3076 * Therefore, the outer recording zones are faster (higher bandwidth) 3077 * than the inner zones by the ratio of outer to inner track diameter, 3078 * which is typically around 2:1. We account for this by assigning 3079 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x). 3080 * In effect, this means that we'll select the metaslab with the most 3081 * free bandwidth rather than simply the one with the most free space. 3082 */ 3083 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) { 3084 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count; 3085 ASSERT(weight >= space && weight <= 2 * space); 3086 } 3087 3088 /* 3089 * If this metaslab is one we're actively using, adjust its 3090 * weight to make it preferable to any inactive metaslab so 3091 * we'll polish it off. If the fragmentation on this metaslab 3092 * has exceed our threshold, then don't mark it active. 
3093 */ 3094 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID && 3095 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) { 3096 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK); 3097 } 3098 3099 WEIGHT_SET_SPACEBASED(weight); 3100 return (weight); 3101 } 3102 3103 /* 3104 * Return the weight of the specified metaslab, according to the segment-based 3105 * weighting algorithm. The metaslab must be loaded. This function can 3106 * be called within a sync pass since it relies only on the metaslab's 3107 * range tree which is always accurate when the metaslab is loaded. 3108 */ 3109 static uint64_t 3110 metaslab_weight_from_range_tree(metaslab_t *msp) 3111 { 3112 uint64_t weight = 0; 3113 uint32_t segments = 0; 3114 3115 ASSERT(msp->ms_loaded); 3116 3117 for (int i = ZFS_RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT; 3118 i--) { 3119 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift; 3120 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3121 3122 segments <<= 1; 3123 segments += msp->ms_allocatable->rt_histogram[i]; 3124 3125 /* 3126 * The range tree provides more precision than the space map 3127 * and must be downgraded so that all values fit within the 3128 * space map's histogram. This allows us to compare loaded 3129 * vs. unloaded metaslabs to determine which metaslab is 3130 * considered "best". 3131 */ 3132 if (i > max_idx) 3133 continue; 3134 3135 if (segments != 0) { 3136 WEIGHT_SET_COUNT(weight, segments); 3137 WEIGHT_SET_INDEX(weight, i); 3138 WEIGHT_SET_ACTIVE(weight, 0); 3139 break; 3140 } 3141 } 3142 return (weight); 3143 } 3144 3145 /* 3146 * Calculate the weight based on the on-disk histogram. Should be applied 3147 * only to unloaded metaslabs (i.e no incoming allocations) in-order to 3148 * give results consistent with the on-disk state 3149 */ 3150 static uint64_t 3151 metaslab_weight_from_spacemap(metaslab_t *msp) 3152 { 3153 space_map_t *sm = msp->ms_sm; 3154 ASSERT(!msp->ms_loaded); 3155 ASSERT(sm != NULL); 3156 ASSERT3U(space_map_object(sm), !=, 0); 3157 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3158 3159 /* 3160 * Create a joint histogram from all the segments that have made 3161 * it to the metaslab's space map histogram, that are not yet 3162 * available for allocation because they are still in the freeing 3163 * pipeline (e.g. freeing, freed, and defer trees). Then subtract 3164 * these segments from the space map's histogram to get a more 3165 * accurate weight. 3166 */ 3167 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 3168 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 3169 deferspace_histogram[i] += msp->ms_synchist[i]; 3170 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 3171 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 3172 deferspace_histogram[i] += msp->ms_deferhist[t][i]; 3173 } 3174 } 3175 3176 uint64_t weight = 0; 3177 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 3178 ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 3179 deferspace_histogram[i]); 3180 uint64_t count = 3181 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 3182 if (count != 0) { 3183 WEIGHT_SET_COUNT(weight, count); 3184 WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 3185 WEIGHT_SET_ACTIVE(weight, 0); 3186 break; 3187 } 3188 } 3189 return (weight); 3190 } 3191 3192 /* 3193 * Compute a segment-based weight for the specified metaslab. The weight 3194 * is determined by highest bucket in the histogram. The information 3195 * for the highest bucket is encoded into the weight value. 
3196 */ 3197 static uint64_t 3198 metaslab_segment_weight(metaslab_t *msp) 3199 { 3200 metaslab_group_t *mg = msp->ms_group; 3201 uint64_t weight = 0; 3202 uint8_t shift = mg->mg_vd->vdev_ashift; 3203 3204 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3205 3206 /* 3207 * The metaslab is completely free. 3208 */ 3209 if (metaslab_allocated_space(msp) == 0) { 3210 int idx = highbit64(msp->ms_size) - 1; 3211 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 3212 3213 if (idx < max_idx) { 3214 WEIGHT_SET_COUNT(weight, 1ULL); 3215 WEIGHT_SET_INDEX(weight, idx); 3216 } else { 3217 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 3218 WEIGHT_SET_INDEX(weight, max_idx); 3219 } 3220 WEIGHT_SET_ACTIVE(weight, 0); 3221 ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 3222 return (weight); 3223 } 3224 3225 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 3226 3227 /* 3228 * If the metaslab is fully allocated then just make the weight 0. 3229 */ 3230 if (metaslab_allocated_space(msp) == msp->ms_size) 3231 return (0); 3232 /* 3233 * If the metaslab is already loaded, then use the range tree to 3234 * determine the weight. Otherwise, we rely on the space map information 3235 * to generate the weight. 3236 */ 3237 if (msp->ms_loaded) { 3238 weight = metaslab_weight_from_range_tree(msp); 3239 } else { 3240 weight = metaslab_weight_from_spacemap(msp); 3241 } 3242 3243 /* 3244 * If the metaslab was active the last time we calculated its weight 3245 * then keep it active. We want to consume the entire region that 3246 * is associated with this weight. 3247 */ 3248 if (msp->ms_activation_weight != 0 && weight != 0) 3249 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 3250 return (weight); 3251 } 3252 3253 /* 3254 * Determine if we should attempt to allocate from this metaslab. If the 3255 * metaslab is loaded, then we can determine if the desired allocation 3256 * can be satisfied by looking at the size of the maximum free segment 3257 * on that metaslab. Otherwise, we make our decision based on the metaslab's 3258 * weight. For segment-based weighting we can determine the maximum 3259 * allocation based on the index encoded in its value. For space-based 3260 * weights we rely on the entire weight (excluding the weight-type bit). 3261 */ 3262 static boolean_t 3263 metaslab_should_allocate(metaslab_t *msp, uint64_t asize, boolean_t try_hard) 3264 { 3265 /* 3266 * This case will usually but not always get caught by the checks below; 3267 * metaslabs can be loaded by various means, including the trim and 3268 * initialize code. Once that happens, without this check they are 3269 * allocatable even before they finish their first txg sync. 3270 */ 3271 if (unlikely(msp->ms_new)) 3272 return (B_FALSE); 3273 3274 /* 3275 * If the metaslab is loaded, ms_max_size is definitive and we can use 3276 * the fast check. If it's not, the ms_max_size is a lower bound (once 3277 * set), and we should use the fast check as long as we're not in 3278 * try_hard and it's been less than zfs_metaslab_max_size_cache_sec 3279 * seconds since the metaslab was unloaded. 3280 */ 3281 if (msp->ms_loaded || 3282 (msp->ms_max_size != 0 && !try_hard && gethrtime() < 3283 msp->ms_unload_time + SEC2NSEC(zfs_metaslab_max_size_cache_sec))) 3284 return (msp->ms_max_size >= asize); 3285 3286 boolean_t should_allocate; 3287 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3288 /* 3289 * The metaslab segment weight indicates segments in the 3290 * range [2^i, 2^(i+1)), where i is the index in the weight. 
3291 * Since the asize might be in the middle of the range, we 3292 * should attempt the allocation if asize < 2^(i+1). 3293 */ 3294 should_allocate = (asize < 3295 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 3296 } else { 3297 should_allocate = (asize <= 3298 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 3299 } 3300 3301 return (should_allocate); 3302 } 3303 3304 static uint64_t 3305 metaslab_weight(metaslab_t *msp, boolean_t nodirty) 3306 { 3307 vdev_t *vd = msp->ms_group->mg_vd; 3308 spa_t *spa = vd->vdev_spa; 3309 uint64_t weight; 3310 3311 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3312 3313 metaslab_set_fragmentation(msp, nodirty); 3314 3315 /* 3316 * Update the maximum size. If the metaslab is loaded, this will 3317 * ensure that we get an accurate maximum size if newly freed space 3318 * has been added back into the free tree. If the metaslab is 3319 * unloaded, we check if there's a larger free segment in the 3320 * unflushed frees. This is a lower bound on the largest allocatable 3321 * segment size. Coalescing of adjacent entries may reveal larger 3322 * allocatable segments, but we aren't aware of those until loading 3323 * the space map into a range tree. 3324 */ 3325 if (msp->ms_loaded) { 3326 msp->ms_max_size = metaslab_largest_allocatable(msp); 3327 } else { 3328 msp->ms_max_size = MAX(msp->ms_max_size, 3329 metaslab_largest_unflushed_free(msp)); 3330 } 3331 3332 /* 3333 * Segment-based weighting requires space map histogram support. 3334 */ 3335 if (zfs_metaslab_segment_weight_enabled && 3336 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 3337 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 3338 sizeof (space_map_phys_t))) { 3339 weight = metaslab_segment_weight(msp); 3340 } else { 3341 weight = metaslab_space_weight(msp); 3342 } 3343 return (weight); 3344 } 3345 3346 void 3347 metaslab_recalculate_weight_and_sort(metaslab_t *msp) 3348 { 3349 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3350 3351 /* note: we preserve the mask (e.g. indication of primary, etc..) */ 3352 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 3353 metaslab_group_sort(msp->ms_group, msp, 3354 metaslab_weight(msp, B_FALSE) | was_active); 3355 } 3356 3357 static int 3358 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, 3359 int allocator, uint64_t activation_weight) 3360 { 3361 metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator]; 3362 ASSERT(MUTEX_HELD(&msp->ms_lock)); 3363 3364 /* 3365 * If we're activating for the claim code, we don't want to actually 3366 * set the metaslab up for a specific allocator. 3367 */ 3368 if (activation_weight == METASLAB_WEIGHT_CLAIM) { 3369 ASSERT0(msp->ms_activation_weight); 3370 msp->ms_activation_weight = msp->ms_weight; 3371 metaslab_group_sort(mg, msp, msp->ms_weight | 3372 activation_weight); 3373 return (0); 3374 } 3375 3376 metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ? 

static uint64_t
metaslab_weight(metaslab_t *msp, boolean_t nodirty)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	uint64_t weight;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	metaslab_set_fragmentation(msp, nodirty);

	/*
	 * Update the maximum size. If the metaslab is loaded, this will
	 * ensure that we get an accurate maximum size if newly freed space
	 * has been added back into the free tree. If the metaslab is
	 * unloaded, we check if there's a larger free segment in the
	 * unflushed frees. This is a lower bound on the largest allocatable
	 * segment size. Coalescing of adjacent entries may reveal larger
	 * allocatable segments, but we aren't aware of those until loading
	 * the space map into a range tree.
	 */
	if (msp->ms_loaded) {
		msp->ms_max_size = metaslab_largest_allocatable(msp);
	} else {
		msp->ms_max_size = MAX(msp->ms_max_size,
		    metaslab_largest_unflushed_free(msp));
	}

	/*
	 * Segment-based weighting requires space map histogram support.
	 */
	if (zfs_metaslab_segment_weight_enabled &&
	    spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
	    sizeof (space_map_phys_t))) {
		weight = metaslab_segment_weight(msp);
	} else {
		weight = metaslab_space_weight(msp);
	}
	return (weight);
}

void
metaslab_recalculate_weight_and_sort(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/* note: we preserve the mask (e.g. indication of primary, etc.) */
	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
	metaslab_group_sort(msp->ms_group, msp,
	    metaslab_weight(msp, B_FALSE) | was_active);
}

static int
metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    int allocator, uint64_t activation_weight)
{
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * If we're activating for the claim code, we don't want to actually
	 * set the metaslab up for a specific allocator.
	 */
	if (activation_weight == METASLAB_WEIGHT_CLAIM) {
		ASSERT0(msp->ms_activation_weight);
		msp->ms_activation_weight = msp->ms_weight;
		metaslab_group_sort(mg, msp, msp->ms_weight |
		    activation_weight);
		return (0);
	}

	metaslab_t **mspp = (activation_weight == METASLAB_WEIGHT_PRIMARY ?
	    &mga->mga_primary : &mga->mga_secondary);

	mutex_enter(&mg->mg_lock);
	if (*mspp != NULL) {
		mutex_exit(&mg->mg_lock);
		return (EEXIST);
	}

	*mspp = msp;
	ASSERT3S(msp->ms_allocator, ==, -1);
	msp->ms_allocator = allocator;
	msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY);

	ASSERT0(msp->ms_activation_weight);
	msp->ms_activation_weight = msp->ms_weight;
	metaslab_group_sort_impl(mg, msp,
	    msp->ms_weight | activation_weight);
	mutex_exit(&mg->mg_lock);

	return (0);
}

static int
metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The current metaslab is already activated for us so there
	 * is nothing to do. Being already activated, though, doesn't
	 * mean that this metaslab was activated for our allocator or
	 * with our requested activation weight. The metaslab could have
	 * started as an active one for our allocator but changed
	 * allocators while we were waiting to grab its ms_lock, or we
	 * stole it [see find_valid_metaslab()]. This means that this
	 * thread may end up passivating a metaslab that belongs to
	 * another allocator or that carries a different activation mask.
	 */
	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
		ASSERT(msp->ms_loaded);
		return (0);
	}

	int error = metaslab_load(msp);
	if (error != 0) {
		metaslab_group_sort(msp->ms_group, msp, 0);
		return (error);
	}

	/*
	 * When entering metaslab_load() we may have dropped the
	 * ms_lock because we were loading this metaslab, or we
	 * were waiting for another thread to load it for us. In
	 * that scenario, we recheck the weight of the metaslab
	 * to see if it was activated by another thread.
	 *
	 * If the metaslab was activated for another allocator or
	 * it was activated with a different activation weight (e.g.
	 * we wanted to make it a primary but it was activated as
	 * secondary) we return error (EBUSY).
	 *
	 * If the metaslab was activated for the same allocator
	 * and requested activation mask, skip activating it.
	 */
	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) {
		if (msp->ms_allocator != allocator)
			return (EBUSY);

		if ((msp->ms_weight & activation_weight) == 0)
			return (SET_ERROR(EBUSY));

		EQUIV((activation_weight == METASLAB_WEIGHT_PRIMARY),
		    msp->ms_primary);
		return (0);
	}

	/*
	 * If the metaslab has literally 0 space, it will have weight 0. In
	 * that case, don't bother activating it. This can happen if the
	 * metaslab had space during find_valid_metaslab, but another thread
	 * loaded it and used all that space while we were waiting to grab
	 * the lock.
	 */
	if (msp->ms_weight == 0) {
		ASSERT0(zfs_range_tree_space(msp->ms_allocatable));
		return (SET_ERROR(ENOSPC));
	}

	if ((error = metaslab_activate_allocator(msp->ms_group, msp,
	    allocator, activation_weight)) != 0) {
		return (error);
	}

	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);

	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
		metaslab_group_sort(mg, msp, weight);
		return;
	}

	mutex_enter(&mg->mg_lock);
	ASSERT3P(msp->ms_group, ==, mg);
	ASSERT3S(0, <=, msp->ms_allocator);
	ASSERT3U(msp->ms_allocator, <, mg->mg_allocators);

	metaslab_group_allocator_t *mga = &mg->mg_allocator[msp->ms_allocator];
	if (msp->ms_primary) {
		ASSERT3P(mga->mga_primary, ==, msp);
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		mga->mga_primary = NULL;
	} else {
		ASSERT3P(mga->mga_secondary, ==, msp);
		ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		mga->mga_secondary = NULL;
	}
	msp->ms_allocator = -1;
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
	uint64_t size __maybe_unused = weight & ~METASLAB_WEIGHT_TYPE;

	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
	    size >= SPA_MINBLOCKSIZE ||
	    zfs_range_tree_space(msp->ms_allocatable) == 0);
	ASSERT0(weight & METASLAB_ACTIVE_MASK);

	ASSERT(msp->ms_activation_weight != 0);
	msp->ms_activation_weight = 0;
	metaslab_passivate_allocator(msp->ms_group, msp, weight);
	ASSERT0(msp->ms_weight & METASLAB_ACTIVE_MASK);
}

/*
 * Segment-based metaslabs are activated once and remain active until
 * we either fail an allocation attempt (similar to space-based metaslabs)
 * or have exhausted the free space in zfs_metaslab_switch_threshold
 * buckets since the metaslab was activated. This function checks to see
 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
 * metaslab and passivates it proactively. This will allow us to select a
 * metaslab with a larger contiguous region, if any, remaining within this
 * metaslab group. If we're in sync pass > 1, then we continue using this
 * metaslab so that we don't dirty more blocks and cause more sync passes.
 */
static void
metaslab_segment_may_passivate(metaslab_t *msp)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
		return;

	/*
	 * As long as a single largest free segment covers the majority of
	 * the free space, don't consider the metaslab fragmented. This
	 * allows us to fill new unfragmented metaslabs completely before
	 * switching to others.
	 */
	if (metaslab_largest_allocatable(msp) >
	    zfs_range_tree_space(msp->ms_allocatable) * 15 / 16)
		return;

	/*
	 * Since we are in the middle of a sync pass, the most accurate
	 * information that is accessible to us is the in-core range tree
	 * histogram; calculate the new weight based on that information.
	 */
	uint64_t weight = metaslab_weight_from_range_tree(msp);
	int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
	int current_idx = WEIGHT_GET_INDEX(weight);

	if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
		metaslab_passivate(msp, weight);
}
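
/*
 * Editor's sketch (illustrative only, not compiled): the switch-threshold
 * arithmetic above with concrete numbers, assuming
 * zfs_metaslab_switch_threshold = 2. A metaslab activated with weight
 * index 23 (segments in [8M, 16M)) is proactively passivated once its
 * largest-bucket index drops to 21 or below (segments smaller than 4M).
 */
#if 0
static boolean_t
example_should_switch(int activation_idx, int current_idx)
{
	const int switch_threshold = 2;	/* zfs_metaslab_switch_threshold */

	/* with activation_idx = 23, a drop to current_idx = 21 triggers it */
	return (current_idx <= activation_idx - switch_threshold);
}
#endif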

static void
metaslab_preload(void *arg)
{
	metaslab_t *msp = arg;
	metaslab_class_t *mc = msp->ms_group->mg_class;
	spa_t *spa = mc->mc_spa;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));

	mutex_enter(&msp->ms_lock);
	(void) metaslab_load(msp);
	metaslab_set_selected_txg(msp, spa_syncing_txg(spa));
	mutex_exit(&msp->ms_lock);
	spl_fstrans_unmark(cookie);
}

static void
metaslab_group_preload(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m = 0;

	if (spa_shutting_down(spa) || !metaslab_preload_enabled)
		return;

	mutex_enter(&mg->mg_lock);

	/*
	 * Load the next potential metaslabs.
	 */
	for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
		ASSERT3P(msp->ms_group, ==, mg);

		/*
		 * We preload only the maximum number of metaslabs specified
		 * by metaslab_preload_limit. If a metaslab is being forced
		 * to condense then we preload it too. This will ensure
		 * that force condensing happens in the next txg.
		 */
		if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
			continue;
		}

		VERIFY(taskq_dispatch(spa->spa_metaslab_taskq, metaslab_preload,
		    msp, TQ_SLEEP | (m <= mg->mg_allocators ? TQ_FRONT : 0))
		    != TASKQID_INVALID);
	}
	mutex_exit(&mg->mg_lock);
}

/*
 * Determine if the space map's on-disk footprint is past our tolerance for
 * inefficiency. We would like to use the following criteria to make our
 * decision:
 *
 * 1. Do not condense if the size of the space map object would dramatically
 *    increase as a result of writing out the free space range tree.
 *
 * 2. Condense if the on-disk space map representation is at least
 *    zfs_condense_pct/100 times the size of the optimal representation
 *    (e.g. with zfs_condense_pct = 110 and an optimal size of 1MB,
 *    condense once the on-disk size reaches 1.1MB).
 *
 * 3. Do not condense if the on-disk size of the space map does not actually
 *    decrease.
 *
 * Unfortunately, we cannot compute the on-disk size of the space map in this
 * context because we cannot accurately compute the effects of compression,
 * etc. Instead, we apply the heuristic described in the block comment for
 * zfs_metaslab_condense_block_threshold - we only condense if the space used
 * is greater than a threshold number of blocks.
 */
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	vdev_t *vd = msp->ms_group->mg_vd;
	uint64_t vdev_blocksize = 1ULL << vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);
	ASSERT(sm != NULL);
	ASSERT3U(spa_sync_pass(vd->vdev_spa), ==, 1);

	/*
	 * We always condense metaslabs that are empty and metaslabs for
	 * which a condense request has been made.
	 */
	if (zfs_range_tree_numsegs(msp->ms_allocatable) == 0 ||
	    msp->ms_condense_wanted)
		return (B_TRUE);

	uint64_t record_size = MAX(sm->sm_blksz, vdev_blocksize);
	uint64_t object_size = space_map_length(sm);
	uint64_t optimal_size = space_map_estimate_optimal_size(sm,
	    msp->ms_allocatable, SM_NO_VDEVID);

	return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
	    object_size > zfs_metaslab_condense_block_threshold * record_size);
}
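
/*
 * Editor's sketch (illustrative only, not compiled): the condense decision
 * with concrete numbers, using the defaults zfs_condense_pct = 200 and
 * zfs_metaslab_condense_block_threshold = 4. With sm_blksz = 128K and
 * ashift = 12, record_size is 128K, so a space map is only condensed once
 * it is both at least twice its optimal size and larger than 512K.
 */
#if 0
static boolean_t
example_should_condense(uint64_t object_size, uint64_t optimal_size)
{
	const uint64_t record_size = 128 * 1024; /* MAX(sm_blksz, 1 << ashift) */

	/* e.g. object_size = 768K, optimal_size = 300K => condense */
	return (object_size >= optimal_size * 200 / 100 &&
	    object_size > 4 * record_size);
}
#endif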

/*
 * Condense the on-disk space map representation to its minimized form.
 * The minimized form consists of a small number of allocations followed
 * by the entries of the free range tree (ms_allocatable). The condensed
 * spacemap contains all the entries of previous TXGs (including those in
 * the pool-wide log spacemaps; thus this is effectively a superset of
 * metaslab_flush()), but this TXG's entries still need to be written.
 */
static void
metaslab_condense(metaslab_t *msp, dmu_tx_t *tx)
{
	zfs_range_tree_t *condense_tree;
	space_map_t *sm = msp->ms_sm;
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loaded);
	ASSERT(msp->ms_sm != NULL);

	/*
	 * In order to condense the space map, we need to change it so it
	 * only describes which segments are currently allocated and free.
	 *
	 * All the current free space resides in the ms_allocatable, all
	 * the ms_defer trees, and all the ms_allocating trees. We ignore
	 * ms_freed because it is empty because we're in sync pass 1. We
	 * ignore ms_freeing because these changes are not yet reflected
	 * in the spacemap (they will be written later this txg).
	 *
	 * So to truncate the space map to represent all the entries of
	 * previous TXGs we do the following:
	 *
	 * 1] We create a range tree (condense tree) that is 100% empty.
	 * 2] We add to it all segments found in the ms_defer trees
	 *    as those segments are marked as free in the original space
	 *    map. We do the same with the ms_allocating trees for the same
	 *    reason. Adding these segments should be a relatively
	 *    inexpensive operation since we expect these trees to have a
	 *    small number of nodes.
	 * 3] We vacate any unflushed allocs, since they are not frees we
	 *    need to add to the condense tree. Then we vacate any
	 *    unflushed frees as they should already be part of
	 *    ms_allocatable.
	 * 4] At this point, we would ideally like to add all segments
	 *    in the ms_allocatable tree to the condense tree. This way
	 *    we would write all the entries of the condense tree as the
	 *    condensed space map, which would only contain freed
	 *    segments with everything else assumed to be allocated.
	 *
	 *    Doing so can be prohibitively expensive as ms_allocatable can
	 *    be large, and therefore computationally expensive to add to
	 *    the condense_tree. Instead we first sync out an entry marking
	 *    everything as allocated, then the condense_tree and then the
	 *    ms_allocatable, in the condensed space map. While this is not
	 *    optimal, it is typically close to optimal and more importantly
	 *    much cheaper to compute.
	 *
	 * 5] Finally, as both of the unflushed trees were written to our
	 *    new and condensed metaslab space map, we have effectively
	 *    flushed all the unflushed changes to disk, so we call
	 *    metaslab_flush_update().
	 */
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(zfs_range_tree_is_empty(msp->ms_freed)); /* since it is pass 1 */

	zfs_dbgmsg("condensing: txg %llu, msp[%llu] %px, vdev id %llu, "
	    "spa %s, smp size %llu, segments %llu, forcing condense=%s",
	    (u_longlong_t)txg, (u_longlong_t)msp->ms_id, msp,
	    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
	    spa->spa_name, (u_longlong_t)space_map_length(msp->ms_sm),
	    (u_longlong_t)zfs_range_tree_numsegs(msp->ms_allocatable),
	    msp->ms_condense_wanted ? "TRUE" : "FALSE");

	msp->ms_condense_wanted = B_FALSE;

	zfs_range_seg_type_t type;
	uint64_t shift, start;
	type = metaslab_calculate_range_tree_type(msp->ms_group->mg_vd, msp,
	    &start, &shift);

	condense_tree = zfs_range_tree_create(NULL, type, NULL, start, shift);

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		zfs_range_tree_walk(msp->ms_defer[t],
		    zfs_range_tree_add, condense_tree);
	}

	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		zfs_range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
		    zfs_range_tree_add, condense_tree);
	}

	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
	    metaslab_unflushed_changes_memused(msp));
	spa->spa_unflushed_stats.sus_memused -=
	    metaslab_unflushed_changes_memused(msp);
	zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
	zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);

	/*
	 * We're about to drop the metaslab's lock thus allowing other
	 * consumers to change its contents. Set the metaslab's
	 * ms_condensing flag to ensure that allocations on this metaslab
	 * do not occur while we're in the middle of committing it to disk.
	 * This is only critical for ms_allocatable as all other range trees
	 * use per-TXG views of their content.
	 */
	msp->ms_condensing = B_TRUE;

	mutex_exit(&msp->ms_lock);
	uint64_t object = space_map_object(msp->ms_sm);
	space_map_truncate(sm,
	    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
	    zfs_metaslab_sm_blksz_with_log : zfs_metaslab_sm_blksz_no_log, tx);

	/*
	 * space_map_truncate() may have reallocated the spacemap object.
	 * If so, update the vdev_ms_array.
	 */
	if (space_map_object(msp->ms_sm) != object) {
		object = space_map_object(msp->ms_sm);
		dmu_write(spa->spa_meta_objset,
		    msp->ms_group->mg_vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &object, tx);
	}

	/*
	 * Note:
	 * When the log space map feature is enabled, each space map will
	 * always have ALLOCS followed by FREES for each sync pass. This is
	 * typically true even when the log space map feature is disabled,
	 * except for the case where a metaslab goes through metaslab_sync()
	 * and gets condensed. In that case the metaslab's space map will
	 * have ALLOCS followed by FREES (due to condensing) followed by
	 * ALLOCS followed by FREES (due to space_map_write() in
	 * metaslab_sync()) for sync pass 1.
	 */
	zfs_range_tree_t *tmp_tree = zfs_range_tree_create(NULL, type, NULL,
	    start, shift);
	zfs_range_tree_add(tmp_tree, msp->ms_start, msp->ms_size);
	space_map_write(sm, tmp_tree, SM_ALLOC, SM_NO_VDEVID, tx);
	space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
	space_map_write(sm, condense_tree, SM_FREE, SM_NO_VDEVID, tx);

	zfs_range_tree_vacate(condense_tree, NULL, NULL);
	zfs_range_tree_destroy(condense_tree);
	zfs_range_tree_vacate(tmp_tree, NULL, NULL);
	zfs_range_tree_destroy(tmp_tree);
	mutex_enter(&msp->ms_lock);

	msp->ms_condensing = B_FALSE;
	metaslab_flush_update(msp, tx);
}
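
/*
 * Editor's sketch (illustrative only): the shape of a condensed space map
 * produced above, for a hypothetical 1GB metaslab with two free segments.
 * One ALLOC entry covers the whole metaslab and the FREE entries punch the
 * free segments back out; readers replay the entries in order, so the net
 * result describes exactly the allocated space:
 *
 *	ALLOC	[0, 1G)		everything assumed allocated (tmp_tree)
 *	FREE	[128M, 192M)	from ms_allocatable
 *	FREE	[512M, 512M+4K)	from condense_tree (defer/allocating)
 */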

static void
metaslab_unflushed_add(metaslab_t *msp, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	ASSERT(spa_syncing_log_sm(spa) != NULL);
	ASSERT(msp->ms_sm != NULL);
	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));

	mutex_enter(&spa->spa_flushed_ms_lock);
	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
	metaslab_set_unflushed_dirty(msp, B_TRUE);
	avl_add(&spa->spa_metaslabs_by_flushed, msp);
	mutex_exit(&spa->spa_flushed_ms_lock);

	spa_log_sm_increment_current_mscount(spa);
	spa_log_summary_add_flushed_metaslab(spa, B_TRUE);
}

void
metaslab_unflushed_bump(metaslab_t *msp, dmu_tx_t *tx, boolean_t dirty)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	ASSERT(spa_syncing_log_sm(spa) != NULL);
	ASSERT(msp->ms_sm != NULL);
	ASSERT(metaslab_unflushed_txg(msp) != 0);
	ASSERT3P(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL), ==, msp);
	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
	ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));

	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(spa));

	/* update metaslab's position in our flushing tree */
	uint64_t ms_prev_flushed_txg = metaslab_unflushed_txg(msp);
	boolean_t ms_prev_flushed_dirty = metaslab_unflushed_dirty(msp);
	mutex_enter(&spa->spa_flushed_ms_lock);
	avl_remove(&spa->spa_metaslabs_by_flushed, msp);
	metaslab_set_unflushed_txg(msp, spa_syncing_txg(spa), tx);
	metaslab_set_unflushed_dirty(msp, dirty);
	avl_add(&spa->spa_metaslabs_by_flushed, msp);
	mutex_exit(&spa->spa_flushed_ms_lock);

	/* update metaslab counts of spa_log_sm_t nodes */
	spa_log_sm_decrement_mscount(spa, ms_prev_flushed_txg);
	spa_log_sm_increment_current_mscount(spa);

	/* update log space map summary */
	spa_log_summary_decrement_mscount(spa, ms_prev_flushed_txg,
	    ms_prev_flushed_dirty);
	spa_log_summary_add_flushed_metaslab(spa, dirty);

	/* cleanup obsolete logs if any */
	spa_cleanup_old_sm_logs(spa, tx);
}

/*
 * Called when the metaslab has been flushed (its own spacemap now reflects
 * all the contents of the pool-wide spacemap log). Updates the metaslab's
 * metadata and any pool-wide related log space map data (e.g. summary,
 * obsolete logs, etc.) to reflect that.
 */
static void
metaslab_flush_update(metaslab_t *msp, dmu_tx_t *tx)
{
	metaslab_group_t *mg = msp->ms_group;
	spa_t *spa = mg->mg_vd->vdev_spa;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	ASSERT3U(spa_sync_pass(spa), ==, 1);

	/*
	 * Just because a metaslab got flushed, that doesn't mean that
	 * it will pass through metaslab_sync_done(). Thus, make sure to
	 * update ms_synced_length here in case it doesn't.
	 */
	msp->ms_synced_length = space_map_length(msp->ms_sm);

	/*
	 * We may end up here from metaslab_condense() without the
	 * feature being active. In that case this is a no-op.
	 */
	if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP) ||
	    metaslab_unflushed_txg(msp) == 0)
		return;

	metaslab_unflushed_bump(msp, tx, B_FALSE);
}

boolean_t
metaslab_flush(metaslab_t *msp, dmu_tx_t *tx)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(spa_sync_pass(spa), ==, 1);
	ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));

	ASSERT(msp->ms_sm != NULL);
	ASSERT(metaslab_unflushed_txg(msp) != 0);
	ASSERT(avl_find(&spa->spa_metaslabs_by_flushed, msp, NULL) != NULL);

	/*
	 * There is nothing wrong with flushing the same metaslab twice, as
	 * this codepath should work in that case. However, the current
	 * flushing scheme makes sure to avoid this situation as we would be
	 * making all these calls without having anything meaningful to write
	 * to disk. We assert this behavior here.
	 */
	ASSERT3U(metaslab_unflushed_txg(msp), <, dmu_tx_get_txg(tx));

	/*
	 * We cannot flush while loading, because then we would
	 * not load the ms_unflushed_{allocs,frees}.
	 */
	if (msp->ms_loading)
		return (B_FALSE);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	/*
	 * Metaslab condensing is effectively flushing. Therefore if the
	 * metaslab can be condensed we can just condense it instead of
	 * flushing it.
	 *
	 * Note that metaslab_condense() does call metaslab_flush_update()
	 * so we can just return immediately after condensing. We also
	 * don't need to care about setting ms_flushing or broadcasting
	 * ms_flush_cv, even if we temporarily drop the ms_lock in
	 * metaslab_condense(), as the metaslab is already loaded.
	 */
	if (msp->ms_loaded && metaslab_should_condense(msp)) {
		metaslab_group_t *mg = msp->ms_group;

		/*
		 * For all histogram operations below refer to the
		 * comments of metaslab_sync() where we follow a
		 * similar procedure.
		 */
		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);
		metaslab_group_histogram_remove(mg, msp);

		metaslab_condense(msp, tx);

		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);
		ASSERT(zfs_range_tree_is_empty(msp->ms_freed));
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defer[t], tx);
		}
		metaslab_aux_histograms_update(msp);

		metaslab_group_histogram_add(mg, msp);
		metaslab_group_histogram_verify(mg);
		metaslab_class_histogram_verify(mg->mg_class);

		metaslab_verify_space(msp, dmu_tx_get_txg(tx));

		/*
		 * Since we recreated the histogram (and potentially
		 * the ms_sm too while condensing) ensure that the
		 * weight is updated too because we are not guaranteed
		 * that this metaslab is dirty and will go through
		 * metaslab_sync_done().
		 */
		metaslab_recalculate_weight_and_sort(msp);
		return (B_TRUE);
	}

	msp->ms_flushing = B_TRUE;
	uint64_t sm_len_before = space_map_length(msp->ms_sm);

	mutex_exit(&msp->ms_lock);
	space_map_write(msp->ms_sm, msp->ms_unflushed_allocs, SM_ALLOC,
	    SM_NO_VDEVID, tx);
	space_map_write(msp->ms_sm, msp->ms_unflushed_frees, SM_FREE,
	    SM_NO_VDEVID, tx);
	mutex_enter(&msp->ms_lock);

	uint64_t sm_len_after = space_map_length(msp->ms_sm);
	if (zfs_flags & ZFS_DEBUG_LOG_SPACEMAP) {
		zfs_dbgmsg("flushing: txg %llu, spa %s, vdev_id %llu, "
		    "ms_id %llu, unflushed_allocs %llu, unflushed_frees %llu, "
		    "appended %llu bytes", (u_longlong_t)dmu_tx_get_txg(tx),
		    spa_name(spa),
		    (u_longlong_t)msp->ms_group->mg_vd->vdev_id,
		    (u_longlong_t)msp->ms_id,
		    (u_longlong_t)zfs_range_tree_space(
		    msp->ms_unflushed_allocs),
		    (u_longlong_t)zfs_range_tree_space(
		    msp->ms_unflushed_frees),
		    (u_longlong_t)(sm_len_after - sm_len_before));
	}

	ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
	    metaslab_unflushed_changes_memused(msp));
	spa->spa_unflushed_stats.sus_memused -=
	    metaslab_unflushed_changes_memused(msp);
	zfs_range_tree_vacate(msp->ms_unflushed_allocs, NULL, NULL);
	zfs_range_tree_vacate(msp->ms_unflushed_frees, NULL, NULL);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	metaslab_flush_update(msp, tx);

	metaslab_verify_space(msp, dmu_tx_get_txg(tx));
	metaslab_verify_weight_and_frag(msp);

	msp->ms_flushing = B_FALSE;
	cv_broadcast(&msp->ms_flush_cv);
	return (B_TRUE);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	zfs_range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	/*
	 * This metaslab has just been added so there's no work to do now.
	 */
	if (msp->ms_new) {
		ASSERT0(zfs_range_tree_space(alloctree));
		ASSERT0(zfs_range_tree_space(msp->ms_freeing));
		ASSERT0(zfs_range_tree_space(msp->ms_freed));
		ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
		ASSERT0(zfs_range_tree_space(msp->ms_trim));
		return;
	}

	/*
	 * Normally, we don't want to process a metaslab if there are no
	 * allocations or frees to perform. However, if the metaslab is being
	 * forced to condense, it's loaded, and we're not beyond the final
	 * dirty txg, we need to let it through. Not condensing beyond the
	 * final dirty txg prevents an issue where metaslabs that need to be
	 * condensed but were loaded for other reasons could cause a panic
	 * here. By only checking the txg in that branch of the conditional,
	 * we preserve the utility of the VERIFY statements in all other
	 * cases.
	 */
	if (zfs_range_tree_is_empty(alloctree) &&
	    zfs_range_tree_is_empty(msp->ms_freeing) &&
	    zfs_range_tree_is_empty(msp->ms_checkpointing) &&
	    !(msp->ms_loaded && msp->ms_condense_wanted &&
	    txg <= spa_final_dirty_txg(spa)))
		return;

	VERIFY3U(txg, <=, spa_final_dirty_txg(spa));

	/*
	 * The only state that can actually be changing concurrently
	 * with metaslab_sync() is the metaslab's ms_allocatable. No
	 * other thread can be modifying this txg's alloc, freeing,
	 * freed, or space_map_phys_t. We drop ms_lock whenever we
	 * could call into the DMU, because the DMU can call down to
	 * us (e.g. via zio_free()) at any time.
	 *
	 * The spa_vdev_remove_thread() can be reading metaslab state
	 * concurrently, and it is locked out by the ms_sync_lock.
	 * Note that the ms_lock is insufficient for this, because it
	 * is dropped by space_map_write().
	 */
	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * Generate a log space map if one doesn't exist already.
	 */
	spa_generate_syncing_log_sm(spa, tx);

	if (msp->ms_sm == NULL) {
		uint64_t new_object = space_map_alloc(mos,
		    spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP) ?
		    zfs_metaslab_sm_blksz_with_log :
		    zfs_metaslab_sm_blksz_no_log, tx);
		VERIFY3U(new_object, !=, 0);

		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    msp->ms_id, sizeof (uint64_t), &new_object, tx);

		VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
		    msp->ms_start, msp->ms_size, vd->vdev_ashift));
		ASSERT(msp->ms_sm != NULL);

		ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_allocs));
		ASSERT(zfs_range_tree_is_empty(msp->ms_unflushed_frees));
		ASSERT0(metaslab_allocated_space(msp));
	}

	if (!zfs_range_tree_is_empty(msp->ms_checkpointing) &&
	    vd->vdev_checkpoint_sm == NULL) {
		ASSERT(spa_has_checkpoint(spa));

		uint64_t new_object = space_map_alloc(mos,
		    zfs_vdev_standard_sm_blksz, tx);
		VERIFY3U(new_object, !=, 0);

		VERIFY0(space_map_open(&vd->vdev_checkpoint_sm,
		    mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * We save the space map object as an entry in vdev_top_zap
		 * so it can be retrieved when the pool is reopened after an
		 * export or through zdb.
		 */
		VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
		    sizeof (new_object), 1, &new_object, tx));
	}

	mutex_enter(&msp->ms_sync_lock);
	mutex_enter(&msp->ms_lock);

	/*
	 * Note: metaslab_condense() clears the space map's histogram.
	 * Therefore we must verify and remove this histogram before
	 * condensing.
	 */
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);
	metaslab_group_histogram_remove(mg, msp);

	if (spa->spa_sync_pass == 1 && msp->ms_loaded &&
	    metaslab_should_condense(msp))
		metaslab_condense(msp, tx);

	/*
	 * We'll be going to disk to sync our space accounting, thus we
	 * drop the ms_lock during that time so allocations coming from
	 * open-context (ZIL) for future TXGs do not block.
	 */
	mutex_exit(&msp->ms_lock);
	space_map_t *log_sm = spa_syncing_log_sm(spa);
	if (log_sm != NULL) {
		ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));
		if (metaslab_unflushed_txg(msp) == 0)
			metaslab_unflushed_add(msp, tx);
		else if (!metaslab_unflushed_dirty(msp))
			metaslab_unflushed_bump(msp, tx, B_TRUE);

		space_map_write(log_sm, alloctree, SM_ALLOC,
		    vd->vdev_id, tx);
		space_map_write(log_sm, msp->ms_freeing, SM_FREE,
		    vd->vdev_id, tx);
		mutex_enter(&msp->ms_lock);

		ASSERT3U(spa->spa_unflushed_stats.sus_memused, >=,
		    metaslab_unflushed_changes_memused(msp));
		spa->spa_unflushed_stats.sus_memused -=
		    metaslab_unflushed_changes_memused(msp);
		zfs_range_tree_remove_xor_add(alloctree,
		    msp->ms_unflushed_frees, msp->ms_unflushed_allocs);
		zfs_range_tree_remove_xor_add(msp->ms_freeing,
		    msp->ms_unflushed_allocs, msp->ms_unflushed_frees);
		spa->spa_unflushed_stats.sus_memused +=
		    metaslab_unflushed_changes_memused(msp);
	} else {
		ASSERT(!spa_feature_is_enabled(spa, SPA_FEATURE_LOG_SPACEMAP));

		space_map_write(msp->ms_sm, alloctree, SM_ALLOC,
		    SM_NO_VDEVID, tx);
		space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE,
		    SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);
	}
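
	/*
	 * Editor's note on zfs_range_tree_remove_xor_add() above, with a
	 * small worked example (my reading of the helper's semantics): for
	 * each segment in the first tree, any overlap with the second tree
	 * is removed from it and the remainder is added to the third. So if
	 * this txg allocated [0, 10) while ms_unflushed_frees held [4, 6),
	 * the calls net out to ms_unflushed_frees losing [4, 6) and
	 * ms_unflushed_allocs gaining [0, 4) and [6, 10).
	 */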

	msp->ms_allocated_space += zfs_range_tree_space(alloctree);
	ASSERT3U(msp->ms_allocated_space, >=,
	    zfs_range_tree_space(msp->ms_freeing));
	msp->ms_allocated_space -= zfs_range_tree_space(msp->ms_freeing);

	if (!zfs_range_tree_is_empty(msp->ms_checkpointing)) {
		ASSERT(spa_has_checkpoint(spa));
		ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL);

		/*
		 * Since we are doing writes to disk and the ms_checkpointing
		 * tree won't be changing during that time, we drop the
		 * ms_lock while writing to the checkpoint space map, for the
		 * same reason mentioned above.
		 */
		mutex_exit(&msp->ms_lock);
		space_map_write(vd->vdev_checkpoint_sm,
		    msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx);
		mutex_enter(&msp->ms_lock);

		spa->spa_checkpoint_info.sci_dspace +=
		    zfs_range_tree_space(msp->ms_checkpointing);
		vd->vdev_stat.vs_checkpoint_space +=
		    zfs_range_tree_space(msp->ms_checkpointing);
		ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==,
		    -space_map_allocated(vd->vdev_checkpoint_sm));

		zfs_range_tree_vacate(msp->ms_checkpointing, NULL, NULL);
	}

	if (msp->ms_loaded) {
		/*
		 * When the space map is loaded, we have an accurate
		 * histogram in the range tree. This gives us an opportunity
		 * to bring the space map's histogram up-to-date so we clear
		 * it first before updating it.
		 */
		space_map_histogram_clear(msp->ms_sm);
		space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx);

		/*
		 * Since we've cleared the histogram we need to add back
		 * any free space that has already been processed, plus
		 * any deferred space. This allows the on-disk histogram
		 * to accurately reflect all free space even if some space
		 * is not yet available for allocation (i.e. deferred).
		 */
		space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx);

		/*
		 * Add back any deferred free space that has not been
		 * added back into the in-core free tree yet. This will
		 * ensure that we don't end up with a space map histogram
		 * that is completely empty unless the metaslab is fully
		 * allocated.
		 */
		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			space_map_histogram_add(msp->ms_sm,
			    msp->ms_defer[t], tx);
		}
	}

	/*
	 * Always add the free space from this sync pass to the space
	 * map histogram. We want to make sure that the on-disk histogram
	 * accounts for all free space. If the space map is not loaded,
	 * then we will lose some accuracy but will correct it the next
	 * time we load the space map.
	 */
	space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx);
	metaslab_aux_histograms_update(msp);

	metaslab_group_histogram_add(mg, msp);
	metaslab_group_histogram_verify(mg);
	metaslab_class_histogram_verify(mg->mg_class);
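
	/*
	 * Editor's sketch (illustrative only): what the histogram rebuild
	 * above adds up to for a loaded metaslab. The on-disk histogram is
	 * meant to cover every free byte, whether or not it is allocatable
	 * yet:
	 *
	 *	on-disk histogram = ms_allocatable (free and allocatable now)
	 *	                  + ms_freed       (freed earlier this txg)
	 *	                  + ms_defer[0..1] (free, usable in 1-2 txgs)
	 *	                  + ms_freeing     (freed this sync pass)
	 */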

	/*
	 * For sync pass 1, we avoid traversing this txg's free range tree
	 * and instead will just swap the pointers for freeing and freed.
	 * We can safely do this since the freed_tree is guaranteed to be
	 * empty on the initial pass.
	 *
	 * Keep in mind that even if we are currently using a log spacemap
	 * we want current frees to end up in the ms_allocatable (but not
	 * get appended to the ms_sm) so their ranges can be reused as usual.
	 */
	if (spa_sync_pass(spa) == 1) {
		zfs_range_tree_swap(&msp->ms_freeing, &msp->ms_freed);
		ASSERT0(msp->ms_allocated_this_txg);
	} else {
		zfs_range_tree_vacate(msp->ms_freeing,
		    zfs_range_tree_add, msp->ms_freed);
	}
	msp->ms_allocated_this_txg += zfs_range_tree_space(alloctree);
	zfs_range_tree_vacate(alloctree, NULL, NULL);

	ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(zfs_range_tree_space(msp->ms_allocating[TXG_CLEAN(txg)
	    & TXG_MASK]));
	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));

	mutex_exit(&msp->ms_lock);

	/*
	 * Verify that the space map object ID has been recorded in the
	 * vdev_ms_array.
	 */
	uint64_t object;
	VERIFY0(dmu_read(mos, vd->vdev_ms_array,
	    msp->ms_id * sizeof (uint64_t), sizeof (uint64_t), &object, 0));
	VERIFY3U(object, ==, space_map_object(msp->ms_sm));

	mutex_exit(&msp->ms_sync_lock);
	dmu_tx_commit(tx);
}
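
/*
 * Editor's sketch (illustrative only): the life of a free with the default
 * TXG_DEFER_SIZE of 2. A block freed in txg N lands in ms_freeing, is
 * swapped into ms_freed during sync pass 1 of txg N, moves into
 * ms_defer[N % 2] in metaslab_sync_done(), and only re-enters
 * ms_allocatable (becoming allocatable again) when that defer slot is
 * vacated two txgs later:
 *
 *	txg N      : ms_freeing -> ms_freed
 *	txg N done : ms_freed   -> ms_defer[N % 2]
 *	txg N + 2  : ms_defer   -> ms_allocatable
 */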

static void
metaslab_evict(metaslab_t *msp, uint64_t txg)
{
	if (!msp->ms_loaded || msp->ms_disabled != 0)
		return;

	for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
		VERIFY0(zfs_range_tree_space(
		    msp->ms_allocating[(txg + t) & TXG_MASK]));
	}
	if (msp->ms_allocator != -1)
		metaslab_passivate(msp, msp->ms_weight & ~METASLAB_ACTIVE_MASK);

	if (!metaslab_debug_unload)
		metaslab_unload(msp);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	spa_t *spa = vd->vdev_spa;
	zfs_range_tree_t **defer_tree;
	int64_t alloc_delta, defer_delta;
	boolean_t defer_allowed = B_TRUE;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	if (msp->ms_new) {
		/* this is a new metaslab, add its capacity to the vdev */
		metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size);

		/* there should be no allocations nor frees at this point */
		VERIFY0(msp->ms_allocated_this_txg);
		VERIFY0(zfs_range_tree_space(msp->ms_freed));
	}

	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));

	defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE];

	uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
	    metaslab_class_get_alloc(spa_normal_class(spa));
	if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing ||
	    vd->vdev_rz_expanding) {
		defer_allowed = B_FALSE;
	}

	defer_delta = 0;
	alloc_delta = msp->ms_allocated_this_txg -
	    zfs_range_tree_space(msp->ms_freed);

	if (defer_allowed) {
		defer_delta = zfs_range_tree_space(msp->ms_freed) -
		    zfs_range_tree_space(*defer_tree);
	} else {
		defer_delta -= zfs_range_tree_space(*defer_tree);
	}
	metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta,
	    defer_delta, 0);

	if (spa_syncing_log_sm(spa) == NULL) {
		/*
		 * If there's a metaslab_load() in progress and we don't have
		 * a log space map, it means that we probably wrote to the
		 * metaslab's space map. If this is the case, we need to
		 * make sure that we wait for the load to complete so that we
		 * have a consistent view of the in-core side of the metaslab.
		 */
		metaslab_load_wait(msp);
	} else {
		ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP));
	}

	/*
	 * When auto-trimming is enabled, free ranges which are added to
	 * ms_allocatable are also added to ms_trim. The ms_trim tree is
	 * periodically consumed by the vdev_autotrim_thread() which issues
	 * trims for all ranges and then vacates the tree. The ms_trim tree
	 * can be discarded at any time with the sole consequence of recent
	 * frees not being trimmed.
	 */
	if (spa_get_autotrim(spa) == SPA_AUTOTRIM_ON) {
		zfs_range_tree_walk(*defer_tree, zfs_range_tree_add,
		    msp->ms_trim);
		if (!defer_allowed) {
			zfs_range_tree_walk(msp->ms_freed, zfs_range_tree_add,
			    msp->ms_trim);
		}
	} else {
		zfs_range_tree_vacate(msp->ms_trim, NULL, NULL);
	}

	/*
	 * Move the frees from the defer_tree back to the free
	 * range tree (if it's loaded). Swap the freed_tree and
	 * the defer_tree -- this is safe to do because we've
	 * just emptied out the defer_tree.
	 */
	zfs_range_tree_vacate(*defer_tree,
	    msp->ms_loaded ? zfs_range_tree_add : NULL, msp->ms_allocatable);
	if (defer_allowed) {
		zfs_range_tree_swap(&msp->ms_freed, defer_tree);
	} else {
		zfs_range_tree_vacate(msp->ms_freed,
		    msp->ms_loaded ? zfs_range_tree_add : NULL,
		    msp->ms_allocatable);
	}

	msp->ms_synced_length = space_map_length(msp->ms_sm);

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}
	metaslab_aux_histograms_update_done(msp, defer_allowed);

	if (msp->ms_new) {
		msp->ms_new = B_FALSE;
		mutex_enter(&mg->mg_lock);
		mg->mg_ms_ready++;
		mutex_exit(&mg->mg_lock);
	}

	/*
	 * Re-sort metaslab within its group now that we've adjusted
	 * its allocatable space.
	 */
	metaslab_recalculate_weight_and_sort(msp);

	ASSERT0(zfs_range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
	ASSERT0(zfs_range_tree_space(msp->ms_freeing));
	ASSERT0(zfs_range_tree_space(msp->ms_freed));
	ASSERT0(zfs_range_tree_space(msp->ms_checkpointing));
	msp->ms_allocating_total -= msp->ms_allocated_this_txg;
	msp->ms_allocated_this_txg = 0;
	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_class->mc_spa;

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
	mg->mg_fragmentation = metaslab_group_fragmentation(mg);
	metaslab_group_alloc_update(mg);

	/*
	 * Preload the next potential metaslabs but only on active
	 * metaslab groups. We can get into a state where the metaslab
	 * is no longer active since we dirty metaslabs as we remove a
	 * device, thus potentially making the metaslab group eligible
	 * for preloading.
	 */
	if (mg->mg_activation_count > 0) {
		metaslab_group_preload(mg);
	}
	spa_config_exit(spa, SCL_ALLOC, FTAG);
}

/*
 * When writing a ditto block (i.e. more than one DVA for a given BP) on
 * the same vdev as an existing DVA of this BP, then try to allocate it
 * on a different metaslab than existing DVAs (i.e. a unique metaslab).
 */
static boolean_t
metaslab_is_unique(metaslab_t *msp, dva_t *dva)
{
	uint64_t dva_ms_id;

	if (DVA_GET_ASIZE(dva) == 0)
		return (B_TRUE);

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (B_TRUE);

	dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;

	return (msp->ms_id != dva_ms_id);
}
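
/*
 * Editor's sketch (illustrative only, not compiled): the offset-to-metaslab
 * mapping used above, assuming vdev_ms_shift = 34 (16GB metaslabs). A DVA
 * at offset 48GB on the same vdev maps to metaslab id 3, so allocating from
 * metaslab 3 would not be "unique" for that BP.
 */
#if 0
static void
example_dva_to_ms_id(void)
{
	const uint64_t ms_shift = 34;			/* 16GB metaslabs */
	uint64_t offset = 48ULL * 1024 * 1024 * 1024;	/* 48GB */

	ASSERT3U(offset >> ms_shift, ==, 3);
}
#endif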

/*
 * ==========================================================================
 * Metaslab allocation tracing facility
 * ==========================================================================
 */

/*
 * Add an allocation trace element to the allocation tracing list.
 */
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
    metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
    int allocator)
{
	metaslab_alloc_trace_t *mat;

	if (!metaslab_trace_enabled)
		return;

	/*
	 * When the tracing list reaches its maximum we remove
	 * the second element in the list before adding a new one.
	 * By removing the second element we preserve the original
	 * entry as a clue to what allocation steps have already been
	 * performed.
	 */
	if (zal->zal_size == metaslab_trace_max_entries) {
		metaslab_alloc_trace_t *mat_next;
#ifdef ZFS_DEBUG
		panic("too many entries in allocation list");
#endif
		METASLABSTAT_BUMP(metaslabstat_trace_over_limit);
		zal->zal_size--;
		mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
		list_remove(&zal->zal_list, mat_next);
		kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
	}

	mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
	list_link_init(&mat->mat_list_node);
	mat->mat_mg = mg;
	mat->mat_msp = msp;
	mat->mat_size = psize;
	mat->mat_dva_id = dva_id;
	mat->mat_offset = offset;
	mat->mat_weight = 0;
	mat->mat_allocator = allocator;

	if (msp != NULL)
		mat->mat_weight = msp->ms_weight;

	/*
	 * The list is part of the zio so locking is not required. Only
	 * a single thread will perform allocations for a given zio.
	 */
	list_insert_tail(&zal->zal_list, mat);
	zal->zal_size++;

	ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}

void
metaslab_trace_init(zio_alloc_list_t *zal)
{
	list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
	    offsetof(metaslab_alloc_trace_t, mat_list_node));
	zal->zal_size = 0;
}

void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
	metaslab_alloc_trace_t *mat;

	while ((mat = list_remove_head(&zal->zal_list)) != NULL)
		kmem_cache_free(metaslab_alloc_trace_cache, mat);
	list_destroy(&zal->zal_list);
	zal->zal_size = 0;
}

/*
 * ==========================================================================
 * Metaslab block operations
 * ==========================================================================
 */

static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, const void *tag,
    int flags, int allocator)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
	(void) zfs_refcount_add(&mga->mga_alloc_queue_depth, tag);
}

static void
metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
{
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
	metaslab_class_allocator_t *mca =
	    &mg->mg_class->mc_allocator[allocator];
	uint64_t max = mg->mg_max_alloc_queue_depth;
	uint64_t cur = mga->mga_cur_max_alloc_queue_depth;
	while (cur < max) {
		if (atomic_cas_64(&mga->mga_cur_max_alloc_queue_depth,
		    cur, cur + 1) == cur) {
			atomic_inc_64(&mca->mca_alloc_max_slots);
			return;
		}
		cur = mga->mga_cur_max_alloc_queue_depth;
	}
}

void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, const void *tag,
    int flags, int allocator, boolean_t io_complete)
{
	if (!(flags & METASLAB_ASYNC_ALLOC) ||
	    (flags & METASLAB_DONT_THROTTLE))
		return;

	metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
	if (!mg->mg_class->mc_alloc_throttle_enabled)
		return;

	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
	(void) zfs_refcount_remove(&mga->mga_alloc_queue_depth, tag);
	if (io_complete)
		metaslab_group_increment_qdepth(mg, allocator);
}

void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, const void *tag,
    int allocator)
{
#ifdef ZFS_DEBUG
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	for (int d = 0; d < ndvas; d++) {
		uint64_t vdev = DVA_GET_VDEV(&dva[d]);
		metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
		metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];
		VERIFY(zfs_refcount_not_held(&mga->mga_alloc_queue_depth, tag));
	}
#endif
}

static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
	uint64_t start;
	zfs_range_tree_t *rt = msp->ms_allocatable;
	metaslab_class_t *mc = msp->ms_group->mg_class;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	VERIFY(!msp->ms_condensing);
	VERIFY0(msp->ms_disabled);
	VERIFY0(msp->ms_new);

	start = mc->mc_ops->msop_alloc(msp, size);
	if (start != -1ULL) {
		metaslab_group_t *mg = msp->ms_group;
		vdev_t *vd = mg->mg_vd;

		VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
		VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
		VERIFY3U(zfs_range_tree_space(rt) - size, <=, msp->ms_size);
		zfs_range_tree_remove(rt, start, size);
		zfs_range_tree_clear(msp->ms_trim, start, size);

		if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
			vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

		zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], start,
		    size);
		msp->ms_allocating_total += size;

		/* Track the last successful allocation */
		msp->ms_alloc_txg = txg;
		metaslab_verify_space(msp, txg);
	}

	/*
	 * Now that we've attempted the allocation we need to update the
	 * metaslab's maximum block size since it may have changed.
	 */
	msp->ms_max_size = metaslab_largest_allocatable(msp);
	return (start);
}
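
/*
 * Editor's sketch (illustrative only, not compiled): the alignment
 * invariant verified by the P2PHASE() checks above. Both the returned
 * offset and the size are multiples of the vdev's minimum allocation
 * unit, e.g. 4K when ashift = 12.
 */
#if 0
static void
example_ashift_alignment(void)
{
	const uint64_t align = 1ULL << 12;	/* ashift = 12 => 4K */

	/* P2PHASE(x, align) is x % align for power-of-two alignments */
	ASSERT0(P2PHASE(0x10000, align));		/* 64K: aligned */
	ASSERT3U(P2PHASE(0x10200, align), ==, 0x200);	/* 512B into a 4K block */
}
#endif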

/*
 * Find the metaslab with the highest weight that is less than what we've
 * already tried. In the common case, this means that we will examine each
 * metaslab at most once. Note that concurrent callers could reorder metaslabs
 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
 * activated by another thread, and we fail to allocate from the metaslab we
 * have selected, we may not try the newly-activated metaslab, and instead
 * activate another metaslab. This is not optimal, but generally does not cause
 * any problems (a possible exception being if every metaslab is completely
 * full except for the newly-activated metaslab which we fail to examine).
 */
static metaslab_t *
find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
    dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator,
    boolean_t try_hard, zio_alloc_list_t *zal, metaslab_t *search,
    boolean_t *was_active)
{
	avl_index_t idx;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	metaslab_t *msp = avl_find(t, search, &idx);
	if (msp == NULL)
		msp = avl_nearest(t, idx, AVL_AFTER);

	uint_t tries = 0;
	for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
		int i;

		if (!try_hard && tries > zfs_metaslab_find_max_tries) {
			METASLABSTAT_BUMP(metaslabstat_too_many_tries);
			return (NULL);
		}
		tries++;

		if (!metaslab_should_allocate(msp, asize, try_hard)) {
			metaslab_trace_add(zal, mg, msp, asize, d,
			    TRACE_TOO_SMALL, allocator);
			continue;
		}

		/*
		 * If the selected metaslab is condensing or disabled, or
		 * hasn't gone through a metaslab_sync_done(), then skip it.
		 */
		if (msp->ms_condensing || msp->ms_disabled > 0 || msp->ms_new)
			continue;

		*was_active = msp->ms_allocator != -1;
		/*
		 * If we're activating as primary, this is our first allocation
		 * from this disk, so we don't need to check how close we are.
		 * If the metaslab under consideration was already active,
		 * we're getting desperate enough to steal another allocator's
		 * metaslab, so we still don't care about distances.
		 */
		if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active)
			break;

		for (i = 0; i < d; i++) {
			if (want_unique &&
			    !metaslab_is_unique(msp, &dva[i]))
				break;	/* try another metaslab */
		}
		if (i == d)
			break;
	}

	if (msp != NULL) {
		search->ms_weight = msp->ms_weight;
		search->ms_start = msp->ms_start + 1;
		search->ms_allocator = msp->ms_allocator;
		search->ms_primary = msp->ms_primary;
	}
	return (msp);
}
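
/*
 * Editor's note on the resume key above (my reading of the mechanism):
 * the metaslab tree is weight-sorted with ties broken by other fields,
 * so copying the chosen metaslab's sort fields into the caller's dummy
 * "search" node and nudging ms_start by one makes the next avl_find()
 * resume strictly after the metaslab we just tried, guaranteeing forward
 * progress instead of reconsidering the same entries forever. The caller
 * (metaslab_group_alloc_normal(), below) seeds the key with
 * ms_weight = UINT64_MAX and ms_start = 0 to start at the "best" end.
 */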

static void
metaslab_active_mask_verify(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0)
		return;

	if (msp->ms_weight & METASLAB_WEIGHT_PRIMARY) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
		VERIFY3S(msp->ms_allocator, !=, -1);
		VERIFY(msp->ms_primary);
		return;
	}

	if (msp->ms_weight & METASLAB_WEIGHT_SECONDARY) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_CLAIM);
		VERIFY3S(msp->ms_allocator, !=, -1);
		VERIFY(!msp->ms_primary);
		return;
	}

	if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) {
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_PRIMARY);
		VERIFY0(msp->ms_weight & METASLAB_WEIGHT_SECONDARY);
		VERIFY3S(msp->ms_allocator, ==, -1);
		return;
	}
}

static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
    uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d,
    int allocator, boolean_t try_hard)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;

	uint64_t activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (int i = 0; i < d; i++) {
		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_CLAIM;
			break;
		}
	}

	/*
	 * If we don't have enough metaslabs active to fill the entire array,
	 * we just use the 0th slot.
	 */
	if (mg->mg_ms_ready < mg->mg_allocators * 3)
		allocator = 0;
	metaslab_group_allocator_t *mga = &mg->mg_allocator[allocator];

	ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2);

	metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
	search->ms_weight = UINT64_MAX;
	search->ms_start = 0;
	/*
	 * At the end of the metaslab tree are the already-active metaslabs,
	 * first the primaries, then the secondaries. When we resume searching
	 * through the tree, we need to consider ms_allocator and ms_primary so
	 * we start in the location right after where we left off, and don't
	 * accidentally loop forever considering the same metaslabs.
	 */
	search->ms_allocator = -1;
	search->ms_primary = B_TRUE;
	for (;;) {
		boolean_t was_active = B_FALSE;

		mutex_enter(&mg->mg_lock);

		if (activation_weight == METASLAB_WEIGHT_PRIMARY &&
		    mga->mga_primary != NULL) {
			msp = mga->mga_primary;

			/*
			 * Even though we don't hold the ms_lock for the
			 * primary metaslab, those fields should not
			 * change while we hold the mg_lock. Thus it is
			 * safe to make assertions on them.
			 */
			ASSERT(msp->ms_primary);
			ASSERT3S(msp->ms_allocator, ==, allocator);
			ASSERT(msp->ms_loaded);

			was_active = B_TRUE;
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
		} else if (activation_weight == METASLAB_WEIGHT_SECONDARY &&
		    mga->mga_secondary != NULL) {
			msp = mga->mga_secondary;

			/*
			 * See comment above about the similar assertions
			 * for the primary metaslab.
			 */
			ASSERT(!msp->ms_primary);
			ASSERT3S(msp->ms_allocator, ==, allocator);
			ASSERT(msp->ms_loaded);

			was_active = B_TRUE;
			ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
		} else {
			msp = find_valid_metaslab(mg, activation_weight, dva, d,
			    want_unique, asize, allocator, try_hard, zal,
			    search, &was_active);
		}

		mutex_exit(&mg->mg_lock);
		if (msp == NULL) {
			kmem_free(search, sizeof (*search));
			return (-1ULL);
		}
		mutex_enter(&msp->ms_lock);

		metaslab_active_mask_verify(msp);

		/*
		 * This code is disabled because of issues with
		 * tracepoints in non-GPL kernel modules.
		 */
#if 0
		DTRACE_PROBE3(ms__activation__attempt,
		    metaslab_t *, msp, uint64_t, activation_weight,
		    boolean_t, was_active);
#endif
4969 */ 4970 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && 4971 activation_weight != METASLAB_WEIGHT_CLAIM) { 4972 ASSERT(msp->ms_loaded); 4973 ASSERT3S(msp->ms_allocator, ==, -1); 4974 metaslab_passivate(msp, msp->ms_weight & 4975 ~METASLAB_WEIGHT_CLAIM); 4976 mutex_exit(&msp->ms_lock); 4977 continue; 4978 } 4979 4980 metaslab_set_selected_txg(msp, txg); 4981 4982 int activation_error = 4983 metaslab_activate(msp, allocator, activation_weight); 4984 metaslab_active_mask_verify(msp); 4985 4986 /* 4987 * If the metaslab was activated by another thread for 4988 * another allocator or activation_weight (EBUSY), or it 4989 * failed because another metaslab was assigned as primary 4990 * for this allocator (EEXIST), we continue using this 4991 * metaslab for our allocation, rather than going on to a 4992 * worse metaslab (we waited for that metaslab to be loaded 4993 * after all). 4994 * 4995 * If the activation failed due to an I/O error or ENOSPC we 4996 * skip to the next metaslab. 4997 */ 4998 boolean_t activated; 4999 if (activation_error == 0) { 5000 activated = B_TRUE; 5001 } else if (activation_error == EBUSY || 5002 activation_error == EEXIST) { 5003 activated = B_FALSE; 5004 } else { 5005 mutex_exit(&msp->ms_lock); 5006 continue; 5007 } 5008 ASSERT(msp->ms_loaded); 5009 5010 /* 5011 * Now that we have the lock, recheck to see if we should 5012 * continue to use this metaslab for this allocation. The 5013 * metaslab is now loaded so metaslab_should_allocate() 5014 * can accurately determine if the allocation attempt should 5015 * proceed. 5016 */ 5017 if (!metaslab_should_allocate(msp, asize, try_hard)) { 5018 /* Passivate this metaslab and select a new one. */ 5019 metaslab_trace_add(zal, mg, msp, asize, d, 5020 TRACE_TOO_SMALL, allocator); 5021 goto next; 5022 } 5023 5024 /* 5025 * If this metaslab is currently condensing then pick again 5026 * as we can't manipulate this metaslab until it's committed 5027 * to disk. If this metaslab is being initialized, we shouldn't 5028 * allocate from it since the allocated region might be 5029 * overwritten after allocation. 5030 */ 5031 if (msp->ms_condensing) { 5032 metaslab_trace_add(zal, mg, msp, asize, d, 5033 TRACE_CONDENSING, allocator); 5034 if (activated) { 5035 metaslab_passivate(msp, msp->ms_weight & 5036 ~METASLAB_ACTIVE_MASK); 5037 } 5038 mutex_exit(&msp->ms_lock); 5039 continue; 5040 } else if (msp->ms_disabled > 0) { 5041 metaslab_trace_add(zal, mg, msp, asize, d, 5042 TRACE_DISABLED, allocator); 5043 if (activated) { 5044 metaslab_passivate(msp, msp->ms_weight & 5045 ~METASLAB_ACTIVE_MASK); 5046 } 5047 mutex_exit(&msp->ms_lock); 5048 continue; 5049 } 5050 5051 offset = metaslab_block_alloc(msp, asize, txg); 5052 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); 5053 5054 if (offset != -1ULL) { 5055 /* Proactively passivate the metaslab, if needed */ 5056 if (activated) 5057 metaslab_segment_may_passivate(msp); 5058 break; 5059 } 5060 next: 5061 ASSERT(msp->ms_loaded); 5062 5063 /* 5064 * This code is disabled because of issues with 5065 * tracepoints in non-GPL kernel modules. 5066 */ 5067 #if 0 5068 DTRACE_PROBE2(ms__alloc__failure, metaslab_t *, msp, 5069 uint64_t, asize); 5070 #endif 5071 5072 /* 5073 * We were unable to allocate from this metaslab so determine 5074 * a new weight for this metaslab. Now that we have loaded 5075 * the metaslab we can provide a better hint to the metaslab 5076 * selector. 5077 * 5078 * For space-based metaslabs, we use the maximum block size.
5079 * This information is only available when the metaslab 5080 * is loaded and is more accurate than the generic free 5081 * space weight that was calculated by metaslab_weight(). 5082 * This information allows us to quickly compare the maximum 5083 * available allocation in the metaslab to the allocation 5084 * size being requested. 5085 * 5086 * For segment-based metaslabs, determine the new weight 5087 * based on the highest bucket in the range tree. We 5088 * explicitly use the loaded segment weight (i.e. the range 5089 * tree histogram) since it contains the space that is 5090 * currently available for allocation and is accurate 5091 * even within a sync pass. 5092 */ 5093 uint64_t weight; 5094 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 5095 weight = metaslab_largest_allocatable(msp); 5096 WEIGHT_SET_SPACEBASED(weight); 5097 } else { 5098 weight = metaslab_weight_from_range_tree(msp); 5099 } 5100 5101 if (activated) { 5102 metaslab_passivate(msp, weight); 5103 } else { 5104 /* 5105 * For the case where we use the metaslab that is 5106 * active for another allocator we want to make 5107 * sure that we retain the activation mask. 5108 * 5109 * Note that we could attempt to use something like 5110 * metaslab_recalculate_weight_and_sort() that 5111 * retains the activation mask here. That function, 5112 * though, uses metaslab_weight() to set the weight, 5113 * which is not as accurate as the calculations 5114 * above. 5115 */ 5116 weight |= msp->ms_weight & METASLAB_ACTIVE_MASK; 5117 metaslab_group_sort(mg, msp, weight); 5118 } 5119 metaslab_active_mask_verify(msp); 5120 5121 /* 5122 * We have just failed an allocation attempt; check 5123 * that metaslab_should_allocate() agrees. Otherwise, 5124 * we may end up in an infinite loop retrying the same 5125 * metaslab. 5126 */ 5127 ASSERT(!metaslab_should_allocate(msp, asize, try_hard)); 5128 5129 mutex_exit(&msp->ms_lock); 5130 } 5131 mutex_exit(&msp->ms_lock); 5132 kmem_free(search, sizeof (*search)); 5133 return (offset); 5134 } 5135 5136 static uint64_t 5137 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 5138 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, int d, 5139 int allocator, boolean_t try_hard) 5140 { 5141 uint64_t offset; 5142 5143 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 5144 dva, d, allocator, try_hard); 5145 5146 mutex_enter(&mg->mg_lock); 5147 if (offset == -1ULL) { 5148 mg->mg_failed_allocations++; 5149 metaslab_trace_add(zal, mg, NULL, asize, d, 5150 TRACE_GROUP_FAILURE, allocator); 5151 if (asize == SPA_GANGBLOCKSIZE) { 5152 /* 5153 * This metaslab group was unable to allocate 5154 * the minimum gang block size so it must be out of 5155 * space. We must notify the allocation throttle 5156 * to start skipping allocation attempts to this 5157 * metaslab group until more space becomes available. 5158 * Note: this failure cannot be caused by the 5159 * allocation throttle since the allocation throttle 5160 * is only responsible for skipping devices and 5161 * not failing block allocations. 5162 */ 5163 mg->mg_no_free_space = B_TRUE; 5164 } 5165 } 5166 mg->mg_allocations++; 5167 mutex_exit(&mg->mg_lock); 5168 return (offset); 5169 } 5170 5171 /* 5172 * Allocate a block for the specified i/o.
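 *
 * Editorial sketch of the result (values hypothetical): on success this
 * routine fills in dva[d] exactly as done near the bottom of the function:
 *
 *	DVA_SET_VDEV(&dva[d], vd->vdev_id);	- top-level vdev id
 *	DVA_SET_OFFSET(&dva[d], offset);	- byte offset within the vdev
 *	DVA_SET_GANG(&dva[d], 0 or 1);		- gang header flag
 *	DVA_SET_ASIZE(&dva[d], asize);		- allocated (physical) size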
5173 */ 5174 int 5175 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 5176 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 5177 zio_alloc_list_t *zal, int allocator) 5178 { 5179 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5180 metaslab_group_t *mg, *rotor; 5181 vdev_t *vd; 5182 boolean_t try_hard = B_FALSE; 5183 5184 ASSERT(!DVA_IS_VALID(&dva[d])); 5185 5186 /* 5187 * For testing, make some blocks above a certain size be gang blocks. 5188 * This will result in more split blocks when using device removal, 5189 * and a large number of split blocks coupled with ztest-induced 5190 * damage can result in extremely long reconstruction times. This 5191 * will also test spilling from special to normal. 5192 */ 5193 if (psize >= metaslab_force_ganging && 5194 metaslab_force_ganging_pct > 0 && 5195 (random_in_range(100) < MIN(metaslab_force_ganging_pct, 100))) { 5196 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 5197 allocator); 5198 return (SET_ERROR(ENOSPC)); 5199 } 5200 5201 /* 5202 * Start at the rotor and loop through all mgs until we find something. 5203 * Note that there's no locking on mca_rotor or mca_aliquot because 5204 * nothing actually breaks if we miss a few updates -- we just won't 5205 * allocate quite as evenly. It all balances out over time. 5206 * 5207 * If we are doing ditto or log blocks, try to spread them across 5208 * consecutive vdevs. If we're forced to reuse a vdev before we've 5209 * allocated all of our ditto blocks, then try to spread them out on 5210 * that vdev as much as possible. If it turns out to not be possible, 5211 * gradually lower our standards until anything becomes acceptable. 5212 * Also, allocating on consecutive vdevs (as opposed to random vdevs) 5213 * gives us hope of containing our fault domains to something we're 5214 * able to reason about. Otherwise, any two top-level vdev failures 5215 * will guarantee the loss of data. With consecutive allocation, 5216 * only two adjacent top-level vdev failures will result in data loss. 5217 * 5218 * If we are doing gang blocks (hintdva is non-NULL), try to keep 5219 * ourselves on the same vdev as our gang block header. That 5220 * way, we can hope for locality in vdev_cache, plus it makes our 5221 * fault domains something tractable. 5222 */ 5223 if (hintdva) { 5224 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 5225 5226 /* 5227 * It's possible the vdev we're using as the hint no 5228 * longer exists or its mg has been closed (e.g. by 5229 * device removal). Consult the rotor when 5230 * all else fails. 5231 */ 5232 if (vd != NULL && vd->vdev_mg != NULL) { 5233 mg = vdev_get_mg(vd, mc); 5234 5235 if (flags & METASLAB_HINTBP_AVOID) 5236 mg = mg->mg_next; 5237 } else { 5238 mg = mca->mca_rotor; 5239 } 5240 } else if (d != 0) { 5241 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 5242 mg = vd->vdev_mg->mg_next; 5243 } else { 5244 ASSERT(mca->mca_rotor != NULL); 5245 mg = mca->mca_rotor; 5246 } 5247 5248 /* 5249 * If the hint put us into the wrong metaslab class, or into a 5250 * metaslab group that has been passivated, just follow the rotor. 5251 */ 5252 if (mg->mg_class != mc || mg->mg_activation_count <= 0) 5253 mg = mca->mca_rotor; 5254 5255 rotor = mg; 5256 top: 5257 do { 5258 boolean_t allocatable; 5259 5260 ASSERT(mg->mg_activation_count == 1); 5261 vd = mg->mg_vd; 5262 5263 /* 5264 * Don't allocate from faulted devices.
5265 */ 5266 if (try_hard) { 5267 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 5268 allocatable = vdev_allocatable(vd); 5269 spa_config_exit(spa, SCL_ZIO, FTAG); 5270 } else { 5271 allocatable = vdev_allocatable(vd); 5272 } 5273 5274 /* 5275 * Determine if the selected metaslab group is eligible 5276 * for allocations. If we're ganging then don't allow 5277 * this metaslab group to skip allocations since that would 5278 * inadvertently return ENOSPC and suspend the pool 5279 * even though space is still available. 5280 */ 5281 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 5282 allocatable = metaslab_group_allocatable(mg, rotor, 5283 flags, psize, allocator, d); 5284 } 5285 5286 if (!allocatable) { 5287 metaslab_trace_add(zal, mg, NULL, psize, d, 5288 TRACE_NOT_ALLOCATABLE, allocator); 5289 goto next; 5290 } 5291 5292 /* 5293 * Avoid writing single-copy data to an unhealthy, 5294 * non-redundant vdev, unless we've already tried all 5295 * other vdevs. 5296 */ 5297 if (vd->vdev_state < VDEV_STATE_HEALTHY && 5298 d == 0 && !try_hard && vd->vdev_children == 0) { 5299 metaslab_trace_add(zal, mg, NULL, psize, d, 5300 TRACE_VDEV_ERROR, allocator); 5301 goto next; 5302 } 5303 5304 ASSERT(mg->mg_class == mc); 5305 5306 uint64_t asize = vdev_psize_to_asize_txg(vd, psize, txg); 5307 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 5308 5309 /* 5310 * If we don't need to try hard, then require that the 5311 * block be on a different metaslab from any other DVAs 5312 * in this BP (unique=true). If we are trying hard, then 5313 * allow any metaslab to be used (unique=false). 5314 */ 5315 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 5316 !try_hard, dva, d, allocator, try_hard); 5317 5318 if (offset != -1ULL) { 5319 /* 5320 * If we've just selected this metaslab group, 5321 * figure out whether the corresponding vdev is 5322 * over- or under-used relative to the pool, 5323 * and set an allocation bias to even it out. 5324 * 5325 * Bias is also used to compensate for unequally 5326 * sized vdevs so that space is allocated fairly. 5327 */ 5328 if (mca->mca_aliquot == 0 && metaslab_bias_enabled) { 5329 vdev_stat_t *vs = &vd->vdev_stat; 5330 int64_t vs_free = vs->vs_space - vs->vs_alloc; 5331 int64_t mc_free = mc->mc_space - mc->mc_alloc; 5332 int64_t ratio; 5333 5334 /* 5335 * Calculate how much more or less we should 5336 * try to allocate from this device during 5337 * this iteration around the rotor. 5338 * 5339 * This basically introduces a zero-centered 5340 * bias towards the devices with the most 5341 * free space, while compensating for vdev 5342 * size differences. 5343 * 5344 * Examples: 5345 * vdev V1 = 16M/128M 5346 * vdev V2 = 16M/128M 5347 * ratio(V1) = 100% ratio(V2) = 100% 5348 * 5349 * vdev V1 = 16M/128M 5350 * vdev V2 = 64M/128M 5351 * ratio(V1) = 127% ratio(V2) = 72% 5352 * 5353 * vdev V1 = 16M/128M 5354 * vdev V2 = 64M/512M 5355 * ratio(V1) = 40% ratio(V2) = 160% 5356 */ 5357 ratio = (vs_free * mc->mc_alloc_groups * 100) / 5358 (mc_free + 1); 5359 mg->mg_bias = ((ratio - 100) * 5360 (int64_t)mg->mg_aliquot) / 100; 5361 } else if (!metaslab_bias_enabled) { 5362 mg->mg_bias = 0; 5363 } 5364 5365 if ((flags & METASLAB_ZIL) || 5366 atomic_add_64_nv(&mca->mca_aliquot, asize) >= 5367 mg->mg_aliquot + mg->mg_bias) { 5368 mca->mca_rotor = mg->mg_next; 5369 mca->mca_aliquot = 0; 5370 } 5371 5372 DVA_SET_VDEV(&dva[d], vd->vdev_id); 5373 DVA_SET_OFFSET(&dva[d], offset); 5374 DVA_SET_GANG(&dva[d], 5375 ((flags & METASLAB_GANG_HEADER) ? 
1 : 0)); 5376 DVA_SET_ASIZE(&dva[d], asize); 5377 5378 return (0); 5379 } 5380 next: 5381 mca->mca_rotor = mg->mg_next; 5382 mca->mca_aliquot = 0; 5383 } while ((mg = mg->mg_next) != rotor); 5384 5385 /* 5386 * If we haven't tried hard, perhaps do so now. 5387 */ 5388 if (!try_hard && (zfs_metaslab_try_hard_before_gang || 5389 GANG_ALLOCATION(flags) || (flags & METASLAB_ZIL) != 0 || 5390 psize <= 1 << spa->spa_min_ashift)) { 5391 METASLABSTAT_BUMP(metaslabstat_try_hard); 5392 try_hard = B_TRUE; 5393 goto top; 5394 } 5395 5396 memset(&dva[d], 0, sizeof (dva_t)); 5397 5398 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 5399 return (SET_ERROR(ENOSPC)); 5400 } 5401 5402 void 5403 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 5404 boolean_t checkpoint) 5405 { 5406 metaslab_t *msp; 5407 spa_t *spa = vd->vdev_spa; 5408 5409 ASSERT(vdev_is_concrete(vd)); 5410 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5411 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 5412 5413 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5414 5415 VERIFY(!msp->ms_condensing); 5416 VERIFY3U(offset, >=, msp->ms_start); 5417 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 5418 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5419 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 5420 5421 metaslab_check_free_impl(vd, offset, asize); 5422 5423 mutex_enter(&msp->ms_lock); 5424 if (zfs_range_tree_is_empty(msp->ms_freeing) && 5425 zfs_range_tree_is_empty(msp->ms_checkpointing)) { 5426 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 5427 } 5428 5429 if (checkpoint) { 5430 ASSERT(spa_has_checkpoint(spa)); 5431 zfs_range_tree_add(msp->ms_checkpointing, offset, asize); 5432 } else { 5433 zfs_range_tree_add(msp->ms_freeing, offset, asize); 5434 } 5435 mutex_exit(&msp->ms_lock); 5436 } 5437 5438 void 5439 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5440 uint64_t size, void *arg) 5441 { 5442 (void) inner_offset; 5443 boolean_t *checkpoint = arg; 5444 5445 ASSERT3P(checkpoint, !=, NULL); 5446 5447 if (vd->vdev_ops->vdev_op_remap != NULL) 5448 vdev_indirect_mark_obsolete(vd, offset, size); 5449 else 5450 metaslab_free_impl(vd, offset, size, *checkpoint); 5451 } 5452 5453 static void 5454 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 5455 boolean_t checkpoint) 5456 { 5457 spa_t *spa = vd->vdev_spa; 5458 5459 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5460 5461 if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 5462 return; 5463 5464 if (spa->spa_vdev_removal != NULL && 5465 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 5466 vdev_is_concrete(vd)) { 5467 /* 5468 * Note: we check if the vdev is concrete because when 5469 * we complete the removal, we first change the vdev to be 5470 * an indirect vdev (in open context), and then (in syncing 5471 * context) clear spa_vdev_removal. 
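 *
 * Editorial summary of the dispatch below: frees on a vdev that is being
 * removed go to free_from_removing_vdev(); frees on an already-indirect
 * vdev mark the mapping obsolete and recurse through vdev_op_remap();
 * all other frees go to the concrete metaslab via metaslab_free_concrete().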
5472 */ 5473 free_from_removing_vdev(vd, offset, size); 5474 } else if (vd->vdev_ops->vdev_op_remap != NULL) { 5475 vdev_indirect_mark_obsolete(vd, offset, size); 5476 vd->vdev_ops->vdev_op_remap(vd, offset, size, 5477 metaslab_free_impl_cb, &checkpoint); 5478 } else { 5479 metaslab_free_concrete(vd, offset, size, checkpoint); 5480 } 5481 } 5482 5483 typedef struct remap_blkptr_cb_arg { 5484 blkptr_t *rbca_bp; 5485 spa_remap_cb_t rbca_cb; 5486 vdev_t *rbca_remap_vd; 5487 uint64_t rbca_remap_offset; 5488 void *rbca_cb_arg; 5489 } remap_blkptr_cb_arg_t; 5490 5491 static void 5492 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5493 uint64_t size, void *arg) 5494 { 5495 remap_blkptr_cb_arg_t *rbca = arg; 5496 blkptr_t *bp = rbca->rbca_bp; 5497 5498 /* We cannot remap split blocks. */ 5499 if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 5500 return; 5501 ASSERT0(inner_offset); 5502 5503 if (rbca->rbca_cb != NULL) { 5504 /* 5505 * At this point we know that we are not handling split 5506 * blocks and we invoke the callback on the previous 5507 * vdev, which must be indirect. 5508 */ 5509 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 5510 5511 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 5512 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 5513 5514 /* set up remap_blkptr_cb_arg for the next call */ 5515 rbca->rbca_remap_vd = vd; 5516 rbca->rbca_remap_offset = offset; 5517 } 5518 5519 /* 5520 * The phys birth time is that of dva[0]. This ensures that we know 5521 * when each dva was written, so that resilver can determine which 5522 * blocks need to be scrubbed (i.e. those written during the time 5523 * the vdev was offline). It also ensures that the key used in 5524 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If 5525 * we didn't change the phys_birth, a lookup in the ARC for a 5526 * remapped BP could find the data that was previously stored at 5527 * this vdev + offset. 5528 */ 5529 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, 5530 DVA_GET_VDEV(&bp->blk_dva[0])); 5531 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; 5532 uint64_t physical_birth = vdev_indirect_births_physbirth(vib, 5533 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); 5534 BP_SET_PHYSICAL_BIRTH(bp, physical_birth); 5535 5536 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); 5537 DVA_SET_OFFSET(&bp->blk_dva[0], offset); 5538 } 5539 5540 /* 5541 * If the block pointer contains any indirect DVAs, modify them to refer to 5542 * concrete DVAs. Note that this will sometimes not be possible, leaving 5543 * the indirect DVA in place. This happens if the indirect DVA spans multiple 5544 * segments in the mapping (i.e. it is a "split block"). 5545 * 5546 * If the BP was remapped, the callback is called on the original dva (note 5547 * that the callback can be called multiple times if the original indirect 5548 * DVA refers to another indirect DVA, etc). 5549 * 5550 * Returns TRUE if the BP was remapped. 5551 */ 5552 boolean_t 5553 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) 5554 { 5555 remap_blkptr_cb_arg_t rbca; 5556 5557 if (!zfs_remap_blkptr_enable) 5558 return (B_FALSE); 5559 5560 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) 5561 return (B_FALSE); 5562 5563 /* 5564 * Dedup BPs cannot be remapped, because ddt_phys_select() depends 5565 * on DVA[0] being the same in the BP as in the DDT (dedup table).
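 *
 * For example (editorial): if DVA[0] of a deduped BP were rewritten here,
 * it would no longer match the DVA recorded in the DDT entry, and a later
 * ddt_phys_select() could fail to find the matching physical variant.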
5566 */ 5567 if (BP_GET_DEDUP(bp)) 5568 return (B_FALSE); 5569 5570 /* 5571 * Gang blocks cannot be remapped, because 5572 * zio_checksum_gang_verifier() depends on the DVA[0] that's in 5573 * the BP used to read the gang block header (GBH) being the same 5574 * as the DVA[0] that we allocated for the GBH. 5575 */ 5576 if (BP_IS_GANG(bp)) 5577 return (B_FALSE); 5578 5579 /* 5580 * Embedded BPs have no DVA to remap. 5581 */ 5582 if (BP_GET_NDVAS(bp) < 1) 5583 return (B_FALSE); 5584 5585 /* 5586 * Note: we only remap dva[0]. If we remapped other dvas, we 5587 * would no longer know what their phys birth txg is. 5588 */ 5589 dva_t *dva = &bp->blk_dva[0]; 5590 5591 uint64_t offset = DVA_GET_OFFSET(dva); 5592 uint64_t size = DVA_GET_ASIZE(dva); 5593 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 5594 5595 if (vd->vdev_ops->vdev_op_remap == NULL) 5596 return (B_FALSE); 5597 5598 rbca.rbca_bp = bp; 5599 rbca.rbca_cb = callback; 5600 rbca.rbca_remap_vd = vd; 5601 rbca.rbca_remap_offset = offset; 5602 rbca.rbca_cb_arg = arg; 5603 5604 /* 5605 * remap_blkptr_cb() will be called in order for each level of 5606 * indirection, until a concrete vdev is reached or a split block is 5607 * encountered. rbca_remap_vd and rbca_remap_offset are updated within 5608 * the callback as we go from one indirect vdev to the next (either 5609 * concrete or indirect again). 5610 */ 5611 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); 5612 5613 /* Check if the DVA wasn't remapped because it is a split block */ 5614 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) 5615 return (B_FALSE); 5616 5617 return (B_TRUE); 5618 } 5619 5620 /* 5621 * Undo the allocation of a DVA which happened in the given transaction group. 5622 */ 5623 void 5624 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5625 { 5626 metaslab_t *msp; 5627 vdev_t *vd; 5628 uint64_t vdev = DVA_GET_VDEV(dva); 5629 uint64_t offset = DVA_GET_OFFSET(dva); 5630 uint64_t size = DVA_GET_ASIZE(dva); 5631 5632 ASSERT(DVA_IS_VALID(dva)); 5633 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5634 5635 if (txg > spa_freeze_txg(spa)) 5636 return; 5637 5638 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) || 5639 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 5640 zfs_panic_recover("metaslab_unalloc_dva(): bad DVA %llu:%llu:%llu", 5641 (u_longlong_t)vdev, (u_longlong_t)offset, 5642 (u_longlong_t)size); 5643 return; 5644 } 5645 5646 ASSERT(!vd->vdev_removing); 5647 ASSERT(vdev_is_concrete(vd)); 5648 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 5649 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 5650 5651 if (DVA_GET_GANG(dva)) 5652 size = vdev_gang_header_asize(vd); 5653 5654 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5655 5656 mutex_enter(&msp->ms_lock); 5657 zfs_range_tree_remove(msp->ms_allocating[txg & TXG_MASK], 5658 offset, size); 5659 msp->ms_allocating_total -= size; 5660 5661 VERIFY(!msp->ms_condensing); 5662 VERIFY3U(offset, >=, msp->ms_start); 5663 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 5664 VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) + size, <=, 5665 msp->ms_size); 5666 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5667 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5668 zfs_range_tree_add(msp->ms_allocatable, offset, size); 5669 mutex_exit(&msp->ms_lock); 5670 } 5671 5672 /* 5673 * Free the block represented by the given DVA.
5674 */ 5675 void 5676 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) 5677 { 5678 uint64_t vdev = DVA_GET_VDEV(dva); 5679 uint64_t offset = DVA_GET_OFFSET(dva); 5680 uint64_t size = DVA_GET_ASIZE(dva); 5681 vdev_t *vd = vdev_lookup_top(spa, vdev); 5682 5683 ASSERT(DVA_IS_VALID(dva)); 5684 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 5685 5686 if (DVA_GET_GANG(dva)) { 5687 size = vdev_gang_header_asize(vd); 5688 } 5689 5690 metaslab_free_impl(vd, offset, size, checkpoint); 5691 } 5692 5693 /* 5694 * Reserve some allocation slots. The reservation system must be called 5695 * before we call into the allocator. If there aren't any available slots 5696 * then the I/O will be throttled until an I/O completes and its slots are 5697 * freed up. The function returns true if it was successful in placing 5698 * the reservation. 5699 */ 5700 boolean_t 5701 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, 5702 zio_t *zio, int flags) 5703 { 5704 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5705 uint64_t max = mca->mca_alloc_max_slots; 5706 5707 ASSERT(mc->mc_alloc_throttle_enabled); 5708 if (GANG_ALLOCATION(flags) || (flags & METASLAB_MUST_RESERVE) || 5709 zfs_refcount_count(&mca->mca_alloc_slots) + slots <= max) { 5710 /* 5711 * The potential race between _count() and _add() is covered 5712 * by the allocator lock in most cases, or irrelevant due to 5713 * GANG_ALLOCATION() or METASLAB_MUST_RESERVE being set in others. 5714 * But even in some other, hypothetical scenario, the worst 5715 * that can happen is that a few more I/Os get to allocation 5716 * earlier, which is not a problem. 5717 * 5718 * We reserve the slots individually so that we can unreserve 5719 * them individually when an I/O completes.
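 *
 * Illustrative caller pattern (editorial sketch; the real call sites are
 * in zio.c and differ in detail):
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, 0)) {
 *		- proceed into the allocator; ZIO_FLAG_IO_ALLOCATING is set
 *	} else {
 *		- throttle: requeue the zio and retry after a slot frees up
 *	}
 *	- and once the I/O completes:
 *	metaslab_class_throttle_unreserve(mc, ndvas, allocator, zio);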
5720 */ 5721 zfs_refcount_add_few(&mca->mca_alloc_slots, slots, zio); 5722 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 5723 return (B_TRUE); 5724 } 5725 return (B_FALSE); 5726 } 5727 5728 void 5729 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 5730 int allocator, zio_t *zio) 5731 { 5732 metaslab_class_allocator_t *mca = &mc->mc_allocator[allocator]; 5733 5734 ASSERT(mc->mc_alloc_throttle_enabled); 5735 zfs_refcount_remove_few(&mca->mca_alloc_slots, slots, zio); 5736 } 5737 5738 static int 5739 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 5740 uint64_t txg) 5741 { 5742 metaslab_t *msp; 5743 spa_t *spa = vd->vdev_spa; 5744 int error = 0; 5745 5746 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 5747 return (SET_ERROR(ENXIO)); 5748 5749 ASSERT3P(vd->vdev_ms, !=, NULL); 5750 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 5751 5752 mutex_enter(&msp->ms_lock); 5753 5754 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) { 5755 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 5756 if (error == EBUSY) { 5757 ASSERT(msp->ms_loaded); 5758 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 5759 error = 0; 5760 } 5761 } 5762 5763 if (error == 0 && 5764 !zfs_range_tree_contains(msp->ms_allocatable, offset, size)) 5765 error = SET_ERROR(ENOENT); 5766 5767 if (error || txg == 0) { /* txg == 0 indicates dry run */ 5768 mutex_exit(&msp->ms_lock); 5769 return (error); 5770 } 5771 5772 VERIFY(!msp->ms_condensing); 5773 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 5774 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 5775 VERIFY3U(zfs_range_tree_space(msp->ms_allocatable) - size, <=, 5776 msp->ms_size); 5777 zfs_range_tree_remove(msp->ms_allocatable, offset, size); 5778 zfs_range_tree_clear(msp->ms_trim, offset, size); 5779 5780 if (spa_writeable(spa)) { /* don't dirty if we're zdb(8) */ 5781 metaslab_class_t *mc = msp->ms_group->mg_class; 5782 multilist_sublist_t *mls = 5783 multilist_sublist_lock_obj(&mc->mc_metaslab_txg_list, msp); 5784 if (!multilist_link_active(&msp->ms_class_txg_node)) { 5785 msp->ms_selected_txg = txg; 5786 multilist_sublist_insert_head(mls, msp); 5787 } 5788 multilist_sublist_unlock(mls); 5789 5790 if (zfs_range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 5791 vdev_dirty(vd, VDD_METASLAB, msp, txg); 5792 zfs_range_tree_add(msp->ms_allocating[txg & TXG_MASK], 5793 offset, size); 5794 msp->ms_allocating_total += size; 5795 } 5796 5797 mutex_exit(&msp->ms_lock); 5798 5799 return (0); 5800 } 5801 5802 typedef struct metaslab_claim_cb_arg_t { 5803 uint64_t mcca_txg; 5804 int mcca_error; 5805 } metaslab_claim_cb_arg_t; 5806 5807 static void 5808 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 5809 uint64_t size, void *arg) 5810 { 5811 (void) inner_offset; 5812 metaslab_claim_cb_arg_t *mcca_arg = arg; 5813 5814 if (mcca_arg->mcca_error == 0) { 5815 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 5816 size, mcca_arg->mcca_txg); 5817 } 5818 } 5819 5820 int 5821 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 5822 { 5823 if (vd->vdev_ops->vdev_op_remap != NULL) { 5824 metaslab_claim_cb_arg_t arg; 5825 5826 /* 5827 * Only zdb(8) can claim on indirect vdevs. This is used 5828 * to detect leaks of mapped space (that are not accounted 5829 * for in the obsolete counts, spacemap, or bpobj). 
5830 */ 5831 ASSERT(!spa_writeable(vd->vdev_spa)); 5832 arg.mcca_error = 0; 5833 arg.mcca_txg = txg; 5834 5835 vd->vdev_ops->vdev_op_remap(vd, offset, size, 5836 metaslab_claim_impl_cb, &arg); 5837 5838 if (arg.mcca_error == 0) { 5839 arg.mcca_error = metaslab_claim_concrete(vd, 5840 offset, size, txg); 5841 } 5842 return (arg.mcca_error); 5843 } else { 5844 return (metaslab_claim_concrete(vd, offset, size, txg)); 5845 } 5846 } 5847 5848 /* 5849 * Intent log support: upon opening the pool after a crash, notify the SPA 5850 * of blocks that the intent log has allocated for immediate write, but 5851 * which are still considered free by the SPA because the last transaction 5852 * group didn't commit yet. 5853 */ 5854 static int 5855 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 5856 { 5857 uint64_t vdev = DVA_GET_VDEV(dva); 5858 uint64_t offset = DVA_GET_OFFSET(dva); 5859 uint64_t size = DVA_GET_ASIZE(dva); 5860 vdev_t *vd; 5861 5862 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 5863 return (SET_ERROR(ENXIO)); 5864 } 5865 5866 ASSERT(DVA_IS_VALID(dva)); 5867 5868 if (DVA_GET_GANG(dva)) 5869 size = vdev_gang_header_asize(vd); 5870 5871 return (metaslab_claim_impl(vd, offset, size, txg)); 5872 } 5873 5874 int 5875 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 5876 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 5877 zio_alloc_list_t *zal, zio_t *zio, int allocator) 5878 { 5879 dva_t *dva = bp->blk_dva; 5880 dva_t *hintdva = (hintbp != NULL) ? hintbp->blk_dva : NULL; 5881 int error = 0; 5882 5883 ASSERT0(BP_GET_LOGICAL_BIRTH(bp)); 5884 ASSERT0(BP_GET_PHYSICAL_BIRTH(bp)); 5885 5886 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 5887 5888 if (mc->mc_allocator[allocator].mca_rotor == NULL) { 5889 /* no vdevs in this class */ 5890 spa_config_exit(spa, SCL_ALLOC, FTAG); 5891 return (SET_ERROR(ENOSPC)); 5892 } 5893 5894 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 5895 ASSERT(BP_GET_NDVAS(bp) == 0); 5896 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 5897 ASSERT3P(zal, !=, NULL); 5898 5899 for (int d = 0; d < ndvas; d++) { 5900 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 5901 txg, flags, zal, allocator); 5902 if (error != 0) { 5903 for (d--; d >= 0; d--) { 5904 metaslab_unalloc_dva(spa, &dva[d], txg); 5905 metaslab_group_alloc_decrement(spa, 5906 DVA_GET_VDEV(&dva[d]), zio, flags, 5907 allocator, B_FALSE); 5908 memset(&dva[d], 0, sizeof (dva_t)); 5909 } 5910 spa_config_exit(spa, SCL_ALLOC, FTAG); 5911 return (error); 5912 } else { 5913 /* 5914 * Update the metaslab group's queue depth 5915 * based on the newly allocated dva. 5916 */ 5917 metaslab_group_alloc_increment(spa, 5918 DVA_GET_VDEV(&dva[d]), zio, flags, allocator); 5919 } 5920 } 5921 ASSERT(error == 0); 5922 ASSERT(BP_GET_NDVAS(bp) == ndvas); 5923 5924 spa_config_exit(spa, SCL_ALLOC, FTAG); 5925 5926 BP_SET_BIRTH(bp, txg, 0); 5927 5928 return (0); 5929 } 5930 5931 void 5932 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 5933 { 5934 const dva_t *dva = bp->blk_dva; 5935 int ndvas = BP_GET_NDVAS(bp); 5936 5937 ASSERT(!BP_IS_HOLE(bp)); 5938 ASSERT(!now || BP_GET_LOGICAL_BIRTH(bp) >= spa_syncing_txg(spa)); 5939 5940 /* 5941 * If we have a checkpoint for the pool we need to make sure that 5942 * the blocks that we free that are part of the checkpoint won't be 5943 * reused until the checkpoint is discarded or we revert to it. 
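 *
 * Worked example (editorial, hypothetical txgs): say the checkpoint was
 * taken at txg 100. A block born at txg 90 and freed while syncing txg
 * 120 satisfies birth <= 100 < 120, so it is added to ms_checkpointing;
 * a block born at txg 110 is not part of the checkpoint and is freed
 * normally.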
5944 * 5945 * The checkpoint flag is passed down the metaslab_free code path 5946 * and is set whenever we want to add a block to the checkpoint's 5947 * accounting. That is, we "checkpoint" blocks that existed at the 5948 * time the checkpoint was created and are therefore referenced by 5949 * the checkpointed uberblock. 5950 * 5951 * Note that we don't checkpoint any blocks if the current 5952 * syncing txg <= spa_checkpoint_txg. We want these frees to sync 5953 * normally as they will be referenced by the checkpointed uberblock. 5954 */ 5955 boolean_t checkpoint = B_FALSE; 5956 if (BP_GET_LOGICAL_BIRTH(bp) <= spa->spa_checkpoint_txg && 5957 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { 5958 /* 5959 * At this point, if the block is part of the checkpoint 5960 * there is no way it was created in the current txg. 5961 */ 5962 ASSERT(!now); 5963 ASSERT3U(spa_syncing_txg(spa), ==, txg); 5964 checkpoint = B_TRUE; 5965 } 5966 5967 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 5968 5969 for (int d = 0; d < ndvas; d++) { 5970 if (now) { 5971 metaslab_unalloc_dva(spa, &dva[d], txg); 5972 } else { 5973 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 5974 metaslab_free_dva(spa, &dva[d], checkpoint); 5975 } 5976 } 5977 5978 spa_config_exit(spa, SCL_FREE, FTAG); 5979 } 5980 5981 int 5982 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 5983 { 5984 const dva_t *dva = bp->blk_dva; 5985 int ndvas = BP_GET_NDVAS(bp); 5986 int error = 0; 5987 5988 ASSERT(!BP_IS_HOLE(bp)); 5989 5990 if (txg != 0) { 5991 /* 5992 * First do a dry run to make sure all DVAs are claimable, 5993 * so we don't have to unwind from partial failures below. 5994 */ 5995 if ((error = metaslab_claim(spa, bp, 0)) != 0) 5996 return (error); 5997 } 5998 5999 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 6000 6001 for (int d = 0; d < ndvas; d++) { 6002 error = metaslab_claim_dva(spa, &dva[d], txg); 6003 if (error != 0) 6004 break; 6005 } 6006 6007 spa_config_exit(spa, SCL_ALLOC, FTAG); 6008 6009 ASSERT(error == 0 || txg == 0); 6010 6011 return (error); 6012 } 6013 6014 static void 6015 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, 6016 uint64_t size, void *arg) 6017 { 6018 (void) inner, (void) arg; 6019 6020 if (vd->vdev_ops == &vdev_indirect_ops) 6021 return; 6022 6023 metaslab_check_free_impl(vd, offset, size); 6024 } 6025 6026 static void 6027 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) 6028 { 6029 metaslab_t *msp; 6030 spa_t *spa __maybe_unused = vd->vdev_spa; 6031 6032 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6033 return; 6034 6035 if (vd->vdev_ops->vdev_op_remap != NULL) { 6036 vd->vdev_ops->vdev_op_remap(vd, offset, size, 6037 metaslab_check_free_impl_cb, NULL); 6038 return; 6039 } 6040 6041 ASSERT(vdev_is_concrete(vd)); 6042 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 6043 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 6044 6045 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 6046 6047 mutex_enter(&msp->ms_lock); 6048 if (msp->ms_loaded) { 6049 zfs_range_tree_verify_not_present(msp->ms_allocatable, 6050 offset, size); 6051 } 6052 6053 /* 6054 * Check all segments that currently exist in the freeing pipeline. 6055 * 6056 * It would intuitively make sense to also check the current allocating 6057 * tree since metaslab_unalloc_dva() exists for extents that are 6058 * allocated and freed in the same sync pass within the same txg. 6059 * Unfortunately there are places (e.g.
the ZIL) where we allocate a 6060 * segment but then we free part of it within the same txg 6061 * [see zil_sync()]. Thus, we don't call zfs_range_tree_verify_not_present() 6062 * on the current allocating tree. 6063 */ 6064 zfs_range_tree_verify_not_present(msp->ms_freeing, offset, size); 6065 zfs_range_tree_verify_not_present(msp->ms_checkpointing, offset, size); 6066 zfs_range_tree_verify_not_present(msp->ms_freed, offset, size); 6067 for (int j = 0; j < TXG_DEFER_SIZE; j++) 6068 zfs_range_tree_verify_not_present(msp->ms_defer[j], offset, 6069 size); 6070 zfs_range_tree_verify_not_present(msp->ms_trim, offset, size); 6071 mutex_exit(&msp->ms_lock); 6072 } 6073 6074 void 6075 metaslab_check_free(spa_t *spa, const blkptr_t *bp) 6076 { 6077 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 6078 return; 6079 6080 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 6081 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 6082 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 6083 vdev_t *vd = vdev_lookup_top(spa, vdev); 6084 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 6085 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 6086 6087 if (DVA_GET_GANG(&bp->blk_dva[i])) 6088 size = vdev_gang_header_asize(vd); 6089 6090 ASSERT3P(vd, !=, NULL); 6091 6092 metaslab_check_free_impl(vd, offset, size); 6093 } 6094 spa_config_exit(spa, SCL_VDEV, FTAG); 6095 } 6096 6097 static void 6098 metaslab_group_disable_wait(metaslab_group_t *mg) 6099 { 6100 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6101 while (mg->mg_disabled_updating) { 6102 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6103 } 6104 } 6105 6106 static void 6107 metaslab_group_disabled_increment(metaslab_group_t *mg) 6108 { 6109 ASSERT(MUTEX_HELD(&mg->mg_ms_disabled_lock)); 6110 ASSERT(mg->mg_disabled_updating); 6111 6112 while (mg->mg_ms_disabled >= max_disabled_ms) { 6113 cv_wait(&mg->mg_ms_disabled_cv, &mg->mg_ms_disabled_lock); 6114 } 6115 mg->mg_ms_disabled++; 6116 ASSERT3U(mg->mg_ms_disabled, <=, max_disabled_ms); 6117 } 6118 6119 /* 6120 * Mark the metaslab as disabled to prevent any allocations on this metaslab. 6121 * We must also track how many metaslabs are currently disabled within a 6122 * metaslab group and limit them to prevent allocation failures from 6123 * occurring because all metaslabs are disabled. 6124 */ 6125 void 6126 metaslab_disable(metaslab_t *msp) 6127 { 6128 ASSERT(!MUTEX_HELD(&msp->ms_lock)); 6129 metaslab_group_t *mg = msp->ms_group; 6130 6131 mutex_enter(&mg->mg_ms_disabled_lock); 6132 6133 /* 6134 * To keep an accurate count of how many threads have disabled 6135 * a specific metaslab group, we only allow one thread to mark 6136 * the metaslab group at a time. This ensures that the value of 6137 * mg_ms_disabled will be accurate when we decide to mark a metaslab 6138 * group as disabled. To do this we force all other threads 6139 * to wait until the metaslab group's mg_disabled_updating flag is no 6140 * longer set.
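 *
 * Typical usage (editorial sketch, mirroring the TRIM/initialize pattern
 * noted in metaslab_enable() below):
 *
 *	metaslab_disable(msp);
 *	- modify unallocated space, e.g. issue TRIM I/Os
 *	metaslab_enable(msp, B_TRUE, B_FALSE);	- wait for sync, keep loaded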
6141 */ 6142 metaslab_group_disable_wait(mg); 6143 mg->mg_disabled_updating = B_TRUE; 6144 if (msp->ms_disabled == 0) { 6145 metaslab_group_disabled_increment(mg); 6146 } 6147 mutex_enter(&msp->ms_lock); 6148 msp->ms_disabled++; 6149 mutex_exit(&msp->ms_lock); 6150 6151 mg->mg_disabled_updating = B_FALSE; 6152 cv_broadcast(&mg->mg_ms_disabled_cv); 6153 mutex_exit(&mg->mg_ms_disabled_lock); 6154 } 6155 6156 void 6157 metaslab_enable(metaslab_t *msp, boolean_t sync, boolean_t unload) 6158 { 6159 metaslab_group_t *mg = msp->ms_group; 6160 spa_t *spa = mg->mg_vd->vdev_spa; 6161 6162 /* 6163 * Wait for the outstanding IO to be synced to prevent newly 6164 * allocated blocks from being overwritten. This is used by 6165 * initialize and TRIM, which are modifying unallocated space. 6166 */ 6167 if (sync) 6168 txg_wait_synced(spa_get_dsl(spa), 0); 6169 6170 mutex_enter(&mg->mg_ms_disabled_lock); 6171 mutex_enter(&msp->ms_lock); 6172 if (--msp->ms_disabled == 0) { 6173 mg->mg_ms_disabled--; 6174 cv_broadcast(&mg->mg_ms_disabled_cv); 6175 if (unload) 6176 metaslab_unload(msp); 6177 } 6178 mutex_exit(&msp->ms_lock); 6179 mutex_exit(&mg->mg_ms_disabled_lock); 6180 } 6181 6182 void 6183 metaslab_set_unflushed_dirty(metaslab_t *ms, boolean_t dirty) 6184 { 6185 ms->ms_unflushed_dirty = dirty; 6186 } 6187 6188 static void 6189 metaslab_update_ondisk_flush_data(metaslab_t *ms, dmu_tx_t *tx) 6190 { 6191 vdev_t *vd = ms->ms_group->mg_vd; 6192 spa_t *spa = vd->vdev_spa; 6193 objset_t *mos = spa_meta_objset(spa); 6194 6195 ASSERT(spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)); 6196 6197 metaslab_unflushed_phys_t entry = { 6198 .msp_unflushed_txg = metaslab_unflushed_txg(ms), 6199 }; 6200 uint64_t entry_size = sizeof (entry); 6201 uint64_t entry_offset = ms->ms_id * entry_size; 6202 6203 uint64_t object = 0; 6204 int err = zap_lookup(mos, vd->vdev_top_zap, 6205 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6206 &object); 6207 if (err == ENOENT) { 6208 object = dmu_object_alloc(mos, DMU_OTN_UINT64_METADATA, 6209 SPA_OLD_MAXBLOCKSIZE, DMU_OT_NONE, 0, tx); 6210 VERIFY0(zap_add(mos, vd->vdev_top_zap, 6211 VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS, sizeof (uint64_t), 1, 6212 &object, tx)); 6213 } else { 6214 VERIFY0(err); 6215 } 6216 6217 dmu_write(spa_meta_objset(spa), object, entry_offset, entry_size, 6218 &entry, tx); 6219 } 6220 6221 void 6222 metaslab_set_unflushed_txg(metaslab_t *ms, uint64_t txg, dmu_tx_t *tx) 6223 { 6224 ms->ms_unflushed_txg = txg; 6225 metaslab_update_ondisk_flush_data(ms, tx); 6226 } 6227 6228 boolean_t 6229 metaslab_unflushed_dirty(metaslab_t *ms) 6230 { 6231 return (ms->ms_unflushed_dirty); 6232 } 6233 6234 uint64_t 6235 metaslab_unflushed_txg(metaslab_t *ms) 6236 { 6237 return (ms->ms_unflushed_txg); 6238 } 6239 6240 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, aliquot, U64, ZMOD_RW, 6241 "Allocation granularity (a.k.a.
stripe size)"); 6242 6243 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_load, INT, ZMOD_RW, 6244 "Load all metaslabs when pool is first opened"); 6245 6246 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, debug_unload, INT, ZMOD_RW, 6247 "Prevent metaslabs from being unloaded"); 6248 6249 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_enabled, INT, ZMOD_RW, 6250 "Preload potential metaslabs during reassessment"); 6251 6252 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, preload_limit, UINT, ZMOD_RW, 6253 "Max number of metaslabs per group to preload"); 6254 6255 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay, UINT, ZMOD_RW, 6256 "Delay in txgs after metaslab was last used before unloading"); 6257 6258 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, unload_delay_ms, UINT, ZMOD_RW, 6259 "Delay in milliseconds after metaslab was last used before unloading"); 6260 6261 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, noalloc_threshold, UINT, ZMOD_RW, 6262 "Percentage of metaslab group size that should be free to make it " 6263 "eligible for allocation"); 6264 6265 ZFS_MODULE_PARAM(zfs_mg, zfs_mg_, fragmentation_threshold, UINT, ZMOD_RW, 6266 "Percentage of metaslab group size that should be considered eligible " 6267 "for allocations unless all metaslab groups within the metaslab class " 6268 "have also crossed this threshold"); 6269 6270 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, fragmentation_factor_enabled, INT, 6271 ZMOD_RW, 6272 "Use the fragmentation metric to prefer less fragmented metaslabs"); 6273 6274 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, fragmentation_threshold, UINT, 6275 ZMOD_RW, "Fragmentation for metaslab to allow allocation"); 6276 6277 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, lba_weighting_enabled, INT, ZMOD_RW, 6278 "Prefer metaslabs with lower LBAs"); 6279 6280 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, bias_enabled, INT, ZMOD_RW, 6281 "Enable metaslab group biasing"); 6282 6283 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, segment_weight_enabled, INT, 6284 ZMOD_RW, "Enable segment-based metaslab selection"); 6285 6286 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, switch_threshold, INT, ZMOD_RW, 6287 "Segment-based metaslab selection maximum buckets before switching"); 6288 6289 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging, U64, ZMOD_RW, 6290 "Blocks larger than this size are sometimes forced to be gang blocks"); 6291 6292 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, force_ganging_pct, UINT, ZMOD_RW, 6293 "Percentage of large blocks that will be forced to be gang blocks"); 6294 6295 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_max_search, UINT, ZMOD_RW, 6296 "Max distance (bytes) to search forward before using size tree"); 6297 6298 ZFS_MODULE_PARAM(zfs_metaslab, metaslab_, df_use_largest_segment, INT, ZMOD_RW, 6299 "When looking in size tree, use largest segment instead of exact fit"); 6300 6301 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, max_size_cache_sec, U64, 6302 ZMOD_RW, "How long to trust the cached max chunk size of a metaslab"); 6303 6304 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, mem_limit, UINT, ZMOD_RW, 6305 "Percentage of memory that can be used to store metaslab range trees"); 6306 6307 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, try_hard_before_gang, INT, 6308 ZMOD_RW, "Try hard to allocate before ganging"); 6309 6310 ZFS_MODULE_PARAM(zfs_metaslab, zfs_metaslab_, find_max_tries, UINT, ZMOD_RW, 6311 "Normally only consider this many of the best metaslabs in each vdev"); 6312 6313 ZFS_MODULE_PARAM_CALL(zfs, zfs_, active_allocator, 6314 
param_set_active_allocator, param_get_charp, ZMOD_RW, 6315 "SPA active allocator"); 6316
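
/*
 * Editorial appendix, not part of the original file: a self-contained
 * sketch of the rotor bias computation in metaslab_alloc_dva(), which
 * reproduces the vdev V1 = 16M/128M, V2 = 64M/512M example documented
 * there. The helper name and the constant inputs are hypothetical; the
 * real code also adds 1 to the denominator to guard against dividing by
 * zero, which can round the documented 40%/160% results down by one.
 */
static __maybe_unused int64_t
metaslab_bias_example(void)
{
	int64_t v1_free = 128 - 16;	/* vdev V1: 16M alloc / 128M space */
	int64_t v2_free = 512 - 64;	/* vdev V2: 64M alloc / 512M space */
	int64_t mc_free = v1_free + v2_free;	/* 560M free in the class */
	int64_t groups = 2;		/* plays the role of mc_alloc_groups */

	/* Zero-centered ratios: 100% means "exactly its fair share". */
	int64_t ratio_v1 = (v1_free * groups * 100) / mc_free;	/* 40 */
	int64_t ratio_v2 = (v2_free * groups * 100) / mc_free;	/* 160 */

	/* mg_bias then scales mg_aliquot by (ratio - 100) / 100. */
	return (ratio_v2 - ratio_v1);
}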