/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/zap.h>

#define	GANG_ALLOCATION(flags) \
	((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_force_ganging = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * Since we can touch multiple metaslabs (and their respective space maps)
 * with each transaction group, we benefit from having a smaller space map
 * block size since it allows us to issue more I/O operations scattered
 * around the disk.
 */
int zfs_metaslab_sm_blksz = (1 << 12);

/*
 * The in-core space map representation is more compact than its on-disk form.
 * The zfs_condense_pct determines how much more compact the in-core
 * space map representation must be before we compact it on-disk.
 * Values should be greater than or equal to 100.
 */
int zfs_condense_pct = 200;

/*
 * Condensing a metaslab is not guaranteed to actually reduce the amount of
 * space used on disk. In particular, a space map uses data in increments of
 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the
 * same number of blocks after condensing. Since the goal of condensing is to
 * reduce the number of IOPs required to read the space map, we only want to
 * condense when we can be sure we will reduce the number of blocks used by the
 * space map. Unfortunately, we cannot precisely compute whether or not this is
 * the case in metaslab_should_condense since we are holding ms_lock. Instead,
 * we apply the following heuristic: do not condense a spacemap unless the
 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
 * blocks.
 */
int zfs_metaslab_condense_block_threshold = 4;
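
/*
 * Illustrative example (not part of the heuristic itself): with the defaults
 * above, a metaslab whose on-disk space map spans 6 blocks but whose in-core
 * representation would condense down to 2 blocks is a candidate, because
 *
 *	6 > zfs_metaslab_condense_block_threshold (4), and
 *	6 >= 2 * zfs_condense_pct (200) / 100
 *
 * i.e. the on-disk map is both large enough to matter and at least twice
 * the size a freshly written map would need.
 */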

/*
 * The zfs_mg_noalloc_threshold defines which metaslab groups should
 * be eligible for allocation. The value is defined as a percentage of
 * free space. Metaslab groups that have more free space than
 * zfs_mg_noalloc_threshold are always eligible for allocations. Once
 * a metaslab group's free space is less than or equal to the
 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that
 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all
 * groups are allowed to accept allocations. Gang blocks are always
 * eligible to allocate on any metaslab group. The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e. search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;
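
/*
 * Taken together, the two tunables above mean the dynamic allocator stays in
 * first-fit mode only while the largest contiguous free segment is at least
 * metaslab_df_alloc_threshold (SPA_OLD_MAXBLOCKSIZE, 128KB) and at least
 * metaslab_df_free_pct (4%) of the metaslab is still free; once either
 * condition fails, allocations fall back to best-fit searches of the
 * size-sorted tree. See metaslab_df_alloc() below.
 */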

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslab.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
boolean_t metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
boolean_t metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable remapping of indirect DVAs to their concrete vdevs.
 */
boolean_t zfs_remap_blkptr_enable = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
boolean_t metaslab_trace_enabled = B_TRUE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached allowing for further investigation.
 */
uint64_t metaslab_trace_max_entries = 5000;

static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, boolean_t);
static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
static void metaslab_passivate(metaslab_t *msp, uint64_t weight);
static uint64_t metaslab_weight_from_range_tree(metaslab_t *msp);

kmem_cache_t *metaslab_alloc_trace_cache;

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++)
		zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
		zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (zfs_refcount_t));
	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
	    sizeof (uint64_t));
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}
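
/*
 * Illustrative usage sketch (not part of this file): the spa code creates a
 * metaslab class per allocation class when a pool is activated and destroys
 * it on unload, roughly:
 *
 *	metaslab_class_t *mc = metaslab_class_create(spa, zfs_metaslab_ops);
 *	...
 *	metaslab_class_destroy(mc);
 *
 * metaslab_class_destroy() asserts that the class is empty (no rotor, no
 * allocated or deferred space), so all groups must be passivated and
 * destroyed first.
 */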

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

static void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	spa_t *spa = mc->mc_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels,
		 * or vdevs that are not in this metaslab class.
		 */
		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		uint64_t tspace;
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
		 * Adjust by efi system partition size.
		 */
		tspace = tvd->vdev_max_asize - tvd->vdev_asize;
		if (tspace > mc->mc_spa->spa_bootsize) {
			tspace -= mc->mc_spa->spa_bootsize;
		}
		space += P2ALIGN(tspace, 1ULL << tvd->vdev_ms_shift);
	}
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (space);
}

static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = (const metaslab_t *)x1;
	const metaslab_t *m2 = (const metaslab_t *)x2;

	int sort1 = 0;
	int sort2 = 0;
	if (m1->ms_allocator != -1 && m1->ms_primary)
		sort1 = 1;
	else if (m1->ms_allocator != -1 && !m1->ms_primary)
		sort1 = 2;
	if (m2->ms_allocator != -1 && m2->ms_primary)
		sort2 = 1;
	else if (m2->ms_allocator != -1 && !m2->ms_primary)
		sort2 = 2;

	/*
	 * Sort inactive metaslabs first, then primaries, then secondaries.
	 * When selecting a metaslab to allocate from, an allocator first
	 * tries its primary, then secondary active metaslab. If it doesn't
	 * have active metaslabs, or can't allocate from them, it searches
	 * for an inactive metaslab to activate. If it can't find a suitable
	 * one, it will steal a primary or secondary metaslab from another
	 * allocator.
	 */
	if (sort1 < sort2)
		return (-1);
	if (sort1 > sort2)
		return (1);

	int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
	if (likely(cmp))
		return (cmp);

	IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);

	return (AVL_CMP(m1->ms_start, m2->ms_start));
}
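
/*
 * Example of the resulting tree order (illustrative only): for a group with
 * four metaslabs
 *
 *	A: inactive,  weight 80
 *	B: inactive,  weight 60
 *	C: primary   for allocator 0, weight 90
 *	D: secondary for allocator 0, weight 95
 *
 * metaslab_compare() orders them A, B, C, D: inactive metaslabs first
 * (heaviest first within that tier), then the primary, then the secondary,
 * regardless of their raw weights.
 */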

uint64_t
metaslab_allocated_space(metaslab_t *msp)
{
	return (msp->ms_allocated_space);
}

/*
 * Verify that the space accounting on disk matches the in-core range_trees.
 */
static void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	uint64_t allocating = 0;
	uint64_t sm_free_space, msp_free_space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(!msp->ms_condensing);

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/*
	 * We can only verify the metaslab space when we're called
	 * from syncing context with a loaded metaslab that has an
	 * allocated space map. Calling this in non-syncing context
	 * does not provide a consistent view of the metaslab since
	 * we're performing allocations in the future.
	 */
	if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
	    !msp->ms_loaded)
		return;

	/*
	 * Even though the smp_alloc field can get negative (e.g.
	 * see vdev_checkpoint_sm), that should never be the case
	 * when it comes to a metaslab's space map.
	 */
	ASSERT3S(space_map_allocated(msp->ms_sm), >=, 0);

	sm_free_space = msp->ms_size - metaslab_allocated_space(msp);

	/*
	 * Account for future allocations since we would have
	 * already deducted that space from the ms_allocatable.
	 */
	for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
		allocating +=
		    range_tree_space(msp->ms_allocating[(txg + t) & TXG_MASK]);
	}

	ASSERT3U(msp->ms_deferspace, ==,
	    range_tree_space(msp->ms_defer[0]) +
	    range_tree_space(msp->ms_defer[1]));

	msp_free_space = range_tree_space(msp->ms_allocatable) + allocating +
	    msp->ms_deferspace + range_tree_space(msp->ms_freed);

	VERIFY3U(sm_free_space, ==, msp_free_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
/*
 * Update the allocatable flag and the metaslab group's capacity.
 * The allocatable flag is set to true if the group's free capacity is
 * above zfs_mg_noalloc_threshold and its fragmentation metric (when valid)
 * does not exceed zfs_mg_fragmentation_threshold. If a metaslab group
 * transitions from allocatable to non-allocatable or vice versa then the
 * metaslab group's class is updated to reflect the transition.
 */
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_class_t *mc = mg->mg_class;
	vdev_stat_t *vs = &vd->vdev_stat;
	boolean_t was_allocatable;
	boolean_t was_initialized;

	ASSERT(vd == vd->vdev_top);
	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
	    SCL_ALLOC);

	mutex_enter(&mg->mg_lock);
	was_allocatable = mg->mg_allocatable;
	was_initialized = mg->mg_initialized;

	mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
	    (vs->vs_space + 1);

	mutex_enter(&mc->mc_lock);

	/*
	 * If the metaslab group was just added then it won't
	 * have any space until we finish syncing out this txg.
	 * At that point we will consider it initialized and available
	 * for allocations. We also don't consider non-activated
	 * metaslab groups (e.g. vdevs that are in the middle of being removed)
	 * to be initialized, because they can't be used for allocation.
	 */
	mg->mg_initialized = metaslab_group_initialized(mg);
	if (!was_initialized && mg->mg_initialized) {
		mc->mc_groups++;
	} else if (was_initialized && !mg->mg_initialized) {
		ASSERT3U(mc->mc_groups, >, 0);
		mc->mc_groups--;
	}
	if (mg->mg_initialized)
		mg->mg_no_free_space = B_FALSE;

	/*
	 * A metaslab group is considered allocatable if it has plenty
	 * of free space or is not heavily fragmented. We only take
	 * fragmentation into account if the metaslab group has a valid
	 * fragmentation metric (i.e. a value between 0 and 100).
	 */
	mg->mg_allocatable = (mg->mg_activation_count > 0 &&
	    mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
	    (mg->mg_fragmentation == ZFS_FRAG_INVALID ||
	    mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));

	/*
	 * The mc_alloc_groups maintains a count of the number of
	 * groups in this metaslab class that are still above the
	 * zfs_mg_noalloc_threshold. This is used by the allocating
	 * threads to determine if they should avoid allocations to
	 * a given group. The allocator will avoid allocations to a group
	 * if that group has reached or is below the zfs_mg_noalloc_threshold
	 * and there are still other groups that are above the threshold.
	 * When a group transitions from allocatable to non-allocatable or
	 * vice versa we update the metaslab class to reflect that change.
	 * When the mc_alloc_groups value drops to 0 that means that all
	 * groups have reached the zfs_mg_noalloc_threshold making all groups
	 * eligible for allocations. This effectively means that all devices
	 * are balanced again.
	 */
	if (was_allocatable && !mg->mg_allocatable)
		mc->mc_alloc_groups--;
	else if (!was_allocatable && mg->mg_allocatable)
		mc->mc_alloc_groups++;
	mutex_exit(&mc->mc_lock);

	mutex_exit(&mg->mg_lock);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&mg->mg_ms_initialize_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mg->mg_ms_initialize_cv, NULL, CV_DEFAULT, NULL);
	mg->mg_primaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	mg->mg_secondaries = kmem_zalloc(allocators * sizeof (metaslab_t *),
	    KM_SLEEP);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;
	mg->mg_initialized = B_FALSE;
	mg->mg_no_free_space = B_TRUE;
	mg->mg_allocators = allocators;

	mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (zfs_refcount_t), KM_SLEEP);
	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
	    sizeof (uint64_t), KM_SLEEP);
	for (int i = 0; i < allocators; i++) {
		zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}

	mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
	    minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	taskq_destroy(mg->mg_taskq);
	avl_destroy(&mg->mg_metaslab_tree);
	kmem_free(mg->mg_primaries, mg->mg_allocators * sizeof (metaslab_t *));
	kmem_free(mg->mg_secondaries, mg->mg_allocators *
	    sizeof (metaslab_t *));
	mutex_destroy(&mg->mg_lock);
	mutex_destroy(&mg->mg_ms_initialize_lock);
	cv_destroy(&mg->mg_ms_initialize_cv);

	for (int i = 0; i < mg->mg_allocators; i++) {
		zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
		mg->mg_cur_max_alloc_queue_depth[i] = 0;
	}
	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
	    sizeof (zfs_refcount_t));
	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
	    sizeof (uint64_t));

	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
	metaslab_group_alloc_update(mg);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

/*
 * Passivate a metaslab group and remove it from the allocation rotor.
 * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
 * a metaslab group. This function will momentarily drop spa_config_locks
 * that are lower than the SCL_ALLOC lock (see comment below).
 */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	spa_t *spa = mc->mc_spa;
	metaslab_group_t *mgprev, *mgnext;
	int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);

	ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
	    (SCL_ALLOC | SCL_ZIO));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	/*
	 * The spa_config_lock is an array of rwlocks, ordered as
	 * follows (from highest to lowest):
	 *	SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
	 *	SCL_ZIO > SCL_FREE > SCL_VDEV
	 * (For more information about the spa_config_lock see spa_misc.c)
	 * The higher the lock, the broader its coverage. When we passivate
	 * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
	 * config locks. However, the metaslab group's taskq might be trying
	 * to preload metaslabs so we must drop the SCL_ZIO lock and any
	 * lower locks to allow the I/O to complete. At a minimum,
	 * we continue to hold the SCL_ALLOC lock, which prevents any future
	 * allocations from taking place and any changes to the vdev tree.
	 */
	spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
	taskq_wait(mg->mg_taskq);
	spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
	metaslab_group_alloc_update(mg);
	for (int i = 0; i < mg->mg_allocators; i++) {
		metaslab_t *msp = mg->mg_primaries[i];
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
		msp = mg->mg_secondaries[i];
		if (msp != NULL) {
			mutex_enter(&msp->ms_lock);
			metaslab_passivate(msp,
			    metaslab_weight_from_range_tree(msp));
			mutex_exit(&msp->ms_lock);
		}
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	vdev_stat_t *vs = &vd->vdev_stat;

	return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}

uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
	return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}

void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
	uint64_t *mg_hist;
	vdev_t *vd = mg->mg_vd;
	uint64_t ashift = vd->vdev_ashift;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
	    SPACE_MAP_HISTOGRAM_SIZE + ashift);

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];
		ASSERT(msp != NULL);

		/* skip if not active or not a member */
		if (msp->ms_sm == NULL || msp->ms_group != mg)
			continue;

		for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
			mg_hist[i + ashift] +=
			    msp->ms_sm->sm_phys->smp_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);

	kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		mg->mg_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] +=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}
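
/*
 * Illustrative note on the index math above: the space map histogram is
 * indexed relative to the vdev's ashift, while mg_histogram/mc_histogram
 * are indexed by absolute power of two. For example, with ashift = 9,
 * smp_histogram[3] counts free segments in the 2^12 (4KB) bucket and is
 * accumulated into mg_histogram[12] and mc_histogram[12].
 */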

void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	metaslab_class_t *mc = mg->mg_class;
	uint64_t ashift = mg->mg_vd->vdev_ashift;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	if (msp->ms_sm == NULL)
		return;

	mutex_enter(&mg->mg_lock);
	for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
		ASSERT3U(mg->mg_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);
		ASSERT3U(mc->mc_histogram[i + ashift], >=,
		    msp->ms_sm->sm_phys->smp_histogram[i]);

		mg->mg_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
		mc->mc_histogram[i + ashift] -=
		    msp->ms_sm->sm_phys->smp_histogram[i];
	}
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	ASSERT(msp->ms_group == NULL);
	mutex_enter(&mg->mg_lock);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);

	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_add(mg, msp);
	mutex_exit(&msp->ms_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&msp->ms_lock);
	metaslab_group_histogram_remove(mg, msp);
	mutex_exit(&msp->ms_lock);

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort_impl(metaslab_group_t *mg, metaslab_t *msp,
    uint64_t weight)
{
	ASSERT(MUTEX_HELD(&mg->mg_lock));
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 511].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	metaslab_group_sort_impl(mg, msp, weight);
	mutex_exit(&mg->mg_lock);
}

/*
 * Calculate the fragmentation for a given metaslab group. We can use
 * a simple average here since all metaslabs within the group must have
 * the same size. The return value will be a value between 0 and 100
 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in
 * this group have a fragmentation metric.
 */
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;
	uint64_t fragmentation = 0;
	uint64_t valid_ms = 0;

	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
			continue;
		if (msp->ms_group != mg)
			continue;

		valid_ms++;
		fragmentation += msp->ms_fragmentation;
	}

	if (valid_ms <= mg->mg_vd->vdev_ms_count / 2)
		return (ZFS_FRAG_INVALID);

	fragmentation /= valid_ms;
	ASSERT3U(fragmentation, <=, 100);
	return (fragmentation);
}

/*
 * Determine if a given metaslab group should skip allocations. A metaslab
 * group should avoid allocations if its free capacity is less than the
 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than
 * zfs_mg_fragmentation_threshold and there is at least one metaslab group
 * that can still handle allocations. If the allocation throttle is enabled
 * then we skip allocations to devices that have reached their maximum
 * allocation queue depth unless the selected metaslab group is the only
 * eligible group remaining.
 */
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
    uint64_t psize, int allocator, int d)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_class_t *mc = mg->mg_class;

	/*
	 * We can only consider skipping this metaslab group if it's
	 * in the normal metaslab class and there are other metaslab
	 * groups to select from. Otherwise, we always consider it eligible
	 * for allocations.
	 */
	if ((mc != spa_normal_class(spa) &&
	    mc != spa_special_class(spa) &&
	    mc != spa_dedup_class(spa)) ||
	    mc->mc_groups <= 1)
		return (B_TRUE);

	/*
	 * If the metaslab group's mg_allocatable flag is set (see comments
	 * in metaslab_group_alloc_update() for more information) and
	 * the allocation throttle is disabled then allow allocations to this
	 * device. However, if the allocation throttle is enabled then
	 * check if we have reached our allocation limit (mg_alloc_queue_depth)
	 * to determine if we should allow allocations to this metaslab group.
	 * If all metaslab groups are no longer considered allocatable
	 * (mc_alloc_groups == 0) or we're trying to allocate the smallest
	 * gang block size then we allow allocations on this metaslab group
	 * regardless of the mg_allocatable or throttle settings.
	 */
	if (mg->mg_allocatable) {
		metaslab_group_t *mgp;
		int64_t qdepth;
		uint64_t qmax = mg->mg_cur_max_alloc_queue_depth[allocator];

		if (!mc->mc_alloc_throttle_enabled)
			return (B_TRUE);

		/*
		 * If this metaslab group does not have any free space, then
		 * there is no point in looking further.
		 */
		if (mg->mg_no_free_space)
			return (B_FALSE);

		/*
		 * Relax allocation throttling for ditto blocks. Otherwise,
		 * random imbalances in allocation tend to push all copies
		 * toward the single vdev that happens to look a bit better
		 * at the moment.
		 */
		qmax = qmax * (4 + d) / 4;

		qdepth = zfs_refcount_count(
		    &mg->mg_alloc_queue_depth[allocator]);

		/*
		 * If this metaslab group is below its qmax or it's
		 * the only allocatable metaslab group, then attempt
		 * to allocate from it.
		 */
		if (qdepth < qmax || mc->mc_alloc_groups == 1)
			return (B_TRUE);
		ASSERT3U(mc->mc_alloc_groups, >, 1);

		/*
		 * Since this metaslab group is at or over its qmax, we
		 * need to determine if there are metaslab groups after this
		 * one that might be able to handle this allocation. This is
		 * racy since we can't hold the locks for all metaslab
		 * groups at the same time when we make this check.
		 */
		for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
			qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
			qmax = qmax * (4 + d) / 4;
			qdepth = zfs_refcount_count(
			    &mgp->mg_alloc_queue_depth[allocator]);

			/*
			 * If there is another metaslab group that
			 * might be able to handle the allocation, then
			 * we return false so that we skip this group.
			 */
			if (qdepth < qmax && !mgp->mg_no_free_space)
				return (B_FALSE);
		}

		/*
		 * We didn't find another group to handle the allocation
		 * so we can't skip this metaslab group even though
		 * we are at or over our qmax.
		 */
		return (B_TRUE);

	} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
		return (B_TRUE);
	}
	return (B_FALSE);
}
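
/*
 * Worked example (illustrative): the qmax scaling above relaxes the throttle
 * for ditto copies. With a per-allocator maximum queue depth of 16, the
 * first DVA (d == 0) keeps qmax = 16, while the third copy (d == 2) is
 * allowed qmax = 16 * (4 + 2) / 4 = 24 before this group is skipped in
 * favor of a later group in the rotor.
 */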

/*
 * ==========================================================================
 * Range tree callbacks
 * ==========================================================================
 */

/*
 * Comparison function for the private size-ordered tree. Tree is sorted
 * by size, larger sizes at the end of the tree.
 */
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
	const range_seg_t *r1 = x1;
	const range_seg_t *r2 = x2;
	uint64_t rs_size1 = r1->rs_end - r1->rs_start;
	uint64_t rs_size2 = r2->rs_end - r2->rs_start;

	int cmp = AVL_CMP(rs_size1, rs_size2);
	if (likely(cmp))
		return (cmp);

	return (AVL_CMP(r1->rs_start, r2->rs_start));
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = start;
	rsearch.rs_end = start + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL) {
		rs = avl_nearest(t, where, AVL_AFTER);
	}

	return (rs);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs = metaslab_block_find(t, *cursor, size);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), but it does not guarantee that allocations of other sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_allocatable->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};
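
/*
 * Worked example (illustrative) of the cursor selection used by
 * metaslab_ff_alloc() above and metaslab_df_alloc() below: for a 24KB
 * (0x6000) request, size & -size yields 8KB (0x2000), so the allocation
 * uses the 8KB cursor in ms_lbas[highbit64(0x2000) - 1] and the returned
 * offset is rounded up to an 8KB boundary within the chosen free segment.
 */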

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket), but it does not guarantee that allocations of other sizes
	 * will not exist in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &rt->rt_root;
	uint64_t max_size = metaslab_block_maxsize(msp);
	int free_pct = range_tree_space(rt) * 100 / msp->ms_size;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = &msp->ms_allocatable_by_size;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static metaslab_ops_t metaslab_df_ops = {
	metaslab_df_alloc
};

/*
 * ==========================================================================
 * Cursor fit block allocator -
 * Select the largest region in the metaslab, set the cursor to the beginning
 * of the range and the cursor_end to the end of the range. As allocations
 * are made advance the cursor. Continue allocating from the cursor until
 * the range is exhausted and then find a new range.
 * ==========================================================================
 */
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
	range_tree_t *rt = msp->ms_allocatable;
	avl_tree_t *t = &msp->ms_allocatable_by_size;
	uint64_t *cursor = &msp->ms_lbas[0];
	uint64_t *cursor_end = &msp->ms_lbas[1];
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));

	ASSERT3U(*cursor_end, >=, *cursor);

	if ((*cursor + size) > *cursor_end) {
		range_seg_t *rs;

		rs = avl_last(&msp->ms_allocatable_by_size);
		if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
			return (-1ULL);

		*cursor = rs->rs_start;
		*cursor_end = rs->rs_end;
	}

	offset = *cursor;
	*cursor += size;

	return (offset);
}

static metaslab_ops_t metaslab_cf_ops = {
	metaslab_cf_alloc
};
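
/*
 * Illustrative example for the cursor fit allocator above: if the largest
 * free segment is [1MB, 3MB), the first allocation sets the cursor to 1MB
 * and cursor_end to 3MB, and successive allocations are carved linearly
 * from 1MB upward until less than the requested size remains, at which
 * point avl_last() selects a new largest segment.
 */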

/*
 * ==========================================================================
 * New dynamic fit allocator -
 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
 * contiguous blocks. If no region is found then just use the largest segment
 * that remains.
 * ==========================================================================
 */

/*
 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
 * to request from the allocator.
 */
uint64_t metaslab_ndf_clump_shift = 4;

static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
	avl_tree_t *t = &msp->ms_allocatable->rt_root;
	avl_index_t where;
	range_seg_t *rs, rsearch;
	uint64_t hbit = highbit64(size);
	uint64_t *cursor = &msp->ms_lbas[hbit - 1];
	uint64_t max_size = metaslab_block_maxsize(msp);

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT3U(avl_numnodes(t), ==,
	    avl_numnodes(&msp->ms_allocatable_by_size));

	if (max_size < size)
		return (-1ULL);

	rsearch.rs_start = *cursor;
	rsearch.rs_end = *cursor + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
		t = &msp->ms_allocatable_by_size;

		rsearch.rs_start = 0;
		rsearch.rs_end = MIN(max_size,
		    1ULL << (hbit + metaslab_ndf_clump_shift));
		rs = avl_find(t, &rsearch, &where);
		if (rs == NULL)
			rs = avl_nearest(t, where, AVL_AFTER);
		ASSERT(rs != NULL);
	}

	if ((rs->rs_end - rs->rs_start) >= size) {
		*cursor = rs->rs_start + size;
		return (rs->rs_start);
	}
	return (-1ULL);
}

static metaslab_ops_t metaslab_ndf_ops = {
	metaslab_ndf_alloc
};

metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */

static void
metaslab_aux_histograms_clear(metaslab_t *msp)
{
	/*
	 * Auxiliary histograms are only cleared when resetting them,
	 * which can only happen while the metaslab is loaded.
	 */
	ASSERT(msp->ms_loaded);

	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		bzero(msp->ms_deferhist[t], sizeof (msp->ms_deferhist[t]));
}

static void
metaslab_aux_histogram_add(uint64_t *histogram, uint64_t shift,
    range_tree_t *rt)
{
	/*
	 * This is modeled after space_map_histogram_add(), so refer to that
	 * function for implementation details. We want this to work like
	 * the space map histogram, and not the range tree histogram, as we
	 * are essentially constructing a delta that will be later subtracted
	 * from the space map histogram.
	 */
	int idx = 0;
	for (int i = shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
		ASSERT3U(i, >=, idx + shift);
		histogram[idx] += rt->rt_histogram[i] << (i - idx - shift);

		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}
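
/*
 * Illustrative example of the shift math above: with shift (sm_shift) = 9,
 * a freed 4KB (2^12) segment counted in rt_histogram[12] lands in
 * histogram[12 - 9] = histogram[3], matching the bucket the space map
 * histogram itself would use. Segments larger than the last space map
 * bucket are folded into it, weighted by the << (i - idx - shift) factor.
 */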

/*
 * Called at every sync pass that the metaslab gets synced.
 *
 * The reason is that we want our auxiliary histograms to be updated
 * wherever the metaslab's space map histogram is updated. This way
 * we stay consistent on which parts of the metaslab space map's
 * histogram are currently not available for allocations (e.g. because
 * they are in the defer, freed, and freeing trees).
 */
static void
metaslab_aux_histograms_update(metaslab_t *msp)
{
	space_map_t *sm = msp->ms_sm;
	ASSERT(sm != NULL);

	/*
	 * This is similar to the metaslab's space map histogram updates
	 * that take place in metaslab_sync(). The only difference is that
	 * we only care about segments that haven't made it into the
	 * ms_allocatable tree yet.
	 */
	if (msp->ms_loaded) {
		metaslab_aux_histograms_clear(msp);

		metaslab_aux_histogram_add(msp->ms_synchist,
		    sm->sm_shift, msp->ms_freed);

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			metaslab_aux_histogram_add(msp->ms_deferhist[t],
			    sm->sm_shift, msp->ms_defer[t]);
		}
	}

	metaslab_aux_histogram_add(msp->ms_synchist,
	    sm->sm_shift, msp->ms_freeing);
}

/*
 * Called every time we are done syncing (writing to) the metaslab,
 * i.e. at the end of each sync pass.
 * [see the comment in metaslab_impl.h for ms_synchist, ms_deferhist]
 */
static void
metaslab_aux_histograms_update_done(metaslab_t *msp, boolean_t defer_allowed)
{
	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	space_map_t *sm = msp->ms_sm;

	if (sm == NULL) {
		/*
		 * We came here from metaslab_init() when creating/opening a
		 * pool, looking at a metaslab that hasn't had any allocations
		 * yet.
		 */
		return;
	}

	/*
	 * This is similar to the actions that we take for the ms_freed
	 * and ms_defer trees in metaslab_sync_done().
	 */
	uint64_t hist_index = spa_syncing_txg(spa) % TXG_DEFER_SIZE;
	if (defer_allowed) {
		bcopy(msp->ms_synchist, msp->ms_deferhist[hist_index],
		    sizeof (msp->ms_synchist));
	} else {
		bzero(msp->ms_deferhist[hist_index],
		    sizeof (msp->ms_deferhist[hist_index]));
	}
	bzero(msp->ms_synchist, sizeof (msp->ms_synchist));
}

/*
 * Ensure that the metaslab's weight and fragmentation are consistent
 * with the contents of the histogram (either the range tree's histogram
 * or the space map's, depending on whether the metaslab is loaded).
 */
static void
metaslab_verify_weight_and_frag(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
		return;

	/* see comment in metaslab_verify_unflushed_changes() */
	if (msp->ms_group == NULL)
		return;

	/*
	 * Devices being removed always return a weight of 0 and leave
	 * fragmentation and ms_max_size as is - there is nothing for
	 * us to verify here.
	 */
	vdev_t *vd = msp->ms_group->mg_vd;
	if (vd->vdev_removing)
		return;

	/*
	 * If the metaslab is dirty it probably means that we've done
	 * some allocations or frees that have changed our histograms
	 * and thus the weight.
	 */
	for (int t = 0; t < TXG_SIZE; t++) {
		if (txg_list_member(&vd->vdev_ms_list, msp, t))
			return;
	}

	/*
	 * This verification checks that our in-memory state is consistent
	 * with what's on disk. If the pool is read-only then there aren't
	 * any changes and we just have the initially-loaded state.
	 */
	if (!spa_writeable(msp->ms_group->mg_vd->vdev_spa))
		return;

	/* some extra verification for in-core tree if you can */
	if (msp->ms_loaded) {
		range_tree_stat_verify(msp->ms_allocatable);
		VERIFY(space_map_histogram_verify(msp->ms_sm,
		    msp->ms_allocatable));
	}

	uint64_t weight = msp->ms_weight;
	uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
	boolean_t space_based = WEIGHT_IS_SPACEBASED(msp->ms_weight);
	uint64_t frag = msp->ms_fragmentation;
	uint64_t max_segsize = msp->ms_max_size;

	msp->ms_weight = 0;
	msp->ms_fragmentation = 0;
	msp->ms_max_size = 0;

	/*
	 * This function is used for verification purposes. Regardless of
	 * whether metaslab_weight() thinks this metaslab should be active or
	 * not, we want to ensure that the actual weight (and therefore the
	 * value of ms_weight) would be the same if it was to be recalculated
	 * at this point.
	 */
	msp->ms_weight = metaslab_weight(msp) | was_active;

	VERIFY3U(max_segsize, ==, msp->ms_max_size);

	/*
	 * If the weight type changed then there is no point in doing
	 * verification. Revert fields to their original values.
	 */
	if ((space_based && !WEIGHT_IS_SPACEBASED(msp->ms_weight)) ||
	    (!space_based && WEIGHT_IS_SPACEBASED(msp->ms_weight))) {
		msp->ms_fragmentation = frag;
		msp->ms_weight = weight;
		return;
	}

	VERIFY3U(msp->ms_fragmentation, ==, frag);
	VERIFY3U(msp->ms_weight, ==, weight);
}

/*
 * Wait for any in-progress metaslab loads to complete.
 */
static void
metaslab_load_wait(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	while (msp->ms_loading) {
		ASSERT(!msp->ms_loaded);
		cv_wait(&msp->ms_load_cv, &msp->ms_lock);
	}
}

static int
metaslab_load_impl(metaslab_t *msp)
{
	int error = 0;

	ASSERT(MUTEX_HELD(&msp->ms_lock));
	ASSERT(msp->ms_loading);
	ASSERT(!msp->ms_condensing);

	/*
	 * We temporarily drop the lock to unblock other operations while we
	 * are reading the space map. Therefore, metaslab_sync() and
	 * metaslab_sync_done() can run at the same time as we do.
	 *
	 * metaslab_sync() can append to the space map while we are loading.
	 * Therefore we load only entries that existed when we started the
	 * load. Additionally, metaslab_sync_done() has to wait for the load
	 * to complete because there are potential races like metaslab_load()
	 * loading parts of the space map that are currently being appended
	 * by metaslab_sync(). If we didn't, the ms_allocatable would have
	 * entries that metaslab_sync_done() would try to re-add later.
	 *
	 * That's why before dropping the lock we remember the synced length
	 * of the metaslab and read up to that point of the space map,
	 * ignoring entries appended by metaslab_sync() that happen after we
	 * drop the lock.
	 */
	uint64_t length = msp->ms_synced_length;
	mutex_exit(&msp->ms_lock);

	if (msp->ms_sm != NULL) {
		error = space_map_load_length(msp->ms_sm, msp->ms_allocatable,
		    SM_FREE, length);
	} else {
		/*
		 * The space map has not been allocated yet, so treat
		 * all the space in the metaslab as free and add it to the
		 * ms_allocatable tree.
		 */
		range_tree_add(msp->ms_allocatable,
		    msp->ms_start, msp->ms_size);
	}

	/*
	 * We need to grab the ms_sync_lock to prevent metaslab_sync() from
	 * changing the ms_sm and the metaslab's range trees while we are
	 * about to use them and populate the ms_allocatable. The ms_lock
	 * is insufficient for this because metaslab_sync() doesn't hold
	 * the ms_lock while writing the ms_checkpointing tree to disk.
	 */
	mutex_enter(&msp->ms_sync_lock);
	mutex_enter(&msp->ms_lock);
	ASSERT(!msp->ms_condensing);

	if (error != 0) {
		mutex_exit(&msp->ms_sync_lock);
		return (error);
	}

	ASSERT3P(msp->ms_group, !=, NULL);
	msp->ms_loaded = B_TRUE;

	/*
	 * The ms_allocatable contains the segments that exist in the
	 * ms_defer trees [see ms_synced_length]. Thus we need to remove
	 * them from ms_allocatable as they will be added again in
	 * metaslab_sync_done().
	 */
	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_walk(msp->ms_defer[t],
		    range_tree_remove, msp->ms_allocatable);
	}

	/*
	 * Call metaslab_recalculate_weight_and_sort() now that the
	 * metaslab is loaded so we get the metaslab's real weight.
	 *
	 * Unless this metaslab was created with older software and
	 * has not yet been converted to use segment-based weight, we
	 * expect the new weight to be better or equal to the weight
	 * that the metaslab had while it was not loaded. This is
	 * because the old weight does not take into account the
	 * consolidation of adjacent segments between TXGs. [see
	 * comment for ms_synchist and ms_deferhist[] for more info]
	 */
	uint64_t weight = msp->ms_weight;
	metaslab_recalculate_weight_and_sort(msp);
	if (!WEIGHT_IS_SPACEBASED(weight))
		ASSERT3U(weight, <=, msp->ms_weight);
	msp->ms_max_size = metaslab_block_maxsize(msp);

	spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
	metaslab_verify_space(msp, spa_syncing_txg(spa));
	mutex_exit(&msp->ms_sync_lock);

	return (0);
}

int
metaslab_load(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * There may be another thread loading the same metaslab, if that's
	 * the case just wait until the other thread is done and return.
	 */
	metaslab_load_wait(msp);
	if (msp->ms_loaded)
		return (0);
	VERIFY(!msp->ms_loading);
	ASSERT(!msp->ms_condensing);

	msp->ms_loading = B_TRUE;
	int error = metaslab_load_impl(msp);
	msp->ms_loading = B_FALSE;
	cv_broadcast(&msp->ms_load_cv);

	return (error);
}
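
/*
 * Typical caller pattern (sketch; see e.g. metaslab_init() below): the
 * metaslab lock must be held across the load.
 *
 *	mutex_enter(&msp->ms_lock);
 *	if (!msp->ms_loaded)
 *		error = metaslab_load(msp);
 *	...
 *	mutex_exit(&msp->ms_lock);
 *
 * metaslab_load() itself handles a concurrent loader via
 * metaslab_load_wait(), so the ms_loaded check is only an optimization.
 */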
1745 * available in-core, whether it is loaded or not.
1746 *
1747 * If ms_group == NULL, it means that we came here from metaslab_fini(),
1748 * at which point it doesn't make sense for us to do the recalculation
1749 * and the sorting.
1750 */
1751 if (msp->ms_group != NULL)
1752 metaslab_recalculate_weight_and_sort(msp);
1753 }
1754
1755 static void
1756 metaslab_space_update(vdev_t *vd, metaslab_class_t *mc, int64_t alloc_delta,
1757 int64_t defer_delta, int64_t space_delta)
1758 {
1759 vdev_space_update(vd, alloc_delta, defer_delta, space_delta);
1760
1761 ASSERT3P(vd->vdev_spa->spa_root_vdev, ==, vd->vdev_parent);
1762 ASSERT(vd->vdev_ms_count != 0);
1763
1764 metaslab_class_space_update(mc, alloc_delta, defer_delta, space_delta,
1765 vdev_deflated_space(vd, space_delta));
1766 }
1767
1768 int
1769 metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
1770 metaslab_t **msp)
1771 {
1772 vdev_t *vd = mg->mg_vd;
1773 spa_t *spa = vd->vdev_spa;
1774 objset_t *mos = spa->spa_meta_objset;
1775 metaslab_t *ms;
1776 int error;
1777
1778 ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
1779 mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
1780 mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
1781 cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
1782
1783 ms->ms_id = id;
1784 ms->ms_start = id << vd->vdev_ms_shift;
1785 ms->ms_size = 1ULL << vd->vdev_ms_shift;
1786 ms->ms_allocator = -1;
1787 ms->ms_new = B_TRUE;
1788
1789 /*
1790 * We only open space map objects that already exist. All others
1791 * will be opened when we finally allocate an object for them.
1792 *
1793 * Note:
1794 * When called from vdev_expand(), we can't call into the DMU as
1795 * we are holding the spa_config_lock as a writer and we would
1796 * deadlock [see relevant comment in vdev_metaslab_init()]. In
1797 * that case, the object parameter is zero though, so we won't
1798 * call into the DMU.
1799 */
1800 if (object != 0) {
1801 error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
1802 ms->ms_size, vd->vdev_ashift);
1803
1804 if (error != 0) {
1805 kmem_free(ms, sizeof (metaslab_t));
1806 return (error);
1807 }
1808
1809 ASSERT(ms->ms_sm != NULL);
1810 ASSERT3S(space_map_allocated(ms->ms_sm), >=, 0);
1811 ms->ms_allocated_space = space_map_allocated(ms->ms_sm);
1812 }
1813
1814 /*
1815 * We create the ms_allocatable here, but we don't create the
1816 * other range trees until metaslab_sync_done(). This serves
1817 * two purposes: it allows metaslab_sync_done() to detect the
1818 * addition of new space; and for debugging, it ensures that
1819 * we'd data fault on any attempt to use this metaslab before
1820 * it's ready.
1821 */
1822 ms->ms_allocatable = range_tree_create_impl(&rt_avl_ops,
1823 &ms->ms_allocatable_by_size, metaslab_rangesize_compare, 0);
1824 metaslab_group_add(mg, ms);
1825
1826 metaslab_set_fragmentation(ms);
1827
1828 /*
1829 * If we're opening an existing pool (txg == 0) or creating
1830 * a new one (txg == TXG_INITIAL), all space is available now.
1831 * If we're adding space to an existing pool, the new space
1832 * does not become available until after this txg has synced.
1833 * The metaslab's weight will also be initialized when we sync
1834 * out this txg. This ensures that we don't attempt to allocate
1835 * from it before we have initialized it completely.
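*
* For example (hypothetical txg number): metaslabs created while expanding
* a vdev in txg 157 are dirtied in txg 157, and their space first becomes
* allocatable once that txg has synced and their weights have been
* written out.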
1836 */ 1837 if (txg <= TXG_INITIAL) { 1838 metaslab_sync_done(ms, 0); 1839 metaslab_space_update(vd, mg->mg_class, 1840 metaslab_allocated_space(ms), 0, 0); 1841 } 1842 1843 /* 1844 * If metaslab_debug_load is set and we're initializing a metaslab 1845 * that has an allocated space map object then load the space map 1846 * so that we can verify frees. 1847 */ 1848 if (metaslab_debug_load && ms->ms_sm != NULL) { 1849 mutex_enter(&ms->ms_lock); 1850 VERIFY0(metaslab_load(ms)); 1851 mutex_exit(&ms->ms_lock); 1852 } 1853 1854 if (txg != 0) { 1855 vdev_dirty(vd, 0, NULL, txg); 1856 vdev_dirty(vd, VDD_METASLAB, ms, txg); 1857 } 1858 1859 *msp = ms; 1860 1861 return (0); 1862 } 1863 1864 void 1865 metaslab_fini(metaslab_t *msp) 1866 { 1867 metaslab_group_t *mg = msp->ms_group; 1868 vdev_t *vd = mg->mg_vd; 1869 1870 metaslab_group_remove(mg, msp); 1871 1872 mutex_enter(&msp->ms_lock); 1873 VERIFY(msp->ms_group == NULL); 1874 metaslab_space_update(vd, mg->mg_class, 1875 -metaslab_allocated_space(msp), 0, -msp->ms_size); 1876 1877 space_map_close(msp->ms_sm); 1878 1879 metaslab_unload(msp); 1880 1881 range_tree_destroy(msp->ms_allocatable); 1882 range_tree_destroy(msp->ms_freeing); 1883 range_tree_destroy(msp->ms_freed); 1884 1885 for (int t = 0; t < TXG_SIZE; t++) { 1886 range_tree_destroy(msp->ms_allocating[t]); 1887 } 1888 1889 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 1890 range_tree_destroy(msp->ms_defer[t]); 1891 } 1892 ASSERT0(msp->ms_deferspace); 1893 1894 range_tree_destroy(msp->ms_checkpointing); 1895 1896 for (int t = 0; t < TXG_SIZE; t++) 1897 ASSERT(!txg_list_member(&vd->vdev_ms_list, msp, t)); 1898 1899 mutex_exit(&msp->ms_lock); 1900 cv_destroy(&msp->ms_load_cv); 1901 mutex_destroy(&msp->ms_lock); 1902 mutex_destroy(&msp->ms_sync_lock); 1903 ASSERT3U(msp->ms_allocator, ==, -1); 1904 1905 kmem_free(msp, sizeof (metaslab_t)); 1906 } 1907 1908 #define FRAGMENTATION_TABLE_SIZE 17 1909 1910 /* 1911 * This table defines a segment size based fragmentation metric that will 1912 * allow each metaslab to derive its own fragmentation value. This is done 1913 * by calculating the space in each bucket of the spacemap histogram and 1914 * multiplying that by the fragmentation metric in this table. Doing 1915 * this for all buckets and dividing it by the total amount of free 1916 * space in this metaslab (i.e. the total free space in all buckets) gives 1917 * us the fragmentation metric. This means that a high fragmentation metric 1918 * equates to most of the free space being comprised of small segments. 1919 * Conversely, if the metric is low, then most of the free space is in 1920 * large segments. A 10% change in fragmentation equates to approximately 1921 * double the number of segments. 1922 * 1923 * This table defines 0% fragmented space using 16MB segments. Testing has 1924 * shown that segments that are greater than or equal to 16MB do not suffer 1925 * from drastic performance problems. Using this value, we derive the rest 1926 * of the table. Since the fragmentation value is never stored on disk, it 1927 * is possible to change these calculations in the future. 
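*
* Worked example (hypothetical numbers): if a metaslab's histogram showed
* 1GB of free space in 8K segments and 1GB in 1M segments, the metric would
* be (1G * 90 + 1G * 20) / 2G = 55, i.e. moderately fragmented because half
* of the free space sits in small segments.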
1928 */ 1929 int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = { 1930 100, /* 512B */ 1931 100, /* 1K */ 1932 98, /* 2K */ 1933 95, /* 4K */ 1934 90, /* 8K */ 1935 80, /* 16K */ 1936 70, /* 32K */ 1937 60, /* 64K */ 1938 50, /* 128K */ 1939 40, /* 256K */ 1940 30, /* 512K */ 1941 20, /* 1M */ 1942 15, /* 2M */ 1943 10, /* 4M */ 1944 5, /* 8M */ 1945 0 /* 16M */ 1946 }; 1947 1948 /* 1949 * Calculate the metaslab's fragmentation metric and set ms_fragmentation. 1950 * Setting this value to ZFS_FRAG_INVALID means that the metaslab has not 1951 * been upgraded and does not support this metric. Otherwise, the return 1952 * value should be in the range [0, 100]. 1953 */ 1954 static void 1955 metaslab_set_fragmentation(metaslab_t *msp) 1956 { 1957 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1958 uint64_t fragmentation = 0; 1959 uint64_t total = 0; 1960 boolean_t feature_enabled = spa_feature_is_enabled(spa, 1961 SPA_FEATURE_SPACEMAP_HISTOGRAM); 1962 1963 if (!feature_enabled) { 1964 msp->ms_fragmentation = ZFS_FRAG_INVALID; 1965 return; 1966 } 1967 1968 /* 1969 * A null space map means that the entire metaslab is free 1970 * and thus is not fragmented. 1971 */ 1972 if (msp->ms_sm == NULL) { 1973 msp->ms_fragmentation = 0; 1974 return; 1975 } 1976 1977 /* 1978 * If this metaslab's space map has not been upgraded, flag it 1979 * so that we upgrade next time we encounter it. 1980 */ 1981 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 1982 uint64_t txg = spa_syncing_txg(spa); 1983 vdev_t *vd = msp->ms_group->mg_vd; 1984 1985 /* 1986 * If we've reached the final dirty txg, then we must 1987 * be shutting down the pool. We don't want to dirty 1988 * any data past this point so skip setting the condense 1989 * flag. We can retry this action the next time the pool 1990 * is imported. 1991 */ 1992 if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) { 1993 msp->ms_condense_wanted = B_TRUE; 1994 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 1995 zfs_dbgmsg("txg %llu, requesting force condense: " 1996 "ms_id %llu, vdev_id %llu", txg, msp->ms_id, 1997 vd->vdev_id); 1998 } 1999 msp->ms_fragmentation = ZFS_FRAG_INVALID; 2000 return; 2001 } 2002 2003 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2004 uint64_t space = 0; 2005 uint8_t shift = msp->ms_sm->sm_shift; 2006 2007 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 2008 FRAGMENTATION_TABLE_SIZE - 1); 2009 2010 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 2011 continue; 2012 2013 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 2014 total += space; 2015 2016 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 2017 fragmentation += space * zfs_frag_table[idx]; 2018 } 2019 2020 if (total > 0) 2021 fragmentation /= total; 2022 ASSERT3U(fragmentation, <=, 100); 2023 2024 msp->ms_fragmentation = fragmentation; 2025 } 2026 2027 /* 2028 * Compute a weight -- a selection preference value -- for the given metaslab. 2029 * This is based on the amount of free space, the level of fragmentation, 2030 * the LBA range, and whether the metaslab is loaded. 2031 */ 2032 static uint64_t 2033 metaslab_space_weight(metaslab_t *msp) 2034 { 2035 metaslab_group_t *mg = msp->ms_group; 2036 vdev_t *vd = mg->mg_vd; 2037 uint64_t weight, space; 2038 2039 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2040 ASSERT(!vd->vdev_removing); 2041 2042 /* 2043 * The baseline weight is the metaslab's free space. 
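* For example (hypothetical sizes): a 16GB metaslab with 4GB allocated
* starts with a baseline weight of 12GB before the fragmentation and LBA
* adjustments below are applied.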
2044 */
2045 space = msp->ms_size - metaslab_allocated_space(msp);
2046
2047 if (metaslab_fragmentation_factor_enabled &&
2048 msp->ms_fragmentation != ZFS_FRAG_INVALID) {
2049 /*
2050 * Use the fragmentation information to inversely scale
2051 * down the baseline weight. We need to ensure that we
2052 * don't exclude this metaslab completely when it's 100%
2053 * fragmented. To avoid this we reduce the fragmentation value
2054 * by 1.
2055 */
2056 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
2057
2058 /*
2059 * If space < SPA_MINBLOCKSIZE, then we will not allocate from
2060 * this metaslab again. The fragmentation metric may have
2061 * decreased the space to something smaller than
2062 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
2063 * so that we can consume any remaining space.
2064 */
2065 if (space > 0 && space < SPA_MINBLOCKSIZE)
2066 space = SPA_MINBLOCKSIZE;
2067 }
2068 weight = space;
2069
2070 /*
2071 * Modern disks have uniform bit density and constant angular velocity.
2072 * Therefore, the outer recording zones are faster (higher bandwidth)
2073 * than the inner zones by the ratio of outer to inner track diameter,
2074 * which is typically around 2:1. We account for this by assigning
2075 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
2076 * In effect, this means that we'll select the metaslab with the most
2077 * free bandwidth rather than simply the one with the most free space.
2078 */
2079 if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
2080 weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
2081 ASSERT(weight >= space && weight <= 2 * space);
2082 }
2083
2084 /*
2085 * If this metaslab is one we're actively using, adjust its
2086 * weight to make it preferable to any inactive metaslab so
2087 * we'll polish it off. If the fragmentation on this metaslab
2088 * has exceeded our threshold, then don't mark it active.
2089 */
2090 if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
2091 msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
2092 weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
2093 }
2094
2095 WEIGHT_SET_SPACEBASED(weight);
2096 return (weight);
2097 }
2098
2099 /*
2100 * Return the weight of the specified metaslab, according to the segment-based
2101 * weighting algorithm. The metaslab must be loaded. This function can
2102 * be called within a sync pass since it relies only on the metaslab's
2103 * range tree which is always accurate when the metaslab is loaded.
2104 */
2105 static uint64_t
2106 metaslab_weight_from_range_tree(metaslab_t *msp)
2107 {
2108 uint64_t weight = 0;
2109 uint32_t segments = 0;
2110
2111 ASSERT(msp->ms_loaded);
2112
2113 for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
2114 i--) {
2115 uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
2116 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
2117
2118 segments <<= 1;
2119 segments += msp->ms_allocatable->rt_histogram[i];
2120
2121 /*
2122 * The range tree provides more precision than the space map
2123 * and must be downgraded so that all values fit within the
2124 * space map's histogram. This allows us to compare loaded
2125 * vs. unloaded metaslabs to determine which metaslab is
2126 * considered "best".
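*
* For instance, assuming a hypothetical ashift of 12 and a space map
* histogram of 32 buckets, max_idx = 32 + 12 - 1 = 43; counts from range
* tree buckets above 43 are carried down (doubling at each step) and
* reported at index 43.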
2127 */ 2128 if (i > max_idx) 2129 continue; 2130 2131 if (segments != 0) { 2132 WEIGHT_SET_COUNT(weight, segments); 2133 WEIGHT_SET_INDEX(weight, i); 2134 WEIGHT_SET_ACTIVE(weight, 0); 2135 break; 2136 } 2137 } 2138 return (weight); 2139 } 2140 2141 /* 2142 * Calculate the weight based on the on-disk histogram. This should only 2143 * be called after a sync pass has completely finished since the on-disk 2144 * information is updated in metaslab_sync(). 2145 */ 2146 static uint64_t 2147 metaslab_weight_from_spacemap(metaslab_t *msp) 2148 { 2149 space_map_t *sm = msp->ms_sm; 2150 ASSERT(!msp->ms_loaded); 2151 ASSERT(sm != NULL); 2152 ASSERT3U(space_map_object(sm), !=, 0); 2153 ASSERT3U(sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 2154 2155 /* 2156 * Create a joint histogram from all the segments that have made 2157 * it to the metaslab's space map histogram, that are not yet 2158 * available for allocation because they are still in the freeing 2159 * pipeline (e.g. freeing, freed, and defer trees). Then subtract 2160 * these segments from the space map's histogram to get a more 2161 * accurate weight. 2162 */ 2163 uint64_t deferspace_histogram[SPACE_MAP_HISTOGRAM_SIZE] = {0}; 2164 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 2165 deferspace_histogram[i] += msp->ms_synchist[i]; 2166 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2167 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 2168 deferspace_histogram[i] += msp->ms_deferhist[t][i]; 2169 } 2170 } 2171 2172 uint64_t weight = 0; 2173 for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) { 2174 ASSERT3U(sm->sm_phys->smp_histogram[i], >=, 2175 deferspace_histogram[i]); 2176 uint64_t count = 2177 sm->sm_phys->smp_histogram[i] - deferspace_histogram[i]; 2178 if (count != 0) { 2179 WEIGHT_SET_COUNT(weight, count); 2180 WEIGHT_SET_INDEX(weight, i + sm->sm_shift); 2181 WEIGHT_SET_ACTIVE(weight, 0); 2182 break; 2183 } 2184 } 2185 return (weight); 2186 } 2187 2188 /* 2189 * Compute a segment-based weight for the specified metaslab. The weight 2190 * is determined by highest bucket in the histogram. The information 2191 * for the highest bucket is encoded into the weight value. 2192 */ 2193 static uint64_t 2194 metaslab_segment_weight(metaslab_t *msp) 2195 { 2196 metaslab_group_t *mg = msp->ms_group; 2197 uint64_t weight = 0; 2198 uint8_t shift = mg->mg_vd->vdev_ashift; 2199 2200 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2201 2202 /* 2203 * The metaslab is completely free. 2204 */ 2205 if (metaslab_allocated_space(msp) == 0) { 2206 int idx = highbit64(msp->ms_size) - 1; 2207 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 2208 2209 if (idx < max_idx) { 2210 WEIGHT_SET_COUNT(weight, 1ULL); 2211 WEIGHT_SET_INDEX(weight, idx); 2212 } else { 2213 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 2214 WEIGHT_SET_INDEX(weight, max_idx); 2215 } 2216 WEIGHT_SET_ACTIVE(weight, 0); 2217 ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 2218 2219 return (weight); 2220 } 2221 2222 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 2223 2224 /* 2225 * If the metaslab is fully allocated then just make the weight 0. 2226 */ 2227 if (metaslab_allocated_space(msp) == msp->ms_size) 2228 return (0); 2229 /* 2230 * If the metaslab is already loaded, then use the range tree to 2231 * determine the weight. Otherwise, we rely on the space map information 2232 * to generate the weight. 
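* Either way, the weight encodes the highest occupied bucket. As an
* illustration (made-up values), an index of 20 with a count of 15
* advertises 15 free segments in the 1MB-2MB range, and
* metaslab_should_allocate() below will then accept any request smaller
* than 2MB.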
2233 */ 2234 if (msp->ms_loaded) { 2235 weight = metaslab_weight_from_range_tree(msp); 2236 } else { 2237 weight = metaslab_weight_from_spacemap(msp); 2238 } 2239 2240 /* 2241 * If the metaslab was active the last time we calculated its weight 2242 * then keep it active. We want to consume the entire region that 2243 * is associated with this weight. 2244 */ 2245 if (msp->ms_activation_weight != 0 && weight != 0) 2246 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 2247 return (weight); 2248 } 2249 2250 /* 2251 * Determine if we should attempt to allocate from this metaslab. If the 2252 * metaslab has a maximum size then we can quickly determine if the desired 2253 * allocation size can be satisfied. Otherwise, if we're using segment-based 2254 * weighting then we can determine the maximum allocation that this metaslab 2255 * can accommodate based on the index encoded in the weight. If we're using 2256 * space-based weights then rely on the entire weight (excluding the weight 2257 * type bit). 2258 */ 2259 boolean_t 2260 metaslab_should_allocate(metaslab_t *msp, uint64_t asize) 2261 { 2262 boolean_t should_allocate; 2263 2264 if (msp->ms_max_size != 0) 2265 return (msp->ms_max_size >= asize); 2266 2267 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 2268 /* 2269 * The metaslab segment weight indicates segments in the 2270 * range [2^i, 2^(i+1)), where i is the index in the weight. 2271 * Since the asize might be in the middle of the range, we 2272 * should attempt the allocation if asize < 2^(i+1). 2273 */ 2274 should_allocate = (asize < 2275 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 2276 } else { 2277 should_allocate = (asize <= 2278 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 2279 } 2280 return (should_allocate); 2281 } 2282 2283 static uint64_t 2284 metaslab_weight(metaslab_t *msp) 2285 { 2286 vdev_t *vd = msp->ms_group->mg_vd; 2287 spa_t *spa = vd->vdev_spa; 2288 uint64_t weight; 2289 2290 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2291 2292 /* 2293 * If this vdev is in the process of being removed, there is nothing 2294 * for us to do here. 2295 */ 2296 if (vd->vdev_removing) 2297 return (0); 2298 2299 metaslab_set_fragmentation(msp); 2300 2301 /* 2302 * Update the maximum size if the metaslab is loaded. This will 2303 * ensure that we get an accurate maximum size if newly freed space 2304 * has been added back into the free tree. 2305 */ 2306 if (msp->ms_loaded) 2307 msp->ms_max_size = metaslab_block_maxsize(msp); 2308 else 2309 ASSERT0(msp->ms_max_size); 2310 2311 /* 2312 * Segment-based weighting requires space map histogram support. 2313 */ 2314 if (zfs_metaslab_segment_weight_enabled && 2315 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 2316 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 2317 sizeof (space_map_phys_t))) { 2318 weight = metaslab_segment_weight(msp); 2319 } else { 2320 weight = metaslab_space_weight(msp); 2321 } 2322 return (weight); 2323 } 2324 2325 void 2326 metaslab_recalculate_weight_and_sort(metaslab_t *msp) 2327 { 2328 /* note: we preserve the mask (e.g. indication of primary, etc..) */ 2329 uint64_t was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2330 metaslab_group_sort(msp->ms_group, msp, 2331 metaslab_weight(msp) | was_active); 2332 } 2333 2334 static int 2335 metaslab_activate_allocator(metaslab_group_t *mg, metaslab_t *msp, 2336 int allocator, uint64_t activation_weight) 2337 { 2338 /* 2339 * If we're activating for the claim code, we don't want to actually 2340 * set the metaslab up for a specific allocator. 
2341 */ 2342 if (activation_weight == METASLAB_WEIGHT_CLAIM) 2343 return (0); 2344 metaslab_t **arr = (activation_weight == METASLAB_WEIGHT_PRIMARY ? 2345 mg->mg_primaries : mg->mg_secondaries); 2346 2347 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2348 mutex_enter(&mg->mg_lock); 2349 if (arr[allocator] != NULL) { 2350 mutex_exit(&mg->mg_lock); 2351 return (EEXIST); 2352 } 2353 2354 arr[allocator] = msp; 2355 ASSERT3S(msp->ms_allocator, ==, -1); 2356 msp->ms_allocator = allocator; 2357 msp->ms_primary = (activation_weight == METASLAB_WEIGHT_PRIMARY); 2358 mutex_exit(&mg->mg_lock); 2359 2360 return (0); 2361 } 2362 2363 static int 2364 metaslab_activate(metaslab_t *msp, int allocator, uint64_t activation_weight) 2365 { 2366 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2367 2368 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { 2369 int error = metaslab_load(msp); 2370 if (error != 0) { 2371 metaslab_group_sort(msp->ms_group, msp, 0); 2372 return (error); 2373 } 2374 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) != 0) { 2375 /* 2376 * The metaslab was activated for another allocator 2377 * while we were waiting, we should reselect. 2378 */ 2379 return (EBUSY); 2380 } 2381 if ((error = metaslab_activate_allocator(msp->ms_group, msp, 2382 allocator, activation_weight)) != 0) { 2383 return (error); 2384 } 2385 2386 msp->ms_activation_weight = msp->ms_weight; 2387 metaslab_group_sort(msp->ms_group, msp, 2388 msp->ms_weight | activation_weight); 2389 } 2390 ASSERT(msp->ms_loaded); 2391 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 2392 2393 return (0); 2394 } 2395 2396 static void 2397 metaslab_passivate_allocator(metaslab_group_t *mg, metaslab_t *msp, 2398 uint64_t weight) 2399 { 2400 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2401 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM) { 2402 metaslab_group_sort(mg, msp, weight); 2403 return; 2404 } 2405 2406 mutex_enter(&mg->mg_lock); 2407 ASSERT3P(msp->ms_group, ==, mg); 2408 if (msp->ms_primary) { 2409 ASSERT3U(0, <=, msp->ms_allocator); 2410 ASSERT3U(msp->ms_allocator, <, mg->mg_allocators); 2411 ASSERT3P(mg->mg_primaries[msp->ms_allocator], ==, msp); 2412 ASSERT(msp->ms_weight & METASLAB_WEIGHT_PRIMARY); 2413 mg->mg_primaries[msp->ms_allocator] = NULL; 2414 } else { 2415 ASSERT(msp->ms_weight & METASLAB_WEIGHT_SECONDARY); 2416 ASSERT3P(mg->mg_secondaries[msp->ms_allocator], ==, msp); 2417 mg->mg_secondaries[msp->ms_allocator] = NULL; 2418 } 2419 msp->ms_allocator = -1; 2420 metaslab_group_sort_impl(mg, msp, weight); 2421 mutex_exit(&mg->mg_lock); 2422 } 2423 2424 static void 2425 metaslab_passivate(metaslab_t *msp, uint64_t weight) 2426 { 2427 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE; 2428 2429 /* 2430 * If size < SPA_MINBLOCKSIZE, then we will not allocate from 2431 * this metaslab again. In that case, it had better be empty, 2432 * or we would be leaving space on the table. 2433 */ 2434 ASSERT(size >= SPA_MINBLOCKSIZE || 2435 range_tree_is_empty(msp->ms_allocatable)); 2436 ASSERT0(weight & METASLAB_ACTIVE_MASK); 2437 2438 msp->ms_activation_weight = 0; 2439 metaslab_passivate_allocator(msp->ms_group, msp, weight); 2440 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); 2441 } 2442 2443 /* 2444 * Segment-based metaslabs are activated once and remain active until 2445 * we either fail an allocation attempt (similar to space-based metaslabs) 2446 * or have exhausted the free space in zfs_metaslab_switch_threshold 2447 * buckets since the metaslab was activated. 
This function checks to see
2448 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the
2449 * metaslab and passivates it proactively. This will allow us to select a
2450 * metaslab with a larger contiguous region, if any remain within this
2451 * metaslab group. If we're in sync pass > 1, then we continue using this
2452 * metaslab so that we don't dirty more blocks and cause more sync passes.
2453 */
2454 void
2455 metaslab_segment_may_passivate(metaslab_t *msp)
2456 {
2457 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2458
2459 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
2460 return;
2461
2462 /*
2463 * Since we are in the middle of a sync pass, the most accurate
2464 * information that is accessible to us is the in-core range tree
2465 * histogram; calculate the new weight based on that information.
2466 */
2467 uint64_t weight = metaslab_weight_from_range_tree(msp);
2468 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
2469 int current_idx = WEIGHT_GET_INDEX(weight);
2470
2471 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
2472 metaslab_passivate(msp, weight);
2473 }
2474
2475 static void
2476 metaslab_preload(void *arg)
2477 {
2478 metaslab_t *msp = arg;
2479 spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
2480
2481 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
2482
2483 mutex_enter(&msp->ms_lock);
2484 (void) metaslab_load(msp);
2485 msp->ms_selected_txg = spa_syncing_txg(spa);
2486 mutex_exit(&msp->ms_lock);
2487 }
2488
2489 static void
2490 metaslab_group_preload(metaslab_group_t *mg)
2491 {
2492 spa_t *spa = mg->mg_vd->vdev_spa;
2493 metaslab_t *msp;
2494 avl_tree_t *t = &mg->mg_metaslab_tree;
2495 int m = 0;
2496
2497 if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
2498 taskq_wait(mg->mg_taskq);
2499 return;
2500 }
2501
2502 mutex_enter(&mg->mg_lock);
2503
2504 /*
2505 * Load the next potential metaslabs.
2506 */
2507 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
2508 ASSERT3P(msp->ms_group, ==, mg);
2509
2510 /*
2511 * We preload only the maximum number of metaslabs specified
2512 * by metaslab_preload_limit. If a metaslab is being forced
2513 * to condense then we preload it too. This will ensure
2514 * that force condensing happens in the next txg.
2515 */
2516 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
2517 continue;
2518 }
2519
2520 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
2521 msp, TQ_SLEEP) != TASKQID_INVALID);
2522 }
2523 mutex_exit(&mg->mg_lock);
2524 }
2525
2526 /*
2527 * Determine if the space map's on-disk footprint is past our tolerance
2528 * for inefficiency. We would like to use the following criteria to make
2529 * our decision:
2530 *
2531 * 1. The size of the space map object should not dramatically increase as a
2532 * result of writing out the free space range tree.
2533 *
2534 * 2. The minimal on-disk space map representation is zfs_condense_pct/100
2535 * times the size of the free space range tree representation
2536 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
2537 *
2538 * 3. The on-disk size of the space map should actually decrease.
2539 *
2540 * Unfortunately, we cannot compute the on-disk size of the space map in this
2541 * context because we cannot accurately compute the effects of compression, etc.
2542 * Instead, we apply the heuristic described in the block comment for
2543 * zfs_metaslab_condense_block_threshold - we only condense if the space used
2544 * is greater than a threshold number of blocks.
2545 */
2546 static boolean_t
2547 metaslab_should_condense(metaslab_t *msp)
2548 {
2549 space_map_t *sm = msp->ms_sm;
2550 vdev_t *vd = msp->ms_group->mg_vd;
2551 uint64_t vdev_blocksize = 1 << vd->vdev_ashift;
2552 uint64_t current_txg = spa_syncing_txg(vd->vdev_spa);
2553
2554 ASSERT(MUTEX_HELD(&msp->ms_lock));
2555 ASSERT(msp->ms_loaded);
2556
2557 /*
2558 * Allocations and frees in early passes are generally more space
2559 * efficient (in terms of blocks described in space map entries)
2560 * than the ones in later passes (e.g. we don't compress after
2561 * sync pass 5) and condensing a metaslab multiple times in a txg
2562 * could degrade performance.
2563 *
2564 * Thus we prefer condensing each metaslab at most once every txg at
2565 * the earliest sync pass possible. If a metaslab is eligible for
2566 * condensing again after being considered for condensing within the
2567 * same txg, it will hopefully be dirty in the next txg where it will
2568 * be condensed at an earlier pass.
2569 */
2570 if (msp->ms_condense_checked_txg == current_txg)
2571 return (B_FALSE);
2572 msp->ms_condense_checked_txg = current_txg;
2573
2574 /*
2575 * We always condense metaslabs that are empty and metaslabs for
2576 * which a condense request has been made.
2577 */
2578 if (avl_is_empty(&msp->ms_allocatable_by_size) ||
2579 msp->ms_condense_wanted)
2580 return (B_TRUE);
2581
2582 uint64_t object_size = space_map_length(msp->ms_sm);
2583 uint64_t optimal_size = space_map_estimate_optimal_size(sm,
2584 msp->ms_allocatable, SM_NO_VDEVID);
2585
2586 dmu_object_info_t doi;
2587 dmu_object_info_from_db(sm->sm_dbuf, &doi);
2588 uint64_t record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
2589
2590 return (object_size >= (optimal_size * zfs_condense_pct / 100) &&
2591 object_size > zfs_metaslab_condense_block_threshold * record_size);
2592 }
2593
2594 /*
2595 * Condense the on-disk space map representation to its minimized form.
2596 * The minimized form consists of a small number of allocations followed by
2597 * the entries of the free range tree.
2598 */
2599 static void
2600 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
2601 {
2602 range_tree_t *condense_tree;
2603 space_map_t *sm = msp->ms_sm;
2604
2605 ASSERT(MUTEX_HELD(&msp->ms_lock));
2606 ASSERT(msp->ms_loaded);
2607
2608 zfs_dbgmsg("condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
2609 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
2610 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
2611 msp->ms_group->mg_vd->vdev_spa->spa_name,
2612 space_map_length(msp->ms_sm),
2613 avl_numnodes(&msp->ms_allocatable->rt_root),
2614 msp->ms_condense_wanted ? "TRUE" : "FALSE");
2615
2616 msp->ms_condense_wanted = B_FALSE;
2617
2618 /*
2619 * Create a range tree that is 100% allocated. We remove segments
2620 * that have been freed in this txg, any deferred frees that exist,
2621 * and any allocations in the future. Removing segments should be
2622 * a relatively inexpensive operation since we expect these trees to
2623 * have a small number of nodes.
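*
* As an illustration (hypothetical sizes): for a metaslab spanning
* [0, 16GB) from which a single 1MB region was freed this txg,
* condense_tree starts out as the one segment [0, 16GB) and ends up as
* two segments with that 1MB hole removed.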
2624 */
2625 condense_tree = range_tree_create(NULL, NULL);
2626 range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
2627
2628 range_tree_walk(msp->ms_freeing, range_tree_remove, condense_tree);
2629 range_tree_walk(msp->ms_freed, range_tree_remove, condense_tree);
2630
2631 for (int t = 0; t < TXG_DEFER_SIZE; t++) {
2632 range_tree_walk(msp->ms_defer[t],
2633 range_tree_remove, condense_tree);
2634 }
2635
2636 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
2637 range_tree_walk(msp->ms_allocating[(txg + t) & TXG_MASK],
2638 range_tree_remove, condense_tree);
2639 }
2640
2641 /*
2642 * We're about to drop the metaslab's lock, thus allowing
2643 * other consumers to change its content. Set the
2644 * metaslab's ms_condensing flag to ensure that
2645 * allocations on this metaslab do not occur while we're
2646 * in the middle of committing it to disk. This is only critical
2647 * for ms_allocatable as all other range trees use per-txg
2648 * views of their content.
2649 */
2650 msp->ms_condensing = B_TRUE;
2651
2652 mutex_exit(&msp->ms_lock);
2653 space_map_truncate(sm, zfs_metaslab_sm_blksz, tx);
2654
2655 /*
2656 * While we would ideally like to create a space map representation
2657 * that consists only of allocation records, doing so can be
2658 * prohibitively expensive because the in-core free tree can be
2659 * large, and therefore computationally expensive to subtract
2660 * from the condense_tree. Instead we sync out two trees, a cheap
2661 * allocation-only tree followed by the in-core free tree. While not
2662 * optimal, this is typically close to optimal, and much cheaper to
2663 * compute.
2664 */
2665 space_map_write(sm, condense_tree, SM_ALLOC, SM_NO_VDEVID, tx);
2666 range_tree_vacate(condense_tree, NULL, NULL);
2667 range_tree_destroy(condense_tree);
2668
2669 space_map_write(sm, msp->ms_allocatable, SM_FREE, SM_NO_VDEVID, tx);
2670 mutex_enter(&msp->ms_lock);
2671 msp->ms_condensing = B_FALSE;
2672 }
2673
2674 /*
2675 * Write a metaslab to disk in the context of the specified transaction group.
2676 */
2677 void
2678 metaslab_sync(metaslab_t *msp, uint64_t txg)
2679 {
2680 metaslab_group_t *mg = msp->ms_group;
2681 vdev_t *vd = mg->mg_vd;
2682 spa_t *spa = vd->vdev_spa;
2683 objset_t *mos = spa_meta_objset(spa);
2684 range_tree_t *alloctree = msp->ms_allocating[txg & TXG_MASK];
2685 dmu_tx_t *tx;
2686 uint64_t object = space_map_object(msp->ms_sm);
2687
2688 ASSERT(!vd->vdev_ishole);
2689
2690 /*
2691 * This metaslab has just been added so there's no work to do now.
2692 */
2693 if (msp->ms_freeing == NULL) {
2694 ASSERT3P(alloctree, ==, NULL);
2695 return;
2696 }
2697
2698 ASSERT3P(alloctree, !=, NULL);
2699 ASSERT3P(msp->ms_freeing, !=, NULL);
2700 ASSERT3P(msp->ms_freed, !=, NULL);
2701 ASSERT3P(msp->ms_checkpointing, !=, NULL);
2702
2703 /*
2704 * Normally, we don't want to process a metaslab if there are no
2705 * allocations or frees to perform. However, if the metaslab is being
2706 * forced to condense and it's loaded, we need to let it through.
2707 */
2708 if (range_tree_is_empty(alloctree) &&
2709 range_tree_is_empty(msp->ms_freeing) &&
2710 range_tree_is_empty(msp->ms_checkpointing) &&
2711 !(msp->ms_loaded && msp->ms_condense_wanted))
2712 return;
2713
2714
2715 VERIFY(txg <= spa_final_dirty_txg(spa));
2716
2717 /*
2718 * The only state that can actually be changing concurrently
2719 * with metaslab_sync() is the metaslab's ms_allocatable. No
2720 * other thread can be modifying this txg's alloc, freeing,
2721 * freed, or space_map_phys_t.
We drop ms_lock whenever we 2722 * could call into the DMU, because the DMU can call down to 2723 * us (e.g. via zio_free()) at any time. 2724 * 2725 * The spa_vdev_remove_thread() can be reading metaslab state 2726 * concurrently, and it is locked out by the ms_sync_lock. 2727 * Note that the ms_lock is insufficient for this, because it 2728 * is dropped by space_map_write(). 2729 */ 2730 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 2731 2732 if (msp->ms_sm == NULL) { 2733 uint64_t new_object; 2734 2735 new_object = space_map_alloc(mos, zfs_metaslab_sm_blksz, tx); 2736 VERIFY3U(new_object, !=, 0); 2737 2738 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 2739 msp->ms_start, msp->ms_size, vd->vdev_ashift)); 2740 2741 ASSERT(msp->ms_sm != NULL); 2742 ASSERT0(metaslab_allocated_space(msp)); 2743 } 2744 2745 if (!range_tree_is_empty(msp->ms_checkpointing) && 2746 vd->vdev_checkpoint_sm == NULL) { 2747 ASSERT(spa_has_checkpoint(spa)); 2748 2749 uint64_t new_object = space_map_alloc(mos, 2750 vdev_standard_sm_blksz, tx); 2751 VERIFY3U(new_object, !=, 0); 2752 2753 VERIFY0(space_map_open(&vd->vdev_checkpoint_sm, 2754 mos, new_object, 0, vd->vdev_asize, vd->vdev_ashift)); 2755 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 2756 2757 /* 2758 * We save the space map object as an entry in vdev_top_zap 2759 * so it can be retrieved when the pool is reopened after an 2760 * export or through zdb. 2761 */ 2762 VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, 2763 vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, 2764 sizeof (new_object), 1, &new_object, tx)); 2765 } 2766 2767 mutex_enter(&msp->ms_sync_lock); 2768 mutex_enter(&msp->ms_lock); 2769 2770 /* 2771 * Note: metaslab_condense() clears the space map's histogram. 2772 * Therefore we must verify and remove this histogram before 2773 * condensing. 2774 */ 2775 metaslab_group_histogram_verify(mg); 2776 metaslab_class_histogram_verify(mg->mg_class); 2777 metaslab_group_histogram_remove(mg, msp); 2778 2779 if (msp->ms_loaded && metaslab_should_condense(msp)) { 2780 metaslab_condense(msp, txg, tx); 2781 } else { 2782 mutex_exit(&msp->ms_lock); 2783 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, 2784 SM_NO_VDEVID, tx); 2785 space_map_write(msp->ms_sm, msp->ms_freeing, SM_FREE, 2786 SM_NO_VDEVID, tx); 2787 mutex_enter(&msp->ms_lock); 2788 } 2789 2790 msp->ms_allocated_space += range_tree_space(alloctree); 2791 ASSERT3U(msp->ms_allocated_space, >=, 2792 range_tree_space(msp->ms_freeing)); 2793 msp->ms_allocated_space -= range_tree_space(msp->ms_freeing); 2794 2795 if (!range_tree_is_empty(msp->ms_checkpointing)) { 2796 ASSERT(spa_has_checkpoint(spa)); 2797 ASSERT3P(vd->vdev_checkpoint_sm, !=, NULL); 2798 2799 /* 2800 * Since we are doing writes to disk and the ms_checkpointing 2801 * tree won't be changing during that time, we drop the 2802 * ms_lock while writing to the checkpoint space map. 2803 */ 2804 mutex_exit(&msp->ms_lock); 2805 space_map_write(vd->vdev_checkpoint_sm, 2806 msp->ms_checkpointing, SM_FREE, SM_NO_VDEVID, tx); 2807 mutex_enter(&msp->ms_lock); 2808 2809 spa->spa_checkpoint_info.sci_dspace += 2810 range_tree_space(msp->ms_checkpointing); 2811 vd->vdev_stat.vs_checkpoint_space += 2812 range_tree_space(msp->ms_checkpointing); 2813 ASSERT3U(vd->vdev_stat.vs_checkpoint_space, ==, 2814 -space_map_allocated(vd->vdev_checkpoint_sm)); 2815 2816 range_tree_vacate(msp->ms_checkpointing, NULL, NULL); 2817 } 2818 2819 if (msp->ms_loaded) { 2820 /* 2821 * When the space map is loaded, we have an accurate 2822 * histogram in the range tree. 
This gives us an opportunity 2823 * to bring the space map's histogram up-to-date so we clear 2824 * it first before updating it. 2825 */ 2826 space_map_histogram_clear(msp->ms_sm); 2827 space_map_histogram_add(msp->ms_sm, msp->ms_allocatable, tx); 2828 2829 /* 2830 * Since we've cleared the histogram we need to add back 2831 * any free space that has already been processed, plus 2832 * any deferred space. This allows the on-disk histogram 2833 * to accurately reflect all free space even if some space 2834 * is not yet available for allocation (i.e. deferred). 2835 */ 2836 space_map_histogram_add(msp->ms_sm, msp->ms_freed, tx); 2837 2838 /* 2839 * Add back any deferred free space that has not been 2840 * added back into the in-core free tree yet. This will 2841 * ensure that we don't end up with a space map histogram 2842 * that is completely empty unless the metaslab is fully 2843 * allocated. 2844 */ 2845 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2846 space_map_histogram_add(msp->ms_sm, 2847 msp->ms_defer[t], tx); 2848 } 2849 } 2850 2851 /* 2852 * Always add the free space from this sync pass to the space 2853 * map histogram. We want to make sure that the on-disk histogram 2854 * accounts for all free space. If the space map is not loaded, 2855 * then we will lose some accuracy but will correct it the next 2856 * time we load the space map. 2857 */ 2858 space_map_histogram_add(msp->ms_sm, msp->ms_freeing, tx); 2859 metaslab_aux_histograms_update(msp); 2860 2861 metaslab_group_histogram_add(mg, msp); 2862 metaslab_group_histogram_verify(mg); 2863 metaslab_class_histogram_verify(mg->mg_class); 2864 2865 /* 2866 * For sync pass 1, we avoid traversing this txg's free range tree 2867 * and instead will just swap the pointers for freeing and freed. 2868 * We can safely do this since the freed_tree is guaranteed to be 2869 * empty on the initial pass. 2870 */ 2871 if (spa_sync_pass(spa) == 1) { 2872 range_tree_swap(&msp->ms_freeing, &msp->ms_freed); 2873 ASSERT0(msp->ms_allocated_this_txg); 2874 } else { 2875 range_tree_vacate(msp->ms_freeing, 2876 range_tree_add, msp->ms_freed); 2877 } 2878 msp->ms_allocated_this_txg += range_tree_space(alloctree); 2879 range_tree_vacate(alloctree, NULL, NULL); 2880 2881 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK])); 2882 ASSERT0(range_tree_space(msp->ms_allocating[TXG_CLEAN(txg) 2883 & TXG_MASK])); 2884 ASSERT0(range_tree_space(msp->ms_freeing)); 2885 ASSERT0(range_tree_space(msp->ms_checkpointing)); 2886 2887 mutex_exit(&msp->ms_lock); 2888 2889 if (object != space_map_object(msp->ms_sm)) { 2890 object = space_map_object(msp->ms_sm); 2891 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 2892 msp->ms_id, sizeof (uint64_t), &object, tx); 2893 } 2894 mutex_exit(&msp->ms_sync_lock); 2895 dmu_tx_commit(tx); 2896 } 2897 2898 /* 2899 * Called after a transaction group has completely synced to mark 2900 * all of the metaslab's free space as usable. 2901 */ 2902 void 2903 metaslab_sync_done(metaslab_t *msp, uint64_t txg) 2904 { 2905 metaslab_group_t *mg = msp->ms_group; 2906 vdev_t *vd = mg->mg_vd; 2907 spa_t *spa = vd->vdev_spa; 2908 range_tree_t **defer_tree; 2909 int64_t alloc_delta, defer_delta; 2910 boolean_t defer_allowed = B_TRUE; 2911 2912 ASSERT(!vd->vdev_ishole); 2913 2914 mutex_enter(&msp->ms_lock); 2915 2916 /* 2917 * If this metaslab is just becoming available, initialize its 2918 * range trees and add its capacity to the vdev. 
2919 */ 2920 if (msp->ms_freed == NULL) { 2921 for (int t = 0; t < TXG_SIZE; t++) { 2922 ASSERT(msp->ms_allocating[t] == NULL); 2923 2924 msp->ms_allocating[t] = range_tree_create(NULL, NULL); 2925 } 2926 2927 ASSERT3P(msp->ms_freeing, ==, NULL); 2928 msp->ms_freeing = range_tree_create(NULL, NULL); 2929 2930 ASSERT3P(msp->ms_freed, ==, NULL); 2931 msp->ms_freed = range_tree_create(NULL, NULL); 2932 2933 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2934 ASSERT(msp->ms_defer[t] == NULL); 2935 2936 msp->ms_defer[t] = range_tree_create(NULL, NULL); 2937 } 2938 2939 ASSERT3P(msp->ms_checkpointing, ==, NULL); 2940 msp->ms_checkpointing = range_tree_create(NULL, NULL); 2941 2942 metaslab_space_update(vd, mg->mg_class, 0, 0, msp->ms_size); 2943 } 2944 ASSERT0(range_tree_space(msp->ms_freeing)); 2945 ASSERT0(range_tree_space(msp->ms_checkpointing)); 2946 2947 defer_tree = &msp->ms_defer[txg % TXG_DEFER_SIZE]; 2948 2949 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 2950 metaslab_class_get_alloc(spa_normal_class(spa)); 2951 if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) { 2952 defer_allowed = B_FALSE; 2953 } 2954 2955 defer_delta = 0; 2956 alloc_delta = msp->ms_allocated_this_txg - 2957 range_tree_space(msp->ms_freed); 2958 if (defer_allowed) { 2959 defer_delta = range_tree_space(msp->ms_freed) - 2960 range_tree_space(*defer_tree); 2961 } else { 2962 defer_delta -= range_tree_space(*defer_tree); 2963 } 2964 2965 metaslab_space_update(vd, mg->mg_class, alloc_delta + defer_delta, 2966 defer_delta, 0); 2967 2968 /* 2969 * If there's a metaslab_load() in progress, wait for it to complete 2970 * so that we have a consistent view of the in-core space map. 2971 */ 2972 metaslab_load_wait(msp); 2973 2974 /* 2975 * Move the frees from the defer_tree back to the free 2976 * range tree (if it's loaded). Swap the freed_tree and 2977 * the defer_tree -- this is safe to do because we've 2978 * just emptied out the defer_tree. 2979 */ 2980 range_tree_vacate(*defer_tree, 2981 msp->ms_loaded ? range_tree_add : NULL, msp->ms_allocatable); 2982 if (defer_allowed) { 2983 range_tree_swap(&msp->ms_freed, defer_tree); 2984 } else { 2985 range_tree_vacate(msp->ms_freed, 2986 msp->ms_loaded ? range_tree_add : NULL, 2987 msp->ms_allocatable); 2988 } 2989 2990 msp->ms_synced_length = space_map_length(msp->ms_sm); 2991 2992 msp->ms_deferspace += defer_delta; 2993 ASSERT3S(msp->ms_deferspace, >=, 0); 2994 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); 2995 if (msp->ms_deferspace != 0) { 2996 /* 2997 * Keep syncing this metaslab until all deferred frees 2998 * are back in circulation. 2999 */ 3000 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 3001 } 3002 metaslab_aux_histograms_update_done(msp, defer_allowed); 3003 3004 if (msp->ms_new) { 3005 msp->ms_new = B_FALSE; 3006 mutex_enter(&mg->mg_lock); 3007 mg->mg_ms_ready++; 3008 mutex_exit(&mg->mg_lock); 3009 } 3010 3011 /* 3012 * Re-sort metaslab within its group now that we've adjusted 3013 * its allocatable space. 3014 */ 3015 metaslab_recalculate_weight_and_sort(msp); 3016 3017 /* 3018 * If the metaslab is loaded and we've not tried to load or allocate 3019 * from it in 'metaslab_unload_delay' txgs, then unload it. 
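* For example, if metaslab_unload_delay were 8 txgs (a hypothetical value),
* a metaslab last selected in txg 100 would be unloaded when
* metaslab_sync_done() runs for a txg later than 108, provided it is not
* being initialized and metaslab_debug_unload is not set.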
3020 */
3021 if (msp->ms_loaded &&
3022 msp->ms_initializing == 0 &&
3023 msp->ms_selected_txg + metaslab_unload_delay < txg) {
3024 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
3025 VERIFY0(range_tree_space(
3026 msp->ms_allocating[(txg + t) & TXG_MASK]));
3027 }
3028 if (msp->ms_allocator != -1) {
3029 metaslab_passivate(msp, msp->ms_weight &
3030 ~METASLAB_ACTIVE_MASK);
3031 }
3032
3033 if (!metaslab_debug_unload)
3034 metaslab_unload(msp);
3035 }
3036
3037 ASSERT0(range_tree_space(msp->ms_allocating[txg & TXG_MASK]));
3038 ASSERT0(range_tree_space(msp->ms_freeing));
3039 ASSERT0(range_tree_space(msp->ms_freed));
3040 ASSERT0(range_tree_space(msp->ms_checkpointing));
3041
3042 msp->ms_allocated_this_txg = 0;
3043 mutex_exit(&msp->ms_lock);
3044 }
3045
3046 void
3047 metaslab_sync_reassess(metaslab_group_t *mg)
3048 {
3049 spa_t *spa = mg->mg_class->mc_spa;
3050
3051 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
3052 metaslab_group_alloc_update(mg);
3053 mg->mg_fragmentation = metaslab_group_fragmentation(mg);
3054
3055 /*
3056 * Preload the next potential metaslabs but only on active
3057 * metaslab groups. We can get into a state where the metaslab
3058 * is no longer active since we dirty metaslabs as we remove
3059 * a device, thus potentially making the metaslab group eligible
3060 * for preloading.
3061 */
3062 if (mg->mg_activation_count > 0) {
3063 metaslab_group_preload(mg);
3064 }
3065 spa_config_exit(spa, SCL_ALLOC, FTAG);
3066 }
3067
3068 /*
3069 * When writing a ditto block (i.e. more than one DVA for a given BP) on
3070 * the same vdev as an existing DVA of this BP, try to allocate it
3071 * on a different metaslab than the existing DVAs (i.e. a unique metaslab).
3072 */
3073 static boolean_t
3074 metaslab_is_unique(metaslab_t *msp, dva_t *dva)
3075 {
3076 uint64_t dva_ms_id;
3077
3078 if (DVA_GET_ASIZE(dva) == 0)
3079 return (B_TRUE);
3080
3081 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
3082 return (B_TRUE);
3083
3084 dva_ms_id = DVA_GET_OFFSET(dva) >> msp->ms_group->mg_vd->vdev_ms_shift;
3085
3086 return (msp->ms_id != dva_ms_id);
3087 }
3088
3089 /*
3090 * ==========================================================================
3091 * Metaslab allocation tracing facility
3092 * ==========================================================================
3093 */
3094 kstat_t *metaslab_trace_ksp;
3095 kstat_named_t metaslab_trace_over_limit;
3096
3097 void
3098 metaslab_alloc_trace_init(void)
3099 {
3100 ASSERT(metaslab_alloc_trace_cache == NULL);
3101 metaslab_alloc_trace_cache = kmem_cache_create(
3102 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
3103 0, NULL, NULL, NULL, NULL, NULL, 0);
3104 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
3105 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
3106 if (metaslab_trace_ksp != NULL) {
3107 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
3108 kstat_named_init(&metaslab_trace_over_limit,
3109 "metaslab_trace_over_limit", KSTAT_DATA_UINT64);
3110 kstat_install(metaslab_trace_ksp);
3111 }
3112 }
3113
3114 void
3115 metaslab_alloc_trace_fini(void)
3116 {
3117 if (metaslab_trace_ksp != NULL) {
3118 kstat_delete(metaslab_trace_ksp);
3119 metaslab_trace_ksp = NULL;
3120 }
3121 kmem_cache_destroy(metaslab_alloc_trace_cache);
3122 metaslab_alloc_trace_cache = NULL;
3123 }
3124
3125 /*
3126 * Add an allocation trace element to the allocation tracing list.
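*
* (Usage note: the metaslab_trace_over_limit counter registered above is a
* named kstat, so it can typically be read from userland with something
* like "kstat -m zfs -n metaslab_trace_stats" on illumos.)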
3127 */
3128 static void
3129 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
3130 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset,
3131 int allocator)
3132 {
3133 if (!metaslab_trace_enabled)
3134 return;
3135
3136 /*
3137 * When the tracing list reaches its maximum we remove
3138 * the second element in the list before adding a new one.
3139 * By removing the second element we preserve the original
3140 * entry as a clue to what allocation steps have already been
3141 * performed.
3142 */
3143 if (zal->zal_size == metaslab_trace_max_entries) {
3144 metaslab_alloc_trace_t *mat_next;
3145 #ifdef DEBUG
3146 panic("too many entries in allocation list");
3147 #endif
3148 atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
3149 zal->zal_size--;
3150 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
3151 list_remove(&zal->zal_list, mat_next);
3152 kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
3153 }
3154
3155 metaslab_alloc_trace_t *mat =
3156 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
3157 list_link_init(&mat->mat_list_node);
3158 mat->mat_mg = mg;
3159 mat->mat_msp = msp;
3160 mat->mat_size = psize;
3161 mat->mat_dva_id = dva_id;
3162 mat->mat_offset = offset;
3163 mat->mat_weight = 0;
3164 mat->mat_allocator = allocator;
3165
3166 if (msp != NULL)
3167 mat->mat_weight = msp->ms_weight;
3168
3169 /*
3170 * The list is part of the zio so locking is not required. Only
3171 * a single thread will perform allocations for a given zio.
3172 */
3173 list_insert_tail(&zal->zal_list, mat);
3174 zal->zal_size++;
3175
3176 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
3177 }
3178
3179 void
3180 metaslab_trace_init(zio_alloc_list_t *zal)
3181 {
3182 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
3183 offsetof(metaslab_alloc_trace_t, mat_list_node));
3184 zal->zal_size = 0;
3185 }
3186
3187 void
3188 metaslab_trace_fini(zio_alloc_list_t *zal)
3189 {
3190 metaslab_alloc_trace_t *mat;
3191
3192 while ((mat = list_remove_head(&zal->zal_list)) != NULL)
3193 kmem_cache_free(metaslab_alloc_trace_cache, mat);
3194 list_destroy(&zal->zal_list);
3195 zal->zal_size = 0;
3196 }
3197
3198 /*
3199 * ==========================================================================
3200 * Metaslab block operations
3201 * ==========================================================================
3202 */
3203
3204 static void
3205 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
3206 int allocator)
3207 {
3208 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3209 (flags & METASLAB_DONT_THROTTLE))
3210 return;
3211
3212 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3213 if (!mg->mg_class->mc_alloc_throttle_enabled)
3214 return;
3215
3216 (void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
3217 }
3218
3219 static void
3220 metaslab_group_increment_qdepth(metaslab_group_t *mg, int allocator)
3221 {
3222 uint64_t max = mg->mg_max_alloc_queue_depth;
3223 uint64_t cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3224 while (cur < max) {
3225 if (atomic_cas_64(&mg->mg_cur_max_alloc_queue_depth[allocator],
3226 cur, cur + 1) == cur) {
3227 atomic_inc_64(
3228 &mg->mg_class->mc_alloc_max_slots[allocator]);
3229 return;
3230 }
3231 cur = mg->mg_cur_max_alloc_queue_depth[allocator];
3232 }
3233 }
3234
3235 void
3236 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
3237 int allocator, boolean_t io_complete)
3238 {
3239 if (!(flags & METASLAB_ASYNC_ALLOC) ||
3240 (flags & METASLAB_DONT_THROTTLE))
3241 return;
3242
3243 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3244 if (!mg->mg_class->mc_alloc_throttle_enabled)
3245 return;
3246
3247 (void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
3248 if (io_complete)
3249 metaslab_group_increment_qdepth(mg, allocator);
3250 }
3251
3252 void
3253 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
3254 int allocator)
3255 {
3256 #ifdef ZFS_DEBUG
3257 const dva_t *dva = bp->blk_dva;
3258 int ndvas = BP_GET_NDVAS(bp);
3259
3260 for (int d = 0; d < ndvas; d++) {
3261 uint64_t vdev = DVA_GET_VDEV(&dva[d]);
3262 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
3263 VERIFY(zfs_refcount_not_held(
3264 &mg->mg_alloc_queue_depth[allocator], tag));
3265 }
3266 #endif
3267 }
3268
3269 static uint64_t
3270 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
3271 {
3272 uint64_t start;
3273 range_tree_t *rt = msp->ms_allocatable;
3274 metaslab_class_t *mc = msp->ms_group->mg_class;
3275
3276 VERIFY(!msp->ms_condensing);
3277 VERIFY0(msp->ms_initializing);
3278
3279 start = mc->mc_ops->msop_alloc(msp, size);
3280 if (start != -1ULL) {
3281 metaslab_group_t *mg = msp->ms_group;
3282 vdev_t *vd = mg->mg_vd;
3283
3284 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
3285 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
3286 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
3287 range_tree_remove(rt, start, size);
3288
3289 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK]))
3290 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
3291
3292 range_tree_add(msp->ms_allocating[txg & TXG_MASK], start, size);
3293
3294 /* Track the last successful allocation */
3295 msp->ms_alloc_txg = txg;
3296 metaslab_verify_space(msp, txg);
3297 }
3298
3299 /*
3300 * Now that we've attempted the allocation we need to update the
3301 * metaslab's maximum block size since it may have changed.
3302 */
3303 msp->ms_max_size = metaslab_block_maxsize(msp);
3304 return (start);
3305 }
3306
3307 /*
3308 * Find the metaslab with the highest weight that is less than what we've
3309 * already tried. In the common case, this means that we will examine each
3310 * metaslab at most once. Note that concurrent callers could reorder metaslabs
3311 * by activation/passivation once we have dropped the mg_lock. If a metaslab is
3312 * activated by another thread, and we fail to allocate from the metaslab we
3313 * have selected, we may not try the newly-activated metaslab, and instead
3314 * activate another metaslab. This is not optimal, but generally does not cause
3315 * any problems (a possible exception being if every metaslab is completely full
3316 * except for the newly-activated metaslab which we fail to examine).
3317 */ 3318 static metaslab_t * 3319 find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight, 3320 dva_t *dva, int d, boolean_t want_unique, uint64_t asize, int allocator, 3321 zio_alloc_list_t *zal, metaslab_t *search, boolean_t *was_active) 3322 { 3323 avl_index_t idx; 3324 avl_tree_t *t = &mg->mg_metaslab_tree; 3325 metaslab_t *msp = avl_find(t, search, &idx); 3326 if (msp == NULL) 3327 msp = avl_nearest(t, idx, AVL_AFTER); 3328 3329 for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 3330 int i; 3331 if (!metaslab_should_allocate(msp, asize)) { 3332 metaslab_trace_add(zal, mg, msp, asize, d, 3333 TRACE_TOO_SMALL, allocator); 3334 continue; 3335 } 3336 3337 /* 3338 * If the selected metaslab is condensing or being 3339 * initialized, skip it. 3340 */ 3341 if (msp->ms_condensing || msp->ms_initializing > 0) 3342 continue; 3343 3344 *was_active = msp->ms_allocator != -1; 3345 /* 3346 * If we're activating as primary, this is our first allocation 3347 * from this disk, so we don't need to check how close we are. 3348 * If the metaslab under consideration was already active, 3349 * we're getting desperate enough to steal another allocator's 3350 * metaslab, so we still don't care about distances. 3351 */ 3352 if (activation_weight == METASLAB_WEIGHT_PRIMARY || *was_active) 3353 break; 3354 3355 for (i = 0; i < d; i++) { 3356 if (want_unique && 3357 !metaslab_is_unique(msp, &dva[i])) 3358 break; /* try another metaslab */ 3359 } 3360 if (i == d) 3361 break; 3362 } 3363 3364 if (msp != NULL) { 3365 search->ms_weight = msp->ms_weight; 3366 search->ms_start = msp->ms_start + 1; 3367 search->ms_allocator = msp->ms_allocator; 3368 search->ms_primary = msp->ms_primary; 3369 } 3370 return (msp); 3371 } 3372 3373 /* ARGSUSED */ 3374 static uint64_t 3375 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 3376 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, 3377 int d, int allocator) 3378 { 3379 metaslab_t *msp = NULL; 3380 uint64_t offset = -1ULL; 3381 uint64_t activation_weight; 3382 3383 activation_weight = METASLAB_WEIGHT_PRIMARY; 3384 for (int i = 0; i < d; i++) { 3385 if (activation_weight == METASLAB_WEIGHT_PRIMARY && 3386 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 3387 activation_weight = METASLAB_WEIGHT_SECONDARY; 3388 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 3389 DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 3390 activation_weight = METASLAB_WEIGHT_CLAIM; 3391 break; 3392 } 3393 } 3394 3395 /* 3396 * If we don't have enough metaslabs active to fill the entire array, we 3397 * just use the 0th slot. 3398 */ 3399 if (mg->mg_ms_ready < mg->mg_allocators * 3) 3400 allocator = 0; 3401 3402 ASSERT3U(mg->mg_vd->vdev_ms_count, >=, 2); 3403 3404 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 3405 search->ms_weight = UINT64_MAX; 3406 search->ms_start = 0; 3407 /* 3408 * At the end of the metaslab tree are the already-active metaslabs, 3409 * first the primaries, then the secondaries. When we resume searching 3410 * through the tree, we need to consider ms_allocator and ms_primary so 3411 * we start in the location right after where we left off, and don't 3412 * accidentally loop forever considering the same metaslabs. 
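* For instance (illustrative): if the previous candidate had weight W and
* start S, find_valid_metaslab() left the search key at { W, S + 1, that
* metaslab's allocator, its primary flag }, so the next AVL lookup resumes
* strictly after the metaslab we just gave up on.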
3413 */ 3414 search->ms_allocator = -1; 3415 search->ms_primary = B_TRUE; 3416 for (;;) { 3417 boolean_t was_active = B_FALSE; 3418 3419 mutex_enter(&mg->mg_lock); 3420 3421 if (activation_weight == METASLAB_WEIGHT_PRIMARY && 3422 mg->mg_primaries[allocator] != NULL) { 3423 msp = mg->mg_primaries[allocator]; 3424 was_active = B_TRUE; 3425 } else if (activation_weight == METASLAB_WEIGHT_SECONDARY && 3426 mg->mg_secondaries[allocator] != NULL) { 3427 msp = mg->mg_secondaries[allocator]; 3428 was_active = B_TRUE; 3429 } else { 3430 msp = find_valid_metaslab(mg, activation_weight, dva, d, 3431 want_unique, asize, allocator, zal, search, 3432 &was_active); 3433 } 3434 3435 mutex_exit(&mg->mg_lock); 3436 if (msp == NULL) { 3437 kmem_free(search, sizeof (*search)); 3438 return (-1ULL); 3439 } 3440 3441 mutex_enter(&msp->ms_lock); 3442 /* 3443 * Ensure that the metaslab we have selected is still 3444 * capable of handling our request. It's possible that 3445 * another thread may have changed the weight while we 3446 * were blocked on the metaslab lock. We check the 3447 * active status first to see if we need to reselect 3448 * a new metaslab. 3449 */ 3450 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { 3451 mutex_exit(&msp->ms_lock); 3452 continue; 3453 } 3454 3455 /* 3456 * If the metaslab is freshly activated for an allocator that 3457 * isn't the one we're allocating from, or if it's a primary and 3458 * we're seeking a secondary (or vice versa), we go back and 3459 * select a new metaslab. 3460 */ 3461 if (!was_active && (msp->ms_weight & METASLAB_ACTIVE_MASK) && 3462 (msp->ms_allocator != -1) && 3463 (msp->ms_allocator != allocator || ((activation_weight == 3464 METASLAB_WEIGHT_PRIMARY) != msp->ms_primary))) { 3465 mutex_exit(&msp->ms_lock); 3466 continue; 3467 } 3468 3469 if (msp->ms_weight & METASLAB_WEIGHT_CLAIM && 3470 activation_weight != METASLAB_WEIGHT_CLAIM) { 3471 metaslab_passivate(msp, msp->ms_weight & 3472 ~METASLAB_WEIGHT_CLAIM); 3473 mutex_exit(&msp->ms_lock); 3474 continue; 3475 } 3476 3477 if (metaslab_activate(msp, allocator, activation_weight) != 0) { 3478 mutex_exit(&msp->ms_lock); 3479 continue; 3480 } 3481 3482 msp->ms_selected_txg = txg; 3483 3484 /* 3485 * Now that we have the lock, recheck to see if we should 3486 * continue to use this metaslab for this allocation. The 3487 * metaslab is now loaded so metaslab_should_allocate() can 3488 * accurately determine if the allocation attempt should 3489 * proceed. 3490 */ 3491 if (!metaslab_should_allocate(msp, asize)) { 3492 /* Passivate this metaslab and select a new one. */ 3493 metaslab_trace_add(zal, mg, msp, asize, d, 3494 TRACE_TOO_SMALL, allocator); 3495 goto next; 3496 } 3497 3498 /* 3499 * If this metaslab is currently condensing then pick again as 3500 * we can't manipulate this metaslab until it's committed 3501 * to disk. If this metaslab is being initialized, we shouldn't 3502 * allocate from it since the allocated region might be 3503 * overwritten after allocation.
3504 */ 3505 if (msp->ms_condensing) { 3506 metaslab_trace_add(zal, mg, msp, asize, d, 3507 TRACE_CONDENSING, allocator); 3508 metaslab_passivate(msp, msp->ms_weight & 3509 ~METASLAB_ACTIVE_MASK); 3510 mutex_exit(&msp->ms_lock); 3511 continue; 3512 } else if (msp->ms_initializing > 0) { 3513 metaslab_trace_add(zal, mg, msp, asize, d, 3514 TRACE_INITIALIZING, allocator); 3515 metaslab_passivate(msp, msp->ms_weight & 3516 ~METASLAB_ACTIVE_MASK); 3517 mutex_exit(&msp->ms_lock); 3518 continue; 3519 } 3520 3521 offset = metaslab_block_alloc(msp, asize, txg); 3522 metaslab_trace_add(zal, mg, msp, asize, d, offset, allocator); 3523 3524 if (offset != -1ULL) { 3525 /* Proactively passivate the metaslab, if needed */ 3526 metaslab_segment_may_passivate(msp); 3527 break; 3528 } 3529 next: 3530 ASSERT(msp->ms_loaded); 3531 3532 /* 3533 * We were unable to allocate from this metaslab so determine 3534 * a new weight for this metaslab. Now that we have loaded 3535 * the metaslab we can provide a better hint to the metaslab 3536 * selector. 3537 * 3538 * For space-based metaslabs, we use the maximum block size. 3539 * This information is only available when the metaslab 3540 * is loaded and is more accurate than the generic free 3541 * space weight that was calculated by metaslab_weight(). 3542 * This information allows us to quickly compare the maximum 3543 * available allocation in the metaslab to the allocation 3544 * size being requested. 3545 * 3546 * For segment-based metaslabs, determine the new weight 3547 * based on the highest bucket in the range tree. We 3548 * explicitly use the loaded segment weight (i.e. the range 3549 * tree histogram) since it contains the space that is 3550 * currently available for allocation and is accurate 3551 * even within a sync pass. 3552 */ 3553 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 3554 uint64_t weight = metaslab_block_maxsize(msp); 3555 WEIGHT_SET_SPACEBASED(weight); 3556 metaslab_passivate(msp, weight); 3557 } else { 3558 metaslab_passivate(msp, 3559 metaslab_weight_from_range_tree(msp)); 3560 } 3561 3562 /* 3563 * We have just failed an allocation attempt, check 3564 * that metaslab_should_allocate() agrees. Otherwise, 3565 * we may end up in an infinite loop retrying the same 3566 * metaslab. 3567 */ 3568 ASSERT(!metaslab_should_allocate(msp, asize)); 3569 3570 mutex_exit(&msp->ms_lock); 3571 } 3572 mutex_exit(&msp->ms_lock); 3573 kmem_free(search, sizeof (*search)); 3574 return (offset); 3575 } 3576 3577 static uint64_t 3578 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 3579 uint64_t asize, uint64_t txg, boolean_t want_unique, dva_t *dva, 3580 int d, int allocator) 3581 { 3582 uint64_t offset; 3583 ASSERT(mg->mg_initialized); 3584 3585 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, want_unique, 3586 dva, d, allocator); 3587 3588 mutex_enter(&mg->mg_lock); 3589 if (offset == -1ULL) { 3590 mg->mg_failed_allocations++; 3591 metaslab_trace_add(zal, mg, NULL, asize, d, 3592 TRACE_GROUP_FAILURE, allocator); 3593 if (asize == SPA_GANGBLOCKSIZE) { 3594 /* 3595 * This metaslab group was unable to allocate 3596 * the minimum gang block size so it must be out of 3597 * space. We must notify the allocation throttle 3598 * to start skipping allocation attempts to this 3599 * metaslab group until more space becomes available. 3600 * Note: this failure cannot be caused by the 3601 * allocation throttle since the allocation throttle 3602 * is only responsible for skipping devices and 3603 * not failing block allocations. 
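 * (SPA_GANGBLOCKSIZE is SPA_MINBLOCKSIZE -- the size of a single
 * gang block header -- which is the smallest allocation this
 * function is ever asked to satisfy, so failing at that size is a
 * dependable indication that the group is effectively full.)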
3604 */ 3605 mg->mg_no_free_space = B_TRUE; 3606 } 3607 } 3608 mg->mg_allocations++; 3609 mutex_exit(&mg->mg_lock); 3610 return (offset); 3611 } 3612 3613 /* 3614 * Allocate a block for the specified i/o. 3615 */ 3616 int 3617 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 3618 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 3619 zio_alloc_list_t *zal, int allocator) 3620 { 3621 metaslab_group_t *mg, *rotor; 3622 vdev_t *vd; 3623 boolean_t try_hard = B_FALSE; 3624 3625 ASSERT(!DVA_IS_VALID(&dva[d])); 3626 3627 /* 3628 * For testing, make some blocks above a certain size be gang blocks. 3629 * This will also test spilling from special to normal. 3630 */ 3631 if (psize >= metaslab_force_ganging && (ddi_get_lbolt() & 3) == 0) { 3632 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG, 3633 allocator); 3634 return (SET_ERROR(ENOSPC)); 3635 } 3636 3637 /* 3638 * Start at the rotor and loop through all mgs until we find something. 3639 * Note that there's no locking on mc_rotor or mc_aliquot because 3640 * nothing actually breaks if we miss a few updates -- we just won't 3641 * allocate quite as evenly. It all balances out over time. 3642 * 3643 * If we are doing ditto or log blocks, try to spread them across 3644 * consecutive vdevs. If we're forced to reuse a vdev before we've 3645 * allocated all of our ditto blocks, then try and spread them out on 3646 * that vdev as much as possible. If it turns out to not be possible, 3647 * gradually lower our standards until anything becomes acceptable. 3648 * Also, allocating on consecutive vdevs (as opposed to random vdevs) 3649 * gives us hope of containing our fault domains to something we're 3650 * able to reason about. Otherwise, any two top-level vdev failures 3651 * will guarantee the loss of data. With consecutive allocation, 3652 * only two adjacent top-level vdev failures will result in data loss. 3653 * 3654 * If we are doing gang blocks (hintdva is non-NULL), try to keep 3655 * ourselves on the same vdev as our gang block header. That 3656 * way, we can hope for locality in vdev_cache, plus it makes our 3657 * fault domains something tractable. 3658 */ 3659 if (hintdva) { 3660 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 3661 3662 /* 3663 * It's possible the vdev we're using as the hint no 3664 * longer exists or its mg has been closed (e.g. by 3665 * device removal). Consult the rotor when 3666 * all else fails. 3667 */ 3668 if (vd != NULL && vd->vdev_mg != NULL) { 3669 mg = vd->vdev_mg; 3670 3671 if (flags & METASLAB_HINTBP_AVOID && 3672 mg->mg_next != NULL) 3673 mg = mg->mg_next; 3674 } else { 3675 mg = mc->mc_rotor; 3676 } 3677 } else if (d != 0) { 3678 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 3679 mg = vd->vdev_mg->mg_next; 3680 } else { 3681 ASSERT(mc->mc_rotor != NULL); 3682 mg = mc->mc_rotor; 3683 } 3684 3685 /* 3686 * If the hint put us into the wrong metaslab class, or into a 3687 * metaslab group that has been passivated, just follow the rotor. 3688 */ 3689 if (mg->mg_class != mc || mg->mg_activation_count <= 0) 3690 mg = mc->mc_rotor; 3691 3692 rotor = mg; 3693 top: 3694 do { 3695 boolean_t allocatable; 3696 3697 ASSERT(mg->mg_activation_count == 1); 3698 vd = mg->mg_vd; 3699 3700 /* 3701 * Don't allocate from faulted devices. 
3702 */ 3703 if (try_hard) { 3704 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 3705 allocatable = vdev_allocatable(vd); 3706 spa_config_exit(spa, SCL_ZIO, FTAG); 3707 } else { 3708 allocatable = vdev_allocatable(vd); 3709 } 3710 3711 /* 3712 * Determine if the selected metaslab group is eligible 3713 * for allocations. If we're ganging then don't allow 3714 * this metaslab group to skip allocations since that would 3715 * inadvertently return ENOSPC and suspend the pool 3716 * even though space is still available. 3717 */ 3718 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 3719 allocatable = metaslab_group_allocatable(mg, rotor, 3720 psize, allocator, d); 3721 } 3722 3723 if (!allocatable) { 3724 metaslab_trace_add(zal, mg, NULL, psize, d, 3725 TRACE_NOT_ALLOCATABLE, allocator); 3726 goto next; 3727 } 3728 3729 ASSERT(mg->mg_initialized); 3730 3731 /* 3732 * Avoid writing single-copy data to a failing, 3733 * non-redundant vdev, unless we've already tried all 3734 * other vdevs. 3735 */ 3736 if ((vd->vdev_stat.vs_write_errors > 0 || 3737 vd->vdev_state < VDEV_STATE_HEALTHY) && 3738 d == 0 && !try_hard && vd->vdev_children == 0) { 3739 metaslab_trace_add(zal, mg, NULL, psize, d, 3740 TRACE_VDEV_ERROR, allocator); 3741 goto next; 3742 } 3743 3744 ASSERT(mg->mg_class == mc); 3745 3746 uint64_t asize = vdev_psize_to_asize(vd, psize); 3747 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 3748 3749 /* 3750 * If we don't need to try hard, then require that the 3751 * block be on a different metaslab from any other DVAs 3752 * in this BP (unique=true). If we are trying hard, then 3753 * allow any metaslab to be used (unique=false). 3754 */ 3755 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 3756 !try_hard, dva, d, allocator); 3757 3758 if (offset != -1ULL) { 3759 /* 3760 * If we've just selected this metaslab group, 3761 * figure out whether the corresponding vdev is 3762 * over- or under-used relative to the pool, 3763 * and set an allocation bias to even it out. 3764 */ 3765 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { 3766 vdev_stat_t *vs = &vd->vdev_stat; 3767 int64_t vu, cu; 3768 3769 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1); 3770 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1); 3771 3772 /* 3773 * Calculate how much more or less we should 3774 * try to allocate from this device during 3775 * this iteration around the rotor. 3776 * For example, if a device is 80% full 3777 * and the pool is 20% full then we should 3778 * reduce allocations by 60% on this device. 3779 * 3780 * mg_bias = (20 - 80) * 512K / 100 = -307K 3781 * 3782 * This reduces allocations by 307K for this 3783 * iteration. 3784 */ 3785 mg->mg_bias = ((cu - vu) * 3786 (int64_t)mg->mg_aliquot) / 100; 3787 } else if (!metaslab_bias_enabled) { 3788 mg->mg_bias = 0; 3789 } 3790 3791 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >= 3792 mg->mg_aliquot + mg->mg_bias) { 3793 mc->mc_rotor = mg->mg_next; 3794 mc->mc_aliquot = 0; 3795 } 3796 3797 DVA_SET_VDEV(&dva[d], vd->vdev_id); 3798 DVA_SET_OFFSET(&dva[d], offset); 3799 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); 3800 DVA_SET_ASIZE(&dva[d], asize); 3801 3802 return (0); 3803 } 3804 next: 3805 mc->mc_rotor = mg->mg_next; 3806 mc->mc_aliquot = 0; 3807 } while ((mg = mg->mg_next) != rotor); 3808 3809 /* 3810 * If we haven't tried hard, do so now.
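 * Trying hard relaxes the constraints applied in the loop above: the
 * faulted-device check is performed under SCL_ZIO,
 * metaslab_group_allocatable() is no longer consulted (so groups past the
 * noalloc/fragmentation thresholds become candidates again), DVAs are
 * allowed to share a metaslab with the block's other DVAs (want_unique
 * becomes false), and the single-copy write-error avoidance above is
 * skipped.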
3811 */ 3812 if (!try_hard) { 3813 try_hard = B_TRUE; 3814 goto top; 3815 } 3816 3817 bzero(&dva[d], sizeof (dva_t)); 3818 3819 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC, allocator); 3820 return (SET_ERROR(ENOSPC)); 3821 } 3822 3823 void 3824 metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize, 3825 boolean_t checkpoint) 3826 { 3827 metaslab_t *msp; 3828 spa_t *spa = vd->vdev_spa; 3829 3830 ASSERT(vdev_is_concrete(vd)); 3831 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 3832 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 3833 3834 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 3835 3836 VERIFY(!msp->ms_condensing); 3837 VERIFY3U(offset, >=, msp->ms_start); 3838 VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size); 3839 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 3840 VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift)); 3841 3842 metaslab_check_free_impl(vd, offset, asize); 3843 3844 mutex_enter(&msp->ms_lock); 3845 if (range_tree_is_empty(msp->ms_freeing) && 3846 range_tree_is_empty(msp->ms_checkpointing)) { 3847 vdev_dirty(vd, VDD_METASLAB, msp, spa_syncing_txg(spa)); 3848 } 3849 3850 if (checkpoint) { 3851 ASSERT(spa_has_checkpoint(spa)); 3852 range_tree_add(msp->ms_checkpointing, offset, asize); 3853 } else { 3854 range_tree_add(msp->ms_freeing, offset, asize); 3855 } 3856 mutex_exit(&msp->ms_lock); 3857 } 3858 3859 /* ARGSUSED */ 3860 void 3861 metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 3862 uint64_t size, void *arg) 3863 { 3864 boolean_t *checkpoint = arg; 3865 3866 ASSERT3P(checkpoint, !=, NULL); 3867 3868 if (vd->vdev_ops->vdev_op_remap != NULL) 3869 vdev_indirect_mark_obsolete(vd, offset, size); 3870 else 3871 metaslab_free_impl(vd, offset, size, *checkpoint); 3872 } 3873 3874 static void 3875 metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size, 3876 boolean_t checkpoint) 3877 { 3878 spa_t *spa = vd->vdev_spa; 3879 3880 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 3881 3882 if (spa_syncing_txg(spa) > spa_freeze_txg(spa)) 3883 return; 3884 3885 if (spa->spa_vdev_removal != NULL && 3886 spa->spa_vdev_removal->svr_vdev_id == vd->vdev_id && 3887 vdev_is_concrete(vd)) { 3888 /* 3889 * Note: we check if the vdev is concrete because when 3890 * we complete the removal, we first change the vdev to be 3891 * an indirect vdev (in open context), and then (in syncing 3892 * context) clear spa_vdev_removal. 3893 */ 3894 free_from_removing_vdev(vd, offset, size); 3895 } else if (vd->vdev_ops->vdev_op_remap != NULL) { 3896 vdev_indirect_mark_obsolete(vd, offset, size); 3897 vd->vdev_ops->vdev_op_remap(vd, offset, size, 3898 metaslab_free_impl_cb, &checkpoint); 3899 } else { 3900 metaslab_free_concrete(vd, offset, size, checkpoint); 3901 } 3902 } 3903 3904 typedef struct remap_blkptr_cb_arg { 3905 blkptr_t *rbca_bp; 3906 spa_remap_cb_t rbca_cb; 3907 vdev_t *rbca_remap_vd; 3908 uint64_t rbca_remap_offset; 3909 void *rbca_cb_arg; 3910 } remap_blkptr_cb_arg_t; 3911 3912 void 3913 remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 3914 uint64_t size, void *arg) 3915 { 3916 remap_blkptr_cb_arg_t *rbca = arg; 3917 blkptr_t *bp = rbca->rbca_bp; 3918 3919 /* We can not remap split blocks. 
*/ 3920 if (size != DVA_GET_ASIZE(&bp->blk_dva[0])) 3921 return; 3922 ASSERT0(inner_offset); 3923 3924 if (rbca->rbca_cb != NULL) { 3925 /* 3926 * At this point we know that we are not handling split 3927 * blocks and we invoke the callback on the previous 3928 * vdev which must be indirect. 3929 */ 3930 ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops); 3931 3932 rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id, 3933 rbca->rbca_remap_offset, size, rbca->rbca_cb_arg); 3934 3935 /* set up remap_blkptr_cb_arg for the next call */ 3936 rbca->rbca_remap_vd = vd; 3937 rbca->rbca_remap_offset = offset; 3938 } 3939 3940 /* 3941 * The phys birth time is that of dva[0]. This ensures that we know 3942 * when each dva was written, so that resilver can determine which 3943 * blocks need to be scrubbed (i.e. those written during the time 3944 * the vdev was offline). It also ensures that the key used in 3945 * the ARC hash table is unique (i.e. dva[0] + phys_birth). If 3946 * we didn't change the phys_birth, a lookup in the ARC for a 3947 * remapped BP could find the data that was previously stored at 3948 * this vdev + offset. 3949 */ 3950 vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa, 3951 DVA_GET_VDEV(&bp->blk_dva[0])); 3952 vdev_indirect_births_t *vib = oldvd->vdev_indirect_births; 3953 bp->blk_phys_birth = vdev_indirect_births_physbirth(vib, 3954 DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0])); 3955 3956 DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id); 3957 DVA_SET_OFFSET(&bp->blk_dva[0], offset); 3958 } 3959 3960 /* 3961 * If the block pointer contains any indirect DVAs, modify them to refer to 3962 * concrete DVAs. Note that this will sometimes not be possible, leaving 3963 * the indirect DVA in place. This happens if the indirect DVA spans multiple 3964 * segments in the mapping (i.e. it is a "split block"). 3965 * 3966 * If the BP was remapped, calls the callback on the original dva (note the 3967 * callback can be called multiple times if the original indirect DVA refers 3968 * to another indirect DVA, etc). 3969 * 3970 * Returns TRUE if the BP was remapped. 3971 */ 3972 boolean_t 3973 spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg) 3974 { 3975 remap_blkptr_cb_arg_t rbca; 3976 3977 if (!zfs_remap_blkptr_enable) 3978 return (B_FALSE); 3979 3980 if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) 3981 return (B_FALSE); 3982 3983 /* 3984 * Dedup BP's can not be remapped, because ddt_phys_select() depends 3985 * on DVA[0] being the same in the BP as in the DDT (dedup table). 3986 */ 3987 if (BP_GET_DEDUP(bp)) 3988 return (B_FALSE); 3989 3990 /* 3991 * Gang blocks can not be remapped, because 3992 * zio_checksum_gang_verifier() depends on the DVA[0] that's in 3993 * the BP used to read the gang block header (GBH) being the same 3994 * as the DVA[0] that we allocated for the GBH. 3995 */ 3996 if (BP_IS_GANG(bp)) 3997 return (B_FALSE); 3998 3999 /* 4000 * Embedded BP's have no DVA to remap. 4001 */ 4002 if (BP_GET_NDVAS(bp) < 1) 4003 return (B_FALSE); 4004 4005 /* 4006 * Note: we only remap dva[0]. If we remapped other dvas, we 4007 * would no longer know what their phys birth txg is. 
4008 */ 4009 dva_t *dva = &bp->blk_dva[0]; 4010 4011 uint64_t offset = DVA_GET_OFFSET(dva); 4012 uint64_t size = DVA_GET_ASIZE(dva); 4013 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 4014 4015 if (vd->vdev_ops->vdev_op_remap == NULL) 4016 return (B_FALSE); 4017 4018 rbca.rbca_bp = bp; 4019 rbca.rbca_cb = callback; 4020 rbca.rbca_remap_vd = vd; 4021 rbca.rbca_remap_offset = offset; 4022 rbca.rbca_cb_arg = arg; 4023 4024 /* 4025 * remap_blkptr_cb() will be called in order for each level of 4026 * indirection, until a concrete vdev is reached or a split block is 4027 * encountered. old_vd and old_offset are updated within the callback 4028 * as we go from the one indirect vdev to the next one (either concrete 4029 * or indirect again) in that order. 4030 */ 4031 vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca); 4032 4033 /* Check if the DVA wasn't remapped because it is a split block */ 4034 if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id) 4035 return (B_FALSE); 4036 4037 return (B_TRUE); 4038 } 4039 4040 /* 4041 * Undo the allocation of a DVA which happened in the given transaction group. 4042 */ 4043 void 4044 metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 4045 { 4046 metaslab_t *msp; 4047 vdev_t *vd; 4048 uint64_t vdev = DVA_GET_VDEV(dva); 4049 uint64_t offset = DVA_GET_OFFSET(dva); 4050 uint64_t size = DVA_GET_ASIZE(dva); 4051 4052 ASSERT(DVA_IS_VALID(dva)); 4053 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 4054 4055 if (txg > spa_freeze_txg(spa)) 4056 return; 4057 4058 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || 4059 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 4060 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu", 4061 (u_longlong_t)vdev, (u_longlong_t)offset); 4062 ASSERT(0); 4063 return; 4064 } 4065 4066 ASSERT(!vd->vdev_removing); 4067 ASSERT(vdev_is_concrete(vd)); 4068 ASSERT0(vd->vdev_indirect_config.vic_mapping_object); 4069 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL); 4070 4071 if (DVA_GET_GANG(dva)) 4072 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 4073 4074 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4075 4076 mutex_enter(&msp->ms_lock); 4077 range_tree_remove(msp->ms_allocating[txg & TXG_MASK], 4078 offset, size); 4079 4080 VERIFY(!msp->ms_condensing); 4081 VERIFY3U(offset, >=, msp->ms_start); 4082 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 4083 VERIFY3U(range_tree_space(msp->ms_allocatable) + size, <=, 4084 msp->ms_size); 4085 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 4086 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4087 range_tree_add(msp->ms_allocatable, offset, size); 4088 mutex_exit(&msp->ms_lock); 4089 } 4090 4091 /* 4092 * Free the block represented by the given DVA. 4093 */ 4094 void 4095 metaslab_free_dva(spa_t *spa, const dva_t *dva, boolean_t checkpoint) 4096 { 4097 uint64_t vdev = DVA_GET_VDEV(dva); 4098 uint64_t offset = DVA_GET_OFFSET(dva); 4099 uint64_t size = DVA_GET_ASIZE(dva); 4100 vdev_t *vd = vdev_lookup_top(spa, vdev); 4101 4102 ASSERT(DVA_IS_VALID(dva)); 4103 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 4104 4105 if (DVA_GET_GANG(dva)) { 4106 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 4107 } 4108 4109 metaslab_free_impl(vd, offset, size, checkpoint); 4110 } 4111 4112 /* 4113 * Reserve some allocation slots. The reservation system must be called 4114 * before we call into the allocator. 
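 * A simplified, hypothetical caller (the real logic lives in the zio
 * pipeline) looks roughly like:
 *
 *	if (metaslab_class_throttle_reserve(mc, ndvas, allocator, zio, 0)) {
 *		... proceed to allocate the block ...
 *	} else {
 *		... defer the zio until a slot frees up ...
 *	}
 *
 * with a matching metaslab_class_throttle_unreserve(mc, ndvas, allocator,
 * zio) once the allocating I/O completes.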
If there aren't any available slots 4115 * then the I/O will be throttled until an I/O completes and its slots are 4116 * freed up. The function returns true if it was successful in placing 4117 * the reservation. 4118 */ 4119 boolean_t 4120 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator, 4121 zio_t *zio, int flags) 4122 { 4123 uint64_t available_slots = 0; 4124 boolean_t slot_reserved = B_FALSE; 4125 uint64_t max = mc->mc_alloc_max_slots[allocator]; 4126 4127 ASSERT(mc->mc_alloc_throttle_enabled); 4128 mutex_enter(&mc->mc_lock); 4129 4130 uint64_t reserved_slots = 4131 zfs_refcount_count(&mc->mc_alloc_slots[allocator]); 4132 if (reserved_slots < max) 4133 available_slots = max - reserved_slots; 4134 4135 if (slots <= available_slots || GANG_ALLOCATION(flags) || 4136 flags & METASLAB_MUST_RESERVE) { 4137 /* 4138 * We reserve the slots individually so that we can unreserve 4139 * them individually when an I/O completes. 4140 */ 4141 for (int d = 0; d < slots; d++) { 4142 reserved_slots = 4143 zfs_refcount_add(&mc->mc_alloc_slots[allocator], 4144 zio); 4145 } 4146 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 4147 slot_reserved = B_TRUE; 4148 } 4149 4150 mutex_exit(&mc->mc_lock); 4151 return (slot_reserved); 4152 } 4153 4154 void 4155 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, 4156 int allocator, zio_t *zio) 4157 { 4158 ASSERT(mc->mc_alloc_throttle_enabled); 4159 mutex_enter(&mc->mc_lock); 4160 for (int d = 0; d < slots; d++) { 4161 (void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator], 4162 zio); 4163 } 4164 mutex_exit(&mc->mc_lock); 4165 } 4166 4167 static int 4168 metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size, 4169 uint64_t txg) 4170 { 4171 metaslab_t *msp; 4172 spa_t *spa = vd->vdev_spa; 4173 int error = 0; 4174 4175 if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count) 4176 return (ENXIO); 4177 4178 ASSERT3P(vd->vdev_ms, !=, NULL); 4179 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4180 4181 mutex_enter(&msp->ms_lock); 4182 4183 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) 4184 error = metaslab_activate(msp, 0, METASLAB_WEIGHT_CLAIM); 4185 /* 4186 * No need to fail in that case; someone else has activated the 4187 * metaslab, but that doesn't preclude us from using it. 
4188 */ 4189 if (error == EBUSY) 4190 error = 0; 4191 4192 if (error == 0 && 4193 !range_tree_contains(msp->ms_allocatable, offset, size)) 4194 error = SET_ERROR(ENOENT); 4195 4196 if (error || txg == 0) { /* txg == 0 indicates dry run */ 4197 mutex_exit(&msp->ms_lock); 4198 return (error); 4199 } 4200 4201 VERIFY(!msp->ms_condensing); 4202 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 4203 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 4204 VERIFY3U(range_tree_space(msp->ms_allocatable) - size, <=, 4205 msp->ms_size); 4206 range_tree_remove(msp->ms_allocatable, offset, size); 4207 4208 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ 4209 if (range_tree_is_empty(msp->ms_allocating[txg & TXG_MASK])) 4210 vdev_dirty(vd, VDD_METASLAB, msp, txg); 4211 range_tree_add(msp->ms_allocating[txg & TXG_MASK], 4212 offset, size); 4213 } 4214 4215 mutex_exit(&msp->ms_lock); 4216 4217 return (0); 4218 } 4219 4220 typedef struct metaslab_claim_cb_arg_t { 4221 uint64_t mcca_txg; 4222 int mcca_error; 4223 } metaslab_claim_cb_arg_t; 4224 4225 /* ARGSUSED */ 4226 static void 4227 metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset, 4228 uint64_t size, void *arg) 4229 { 4230 metaslab_claim_cb_arg_t *mcca_arg = arg; 4231 4232 if (mcca_arg->mcca_error == 0) { 4233 mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset, 4234 size, mcca_arg->mcca_txg); 4235 } 4236 } 4237 4238 int 4239 metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg) 4240 { 4241 if (vd->vdev_ops->vdev_op_remap != NULL) { 4242 metaslab_claim_cb_arg_t arg; 4243 4244 /* 4245 * Only zdb(1M) can claim on indirect vdevs. This is used 4246 * to detect leaks of mapped space (that are not accounted 4247 * for in the obsolete counts, spacemap, or bpobj). 4248 */ 4249 ASSERT(!spa_writeable(vd->vdev_spa)); 4250 arg.mcca_error = 0; 4251 arg.mcca_txg = txg; 4252 4253 vd->vdev_ops->vdev_op_remap(vd, offset, size, 4254 metaslab_claim_impl_cb, &arg); 4255 4256 if (arg.mcca_error == 0) { 4257 arg.mcca_error = metaslab_claim_concrete(vd, 4258 offset, size, txg); 4259 } 4260 return (arg.mcca_error); 4261 } else { 4262 return (metaslab_claim_concrete(vd, offset, size, txg)); 4263 } 4264 } 4265 4266 /* 4267 * Intent log support: upon opening the pool after a crash, notify the SPA 4268 * of blocks that the intent log has allocated for immediate write, but 4269 * which are still considered free by the SPA because the last transaction 4270 * group didn't commit yet. 4271 */ 4272 static int 4273 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 4274 { 4275 uint64_t vdev = DVA_GET_VDEV(dva); 4276 uint64_t offset = DVA_GET_OFFSET(dva); 4277 uint64_t size = DVA_GET_ASIZE(dva); 4278 vdev_t *vd; 4279 4280 if ((vd = vdev_lookup_top(spa, vdev)) == NULL) { 4281 return (SET_ERROR(ENXIO)); 4282 } 4283 4284 ASSERT(DVA_IS_VALID(dva)); 4285 4286 if (DVA_GET_GANG(dva)) 4287 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 4288 4289 return (metaslab_claim_impl(vd, offset, size, txg)); 4290 } 4291 4292 int 4293 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 4294 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 4295 zio_alloc_list_t *zal, zio_t *zio, int allocator) 4296 { 4297 dva_t *dva = bp->blk_dva; 4298 dva_t *hintdva = (hintbp != NULL) ? 
hintbp->blk_dva : NULL; 4299 int error = 0; 4300 4301 ASSERT(bp->blk_birth == 0); 4302 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 4303 4304 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 4305 4306 if (mc->mc_rotor == NULL) { /* no vdevs in this class */ 4307 spa_config_exit(spa, SCL_ALLOC, FTAG); 4308 return (SET_ERROR(ENOSPC)); 4309 } 4310 4311 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 4312 ASSERT(BP_GET_NDVAS(bp) == 0); 4313 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 4314 ASSERT3P(zal, !=, NULL); 4315 4316 for (int d = 0; d < ndvas; d++) { 4317 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 4318 txg, flags, zal, allocator); 4319 if (error != 0) { 4320 for (d--; d >= 0; d--) { 4321 metaslab_unalloc_dva(spa, &dva[d], txg); 4322 metaslab_group_alloc_decrement(spa, 4323 DVA_GET_VDEV(&dva[d]), zio, flags, 4324 allocator, B_FALSE); 4325 bzero(&dva[d], sizeof (dva_t)); 4326 } 4327 spa_config_exit(spa, SCL_ALLOC, FTAG); 4328 return (error); 4329 } else { 4330 /* 4331 * Update the metaslab group's queue depth 4332 * based on the newly allocated dva. 4333 */ 4334 metaslab_group_alloc_increment(spa, 4335 DVA_GET_VDEV(&dva[d]), zio, flags, allocator); 4336 } 4337 4338 } 4339 ASSERT(error == 0); 4340 ASSERT(BP_GET_NDVAS(bp) == ndvas); 4341 4342 spa_config_exit(spa, SCL_ALLOC, FTAG); 4343 4344 BP_SET_BIRTH(bp, txg, txg); 4345 4346 return (0); 4347 } 4348 4349 void 4350 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 4351 { 4352 const dva_t *dva = bp->blk_dva; 4353 int ndvas = BP_GET_NDVAS(bp); 4354 4355 ASSERT(!BP_IS_HOLE(bp)); 4356 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); 4357 4358 /* 4359 * If we have a checkpoint for the pool we need to make sure that 4360 * the blocks that we free that are part of the checkpoint won't be 4361 * reused until the checkpoint is discarded or we revert to it. 4362 * 4363 * The checkpoint flag is passed down the metaslab_free code path 4364 * and is set whenever we want to add a block to the checkpoint's 4365 * accounting. That is, we "checkpoint" blocks that existed at the 4366 * time the checkpoint was created and are therefore referenced by 4367 * the checkpointed uberblock. 4368 * 4369 * Note that, we don't checkpoint any blocks if the current 4370 * syncing txg <= spa_checkpoint_txg. We want these frees to sync 4371 * normally as they will be referenced by the checkpointed uberblock. 4372 */ 4373 boolean_t checkpoint = B_FALSE; 4374 if (bp->blk_birth <= spa->spa_checkpoint_txg && 4375 spa_syncing_txg(spa) > spa->spa_checkpoint_txg) { 4376 /* 4377 * At this point, if the block is part of the checkpoint 4378 * there is no way it was created in the current txg. 4379 */ 4380 ASSERT(!now); 4381 ASSERT3U(spa_syncing_txg(spa), ==, txg); 4382 checkpoint = B_TRUE; 4383 } 4384 4385 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 4386 4387 for (int d = 0; d < ndvas; d++) { 4388 if (now) { 4389 metaslab_unalloc_dva(spa, &dva[d], txg); 4390 } else { 4391 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 4392 metaslab_free_dva(spa, &dva[d], checkpoint); 4393 } 4394 } 4395 4396 spa_config_exit(spa, SCL_FREE, FTAG); 4397 } 4398 4399 int 4400 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 4401 { 4402 const dva_t *dva = bp->blk_dva; 4403 int ndvas = BP_GET_NDVAS(bp); 4404 int error = 0; 4405 4406 ASSERT(!BP_IS_HOLE(bp)); 4407 4408 if (txg != 0) { 4409 /* 4410 * First do a dry run to make sure all DVAs are claimable, 4411 * so we don't have to unwind from partial failures below. 
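 * (For example, with a three-DVA block pointer, failing on DVA[2] after
 * DVA[0] and DVA[1] had already been claimed would otherwise force us to
 * unclaim them. The recursive call below passes txg == 0, the dry-run
 * convention understood by metaslab_claim_concrete(), so every DVA is
 * checked without modifying any range trees.)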
4412 */ 4413 if ((error = metaslab_claim(spa, bp, 0)) != 0) 4414 return (error); 4415 } 4416 4417 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 4418 4419 for (int d = 0; d < ndvas; d++) { 4420 error = metaslab_claim_dva(spa, &dva[d], txg); 4421 if (error != 0) 4422 break; 4423 } 4424 4425 spa_config_exit(spa, SCL_ALLOC, FTAG); 4426 4427 ASSERT(error == 0 || txg == 0); 4428 4429 return (error); 4430 } 4431 4432 /* ARGSUSED */ 4433 static void 4434 metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset, 4435 uint64_t size, void *arg) 4436 { 4437 if (vd->vdev_ops == &vdev_indirect_ops) 4438 return; 4439 4440 metaslab_check_free_impl(vd, offset, size); 4441 } 4442 4443 static void 4444 metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size) 4445 { 4446 metaslab_t *msp; 4447 spa_t *spa = vd->vdev_spa; 4448 4449 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 4450 return; 4451 4452 if (vd->vdev_ops->vdev_op_remap != NULL) { 4453 vd->vdev_ops->vdev_op_remap(vd, offset, size, 4454 metaslab_check_free_impl_cb, NULL); 4455 return; 4456 } 4457 4458 ASSERT(vdev_is_concrete(vd)); 4459 ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count); 4460 ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0); 4461 4462 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 4463 4464 mutex_enter(&msp->ms_lock); 4465 if (msp->ms_loaded) { 4466 range_tree_verify_not_present(msp->ms_allocatable, 4467 offset, size); 4468 } 4469 4470 range_tree_verify_not_present(msp->ms_freeing, offset, size); 4471 range_tree_verify_not_present(msp->ms_checkpointing, offset, size); 4472 range_tree_verify_not_present(msp->ms_freed, offset, size); 4473 for (int j = 0; j < TXG_DEFER_SIZE; j++) 4474 range_tree_verify_not_present(msp->ms_defer[j], offset, size); 4475 mutex_exit(&msp->ms_lock); 4476 } 4477 4478 void 4479 metaslab_check_free(spa_t *spa, const blkptr_t *bp) 4480 { 4481 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 4482 return; 4483 4484 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 4485 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 4486 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 4487 vdev_t *vd = vdev_lookup_top(spa, vdev); 4488 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 4489 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 4490 4491 if (DVA_GET_GANG(&bp->blk_dva[i])) 4492 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 4493 4494 ASSERT3P(vd, !=, NULL); 4495 4496 metaslab_check_free_impl(vd, offset, size); 4497 } 4498 spa_config_exit(spa, SCL_VDEV, FTAG); 4499 } 4500
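
/*
 * Note on the debug hooks above: metaslab_check_free() and
 * metaslab_check_free_impl() are no-ops unless ZFS_DEBUG_ZIO_FREE is set in
 * the global zfs_flags debug mask. When it is set, every range being freed
 * is verified to be absent from the metaslab's allocatable, freeing,
 * checkpointing, freed and deferred range trees, catching double frees
 * before they can corrupt the space maps.
 */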