1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. 25 * Copyright (c) 2014 Integros [integros.com] 26 */ 27 28 #include <sys/zfs_context.h> 29 #include <sys/dmu.h> 30 #include <sys/dmu_tx.h> 31 #include <sys/space_map.h> 32 #include <sys/metaslab_impl.h> 33 #include <sys/vdev_impl.h> 34 #include <sys/zio.h> 35 #include <sys/spa_impl.h> 36 #include <sys/zfeature.h> 37 38 #define GANG_ALLOCATION(flags) \ 39 ((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER)) 40 41 uint64_t metaslab_aliquot = 512ULL << 10; 42 uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ 43 44 /* 45 * The in-core space map representation is more compact than its on-disk form. 46 * The zfs_condense_pct determines how much more compact the in-core 47 * space map representation must be before we compact it on-disk. 48 * Values should be greater than or equal to 100. 49 */ 50 int zfs_condense_pct = 200; 51 52 /* 53 * Condensing a metaslab is not guaranteed to actually reduce the amount of 54 * space used on disk. In particular, a space map uses data in increments of 55 * MAX(1 << ashift, space_map_blksize), so a metaslab might use the 56 * same number of blocks after condensing. Since the goal of condensing is to 57 * reduce the number of IOPs required to read the space map, we only want to 58 * condense when we can be sure we will reduce the number of blocks used by the 59 * space map. Unfortunately, we cannot precisely compute whether or not this is 60 * the case in metaslab_should_condense since we are holding ms_lock. Instead, 61 * we apply the following heuristic: do not condense a spacemap unless the 62 * uncondensed size consumes greater than zfs_metaslab_condense_block_threshold 63 * blocks. 64 */ 65 int zfs_metaslab_condense_block_threshold = 4; 66 67 /* 68 * The zfs_mg_noalloc_threshold defines which metaslab groups should 69 * be eligible for allocation. The value is defined as a percentage of 70 * free space. Metaslab groups that have more free space than 71 * zfs_mg_noalloc_threshold are always eligible for allocations. Once 72 * a metaslab group's free space is less than or equal to the 73 * zfs_mg_noalloc_threshold the allocator will avoid allocating to that 74 * group unless all groups in the pool have reached zfs_mg_noalloc_threshold. 75 * Once all groups in the pool reach zfs_mg_noalloc_threshold then all 76 * groups are allowed to accept allocations. Gang blocks are always 77 * eligible to allocate on any metaslab group. 
The default value of 0 means
 * no metaslab group will be excluded based on this criterion.
 */
int zfs_mg_noalloc_threshold = 0;

/*
 * Metaslab groups are considered eligible for allocations if their
 * fragmentation metric (measured as a percentage) is less than or equal to
 * zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
 * then it will be skipped unless all metaslab groups within the metaslab
 * class have also crossed this threshold.
 */
int zfs_mg_fragmentation_threshold = 85;

/*
 * Allow metaslabs to keep their active state as long as their fragmentation
 * percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
 * active metaslab that exceeds this threshold will no longer keep its active
 * status allowing better metaslabs to be selected.
 */
int zfs_metaslab_fragmentation_threshold = 70;

/*
 * When set will load all metaslabs when pool is first opened.
 */
int metaslab_debug_load = 0;

/*
 * When set will prevent metaslabs from being unloaded.
 */
int metaslab_debug_unload = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Percentage of all cpus that can be used by the metaslab taskq.
 */
int metaslab_load_pct = 50;

/*
 * Determines how many txgs a metaslab may remain loaded without having any
 * allocations from it. As long as a metaslab continues to be used we will
 * keep it loaded.
 */
int metaslab_unload_delay = TXG_SIZE * 2;

/*
 * Max number of metaslabs per group to preload.
 */
int metaslab_preload_limit = SPA_DVAS_PER_BP;

/*
 * Enable/disable preloading of metaslabs.
 */
boolean_t metaslab_preload_enabled = B_TRUE;

/*
 * Enable/disable fragmentation weighting on metaslabs.
 */
boolean_t metaslab_fragmentation_factor_enabled = B_TRUE;

/*
 * Enable/disable lba weighting (i.e. outer tracks are given preference).
 */
boolean_t metaslab_lba_weighting_enabled = B_TRUE;

/*
 * Enable/disable metaslab group biasing.
 */
boolean_t metaslab_bias_enabled = B_TRUE;

/*
 * Enable/disable segment-based metaslab selection.
 */
boolean_t zfs_metaslab_segment_weight_enabled = B_TRUE;

/*
 * When using segment-based metaslab selection, we will continue
 * allocating from the active metaslab until we have exhausted
 * zfs_metaslab_switch_threshold of its buckets.
 */
int zfs_metaslab_switch_threshold = 2;

/*
 * Internal switch to enable/disable the metaslab allocation tracing
 * facility.
 */
boolean_t metaslab_trace_enabled = B_TRUE;

/*
 * Maximum entries that the metaslab allocation tracing facility will keep
 * in a given list when running in non-debug mode. We limit the number
 * of entries in non-debug mode to prevent us from using up too much memory.
 * The limit should be sufficiently large that we don't expect any allocation
 * to ever exceed this value. In debug mode, the system will panic if this
 * limit is ever reached, allowing for further investigation.
 */
uint64_t metaslab_trace_max_entries = 5000;

static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);

kmem_cache_t *metaslab_alloc_trace_cache;

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;
	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
	refcount_create_tracked(&mc->mc_alloc_slots);

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	refcount_destroy(&mc->mc_alloc_slots);
	mutex_destroy(&mc->mc_lock);
	kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ?
	    mc->mc_dspace : mc->mc_space);
}

void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t *mc_hist;
	int i;

	if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
		return;

	mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
	    KM_SLEEP);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
			mc_hist[i] += mg->mg_histogram[i];
	}

	for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
		VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);

	kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}

/*
 * Calculate the metaslab class's fragmentation metric. The metric
 * is weighted based on the space contribution of each metaslab group.
 * The return value will be a number between 0 and 100 (inclusive), or
 * ZFS_FRAG_INVALID if the metric has not been set. See comment above the
 * zfs_frag_table for more information about the metric.
 */
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t fragmentation = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		/*
		 * Skip any holes, uninitialized top-levels, or
		 * vdevs that are not in this metaslab class.
		 */
		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * If a metaslab group does not contain a fragmentation
		 * metric then just bail out.
		 */
		if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
			spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
			return (ZFS_FRAG_INVALID);
		}

		/*
		 * Determine how much this metaslab_group is contributing
		 * to the overall pool fragmentation metric.
		 */
		fragmentation += mg->mg_fragmentation *
		    metaslab_group_get_space(mg);
	}
	fragmentation /= metaslab_class_get_space(mc);

	ASSERT3U(fragmentation, <=, 100);
	spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
	return (fragmentation);
}

/*
 * Calculate the amount of expandable space that is available in
 * this metaslab class. If a device is expanded then its expandable
 * space will be the amount of allocatable space that is currently not
 * part of this metaslab class.
 */
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
	vdev_t *rvd = mc->mc_spa->spa_root_vdev;
	uint64_t space = 0;

	spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
		    mg->mg_class != mc) {
			continue;
		}

		/*
		 * Calculate if we have enough space to add additional
		 * metaslabs. We report the expandable space in terms
		 * of the metaslab size since that's the unit of expansion.
409 */ 410 space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize, 411 1ULL << tvd->vdev_ms_shift); 412 } 413 spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG); 414 return (space); 415 } 416 417 static int 418 metaslab_compare(const void *x1, const void *x2) 419 { 420 const metaslab_t *m1 = x1; 421 const metaslab_t *m2 = x2; 422 423 if (m1->ms_weight < m2->ms_weight) 424 return (1); 425 if (m1->ms_weight > m2->ms_weight) 426 return (-1); 427 428 /* 429 * If the weights are identical, use the offset to force uniqueness. 430 */ 431 if (m1->ms_start < m2->ms_start) 432 return (-1); 433 if (m1->ms_start > m2->ms_start) 434 return (1); 435 436 ASSERT3P(m1, ==, m2); 437 438 return (0); 439 } 440 441 /* 442 * Verify that the space accounting on disk matches the in-core range_trees. 443 */ 444 void 445 metaslab_verify_space(metaslab_t *msp, uint64_t txg) 446 { 447 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 448 uint64_t allocated = 0; 449 uint64_t freed = 0; 450 uint64_t sm_free_space, msp_free_space; 451 452 ASSERT(MUTEX_HELD(&msp->ms_lock)); 453 454 if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0) 455 return; 456 457 /* 458 * We can only verify the metaslab space when we're called 459 * from syncing context with a loaded metaslab that has an allocated 460 * space map. Calling this in non-syncing context does not 461 * provide a consistent view of the metaslab since we're performing 462 * allocations in the future. 463 */ 464 if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL || 465 !msp->ms_loaded) 466 return; 467 468 sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) - 469 space_map_alloc_delta(msp->ms_sm); 470 471 /* 472 * Account for future allocations since we would have already 473 * deducted that space from the ms_freetree. 474 */ 475 for (int t = 0; t < TXG_CONCURRENT_STATES; t++) { 476 allocated += 477 range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]); 478 } 479 freed = range_tree_space(msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]); 480 481 msp_free_space = range_tree_space(msp->ms_tree) + allocated + 482 msp->ms_deferspace + freed; 483 484 VERIFY3U(sm_free_space, ==, msp_free_space); 485 } 486 487 /* 488 * ========================================================================== 489 * Metaslab groups 490 * ========================================================================== 491 */ 492 /* 493 * Update the allocatable flag and the metaslab group's capacity. 494 * The allocatable flag is set to true if the capacity is below 495 * the zfs_mg_noalloc_threshold or has a fragmentation value that is 496 * greater than zfs_mg_fragmentation_threshold. If a metaslab group 497 * transitions from allocatable to non-allocatable or vice versa then the 498 * metaslab group's class is updated to reflect the transition. 499 */ 500 static void 501 metaslab_group_alloc_update(metaslab_group_t *mg) 502 { 503 vdev_t *vd = mg->mg_vd; 504 metaslab_class_t *mc = mg->mg_class; 505 vdev_stat_t *vs = &vd->vdev_stat; 506 boolean_t was_allocatable; 507 boolean_t was_initialized; 508 509 ASSERT(vd == vd->vdev_top); 510 511 mutex_enter(&mg->mg_lock); 512 was_allocatable = mg->mg_allocatable; 513 was_initialized = mg->mg_initialized; 514 515 mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) / 516 (vs->vs_space + 1); 517 518 mutex_enter(&mc->mc_lock); 519 520 /* 521 * If the metaslab group was just added then it won't 522 * have any space until we finish syncing out this txg. 523 * At that point we will consider it initialized and available 524 * for allocations. 
We also don't consider non-activated 525 * metaslab groups (e.g. vdevs that are in the middle of being removed) 526 * to be initialized, because they can't be used for allocation. 527 */ 528 mg->mg_initialized = metaslab_group_initialized(mg); 529 if (!was_initialized && mg->mg_initialized) { 530 mc->mc_groups++; 531 } else if (was_initialized && !mg->mg_initialized) { 532 ASSERT3U(mc->mc_groups, >, 0); 533 mc->mc_groups--; 534 } 535 if (mg->mg_initialized) 536 mg->mg_no_free_space = B_FALSE; 537 538 /* 539 * A metaslab group is considered allocatable if it has plenty 540 * of free space or is not heavily fragmented. We only take 541 * fragmentation into account if the metaslab group has a valid 542 * fragmentation metric (i.e. a value between 0 and 100). 543 */ 544 mg->mg_allocatable = (mg->mg_activation_count > 0 && 545 mg->mg_free_capacity > zfs_mg_noalloc_threshold && 546 (mg->mg_fragmentation == ZFS_FRAG_INVALID || 547 mg->mg_fragmentation <= zfs_mg_fragmentation_threshold)); 548 549 /* 550 * The mc_alloc_groups maintains a count of the number of 551 * groups in this metaslab class that are still above the 552 * zfs_mg_noalloc_threshold. This is used by the allocating 553 * threads to determine if they should avoid allocations to 554 * a given group. The allocator will avoid allocations to a group 555 * if that group has reached or is below the zfs_mg_noalloc_threshold 556 * and there are still other groups that are above the threshold. 557 * When a group transitions from allocatable to non-allocatable or 558 * vice versa we update the metaslab class to reflect that change. 559 * When the mc_alloc_groups value drops to 0 that means that all 560 * groups have reached the zfs_mg_noalloc_threshold making all groups 561 * eligible for allocations. This effectively means that all devices 562 * are balanced again. 563 */ 564 if (was_allocatable && !mg->mg_allocatable) 565 mc->mc_alloc_groups--; 566 else if (!was_allocatable && mg->mg_allocatable) 567 mc->mc_alloc_groups++; 568 mutex_exit(&mc->mc_lock); 569 570 mutex_exit(&mg->mg_lock); 571 } 572 573 metaslab_group_t * 574 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd) 575 { 576 metaslab_group_t *mg; 577 578 mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP); 579 mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL); 580 avl_create(&mg->mg_metaslab_tree, metaslab_compare, 581 sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node)); 582 mg->mg_vd = vd; 583 mg->mg_class = mc; 584 mg->mg_activation_count = 0; 585 mg->mg_initialized = B_FALSE; 586 mg->mg_no_free_space = B_TRUE; 587 refcount_create_tracked(&mg->mg_alloc_queue_depth); 588 589 mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct, 590 minclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT); 591 592 return (mg); 593 } 594 595 void 596 metaslab_group_destroy(metaslab_group_t *mg) 597 { 598 ASSERT(mg->mg_prev == NULL); 599 ASSERT(mg->mg_next == NULL); 600 /* 601 * We may have gone below zero with the activation count 602 * either because we never activated in the first place or 603 * because we're done, and possibly removing the vdev. 
604 */ 605 ASSERT(mg->mg_activation_count <= 0); 606 607 taskq_destroy(mg->mg_taskq); 608 avl_destroy(&mg->mg_metaslab_tree); 609 mutex_destroy(&mg->mg_lock); 610 refcount_destroy(&mg->mg_alloc_queue_depth); 611 kmem_free(mg, sizeof (metaslab_group_t)); 612 } 613 614 void 615 metaslab_group_activate(metaslab_group_t *mg) 616 { 617 metaslab_class_t *mc = mg->mg_class; 618 metaslab_group_t *mgprev, *mgnext; 619 620 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER)); 621 622 ASSERT(mc->mc_rotor != mg); 623 ASSERT(mg->mg_prev == NULL); 624 ASSERT(mg->mg_next == NULL); 625 ASSERT(mg->mg_activation_count <= 0); 626 627 if (++mg->mg_activation_count <= 0) 628 return; 629 630 mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children); 631 metaslab_group_alloc_update(mg); 632 633 if ((mgprev = mc->mc_rotor) == NULL) { 634 mg->mg_prev = mg; 635 mg->mg_next = mg; 636 } else { 637 mgnext = mgprev->mg_next; 638 mg->mg_prev = mgprev; 639 mg->mg_next = mgnext; 640 mgprev->mg_next = mg; 641 mgnext->mg_prev = mg; 642 } 643 mc->mc_rotor = mg; 644 } 645 646 void 647 metaslab_group_passivate(metaslab_group_t *mg) 648 { 649 metaslab_class_t *mc = mg->mg_class; 650 metaslab_group_t *mgprev, *mgnext; 651 652 ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER)); 653 654 if (--mg->mg_activation_count != 0) { 655 ASSERT(mc->mc_rotor != mg); 656 ASSERT(mg->mg_prev == NULL); 657 ASSERT(mg->mg_next == NULL); 658 ASSERT(mg->mg_activation_count < 0); 659 return; 660 } 661 662 taskq_wait(mg->mg_taskq); 663 metaslab_group_alloc_update(mg); 664 665 mgprev = mg->mg_prev; 666 mgnext = mg->mg_next; 667 668 if (mg == mgnext) { 669 mc->mc_rotor = NULL; 670 } else { 671 mc->mc_rotor = mgnext; 672 mgprev->mg_next = mgnext; 673 mgnext->mg_prev = mgprev; 674 } 675 676 mg->mg_prev = NULL; 677 mg->mg_next = NULL; 678 } 679 680 boolean_t 681 metaslab_group_initialized(metaslab_group_t *mg) 682 { 683 vdev_t *vd = mg->mg_vd; 684 vdev_stat_t *vs = &vd->vdev_stat; 685 686 return (vs->vs_space != 0 && mg->mg_activation_count > 0); 687 } 688 689 uint64_t 690 metaslab_group_get_space(metaslab_group_t *mg) 691 { 692 return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count); 693 } 694 695 void 696 metaslab_group_histogram_verify(metaslab_group_t *mg) 697 { 698 uint64_t *mg_hist; 699 vdev_t *vd = mg->mg_vd; 700 uint64_t ashift = vd->vdev_ashift; 701 int i; 702 703 if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0) 704 return; 705 706 mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE, 707 KM_SLEEP); 708 709 ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=, 710 SPACE_MAP_HISTOGRAM_SIZE + ashift); 711 712 for (int m = 0; m < vd->vdev_ms_count; m++) { 713 metaslab_t *msp = vd->vdev_ms[m]; 714 715 if (msp->ms_sm == NULL) 716 continue; 717 718 for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) 719 mg_hist[i + ashift] += 720 msp->ms_sm->sm_phys->smp_histogram[i]; 721 } 722 723 for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++) 724 VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]); 725 726 kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE); 727 } 728 729 static void 730 metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp) 731 { 732 metaslab_class_t *mc = mg->mg_class; 733 uint64_t ashift = mg->mg_vd->vdev_ashift; 734 735 ASSERT(MUTEX_HELD(&msp->ms_lock)); 736 if (msp->ms_sm == NULL) 737 return; 738 739 mutex_enter(&mg->mg_lock); 740 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 741 mg->mg_histogram[i + ashift] += 742 msp->ms_sm->sm_phys->smp_histogram[i]; 743 mc->mc_histogram[i + 
ashift] += 744 msp->ms_sm->sm_phys->smp_histogram[i]; 745 } 746 mutex_exit(&mg->mg_lock); 747 } 748 749 void 750 metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp) 751 { 752 metaslab_class_t *mc = mg->mg_class; 753 uint64_t ashift = mg->mg_vd->vdev_ashift; 754 755 ASSERT(MUTEX_HELD(&msp->ms_lock)); 756 if (msp->ms_sm == NULL) 757 return; 758 759 mutex_enter(&mg->mg_lock); 760 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 761 ASSERT3U(mg->mg_histogram[i + ashift], >=, 762 msp->ms_sm->sm_phys->smp_histogram[i]); 763 ASSERT3U(mc->mc_histogram[i + ashift], >=, 764 msp->ms_sm->sm_phys->smp_histogram[i]); 765 766 mg->mg_histogram[i + ashift] -= 767 msp->ms_sm->sm_phys->smp_histogram[i]; 768 mc->mc_histogram[i + ashift] -= 769 msp->ms_sm->sm_phys->smp_histogram[i]; 770 } 771 mutex_exit(&mg->mg_lock); 772 } 773 774 static void 775 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp) 776 { 777 ASSERT(msp->ms_group == NULL); 778 mutex_enter(&mg->mg_lock); 779 msp->ms_group = mg; 780 msp->ms_weight = 0; 781 avl_add(&mg->mg_metaslab_tree, msp); 782 mutex_exit(&mg->mg_lock); 783 784 mutex_enter(&msp->ms_lock); 785 metaslab_group_histogram_add(mg, msp); 786 mutex_exit(&msp->ms_lock); 787 } 788 789 static void 790 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp) 791 { 792 mutex_enter(&msp->ms_lock); 793 metaslab_group_histogram_remove(mg, msp); 794 mutex_exit(&msp->ms_lock); 795 796 mutex_enter(&mg->mg_lock); 797 ASSERT(msp->ms_group == mg); 798 avl_remove(&mg->mg_metaslab_tree, msp); 799 msp->ms_group = NULL; 800 mutex_exit(&mg->mg_lock); 801 } 802 803 static void 804 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight) 805 { 806 /* 807 * Although in principle the weight can be any value, in 808 * practice we do not use values in the range [1, 511]. 809 */ 810 ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0); 811 ASSERT(MUTEX_HELD(&msp->ms_lock)); 812 813 mutex_enter(&mg->mg_lock); 814 ASSERT(msp->ms_group == mg); 815 avl_remove(&mg->mg_metaslab_tree, msp); 816 msp->ms_weight = weight; 817 avl_add(&mg->mg_metaslab_tree, msp); 818 mutex_exit(&mg->mg_lock); 819 } 820 821 /* 822 * Calculate the fragmentation for a given metaslab group. We can use 823 * a simple average here since all metaslabs within the group must have 824 * the same size. The return value will be a value between 0 and 100 825 * (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslab in this 826 * group have a fragmentation metric. 827 */ 828 uint64_t 829 metaslab_group_fragmentation(metaslab_group_t *mg) 830 { 831 vdev_t *vd = mg->mg_vd; 832 uint64_t fragmentation = 0; 833 uint64_t valid_ms = 0; 834 835 for (int m = 0; m < vd->vdev_ms_count; m++) { 836 metaslab_t *msp = vd->vdev_ms[m]; 837 838 if (msp->ms_fragmentation == ZFS_FRAG_INVALID) 839 continue; 840 841 valid_ms++; 842 fragmentation += msp->ms_fragmentation; 843 } 844 845 if (valid_ms <= vd->vdev_ms_count / 2) 846 return (ZFS_FRAG_INVALID); 847 848 fragmentation /= valid_ms; 849 ASSERT3U(fragmentation, <=, 100); 850 return (fragmentation); 851 } 852 853 /* 854 * Determine if a given metaslab group should skip allocations. A metaslab 855 * group should avoid allocations if its free capacity is less than the 856 * zfs_mg_noalloc_threshold or its fragmentation metric is greater than 857 * zfs_mg_fragmentation_threshold and there is at least one metaslab group 858 * that can still handle allocations. 
If the allocation throttle is enabled 859 * then we skip allocations to devices that have reached their maximum 860 * allocation queue depth unless the selected metaslab group is the only 861 * eligible group remaining. 862 */ 863 static boolean_t 864 metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor, 865 uint64_t psize) 866 { 867 spa_t *spa = mg->mg_vd->vdev_spa; 868 metaslab_class_t *mc = mg->mg_class; 869 870 /* 871 * We can only consider skipping this metaslab group if it's 872 * in the normal metaslab class and there are other metaslab 873 * groups to select from. Otherwise, we always consider it eligible 874 * for allocations. 875 */ 876 if (mc != spa_normal_class(spa) || mc->mc_groups <= 1) 877 return (B_TRUE); 878 879 /* 880 * If the metaslab group's mg_allocatable flag is set (see comments 881 * in metaslab_group_alloc_update() for more information) and 882 * the allocation throttle is disabled then allow allocations to this 883 * device. However, if the allocation throttle is enabled then 884 * check if we have reached our allocation limit (mg_alloc_queue_depth) 885 * to determine if we should allow allocations to this metaslab group. 886 * If all metaslab groups are no longer considered allocatable 887 * (mc_alloc_groups == 0) or we're trying to allocate the smallest 888 * gang block size then we allow allocations on this metaslab group 889 * regardless of the mg_allocatable or throttle settings. 890 */ 891 if (mg->mg_allocatable) { 892 metaslab_group_t *mgp; 893 int64_t qdepth; 894 uint64_t qmax = mg->mg_max_alloc_queue_depth; 895 896 if (!mc->mc_alloc_throttle_enabled) 897 return (B_TRUE); 898 899 /* 900 * If this metaslab group does not have any free space, then 901 * there is no point in looking further. 902 */ 903 if (mg->mg_no_free_space) 904 return (B_FALSE); 905 906 qdepth = refcount_count(&mg->mg_alloc_queue_depth); 907 908 /* 909 * If this metaslab group is below its qmax or it's 910 * the only allocatable metasable group, then attempt 911 * to allocate from it. 912 */ 913 if (qdepth < qmax || mc->mc_alloc_groups == 1) 914 return (B_TRUE); 915 ASSERT3U(mc->mc_alloc_groups, >, 1); 916 917 /* 918 * Since this metaslab group is at or over its qmax, we 919 * need to determine if there are metaslab groups after this 920 * one that might be able to handle this allocation. This is 921 * racy since we can't hold the locks for all metaslab 922 * groups at the same time when we make this check. 923 */ 924 for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) { 925 qmax = mgp->mg_max_alloc_queue_depth; 926 927 qdepth = refcount_count(&mgp->mg_alloc_queue_depth); 928 929 /* 930 * If there is another metaslab group that 931 * might be able to handle the allocation, then 932 * we return false so that we skip this group. 933 */ 934 if (qdepth < qmax && !mgp->mg_no_free_space) 935 return (B_FALSE); 936 } 937 938 /* 939 * We didn't find another group to handle the allocation 940 * so we can't skip this metaslab group even though 941 * we are at or over our qmax. 942 */ 943 return (B_TRUE); 944 945 } else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) { 946 return (B_TRUE); 947 } 948 return (B_FALSE); 949 } 950 951 /* 952 * ========================================================================== 953 * Range tree callbacks 954 * ========================================================================== 955 */ 956 957 /* 958 * Comparison function for the private size-ordered tree. Tree is sorted 959 * by size, larger sizes at the end of the tree. 
960 */ 961 static int 962 metaslab_rangesize_compare(const void *x1, const void *x2) 963 { 964 const range_seg_t *r1 = x1; 965 const range_seg_t *r2 = x2; 966 uint64_t rs_size1 = r1->rs_end - r1->rs_start; 967 uint64_t rs_size2 = r2->rs_end - r2->rs_start; 968 969 if (rs_size1 < rs_size2) 970 return (-1); 971 if (rs_size1 > rs_size2) 972 return (1); 973 974 if (r1->rs_start < r2->rs_start) 975 return (-1); 976 977 if (r1->rs_start > r2->rs_start) 978 return (1); 979 980 return (0); 981 } 982 983 /* 984 * Create any block allocator specific components. The current allocators 985 * rely on using both a size-ordered range_tree_t and an array of uint64_t's. 986 */ 987 static void 988 metaslab_rt_create(range_tree_t *rt, void *arg) 989 { 990 metaslab_t *msp = arg; 991 992 ASSERT3P(rt->rt_arg, ==, msp); 993 ASSERT(msp->ms_tree == NULL); 994 995 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, 996 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); 997 } 998 999 /* 1000 * Destroy the block allocator specific components. 1001 */ 1002 static void 1003 metaslab_rt_destroy(range_tree_t *rt, void *arg) 1004 { 1005 metaslab_t *msp = arg; 1006 1007 ASSERT3P(rt->rt_arg, ==, msp); 1008 ASSERT3P(msp->ms_tree, ==, rt); 1009 ASSERT0(avl_numnodes(&msp->ms_size_tree)); 1010 1011 avl_destroy(&msp->ms_size_tree); 1012 } 1013 1014 static void 1015 metaslab_rt_add(range_tree_t *rt, range_seg_t *rs, void *arg) 1016 { 1017 metaslab_t *msp = arg; 1018 1019 ASSERT3P(rt->rt_arg, ==, msp); 1020 ASSERT3P(msp->ms_tree, ==, rt); 1021 VERIFY(!msp->ms_condensing); 1022 avl_add(&msp->ms_size_tree, rs); 1023 } 1024 1025 static void 1026 metaslab_rt_remove(range_tree_t *rt, range_seg_t *rs, void *arg) 1027 { 1028 metaslab_t *msp = arg; 1029 1030 ASSERT3P(rt->rt_arg, ==, msp); 1031 ASSERT3P(msp->ms_tree, ==, rt); 1032 VERIFY(!msp->ms_condensing); 1033 avl_remove(&msp->ms_size_tree, rs); 1034 } 1035 1036 static void 1037 metaslab_rt_vacate(range_tree_t *rt, void *arg) 1038 { 1039 metaslab_t *msp = arg; 1040 1041 ASSERT3P(rt->rt_arg, ==, msp); 1042 ASSERT3P(msp->ms_tree, ==, rt); 1043 1044 /* 1045 * Normally one would walk the tree freeing nodes along the way. 1046 * Since the nodes are shared with the range trees we can avoid 1047 * walking all nodes and just reinitialize the avl tree. The nodes 1048 * will be freed by the range tree, so we don't want to free them here. 1049 */ 1050 avl_create(&msp->ms_size_tree, metaslab_rangesize_compare, 1051 sizeof (range_seg_t), offsetof(range_seg_t, rs_pp_node)); 1052 } 1053 1054 static range_tree_ops_t metaslab_rt_ops = { 1055 metaslab_rt_create, 1056 metaslab_rt_destroy, 1057 metaslab_rt_add, 1058 metaslab_rt_remove, 1059 metaslab_rt_vacate 1060 }; 1061 1062 /* 1063 * ========================================================================== 1064 * Common allocator routines 1065 * ========================================================================== 1066 */ 1067 1068 /* 1069 * Return the maximum contiguous segment within the metaslab. 
 */
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
	avl_tree_t *t = &msp->ms_size_tree;
	range_seg_t *rs;

	if (t == NULL || (rs = avl_last(t)) == NULL)
		return (0ULL);

	return (rs->rs_end - rs->rs_start);
}

static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
	range_seg_t *rs, rsearch;
	avl_index_t where;

	rsearch.rs_start = start;
	rsearch.rs_end = start + size;

	rs = avl_find(t, &rsearch, &where);
	if (rs == NULL) {
		rs = avl_nearest(t, where, AVL_AFTER);
	}

	return (rs);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	range_seg_t *rs = metaslab_block_find(t, *cursor, size);

	while (rs != NULL) {
		uint64_t offset = P2ROUNDUP(rs->rs_start, align);

		if (offset + size <= rs->rs_end) {
			*cursor = offset + size;
			return (offset);
		}
		rs = AVL_NEXT(t, rs);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * are not present in the same region.
	 */
	uint64_t align = size & -size;
	uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
	avl_tree_t *t = &msp->ms_tree->rt_root;

	return (metaslab_block_picker(t, cursor, size, align));
}

static metaslab_ops_t metaslab_ff_ops = {
	metaslab_ff_alloc
};

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first fit allocation scheme until space gets low and then
 * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
	/*
	 * Find the largest power of 2 block size that evenly divides the
	 * requested size. This is used to try to allocate blocks with similar
	 * alignment from the same area of the metaslab (i.e. same cursor
	 * bucket) but it does not guarantee that other allocation sizes
	 * are not present in the same region.
1175 */ 1176 uint64_t align = size & -size; 1177 uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1]; 1178 range_tree_t *rt = msp->ms_tree; 1179 avl_tree_t *t = &rt->rt_root; 1180 uint64_t max_size = metaslab_block_maxsize(msp); 1181 int free_pct = range_tree_space(rt) * 100 / msp->ms_size; 1182 1183 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1184 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree)); 1185 1186 if (max_size < size) 1187 return (-1ULL); 1188 1189 /* 1190 * If we're running low on space switch to using the size 1191 * sorted AVL tree (best-fit). 1192 */ 1193 if (max_size < metaslab_df_alloc_threshold || 1194 free_pct < metaslab_df_free_pct) { 1195 t = &msp->ms_size_tree; 1196 *cursor = 0; 1197 } 1198 1199 return (metaslab_block_picker(t, cursor, size, 1ULL)); 1200 } 1201 1202 static metaslab_ops_t metaslab_df_ops = { 1203 metaslab_df_alloc 1204 }; 1205 1206 /* 1207 * ========================================================================== 1208 * Cursor fit block allocator - 1209 * Select the largest region in the metaslab, set the cursor to the beginning 1210 * of the range and the cursor_end to the end of the range. As allocations 1211 * are made advance the cursor. Continue allocating from the cursor until 1212 * the range is exhausted and then find a new range. 1213 * ========================================================================== 1214 */ 1215 static uint64_t 1216 metaslab_cf_alloc(metaslab_t *msp, uint64_t size) 1217 { 1218 range_tree_t *rt = msp->ms_tree; 1219 avl_tree_t *t = &msp->ms_size_tree; 1220 uint64_t *cursor = &msp->ms_lbas[0]; 1221 uint64_t *cursor_end = &msp->ms_lbas[1]; 1222 uint64_t offset = 0; 1223 1224 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1225 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root)); 1226 1227 ASSERT3U(*cursor_end, >=, *cursor); 1228 1229 if ((*cursor + size) > *cursor_end) { 1230 range_seg_t *rs; 1231 1232 rs = avl_last(&msp->ms_size_tree); 1233 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) 1234 return (-1ULL); 1235 1236 *cursor = rs->rs_start; 1237 *cursor_end = rs->rs_end; 1238 } 1239 1240 offset = *cursor; 1241 *cursor += size; 1242 1243 return (offset); 1244 } 1245 1246 static metaslab_ops_t metaslab_cf_ops = { 1247 metaslab_cf_alloc 1248 }; 1249 1250 /* 1251 * ========================================================================== 1252 * New dynamic fit allocator - 1253 * Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift 1254 * contiguous blocks. If no region is found then just use the largest segment 1255 * that remains. 1256 * ========================================================================== 1257 */ 1258 1259 /* 1260 * Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift) 1261 * to request from the allocator. 
1262 */ 1263 uint64_t metaslab_ndf_clump_shift = 4; 1264 1265 static uint64_t 1266 metaslab_ndf_alloc(metaslab_t *msp, uint64_t size) 1267 { 1268 avl_tree_t *t = &msp->ms_tree->rt_root; 1269 avl_index_t where; 1270 range_seg_t *rs, rsearch; 1271 uint64_t hbit = highbit64(size); 1272 uint64_t *cursor = &msp->ms_lbas[hbit - 1]; 1273 uint64_t max_size = metaslab_block_maxsize(msp); 1274 1275 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1276 ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree)); 1277 1278 if (max_size < size) 1279 return (-1ULL); 1280 1281 rsearch.rs_start = *cursor; 1282 rsearch.rs_end = *cursor + size; 1283 1284 rs = avl_find(t, &rsearch, &where); 1285 if (rs == NULL || (rs->rs_end - rs->rs_start) < size) { 1286 t = &msp->ms_size_tree; 1287 1288 rsearch.rs_start = 0; 1289 rsearch.rs_end = MIN(max_size, 1290 1ULL << (hbit + metaslab_ndf_clump_shift)); 1291 rs = avl_find(t, &rsearch, &where); 1292 if (rs == NULL) 1293 rs = avl_nearest(t, where, AVL_AFTER); 1294 ASSERT(rs != NULL); 1295 } 1296 1297 if ((rs->rs_end - rs->rs_start) >= size) { 1298 *cursor = rs->rs_start + size; 1299 return (rs->rs_start); 1300 } 1301 return (-1ULL); 1302 } 1303 1304 static metaslab_ops_t metaslab_ndf_ops = { 1305 metaslab_ndf_alloc 1306 }; 1307 1308 metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops; 1309 1310 /* 1311 * ========================================================================== 1312 * Metaslabs 1313 * ========================================================================== 1314 */ 1315 1316 /* 1317 * Wait for any in-progress metaslab loads to complete. 1318 */ 1319 void 1320 metaslab_load_wait(metaslab_t *msp) 1321 { 1322 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1323 1324 while (msp->ms_loading) { 1325 ASSERT(!msp->ms_loaded); 1326 cv_wait(&msp->ms_load_cv, &msp->ms_lock); 1327 } 1328 } 1329 1330 int 1331 metaslab_load(metaslab_t *msp) 1332 { 1333 int error = 0; 1334 boolean_t success = B_FALSE; 1335 1336 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1337 ASSERT(!msp->ms_loaded); 1338 ASSERT(!msp->ms_loading); 1339 1340 msp->ms_loading = B_TRUE; 1341 1342 /* 1343 * If the space map has not been allocated yet, then treat 1344 * all the space in the metaslab as free and add it to the 1345 * ms_tree. 
	 */
	if (msp->ms_sm != NULL)
		error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
	else
		range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);

	success = (error == 0);
	msp->ms_loading = B_FALSE;

	if (success) {
		ASSERT3P(msp->ms_group, !=, NULL);
		msp->ms_loaded = B_TRUE;

		for (int t = 0; t < TXG_DEFER_SIZE; t++) {
			range_tree_walk(msp->ms_defertree[t],
			    range_tree_remove, msp->ms_tree);
		}
		msp->ms_max_size = metaslab_block_maxsize(msp);
	}
	cv_broadcast(&msp->ms_load_cv);
	return (error);
}

void
metaslab_unload(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));
	range_tree_vacate(msp->ms_tree, NULL, NULL);
	msp->ms_loaded = B_FALSE;
	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
	msp->ms_max_size = 0;
}

int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
    metaslab_t **msp)
{
	vdev_t *vd = mg->mg_vd;
	objset_t *mos = vd->vdev_spa->spa_meta_objset;
	metaslab_t *ms;
	int error;

	ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
	ms->ms_id = id;
	ms->ms_start = id << vd->vdev_ms_shift;
	ms->ms_size = 1ULL << vd->vdev_ms_shift;

	/*
	 * We only open space map objects that already exist. All others
	 * will be opened when we finally allocate an object for it.
	 */
	if (object != 0) {
		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);

		if (error != 0) {
			kmem_free(ms, sizeof (metaslab_t));
			return (error);
		}

		ASSERT(ms->ms_sm != NULL);
	}

	/*
	 * We create the main range tree here, but we don't create the
	 * alloctree and freetree until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
	metaslab_group_add(mg, ms);

	metaslab_set_fragmentation(ms);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 * The metaslab's weight will also be initialized when we sync
	 * out this txg. This ensures that we don't attempt to allocate
	 * from it before we have initialized it completely.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(ms, 0);

	/*
	 * If metaslab_debug_load is set and we're initializing a metaslab
	 * that has an allocated space map object then load its space
	 * map so that we can verify frees.
	 */
	if (metaslab_debug_load && ms->ms_sm != NULL) {
		mutex_enter(&ms->ms_lock);
		VERIFY0(metaslab_load(ms));
		mutex_exit(&ms->ms_lock);
	}

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, ms, txg);
	}

	*msp = ms;

	return (0);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);
	VERIFY(msp->ms_group == NULL);
	vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
	    0, -msp->ms_size);
	space_map_close(msp->ms_sm);

	metaslab_unload(msp);
	range_tree_destroy(msp->ms_tree);

	for (int t = 0; t < TXG_SIZE; t++) {
		range_tree_destroy(msp->ms_alloctree[t]);
		range_tree_destroy(msp->ms_freetree[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++) {
		range_tree_destroy(msp->ms_defertree[t]);
	}

	ASSERT0(msp->ms_deferspace);

	mutex_exit(&msp->ms_lock);
	cv_destroy(&msp->ms_load_cv);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	FRAGMENTATION_TABLE_SIZE	17

/*
 * This table defines a segment size based fragmentation metric that will
 * allow each metaslab to derive its own fragmentation value. This is done
 * by calculating the space in each bucket of the spacemap histogram and
 * multiplying that by the fragmentation metric in this table. Doing
 * this for all buckets and dividing it by the total amount of free
 * space in this metaslab (i.e. the total free space in all buckets) gives
 * us the fragmentation metric. This means that a high fragmentation metric
 * equates to most of the free space being comprised of small segments.
 * Conversely, if the metric is low, then most of the free space is in
 * large segments. A 10% change in fragmentation equates to approximately
 * double the number of segments.
 *
 * This table defines 0% fragmented space using 16MB segments. Testing has
 * shown that segments that are greater than or equal to 16MB do not suffer
 * from drastic performance problems. Using this value, we derive the rest
 * of the table. Since the fragmentation value is never stored on disk, it
 * is possible to change these calculations in the future.
 */
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
	100,	/* 512B	*/
	100,	/* 1K	*/
	98,	/* 2K	*/
	95,	/* 4K	*/
	90,	/* 8K	*/
	80,	/* 16K	*/
	70,	/* 32K	*/
	60,	/* 64K	*/
	50,	/* 128K	*/
	40,	/* 256K	*/
	30,	/* 512K	*/
	20,	/* 1M	*/
	15,	/* 2M	*/
	10,	/* 4M	*/
	5,	/* 8M	*/
	0	/* 16M	*/
};

/*
 * Calculate the metaslab's fragmentation metric. A return value
 * of ZFS_FRAG_INVALID means that the metaslab has not been upgraded and does
 * not support this metric. Otherwise, the return value should be in the
 * range [0, 100].
1535 */ 1536 static void 1537 metaslab_set_fragmentation(metaslab_t *msp) 1538 { 1539 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1540 uint64_t fragmentation = 0; 1541 uint64_t total = 0; 1542 boolean_t feature_enabled = spa_feature_is_enabled(spa, 1543 SPA_FEATURE_SPACEMAP_HISTOGRAM); 1544 1545 if (!feature_enabled) { 1546 msp->ms_fragmentation = ZFS_FRAG_INVALID; 1547 return; 1548 } 1549 1550 /* 1551 * A null space map means that the entire metaslab is free 1552 * and thus is not fragmented. 1553 */ 1554 if (msp->ms_sm == NULL) { 1555 msp->ms_fragmentation = 0; 1556 return; 1557 } 1558 1559 /* 1560 * If this metaslab's space map has not been upgraded, flag it 1561 * so that we upgrade next time we encounter it. 1562 */ 1563 if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) { 1564 uint64_t txg = spa_syncing_txg(spa); 1565 vdev_t *vd = msp->ms_group->mg_vd; 1566 1567 if (spa_writeable(spa)) { 1568 msp->ms_condense_wanted = B_TRUE; 1569 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 1570 spa_dbgmsg(spa, "txg %llu, requesting force condense: " 1571 "msp %p, vd %p", txg, msp, vd); 1572 } 1573 msp->ms_fragmentation = ZFS_FRAG_INVALID; 1574 return; 1575 } 1576 1577 for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) { 1578 uint64_t space = 0; 1579 uint8_t shift = msp->ms_sm->sm_shift; 1580 1581 int idx = MIN(shift - SPA_MINBLOCKSHIFT + i, 1582 FRAGMENTATION_TABLE_SIZE - 1); 1583 1584 if (msp->ms_sm->sm_phys->smp_histogram[i] == 0) 1585 continue; 1586 1587 space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift); 1588 total += space; 1589 1590 ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE); 1591 fragmentation += space * zfs_frag_table[idx]; 1592 } 1593 1594 if (total > 0) 1595 fragmentation /= total; 1596 ASSERT3U(fragmentation, <=, 100); 1597 1598 msp->ms_fragmentation = fragmentation; 1599 } 1600 1601 /* 1602 * Compute a weight -- a selection preference value -- for the given metaslab. 1603 * This is based on the amount of free space, the level of fragmentation, 1604 * the LBA range, and whether the metaslab is loaded. 1605 */ 1606 static uint64_t 1607 metaslab_space_weight(metaslab_t *msp) 1608 { 1609 metaslab_group_t *mg = msp->ms_group; 1610 vdev_t *vd = mg->mg_vd; 1611 uint64_t weight, space; 1612 1613 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1614 ASSERT(!vd->vdev_removing); 1615 1616 /* 1617 * The baseline weight is the metaslab's free space. 1618 */ 1619 space = msp->ms_size - space_map_allocated(msp->ms_sm); 1620 1621 if (metaslab_fragmentation_factor_enabled && 1622 msp->ms_fragmentation != ZFS_FRAG_INVALID) { 1623 /* 1624 * Use the fragmentation information to inversely scale 1625 * down the baseline weight. We need to ensure that we 1626 * don't exclude this metaslab completely when it's 100% 1627 * fragmented. To avoid this we reduce the fragmented value 1628 * by 1. 1629 */ 1630 space = (space * (100 - (msp->ms_fragmentation - 1))) / 100; 1631 1632 /* 1633 * If space < SPA_MINBLOCKSIZE, then we will not allocate from 1634 * this metaslab again. The fragmentation metric may have 1635 * decreased the space to something smaller than 1636 * SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE 1637 * so that we can consume any remaining space. 1638 */ 1639 if (space > 0 && space < SPA_MINBLOCKSIZE) 1640 space = SPA_MINBLOCKSIZE; 1641 } 1642 weight = space; 1643 1644 /* 1645 * Modern disks have uniform bit density and constant angular velocity. 
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	if (metaslab_lba_weighting_enabled) {
		weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
		ASSERT(weight >= space && weight <= 2 * space);
	}

	/*
	 * If this metaslab is one we're actively using, adjust its
	 * weight to make it preferable to any inactive metaslab so
	 * we'll polish it off. If the fragmentation on this metaslab
	 * has exceeded our threshold, then don't mark it active.
	 */
	if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
	    msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}

	WEIGHT_SET_SPACEBASED(weight);
	return (weight);
}

/*
 * Return the weight of the specified metaslab, according to the segment-based
 * weighting algorithm. The metaslab must be loaded. This function can
 * be called within a sync pass since it relies only on the metaslab's
 * range tree which is always accurate when the metaslab is loaded.
 */
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
	uint64_t weight = 0;
	uint32_t segments = 0;

	ASSERT(msp->ms_loaded);

	for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
	    i--) {
		uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
		int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;

		segments <<= 1;
		segments += msp->ms_tree->rt_histogram[i];

		/*
		 * The range tree provides more precision than the space map
		 * and must be downgraded so that all values fit within the
		 * space map's histogram. This allows us to compare loaded
		 * vs. unloaded metaslabs to determine which metaslab is
		 * considered "best".
		 */
		if (i > max_idx)
			continue;

		if (segments != 0) {
			WEIGHT_SET_COUNT(weight, segments);
			WEIGHT_SET_INDEX(weight, i);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}

/*
 * Calculate the weight based on the on-disk histogram. This should only
 * be called after a sync pass has completely finished since the on-disk
 * information is updated in metaslab_sync().
 */
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
	uint64_t weight = 0;

	for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
		if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
			WEIGHT_SET_COUNT(weight,
			    msp->ms_sm->sm_phys->smp_histogram[i]);
			WEIGHT_SET_INDEX(weight, i +
			    msp->ms_sm->sm_shift);
			WEIGHT_SET_ACTIVE(weight, 0);
			break;
		}
	}
	return (weight);
}

/*
 * Compute a segment-based weight for the specified metaslab. The weight
 * is determined by the highest bucket in the histogram. The information
 * for the highest bucket is encoded into the weight value.
1742 */ 1743 static uint64_t 1744 metaslab_segment_weight(metaslab_t *msp) 1745 { 1746 metaslab_group_t *mg = msp->ms_group; 1747 uint64_t weight = 0; 1748 uint8_t shift = mg->mg_vd->vdev_ashift; 1749 1750 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1751 1752 /* 1753 * The metaslab is completely free. 1754 */ 1755 if (space_map_allocated(msp->ms_sm) == 0) { 1756 int idx = highbit64(msp->ms_size) - 1; 1757 int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1; 1758 1759 if (idx < max_idx) { 1760 WEIGHT_SET_COUNT(weight, 1ULL); 1761 WEIGHT_SET_INDEX(weight, idx); 1762 } else { 1763 WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx)); 1764 WEIGHT_SET_INDEX(weight, max_idx); 1765 } 1766 WEIGHT_SET_ACTIVE(weight, 0); 1767 ASSERT(!WEIGHT_IS_SPACEBASED(weight)); 1768 1769 return (weight); 1770 } 1771 1772 ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t)); 1773 1774 /* 1775 * If the metaslab is fully allocated then just make the weight 0. 1776 */ 1777 if (space_map_allocated(msp->ms_sm) == msp->ms_size) 1778 return (0); 1779 /* 1780 * If the metaslab is already loaded, then use the range tree to 1781 * determine the weight. Otherwise, we rely on the space map information 1782 * to generate the weight. 1783 */ 1784 if (msp->ms_loaded) { 1785 weight = metaslab_weight_from_range_tree(msp); 1786 } else { 1787 weight = metaslab_weight_from_spacemap(msp); 1788 } 1789 1790 /* 1791 * If the metaslab was active the last time we calculated its weight 1792 * then keep it active. We want to consume the entire region that 1793 * is associated with this weight. 1794 */ 1795 if (msp->ms_activation_weight != 0 && weight != 0) 1796 WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight)); 1797 return (weight); 1798 } 1799 1800 /* 1801 * Determine if we should attempt to allocate from this metaslab. If the 1802 * metaslab has a maximum size then we can quickly determine if the desired 1803 * allocation size can be satisfied. Otherwise, if we're using segment-based 1804 * weighting then we can determine the maximum allocation that this metaslab 1805 * can accommodate based on the index encoded in the weight. If we're using 1806 * space-based weights then rely on the entire weight (excluding the weight 1807 * type bit). 1808 */ 1809 boolean_t 1810 metaslab_should_allocate(metaslab_t *msp, uint64_t asize) 1811 { 1812 boolean_t should_allocate; 1813 1814 if (msp->ms_max_size != 0) 1815 return (msp->ms_max_size >= asize); 1816 1817 if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 1818 /* 1819 * The metaslab segment weight indicates segments in the 1820 * range [2^i, 2^(i+1)), where i is the index in the weight. 1821 * Since the asize might be in the middle of the range, we 1822 * should attempt the allocation if asize < 2^(i+1). 1823 */ 1824 should_allocate = (asize < 1825 1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1)); 1826 } else { 1827 should_allocate = (asize <= 1828 (msp->ms_weight & ~METASLAB_WEIGHT_TYPE)); 1829 } 1830 return (should_allocate); 1831 } 1832 1833 static uint64_t 1834 metaslab_weight(metaslab_t *msp) 1835 { 1836 vdev_t *vd = msp->ms_group->mg_vd; 1837 spa_t *spa = vd->vdev_spa; 1838 uint64_t weight; 1839 1840 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1841 1842 /* 1843 * This vdev is in the process of being removed so there is nothing 1844 * for us to do here. 
1845 */ 1846 if (vd->vdev_removing) { 1847 ASSERT0(space_map_allocated(msp->ms_sm)); 1848 ASSERT0(vd->vdev_ms_shift); 1849 return (0); 1850 } 1851 1852 metaslab_set_fragmentation(msp); 1853 1854 /* 1855 * Update the maximum size if the metaslab is loaded. This will 1856 * ensure that we get an accurate maximum size if newly freed space 1857 * has been added back into the free tree. 1858 */ 1859 if (msp->ms_loaded) 1860 msp->ms_max_size = metaslab_block_maxsize(msp); 1861 1862 /* 1863 * Segment-based weighting requires space map histogram support. 1864 */ 1865 if (zfs_metaslab_segment_weight_enabled && 1866 spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) && 1867 (msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size == 1868 sizeof (space_map_phys_t))) { 1869 weight = metaslab_segment_weight(msp); 1870 } else { 1871 weight = metaslab_space_weight(msp); 1872 } 1873 return (weight); 1874 } 1875 1876 static int 1877 metaslab_activate(metaslab_t *msp, uint64_t activation_weight) 1878 { 1879 ASSERT(MUTEX_HELD(&msp->ms_lock)); 1880 1881 if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) { 1882 metaslab_load_wait(msp); 1883 if (!msp->ms_loaded) { 1884 int error = metaslab_load(msp); 1885 if (error) { 1886 metaslab_group_sort(msp->ms_group, msp, 0); 1887 return (error); 1888 } 1889 } 1890 1891 msp->ms_activation_weight = msp->ms_weight; 1892 metaslab_group_sort(msp->ms_group, msp, 1893 msp->ms_weight | activation_weight); 1894 } 1895 ASSERT(msp->ms_loaded); 1896 ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK); 1897 1898 return (0); 1899 } 1900 1901 static void 1902 metaslab_passivate(metaslab_t *msp, uint64_t weight) 1903 { 1904 uint64_t size = weight & ~METASLAB_WEIGHT_TYPE; 1905 1906 /* 1907 * If size < SPA_MINBLOCKSIZE, then we will not allocate from 1908 * this metaslab again. In that case, it had better be empty, 1909 * or we would be leaving space on the table. 1910 */ 1911 ASSERT(size >= SPA_MINBLOCKSIZE || 1912 range_tree_space(msp->ms_tree) == 0); 1913 ASSERT0(weight & METASLAB_ACTIVE_MASK); 1914 1915 msp->ms_activation_weight = 0; 1916 metaslab_group_sort(msp->ms_group, msp, weight); 1917 ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); 1918 } 1919 1920 /* 1921 * Segment-based metaslabs are activated once and remain active until 1922 * we either fail an allocation attempt (similar to space-based metaslabs) 1923 * or have exhausted the free space in zfs_metaslab_switch_threshold 1924 * buckets since the metaslab was activated. This function checks to see 1925 * if we've exhausted the zfs_metaslab_switch_threshold buckets in the 1926 * metaslab and passivates it proactively. This will allow us to select a 1927 * metaslab with a larger contiguous region if any remain within this 1928 * metaslab group. If we're in sync pass > 1, then we continue using this 1929 * metaslab so that we don't dirty more blocks and cause more sync passes. 1930 */ 1931 void 1932 metaslab_segment_may_passivate(metaslab_t *msp) 1933 { 1934 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1935 1936 if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1) 1937 return; 1938 1939 /* 1940 * Since we are in the middle of a sync pass, the most accurate 1941 * information that is accessible to us is the in-core range tree 1942 * histogram; calculate the new weight based on that information.
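* For example, if this metaslab was activated with a weight at bucket index 16 and zfs_metaslab_switch_threshold is 2, we passivate it once the recomputed weight's index drops to 14 or below.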
1943 */ 1944 uint64_t weight = metaslab_weight_from_range_tree(msp); 1945 int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight); 1946 int current_idx = WEIGHT_GET_INDEX(weight); 1947 1948 if (current_idx <= activation_idx - zfs_metaslab_switch_threshold) 1949 metaslab_passivate(msp, weight); 1950 } 1951 1952 static void 1953 metaslab_preload(void *arg) 1954 { 1955 metaslab_t *msp = arg; 1956 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 1957 1958 ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock)); 1959 1960 mutex_enter(&msp->ms_lock); 1961 metaslab_load_wait(msp); 1962 if (!msp->ms_loaded) 1963 (void) metaslab_load(msp); 1964 msp->ms_selected_txg = spa_syncing_txg(spa); 1965 mutex_exit(&msp->ms_lock); 1966 } 1967 1968 static void 1969 metaslab_group_preload(metaslab_group_t *mg) 1970 { 1971 spa_t *spa = mg->mg_vd->vdev_spa; 1972 metaslab_t *msp; 1973 avl_tree_t *t = &mg->mg_metaslab_tree; 1974 int m = 0; 1975 1976 if (spa_shutting_down(spa) || !metaslab_preload_enabled) { 1977 taskq_wait(mg->mg_taskq); 1978 return; 1979 } 1980 1981 mutex_enter(&mg->mg_lock); 1982 /* 1983 * Load the next potential metaslabs 1984 */ 1985 for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) { 1986 /* 1987 * We preload only the maximum number of metaslabs specified 1988 * by metaslab_preload_limit. If a metaslab is being forced 1989 * to condense then we preload it too. This will ensure 1990 * that force condensing happens in the next txg. 1991 */ 1992 if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) { 1993 continue; 1994 } 1995 1996 VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload, 1997 msp, TQ_SLEEP) != NULL); 1998 } 1999 mutex_exit(&mg->mg_lock); 2000 } 2001 2002 /* 2003 * Determine if the space map's on-disk footprint is past our tolerance 2004 * for inefficiency. We would like to use the following criteria to make 2005 * our decision: 2006 * 2007 * 1. The size of the space map object should not dramatically increase as a 2008 * result of writing out the free space range tree. 2009 * 2010 * 2. The on-disk space map representation should be at least zfs_condense_pct/100 2011 * times the size of the free space range tree representation 2012 * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimum on-disk = 1.1MB). 2013 * 2014 * 3. The on-disk size of the space map should actually decrease. 2015 * 2016 * Checking the first condition is tricky since we don't want to walk 2017 * the entire AVL tree calculating the estimated on-disk size. Instead we 2018 * use the size-ordered range tree in the metaslab and calculate the 2019 * size required to write out the largest segment in our free tree. If the 2020 * size required to represent that segment on disk is larger than the space 2021 * map object then we avoid condensing this map. 2022 * 2023 * To determine the second criterion we use a best-case estimate and assume 2024 * each segment can be represented on-disk as a single 64-bit entry. We refer 2025 * to this best-case estimate as the space map's minimal form. 2026 * 2027 * Unfortunately, we cannot compute the on-disk size of the space map in this 2028 * context because we cannot accurately compute the effects of compression, etc. 2029 * Instead, we apply the heuristic described in the block comment for 2030 * zfs_metaslab_condense_block_threshold - we only condense if the space used 2031 * is greater than a threshold number of blocks.
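* For example, with a 4K space map block size and a threshold of 4 blocks, the space map must occupy more than 16K on disk before we will consider condensing it.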
2032 */ 2033 static boolean_t 2034 metaslab_should_condense(metaslab_t *msp) 2035 { 2036 space_map_t *sm = msp->ms_sm; 2037 range_seg_t *rs; 2038 uint64_t size, entries, segsz, object_size, optimal_size, record_size; 2039 dmu_object_info_t doi; 2040 uint64_t vdev_blocksize = 1 << msp->ms_group->mg_vd->vdev_ashift; 2041 2042 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2043 ASSERT(msp->ms_loaded); 2044 2045 /* 2046 * Use the ms_size_tree range tree, which is ordered by size, to 2047 * obtain the largest segment in the free tree. We always condense 2048 * metaslabs that are empty and metaslabs for which a condense 2049 * request has been made. 2050 */ 2051 rs = avl_last(&msp->ms_size_tree); 2052 if (rs == NULL || msp->ms_condense_wanted) 2053 return (B_TRUE); 2054 2055 /* 2056 * Calculate the number of 64-bit entries this segment would 2057 * require when written to disk. If this single segment would be 2058 * larger on-disk than the entire current on-disk structure, then 2059 * clearly condensing will increase the on-disk structure size. 2060 */ 2061 size = (rs->rs_end - rs->rs_start) >> sm->sm_shift; 2062 entries = size / (MIN(size, SM_RUN_MAX)); 2063 segsz = entries * sizeof (uint64_t); 2064 2065 optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root); 2066 object_size = space_map_length(msp->ms_sm); 2067 2068 dmu_object_info_from_db(sm->sm_dbuf, &doi); 2069 record_size = MAX(doi.doi_data_block_size, vdev_blocksize); 2070 2071 return (segsz <= object_size && 2072 object_size >= (optimal_size * zfs_condense_pct / 100) && 2073 object_size > zfs_metaslab_condense_block_threshold * record_size); 2074 } 2075 2076 /* 2077 * Condense the on-disk space map representation to its minimized form. 2078 * The minimized form consists of a small number of allocations followed by 2079 * the entries of the free range tree. 2080 */ 2081 static void 2082 metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx) 2083 { 2084 spa_t *spa = msp->ms_group->mg_vd->vdev_spa; 2085 range_tree_t *freetree = msp->ms_freetree[txg & TXG_MASK]; 2086 range_tree_t *condense_tree; 2087 space_map_t *sm = msp->ms_sm; 2088 2089 ASSERT(MUTEX_HELD(&msp->ms_lock)); 2090 ASSERT3U(spa_sync_pass(spa), ==, 1); 2091 ASSERT(msp->ms_loaded); 2092 2093 2094 spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, " 2095 "spa %s, smp size %llu, segments %lu, forcing condense=%s", txg, 2096 msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id, 2097 msp->ms_group->mg_vd->vdev_spa->spa_name, 2098 space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root), 2099 msp->ms_condense_wanted ? "TRUE" : "FALSE"); 2100 2101 msp->ms_condense_wanted = B_FALSE; 2102 2103 /* 2104 * Create a range tree that is 100% allocated. We remove segments 2105 * that have been freed in this txg, any deferred frees that exist, 2106 * and any allocations in the future. Removing segments should be 2107 * a relatively inexpensive operation since we expect these trees to 2108 * have a small number of nodes. 2109 */ 2110 condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock); 2111 range_tree_add(condense_tree, msp->ms_start, msp->ms_size); 2112 2113 /* 2114 * Remove what's been freed in this txg from the condense_tree. 2115 * Since we're in sync_pass 1, we know that all the frees from 2116 * this txg are in the freetree.
2117 */ 2118 range_tree_walk(freetree, range_tree_remove, condense_tree); 2119 2120 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2121 range_tree_walk(msp->ms_defertree[t], 2122 range_tree_remove, condense_tree); 2123 } 2124 2125 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 2126 range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK], 2127 range_tree_remove, condense_tree); 2128 } 2129 2130 /* 2131 * We're about to drop the metaslab's lock thus allowing 2132 * other consumers to change its content. Set the 2133 * metaslab's ms_condensing flag to ensure that 2134 * allocations on this metaslab do not occur while we're 2135 * in the middle of committing it to disk. This is only critical 2136 * for the ms_tree as all other range trees use per txg 2137 * views of their content. 2138 */ 2139 msp->ms_condensing = B_TRUE; 2140 2141 mutex_exit(&msp->ms_lock); 2142 space_map_truncate(sm, tx); 2143 mutex_enter(&msp->ms_lock); 2144 2145 /* 2146 * While we would ideally like to create a space map representation 2147 * that consists only of allocation records, doing so can be 2148 * prohibitively expensive because the in-core free tree can be 2149 * large, and therefore computationally expensive to subtract 2150 * from the condense_tree. Instead we sync out two trees, a cheap 2151 * allocation only tree followed by the in-core free tree. While not 2152 * optimal, this is typically close to optimal, and much cheaper to 2153 * compute. 2154 */ 2155 space_map_write(sm, condense_tree, SM_ALLOC, tx); 2156 range_tree_vacate(condense_tree, NULL, NULL); 2157 range_tree_destroy(condense_tree); 2158 2159 space_map_write(sm, msp->ms_tree, SM_FREE, tx); 2160 msp->ms_condensing = B_FALSE; 2161 } 2162 2163 /* 2164 * Write a metaslab to disk in the context of the specified transaction group. 2165 */ 2166 void 2167 metaslab_sync(metaslab_t *msp, uint64_t txg) 2168 { 2169 metaslab_group_t *mg = msp->ms_group; 2170 vdev_t *vd = mg->mg_vd; 2171 spa_t *spa = vd->vdev_spa; 2172 objset_t *mos = spa_meta_objset(spa); 2173 range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK]; 2174 range_tree_t **freetree = &msp->ms_freetree[txg & TXG_MASK]; 2175 range_tree_t **freed_tree = 2176 &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; 2177 dmu_tx_t *tx; 2178 uint64_t object = space_map_object(msp->ms_sm); 2179 2180 ASSERT(!vd->vdev_ishole); 2181 2182 /* 2183 * This metaslab has just been added so there's no work to do now. 2184 */ 2185 if (*freetree == NULL) { 2186 ASSERT3P(alloctree, ==, NULL); 2187 return; 2188 } 2189 2190 ASSERT3P(alloctree, !=, NULL); 2191 ASSERT3P(*freetree, !=, NULL); 2192 ASSERT3P(*freed_tree, !=, NULL); 2193 2194 /* 2195 * Normally, we don't want to process a metaslab if there 2196 * are no allocations or frees to perform. However, if the metaslab 2197 * is being forced to condense we need to let it through. 2198 */ 2199 if (range_tree_space(alloctree) == 0 && 2200 range_tree_space(*freetree) == 0 && 2201 !msp->ms_condense_wanted) 2202 return; 2203 2204 /* 2205 * The only state that can actually be changing concurrently with 2206 * metaslab_sync() is the metaslab's ms_tree. No other thread can 2207 * be modifying this txg's alloctree, freetree, freed_tree, or 2208 * space_map_phys_t. Therefore, we only hold ms_lock to satisfy 2209 * space map ASSERTs. We drop it whenever we call into the DMU, 2210 * because the DMU can call down to us (e.g. via zio_free()) at 2211 * any time.
2212 */ 2213 2214 tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg); 2215 2216 if (msp->ms_sm == NULL) { 2217 uint64_t new_object; 2218 2219 new_object = space_map_alloc(mos, tx); 2220 VERIFY3U(new_object, !=, 0); 2221 2222 VERIFY0(space_map_open(&msp->ms_sm, mos, new_object, 2223 msp->ms_start, msp->ms_size, vd->vdev_ashift, 2224 &msp->ms_lock)); 2225 ASSERT(msp->ms_sm != NULL); 2226 } 2227 2228 mutex_enter(&msp->ms_lock); 2229 2230 /* 2231 * Note: metaslab_condense() clears the space map's histogram. 2232 * Therefore we must verify and remove this histogram before 2233 * condensing. 2234 */ 2235 metaslab_group_histogram_verify(mg); 2236 metaslab_class_histogram_verify(mg->mg_class); 2237 metaslab_group_histogram_remove(mg, msp); 2238 2239 if (msp->ms_loaded && spa_sync_pass(spa) == 1 && 2240 metaslab_should_condense(msp)) { 2241 metaslab_condense(msp, txg, tx); 2242 } else { 2243 space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx); 2244 space_map_write(msp->ms_sm, *freetree, SM_FREE, tx); 2245 } 2246 2247 if (msp->ms_loaded) { 2248 /* 2249 * When the space map is loaded, we have an accurate 2250 * histogram in the range tree. This gives us an opportunity 2251 * to bring the space map's histogram up-to-date so we clear 2252 * it first before updating it. 2253 */ 2254 space_map_histogram_clear(msp->ms_sm); 2255 space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx); 2256 2257 /* 2258 * Since we've cleared the histogram we need to add back 2259 * any free space that has already been processed, plus 2260 * any deferred space. This allows the on-disk histogram 2261 * to accurately reflect all free space even if some space 2262 * is not yet available for allocation (i.e. deferred). 2263 */ 2264 space_map_histogram_add(msp->ms_sm, *freed_tree, tx); 2265 2266 /* 2267 * Add back any deferred free space that has not been 2268 * added back into the in-core free tree yet. This will 2269 * ensure that we don't end up with a space map histogram 2270 * that is completely empty unless the metaslab is fully 2271 * allocated. 2272 */ 2273 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2274 space_map_histogram_add(msp->ms_sm, 2275 msp->ms_defertree[t], tx); 2276 } 2277 } 2278 2279 /* 2280 * Always add the free space from this sync pass to the space 2281 * map histogram. We want to make sure that the on-disk histogram 2282 * accounts for all free space. If the space map is not loaded, 2283 * then we will lose some accuracy but will correct it the next 2284 * time we load the space map. 2285 */ 2286 space_map_histogram_add(msp->ms_sm, *freetree, tx); 2287 2288 metaslab_group_histogram_add(mg, msp); 2289 metaslab_group_histogram_verify(mg); 2290 metaslab_class_histogram_verify(mg->mg_class); 2291 2292 /* 2293 * For sync pass 1, we avoid traversing this txg's free range tree 2294 * and instead will just swap the pointers for freetree and 2295 * freed_tree. We can safely do this since the freed_tree is 2296 * guaranteed to be empty on the initial pass.
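* On later sync passes the freed_tree already holds the frees from the earlier passes of this txg, so we must merge this pass's frees into it rather than swap.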
2297 */ 2298 if (spa_sync_pass(spa) == 1) { 2299 range_tree_swap(freetree, freed_tree); 2300 } else { 2301 range_tree_vacate(*freetree, range_tree_add, *freed_tree); 2302 } 2303 range_tree_vacate(alloctree, NULL, NULL); 2304 2305 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); 2306 ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK])); 2307 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); 2308 2309 mutex_exit(&msp->ms_lock); 2310 2311 if (object != space_map_object(msp->ms_sm)) { 2312 object = space_map_object(msp->ms_sm); 2313 dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) * 2314 msp->ms_id, sizeof (uint64_t), &object, tx); 2315 } 2316 dmu_tx_commit(tx); 2317 } 2318 2319 /* 2320 * Called after a transaction group has completely synced to mark 2321 * all of the metaslab's free space as usable. 2322 */ 2323 void 2324 metaslab_sync_done(metaslab_t *msp, uint64_t txg) 2325 { 2326 metaslab_group_t *mg = msp->ms_group; 2327 vdev_t *vd = mg->mg_vd; 2328 spa_t *spa = vd->vdev_spa; 2329 range_tree_t **freed_tree; 2330 range_tree_t **defer_tree; 2331 int64_t alloc_delta, defer_delta; 2332 boolean_t defer_allowed = B_TRUE; 2333 2334 ASSERT(!vd->vdev_ishole); 2335 2336 mutex_enter(&msp->ms_lock); 2337 2338 /* 2339 * If this metaslab is just becoming available, initialize its 2340 * alloctrees, freetrees, and defertree and add its capacity to 2341 * the vdev. 2342 */ 2343 if (msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK] == NULL) { 2344 for (int t = 0; t < TXG_SIZE; t++) { 2345 ASSERT(msp->ms_alloctree[t] == NULL); 2346 ASSERT(msp->ms_freetree[t] == NULL); 2347 2348 msp->ms_alloctree[t] = range_tree_create(NULL, msp, 2349 &msp->ms_lock); 2350 msp->ms_freetree[t] = range_tree_create(NULL, msp, 2351 &msp->ms_lock); 2352 } 2353 2354 for (int t = 0; t < TXG_DEFER_SIZE; t++) { 2355 ASSERT(msp->ms_defertree[t] == NULL); 2356 2357 msp->ms_defertree[t] = range_tree_create(NULL, msp, 2358 &msp->ms_lock); 2359 } 2360 2361 vdev_space_update(vd, 0, 0, msp->ms_size); 2362 } 2363 2364 freed_tree = &msp->ms_freetree[TXG_CLEAN(txg) & TXG_MASK]; 2365 defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE]; 2366 2367 uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) - 2368 metaslab_class_get_alloc(spa_normal_class(spa)); 2369 if (free_space <= spa_get_slop_space(spa)) { 2370 defer_allowed = B_FALSE; 2371 } 2372 2373 defer_delta = 0; 2374 alloc_delta = space_map_alloc_delta(msp->ms_sm); 2375 if (defer_allowed) { 2376 defer_delta = range_tree_space(*freed_tree) - 2377 range_tree_space(*defer_tree); 2378 } else { 2379 defer_delta -= range_tree_space(*defer_tree); 2380 } 2381 2382 vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0); 2383 2384 ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK])); 2385 ASSERT0(range_tree_space(msp->ms_freetree[txg & TXG_MASK])); 2386 2387 /* 2388 * If there's a metaslab_load() in progress, wait for it to complete 2389 * so that we have a consistent view of the in-core space map. 2390 */ 2391 metaslab_load_wait(msp); 2392 2393 /* 2394 * Move the frees from the defer_tree back to the free 2395 * range tree (if it's loaded). Swap the freed_tree and the 2396 * defer_tree -- this is safe to do because we've just emptied out 2397 * the defer_tree. 2398 */ 2399 range_tree_vacate(*defer_tree, 2400 msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree); 2401 if (defer_allowed) { 2402 range_tree_swap(freed_tree, defer_tree); 2403 } else { 2404 range_tree_vacate(*freed_tree, 2405 msp->ms_loaded ? 
range_tree_add : NULL, msp->ms_tree); 2406 } 2407 2408 space_map_update(msp->ms_sm); 2409 2410 msp->ms_deferspace += defer_delta; 2411 ASSERT3S(msp->ms_deferspace, >=, 0); 2412 ASSERT3S(msp->ms_deferspace, <=, msp->ms_size); 2413 if (msp->ms_deferspace != 0) { 2414 /* 2415 * Keep syncing this metaslab until all deferred frees 2416 * are back in circulation. 2417 */ 2418 vdev_dirty(vd, VDD_METASLAB, msp, txg + 1); 2419 } 2420 2421 /* 2422 * Calculate the new weights before unloading any metaslabs. 2423 * This will give us the most accurate weighting. 2424 */ 2425 metaslab_group_sort(mg, msp, metaslab_weight(msp)); 2426 2427 /* 2428 * If the metaslab is loaded and we've not tried to load or allocate 2429 * from it in 'metaslab_unload_delay' txgs, then unload it. 2430 */ 2431 if (msp->ms_loaded && 2432 msp->ms_selected_txg + metaslab_unload_delay < txg) { 2433 for (int t = 1; t < TXG_CONCURRENT_STATES; t++) { 2434 VERIFY0(range_tree_space( 2435 msp->ms_alloctree[(txg + t) & TXG_MASK])); 2436 } 2437 2438 if (!metaslab_debug_unload) 2439 metaslab_unload(msp); 2440 } 2441 2442 mutex_exit(&msp->ms_lock); 2443 } 2444 2445 void 2446 metaslab_sync_reassess(metaslab_group_t *mg) 2447 { 2448 metaslab_group_alloc_update(mg); 2449 mg->mg_fragmentation = metaslab_group_fragmentation(mg); 2450 2451 /* 2452 * Preload the next potential metaslabs 2453 */ 2454 metaslab_group_preload(mg); 2455 } 2456 2457 static uint64_t 2458 metaslab_distance(metaslab_t *msp, dva_t *dva) 2459 { 2460 uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift; 2461 uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift; 2462 uint64_t start = msp->ms_id; 2463 2464 if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva)) 2465 return (1ULL << 63); 2466 2467 if (offset < start) 2468 return ((start - offset) << ms_shift); 2469 if (offset > start) 2470 return ((offset - start) << ms_shift); 2471 return (0); 2472 } 2473 2474 /* 2475 * ========================================================================== 2476 * Metaslab allocation tracing facility 2477 * ========================================================================== 2478 */ 2479 kstat_t *metaslab_trace_ksp; 2480 kstat_named_t metaslab_trace_over_limit; 2481 2482 void 2483 metaslab_alloc_trace_init(void) 2484 { 2485 ASSERT(metaslab_alloc_trace_cache == NULL); 2486 metaslab_alloc_trace_cache = kmem_cache_create( 2487 "metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t), 2488 0, NULL, NULL, NULL, NULL, NULL, 0); 2489 metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats", 2490 "misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL); 2491 if (metaslab_trace_ksp != NULL) { 2492 metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit; 2493 kstat_named_init(&metaslab_trace_over_limit, 2494 "metaslab_trace_over_limit", KSTAT_DATA_UINT64); 2495 kstat_install(metaslab_trace_ksp); 2496 } 2497 } 2498 2499 void 2500 metaslab_alloc_trace_fini(void) 2501 { 2502 if (metaslab_trace_ksp != NULL) { 2503 kstat_delete(metaslab_trace_ksp); 2504 metaslab_trace_ksp = NULL; 2505 } 2506 kmem_cache_destroy(metaslab_alloc_trace_cache); 2507 metaslab_alloc_trace_cache = NULL; 2508 } 2509 2510 /* 2511 * Add an allocation trace element to the allocation tracing list. 
2512 */ 2513 static void 2514 metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg, 2515 metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset) 2516 { 2517 if (!metaslab_trace_enabled) 2518 return; 2519 2520 /* 2521 * When the tracing list reaches its maximum we remove 2522 * the second element in the list before adding a new one. 2523 * By removing the second element we preserve the original 2524 * entry as a clue to what allocation steps have already been 2525 * performed. 2526 */ 2527 if (zal->zal_size == metaslab_trace_max_entries) { 2528 metaslab_alloc_trace_t *mat_next; 2529 #ifdef DEBUG 2530 panic("too many entries in allocation list"); 2531 #endif 2532 atomic_inc_64(&metaslab_trace_over_limit.value.ui64); 2533 zal->zal_size--; 2534 mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list)); 2535 list_remove(&zal->zal_list, mat_next); 2536 kmem_cache_free(metaslab_alloc_trace_cache, mat_next); 2537 } 2538 2539 metaslab_alloc_trace_t *mat = 2540 kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP); 2541 list_link_init(&mat->mat_list_node); 2542 mat->mat_mg = mg; 2543 mat->mat_msp = msp; 2544 mat->mat_size = psize; 2545 mat->mat_dva_id = dva_id; 2546 mat->mat_offset = offset; 2547 mat->mat_weight = 0; 2548 2549 if (msp != NULL) 2550 mat->mat_weight = msp->ms_weight; 2551 2552 /* 2553 * The list is part of the zio so locking is not required. Only 2554 * a single thread will perform allocations for a given zio. 2555 */ 2556 list_insert_tail(&zal->zal_list, mat); 2557 zal->zal_size++; 2558 2559 ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries); 2560 } 2561 2562 void 2563 metaslab_trace_init(zio_alloc_list_t *zal) 2564 { 2565 list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t), 2566 offsetof(metaslab_alloc_trace_t, mat_list_node)); 2567 zal->zal_size = 0; 2568 } 2569 2570 void 2571 metaslab_trace_fini(zio_alloc_list_t *zal) 2572 { 2573 metaslab_alloc_trace_t *mat; 2574 2575 while ((mat = list_remove_head(&zal->zal_list)) != NULL) 2576 kmem_cache_free(metaslab_alloc_trace_cache, mat); 2577 list_destroy(&zal->zal_list); 2578 zal->zal_size = 0; 2579 } 2580 2581 /* 2582 * ========================================================================== 2583 * Metaslab block operations 2584 * ========================================================================== 2585 */ 2586 2587 static void 2588 metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags) 2589 { 2590 if (!(flags & METASLAB_ASYNC_ALLOC) || 2591 flags & METASLAB_DONT_THROTTLE) 2592 return; 2593 2594 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 2595 if (!mg->mg_class->mc_alloc_throttle_enabled) 2596 return; 2597 2598 (void) refcount_add(&mg->mg_alloc_queue_depth, tag); 2599 } 2600 2601 void 2602 metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags) 2603 { 2604 if (!(flags & METASLAB_ASYNC_ALLOC) || 2605 flags & METASLAB_DONT_THROTTLE) 2606 return; 2607 2608 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 2609 if (!mg->mg_class->mc_alloc_throttle_enabled) 2610 return; 2611 2612 (void) refcount_remove(&mg->mg_alloc_queue_depth, tag); 2613 } 2614 2615 void 2616 metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag) 2617 { 2618 #ifdef ZFS_DEBUG 2619 const dva_t *dva = bp->blk_dva; 2620 int ndvas = BP_GET_NDVAS(bp); 2621 2622 for (int d = 0; d < ndvas; d++) { 2623 uint64_t vdev = DVA_GET_VDEV(&dva[d]); 2624 metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg; 2625
VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag)); 2626 } 2627 #endif 2628 } 2629 2630 static uint64_t 2631 metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg) 2632 { 2633 uint64_t start; 2634 range_tree_t *rt = msp->ms_tree; 2635 metaslab_class_t *mc = msp->ms_group->mg_class; 2636 2637 VERIFY(!msp->ms_condensing); 2638 2639 start = mc->mc_ops->msop_alloc(msp, size); 2640 if (start != -1ULL) { 2641 metaslab_group_t *mg = msp->ms_group; 2642 vdev_t *vd = mg->mg_vd; 2643 2644 VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift)); 2645 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 2646 VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size); 2647 range_tree_remove(rt, start, size); 2648 2649 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) 2650 vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg); 2651 2652 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size); 2653 2654 /* Track the last successful allocation */ 2655 msp->ms_alloc_txg = txg; 2656 metaslab_verify_space(msp, txg); 2657 } 2658 2659 /* 2660 * Now that we've attempted the allocation we need to update the 2661 * metaslab's maximum block size since it may have changed. 2662 */ 2663 msp->ms_max_size = metaslab_block_maxsize(msp); 2664 return (start); 2665 } 2666 2667 static uint64_t 2668 metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal, 2669 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d) 2670 { 2671 metaslab_t *msp = NULL; 2672 uint64_t offset = -1ULL; 2673 uint64_t activation_weight; 2674 uint64_t target_distance; 2675 int i; 2676 2677 activation_weight = METASLAB_WEIGHT_PRIMARY; 2678 for (i = 0; i < d; i++) { 2679 if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) { 2680 activation_weight = METASLAB_WEIGHT_SECONDARY; 2681 break; 2682 } 2683 } 2684 2685 metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP); 2686 search->ms_weight = UINT64_MAX; 2687 search->ms_start = 0; 2688 for (;;) { 2689 boolean_t was_active; 2690 avl_tree_t *t = &mg->mg_metaslab_tree; 2691 avl_index_t idx; 2692 2693 mutex_enter(&mg->mg_lock); 2694 2695 /* 2696 * Find the metaslab with the highest weight that is less 2697 * than what we've already tried. In the common case, this 2698 * means that we will examine each metaslab at most once. 2699 * Note that concurrent callers could reorder metaslabs 2700 * by activation/passivation once we have dropped the mg_lock. 2701 * If a metaslab is activated by another thread, and we fail 2702 * to allocate from the metaslab we have selected, we may 2703 * not try the newly-activated metaslab, and instead activate 2704 * another metaslab. This is not optimal, but generally 2705 * does not cause any problems (a possible exception being 2706 * if every metaslab is completely full except for 2707 * the newly-activated metaslab which we fail to examine). 2708 */ 2709 msp = avl_find(t, search, &idx); 2710 if (msp == NULL) 2711 msp = avl_nearest(t, idx, AVL_AFTER); 2712 for (; msp != NULL; msp = AVL_NEXT(t, msp)) { 2713 2714 if (!metaslab_should_allocate(msp, asize)) { 2715 metaslab_trace_add(zal, mg, msp, asize, d, 2716 TRACE_TOO_SMALL); 2717 continue; 2718 } 2719 2720 /* 2721 * If the selected metaslab is condensing, skip it. 2722 */ 2723 if (msp->ms_condensing) 2724 continue; 2725 2726 was_active = msp->ms_weight & METASLAB_ACTIVE_MASK; 2727 if (activation_weight == METASLAB_WEIGHT_PRIMARY) 2728 break; 2729 2730 target_distance = min_distance + 2731 (space_map_allocated(msp->ms_sm) != 0 ?
0 : 2732 min_distance >> 1); 2733 2734 for (i = 0; i < d; i++) { 2735 if (metaslab_distance(msp, &dva[i]) < 2736 target_distance) 2737 break; 2738 } 2739 if (i == d) 2740 break; 2741 } 2742 mutex_exit(&mg->mg_lock); 2743 if (msp == NULL) { 2744 kmem_free(search, sizeof (*search)); 2745 return (-1ULL); 2746 } 2747 search->ms_weight = msp->ms_weight; 2748 search->ms_start = msp->ms_start + 1; 2749 2750 mutex_enter(&msp->ms_lock); 2751 2752 /* 2753 * Ensure that the metaslab we have selected is still 2754 * capable of handling our request. It's possible that 2755 * another thread may have changed the weight while we 2756 * were blocked on the metaslab lock. We check the 2757 * active status first to see if we need to reselect 2758 * a new metaslab. 2759 */ 2760 if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) { 2761 mutex_exit(&msp->ms_lock); 2762 continue; 2763 } 2764 2765 if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) && 2766 activation_weight == METASLAB_WEIGHT_PRIMARY) { 2767 metaslab_passivate(msp, 2768 msp->ms_weight & ~METASLAB_ACTIVE_MASK); 2769 mutex_exit(&msp->ms_lock); 2770 continue; 2771 } 2772 2773 if (metaslab_activate(msp, activation_weight) != 0) { 2774 mutex_exit(&msp->ms_lock); 2775 continue; 2776 } 2777 msp->ms_selected_txg = txg; 2778 2779 /* 2780 * Now that we have the lock, recheck to see if we should 2781 * continue to use this metaslab for this allocation. 2782 * The metaslab is now loaded so metaslab_should_allocate() can 2783 * accurately determine if the allocation attempt should 2784 * proceed. 2785 */ 2786 if (!metaslab_should_allocate(msp, asize)) { 2787 /* Passivate this metaslab and select a new one. */ 2788 metaslab_trace_add(zal, mg, msp, asize, d, 2789 TRACE_TOO_SMALL); 2790 goto next; 2791 } 2792 2793 /* 2794 * If this metaslab is currently condensing then pick again as 2795 * we can't manipulate this metaslab until it's committed 2796 * to disk. 2797 */ 2798 if (msp->ms_condensing) { 2799 metaslab_trace_add(zal, mg, msp, asize, d, 2800 TRACE_CONDENSING); 2801 mutex_exit(&msp->ms_lock); 2802 continue; 2803 } 2804 2805 offset = metaslab_block_alloc(msp, asize, txg); 2806 metaslab_trace_add(zal, mg, msp, asize, d, offset); 2807 2808 if (offset != -1ULL) { 2809 /* Proactively passivate the metaslab, if needed */ 2810 metaslab_segment_may_passivate(msp); 2811 break; 2812 } 2813 next: 2814 ASSERT(msp->ms_loaded); 2815 2816 /* 2817 * We were unable to allocate from this metaslab so determine 2818 * a new weight for this metaslab. Now that we have loaded 2819 * the metaslab we can provide a better hint to the metaslab 2820 * selector. 2821 * 2822 * For space-based metaslabs, we use the maximum block size. 2823 * This information is only available when the metaslab 2824 * is loaded and is more accurate than the generic free 2825 * space weight that was calculated by metaslab_weight(). 2826 * This information allows us to quickly compare the maximum 2827 * available allocation in the metaslab to the allocation 2828 * size being requested. 2829 * 2830 * For segment-based metaslabs, determine the new weight 2831 * based on the highest bucket in the range tree. We 2832 * explicitly use the loaded segment weight (i.e. the range 2833 * tree histogram) since it contains the space that is 2834 * currently available for allocation and is accurate 2835 * even within a sync pass.
2836 */ 2837 if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) { 2838 uint64_t weight = metaslab_block_maxsize(msp); 2839 WEIGHT_SET_SPACEBASED(weight); 2840 metaslab_passivate(msp, weight); 2841 } else { 2842 metaslab_passivate(msp, 2843 metaslab_weight_from_range_tree(msp)); 2844 } 2845 2846 /* 2847 * We have just failed an allocation attempt, check 2848 * that metaslab_should_allocate() agrees. Otherwise, 2849 * we may end up in an infinite loop retrying the same 2850 * metaslab. 2851 */ 2852 ASSERT(!metaslab_should_allocate(msp, asize)); 2853 mutex_exit(&msp->ms_lock); 2854 } 2855 mutex_exit(&msp->ms_lock); 2856 kmem_free(search, sizeof (*search)); 2857 return (offset); 2858 } 2859 2860 static uint64_t 2861 metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal, 2862 uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d) 2863 { 2864 uint64_t offset; 2865 ASSERT(mg->mg_initialized); 2866 2867 offset = metaslab_group_alloc_normal(mg, zal, asize, txg, 2868 min_distance, dva, d); 2869 2870 mutex_enter(&mg->mg_lock); 2871 if (offset == -1ULL) { 2872 mg->mg_failed_allocations++; 2873 metaslab_trace_add(zal, mg, NULL, asize, d, 2874 TRACE_GROUP_FAILURE); 2875 if (asize == SPA_GANGBLOCKSIZE) { 2876 /* 2877 * This metaslab group was unable to allocate 2878 * the minimum gang block size so it must be out of 2879 * space. We must notify the allocation throttle 2880 * to start skipping allocation attempts to this 2881 * metaslab group until more space becomes available. 2882 * Note: this failure cannot be caused by the 2883 * allocation throttle since the allocation throttle 2884 * is only responsible for skipping devices and 2885 * not failing block allocations. 2886 */ 2887 mg->mg_no_free_space = B_TRUE; 2888 } 2889 } 2890 mg->mg_allocations++; 2891 mutex_exit(&mg->mg_lock); 2892 return (offset); 2893 } 2894 2895 /* 2896 * If we have to write a ditto block (i.e. more than one DVA for a given BP) 2897 * on the same vdev as an existing DVA of this BP, then try to allocate it 2898 * at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the 2899 * existing DVAs. 2900 */ 2901 int ditto_same_vdev_distance_shift = 3; 2902 2903 /* 2904 * Allocate a block for the specified i/o. 2905 */ 2906 static int 2907 metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize, 2908 dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags, 2909 zio_alloc_list_t *zal) 2910 { 2911 metaslab_group_t *mg, *rotor; 2912 vdev_t *vd; 2913 boolean_t try_hard = B_FALSE; 2914 2915 ASSERT(!DVA_IS_VALID(&dva[d])); 2916 2917 /* 2918 * For testing, make some blocks above a certain size be gang blocks. 2919 */ 2920 if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) { 2921 metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG); 2922 return (SET_ERROR(ENOSPC)); 2923 } 2924 2925 /* 2926 * Start at the rotor and loop through all mgs until we find something. 2927 * Note that there's no locking on mc_rotor or mc_aliquot because 2928 * nothing actually breaks if we miss a few updates -- we just won't 2929 * allocate quite as evenly. It all balances out over time. 2930 * 2931 * If we are doing ditto or log blocks, try to spread them across 2932 * consecutive vdevs. If we're forced to reuse a vdev before we've 2933 * allocated all of our ditto blocks, then try and spread them out on 2934 * that vdev as much as possible. If it turns out to not be possible, 2935 * gradually lower our standards until anything becomes acceptable. 
2936 * Also, allocating on consecutive vdevs (as opposed to random vdevs) 2937 * gives us hope of containing our fault domains to something we're 2938 * able to reason about. Otherwise, any two top-level vdev failures 2939 * will guarantee the loss of data. With consecutive allocation, 2940 * only two adjacent top-level vdev failures will result in data loss. 2941 * 2942 * If we are doing gang blocks (hintdva is non-NULL), try to keep 2943 * ourselves on the same vdev as our gang block header. That 2944 * way, we can hope for locality in vdev_cache, plus it makes our 2945 * fault domains something tractable. 2946 */ 2947 if (hintdva) { 2948 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d])); 2949 2950 /* 2951 * It's possible the vdev we're using as the hint no 2952 * longer exists (i.e. removed). Consult the rotor when 2953 * all else fails. 2954 */ 2955 if (vd != NULL) { 2956 mg = vd->vdev_mg; 2957 2958 if (flags & METASLAB_HINTBP_AVOID && 2959 mg->mg_next != NULL) 2960 mg = mg->mg_next; 2961 } else { 2962 mg = mc->mc_rotor; 2963 } 2964 } else if (d != 0) { 2965 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1])); 2966 mg = vd->vdev_mg->mg_next; 2967 } else { 2968 mg = mc->mc_rotor; 2969 } 2970 2971 /* 2972 * If the hint put us into the wrong metaslab class, or into a 2973 * metaslab group that has been passivated, just follow the rotor. 2974 */ 2975 if (mg->mg_class != mc || mg->mg_activation_count <= 0) 2976 mg = mc->mc_rotor; 2977 2978 rotor = mg; 2979 top: 2980 do { 2981 boolean_t allocatable; 2982 2983 ASSERT(mg->mg_activation_count == 1); 2984 vd = mg->mg_vd; 2985 2986 /* 2987 * Don't allocate from faulted devices. 2988 */ 2989 if (try_hard) { 2990 spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER); 2991 allocatable = vdev_allocatable(vd); 2992 spa_config_exit(spa, SCL_ZIO, FTAG); 2993 } else { 2994 allocatable = vdev_allocatable(vd); 2995 } 2996 2997 /* 2998 * Determine if the selected metaslab group is eligible 2999 * for allocations. If we're ganging then don't allow 3000 * this metaslab group to skip allocations since that would 3001 * inadvertently return ENOSPC and suspend the pool 3002 * even though space is still available. 3003 */ 3004 if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) { 3005 allocatable = metaslab_group_allocatable(mg, rotor, 3006 psize); 3007 } 3008 3009 if (!allocatable) { 3010 metaslab_trace_add(zal, mg, NULL, psize, d, 3011 TRACE_NOT_ALLOCATABLE); 3012 goto next; 3013 } 3014 3015 ASSERT(mg->mg_initialized); 3016 3017 /* 3018 * Avoid writing single-copy data to a failing, 3019 * non-redundant vdev, unless we've already tried all 3020 * other vdevs. 3021 */ 3022 if ((vd->vdev_stat.vs_write_errors > 0 || 3023 vd->vdev_state < VDEV_STATE_HEALTHY) && 3024 d == 0 && !try_hard && vd->vdev_children == 0) { 3025 metaslab_trace_add(zal, mg, NULL, psize, d, 3026 TRACE_VDEV_ERROR); 3027 goto next; 3028 } 3029 3030 ASSERT(mg->mg_class == mc); 3031 3032 /* 3033 * If we don't need to try hard, then require that the 3034 * block be 1/8th of the device away from any other DVAs 3035 * in this BP. If we are trying hard, allow any offset 3036 * to be used (distance=0). 
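* For example, on a 1TB vdev with ditto_same_vdev_distance_shift of 3, ditto DVAs are kept at least 128GB apart; if a single metaslab already spans at least that much, the distance requirement is dropped.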
3037 */ 3038 uint64_t distance = 0; 3039 if (!try_hard) { 3040 distance = vd->vdev_asize >> 3041 ditto_same_vdev_distance_shift; 3042 if (distance <= (1ULL << vd->vdev_ms_shift)) 3043 distance = 0; 3044 } 3045 3046 uint64_t asize = vdev_psize_to_asize(vd, psize); 3047 ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0); 3048 3049 uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg, 3050 distance, dva, d); 3051 3052 if (offset != -1ULL) { 3053 /* 3054 * If we've just selected this metaslab group, 3055 * figure out whether the corresponding vdev is 3056 * over- or under-used relative to the pool, 3057 * and set an allocation bias to even it out. 3058 */ 3059 if (mc->mc_aliquot == 0 && metaslab_bias_enabled) { 3060 vdev_stat_t *vs = &vd->vdev_stat; 3061 int64_t vu, cu; 3062 3063 vu = (vs->vs_alloc * 100) / (vs->vs_space + 1); 3064 cu = (mc->mc_alloc * 100) / (mc->mc_space + 1); 3065 3066 /* 3067 * Calculate how much more or less we should 3068 * try to allocate from this device during 3069 * this iteration around the rotor. 3070 * For example, if a device is 80% full 3071 * and the pool is 20% full then we should 3072 * reduce allocations by 60% on this device. 3073 * 3074 * mg_bias = (20 - 80) * 512K / 100 = -307K 3075 * 3076 * This reduces allocations by 307K for this 3077 * iteration. 3078 */ 3079 mg->mg_bias = ((cu - vu) * 3080 (int64_t)mg->mg_aliquot) / 100; 3081 } else if (!metaslab_bias_enabled) { 3082 mg->mg_bias = 0; 3083 } 3084 3085 if (atomic_add_64_nv(&mc->mc_aliquot, asize) >= 3086 mg->mg_aliquot + mg->mg_bias) { 3087 mc->mc_rotor = mg->mg_next; 3088 mc->mc_aliquot = 0; 3089 } 3090 3091 DVA_SET_VDEV(&dva[d], vd->vdev_id); 3092 DVA_SET_OFFSET(&dva[d], offset); 3093 DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER)); 3094 DVA_SET_ASIZE(&dva[d], asize); 3095 3096 return (0); 3097 } 3098 next: 3099 mc->mc_rotor = mg->mg_next; 3100 mc->mc_aliquot = 0; 3101 } while ((mg = mg->mg_next) != rotor); 3102 3103 /* 3104 * If we haven't tried hard, do so now. 3105 */ 3106 if (!try_hard) { 3107 try_hard = B_TRUE; 3108 goto top; 3109 } 3110 3111 bzero(&dva[d], sizeof (dva_t)); 3112 3113 metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC); 3114 return (SET_ERROR(ENOSPC)); 3115 } 3116 3117 /* 3118 * Free the block represented by DVA in the context of the specified 3119 * transaction group. 
3120 */ 3121 static void 3122 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now) 3123 { 3124 uint64_t vdev = DVA_GET_VDEV(dva); 3125 uint64_t offset = DVA_GET_OFFSET(dva); 3126 uint64_t size = DVA_GET_ASIZE(dva); 3127 vdev_t *vd; 3128 metaslab_t *msp; 3129 3130 ASSERT(DVA_IS_VALID(dva)); 3131 3132 if (txg > spa_freeze_txg(spa)) 3133 return; 3134 3135 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || 3136 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) { 3137 cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu", 3138 (u_longlong_t)vdev, (u_longlong_t)offset); 3139 ASSERT(0); 3140 return; 3141 } 3142 3143 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 3144 3145 if (DVA_GET_GANG(dva)) 3146 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 3147 3148 mutex_enter(&msp->ms_lock); 3149 3150 if (now) { 3151 range_tree_remove(msp->ms_alloctree[txg & TXG_MASK], 3152 offset, size); 3153 3154 VERIFY(!msp->ms_condensing); 3155 VERIFY3U(offset, >=, msp->ms_start); 3156 VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size); 3157 VERIFY3U(range_tree_space(msp->ms_tree) + size, <=, 3158 msp->ms_size); 3159 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 3160 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 3161 range_tree_add(msp->ms_tree, offset, size); 3162 msp->ms_max_size = metaslab_block_maxsize(msp); 3163 } else { 3164 if (range_tree_space(msp->ms_freetree[txg & TXG_MASK]) == 0) 3165 vdev_dirty(vd, VDD_METASLAB, msp, txg); 3166 range_tree_add(msp->ms_freetree[txg & TXG_MASK], 3167 offset, size); 3168 } 3169 3170 mutex_exit(&msp->ms_lock); 3171 } 3172 3173 /* 3174 * Intent log support: upon opening the pool after a crash, notify the SPA 3175 * of blocks that the intent log has allocated for immediate write, but 3176 * which are still considered free by the SPA because the last transaction 3177 * group didn't commit yet. 3178 */ 3179 static int 3180 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg) 3181 { 3182 uint64_t vdev = DVA_GET_VDEV(dva); 3183 uint64_t offset = DVA_GET_OFFSET(dva); 3184 uint64_t size = DVA_GET_ASIZE(dva); 3185 vdev_t *vd; 3186 metaslab_t *msp; 3187 int error = 0; 3188 3189 ASSERT(DVA_IS_VALID(dva)); 3190 3191 if ((vd = vdev_lookup_top(spa, vdev)) == NULL || 3192 (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) 3193 return (SET_ERROR(ENXIO)); 3194 3195 msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 3196 3197 if (DVA_GET_GANG(dva)) 3198 size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE); 3199 3200 mutex_enter(&msp->ms_lock); 3201 3202 if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded) 3203 error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY); 3204 3205 if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size)) 3206 error = SET_ERROR(ENOENT); 3207 3208 if (error || txg == 0) { /* txg == 0 indicates dry run */ 3209 mutex_exit(&msp->ms_lock); 3210 return (error); 3211 } 3212 3213 VERIFY(!msp->ms_condensing); 3214 VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift)); 3215 VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift)); 3216 VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size); 3217 range_tree_remove(msp->ms_tree, offset, size); 3218 3219 if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */ 3220 if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0) 3221 vdev_dirty(vd, VDD_METASLAB, msp, txg); 3222 range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size); 3223 } 3224 3225 mutex_exit(&msp->ms_lock); 3226 3227 return (0); 3228 } 3229 3230 /* 3231 * Reserve some allocation slots. 
The reservation system must be called 3232 * before we call into the allocator. If there aren't any available slots 3233 * then the I/O will be throttled until an I/O completes and its slots are 3234 * freed up. The function returns true if it was successful in placing 3235 * the reservation. 3236 */ 3237 boolean_t 3238 metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio, 3239 int flags) 3240 { 3241 uint64_t available_slots = 0; 3242 boolean_t slot_reserved = B_FALSE; 3243 3244 ASSERT(mc->mc_alloc_throttle_enabled); 3245 mutex_enter(&mc->mc_lock); 3246 3247 uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots); 3248 if (reserved_slots < mc->mc_alloc_max_slots) 3249 available_slots = mc->mc_alloc_max_slots - reserved_slots; 3250 3251 if (slots <= available_slots || GANG_ALLOCATION(flags)) { 3252 /* 3253 * We reserve the slots individually so that we can unreserve 3254 * them individually when an I/O completes. 3255 */ 3256 for (int d = 0; d < slots; d++) { 3257 reserved_slots = refcount_add(&mc->mc_alloc_slots, zio); 3258 } 3259 zio->io_flags |= ZIO_FLAG_IO_ALLOCATING; 3260 slot_reserved = B_TRUE; 3261 } 3262 3263 mutex_exit(&mc->mc_lock); 3264 return (slot_reserved); 3265 } 3266 3267 void 3268 metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio) 3269 { 3270 ASSERT(mc->mc_alloc_throttle_enabled); 3271 mutex_enter(&mc->mc_lock); 3272 for (int d = 0; d < slots; d++) { 3273 (void) refcount_remove(&mc->mc_alloc_slots, zio); 3274 } 3275 mutex_exit(&mc->mc_lock); 3276 } 3277 3278 int 3279 metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp, 3280 int ndvas, uint64_t txg, blkptr_t *hintbp, int flags, 3281 zio_alloc_list_t *zal, zio_t *zio) 3282 { 3283 dva_t *dva = bp->blk_dva; 3284 dva_t *hintdva = hintbp->blk_dva; 3285 int error = 0; 3286 3287 ASSERT(bp->blk_birth == 0); 3288 ASSERT(BP_PHYSICAL_BIRTH(bp) == 0); 3289 3290 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 3291 3292 if (mc->mc_rotor == NULL) { /* no vdevs in this class */ 3293 spa_config_exit(spa, SCL_ALLOC, FTAG); 3294 return (SET_ERROR(ENOSPC)); 3295 } 3296 3297 ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa)); 3298 ASSERT(BP_GET_NDVAS(bp) == 0); 3299 ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp)); 3300 ASSERT3P(zal, !=, NULL); 3301 3302 for (int d = 0; d < ndvas; d++) { 3303 error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva, 3304 txg, flags, zal); 3305 if (error != 0) { 3306 for (d--; d >= 0; d--) { 3307 metaslab_free_dva(spa, &dva[d], txg, B_TRUE); 3308 metaslab_group_alloc_decrement(spa, 3309 DVA_GET_VDEV(&dva[d]), zio, flags); 3310 bzero(&dva[d], sizeof (dva_t)); 3311 } 3312 spa_config_exit(spa, SCL_ALLOC, FTAG); 3313 return (error); 3314 } else { 3315 /* 3316 * Update the metaslab group's queue depth 3317 * based on the newly allocated dva. 
3318 */ 3319 metaslab_group_alloc_increment(spa, 3320 DVA_GET_VDEV(&dva[d]), zio, flags); 3321 } 3322 3323 } 3324 ASSERT(error == 0); 3325 ASSERT(BP_GET_NDVAS(bp) == ndvas); 3326 3327 spa_config_exit(spa, SCL_ALLOC, FTAG); 3328 3329 BP_SET_BIRTH(bp, txg, txg); 3330 3331 return (0); 3332 } 3333 3334 void 3335 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now) 3336 { 3337 const dva_t *dva = bp->blk_dva; 3338 int ndvas = BP_GET_NDVAS(bp); 3339 3340 ASSERT(!BP_IS_HOLE(bp)); 3341 ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa)); 3342 3343 spa_config_enter(spa, SCL_FREE, FTAG, RW_READER); 3344 3345 for (int d = 0; d < ndvas; d++) 3346 metaslab_free_dva(spa, &dva[d], txg, now); 3347 3348 spa_config_exit(spa, SCL_FREE, FTAG); 3349 } 3350 3351 int 3352 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg) 3353 { 3354 const dva_t *dva = bp->blk_dva; 3355 int ndvas = BP_GET_NDVAS(bp); 3356 int error = 0; 3357 3358 ASSERT(!BP_IS_HOLE(bp)); 3359 3360 if (txg != 0) { 3361 /* 3362 * First do a dry run to make sure all DVAs are claimable, 3363 * so we don't have to unwind from partial failures below. 3364 */ 3365 if ((error = metaslab_claim(spa, bp, 0)) != 0) 3366 return (error); 3367 } 3368 3369 spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER); 3370 3371 for (int d = 0; d < ndvas; d++) 3372 if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0) 3373 break; 3374 3375 spa_config_exit(spa, SCL_ALLOC, FTAG); 3376 3377 ASSERT(error == 0 || txg == 0); 3378 3379 return (error); 3380 } 3381 3382 void 3383 metaslab_check_free(spa_t *spa, const blkptr_t *bp) 3384 { 3385 if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0) 3386 return; 3387 3388 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 3389 for (int i = 0; i < BP_GET_NDVAS(bp); i++) { 3390 uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]); 3391 vdev_t *vd = vdev_lookup_top(spa, vdev); 3392 uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]); 3393 uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]); 3394 metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift]; 3395 3396 if (msp->ms_loaded) 3397 range_tree_verify(msp->ms_tree, offset, size); 3398 3399 for (int j = 0; j < TXG_SIZE; j++) 3400 range_tree_verify(msp->ms_freetree[j], offset, size); 3401 for (int j = 0; j < TXG_DEFER_SIZE; j++) 3402 range_tree_verify(msp->ms_defertree[j], offset, size); 3403 } 3404 spa_config_exit(spa, SCL_VDEV, FTAG); 3405 } 3406