/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
 */
static int metaslab_debug = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy. Once the space map cannot satisfy
 * an allocation of this size, it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 4;

/*
 * A metaslab is considered "free" if it contains a contiguous
 * segment which is greater than metaslab_min_alloc_size.
 */
uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS;

/*
 * Max number of space_maps to prefetch.
 */
int metaslab_prefetch_limit = SPA_DVAS_PER_BP;

/*
 * Percentage bonus multiplier for metaslabs that are in the bonus area.
 */
int metaslab_smo_bonus_pct = 150;

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	ASSERT(mc->mc_rotor == NULL);
	ASSERT(mc->mc_alloc == 0);
	ASSERT(mc->mc_deferred == 0);
	ASSERT(mc->mc_space == 0);
	ASSERT(mc->mc_dspace == 0);

	kmem_free(mc, sizeof (metaslab_class_t));
}

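/*
 * Sanity-check the linkage between a metaslab class and the metaslab
 * groups on its rotor: each group must belong to a top-level vdev and
 * point back to this class.
 */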
int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
	atomic_add_64(&mc->mc_alloc, alloc_delta);
	atomic_add_64(&mc->mc_deferred, defer_delta);
	atomic_add_64(&mc->mc_space, space_delta);
	atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
	return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
	return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
	return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
	return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

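/*
 * Allocate and initialize a metaslab group for the given top-level vdev
 * and link it to class mc. The group does not join the class's rotor
 * until metaslab_group_activate() is called.
 */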
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_vd = vd;
	mg->mg_class = mc;
	mg->mg_activation_count = 0;

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	/*
	 * We may have gone below zero with the activation count
	 * either because we never activated in the first place or
	 * because we're done, and possibly removing the vdev.
	 */
	ASSERT(mg->mg_activation_count <= 0);

	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	ASSERT(mc->mc_rotor != mg);
	ASSERT(mg->mg_prev == NULL);
	ASSERT(mg->mg_next == NULL);
	ASSERT(mg->mg_activation_count <= 0);

	if (++mg->mg_activation_count <= 0)
		return;

	mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
	metaslab_class_t *mc = mg->mg_class;
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

	if (--mg->mg_activation_count != 0) {
		ASSERT(mc->mc_rotor != mg);
		ASSERT(mg->mg_prev == NULL);
		ASSERT(mg->mg_next == NULL);
		ASSERT(mg->mg_activation_count < 0);
		return;
	}

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * Common allocator routines
 * ==========================================================================
 */
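/*
 * Comparison function for the size-sorted AVL tree (sm_pp_root): order
 * segments by size, breaking ties by start offset.
 */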
static int
metaslab_segsize_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate. This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}

static void
metaslab_pp_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
	avl_create(sm->sm_pp_root, metaslab_segsize_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_pp_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}

/* ARGSUSED */
static void
metaslab_pp_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_pp_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/*
 * Return the maximum contiguous segment within the metaslab.
 */
uint64_t
metaslab_pp_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
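/*
 * Allocate the requested size using first-fit. A cursor is kept per
 * power-of-two alignment (in sm_ppd) so that successive allocations of
 * the same alignment resume where the previous one left off.
 */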
static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}

/* ARGSUSED */
boolean_t
metaslab_ff_fragmented(space_map_t *sm)
{
	return (B_TRUE);
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ff_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ff_fragmented
};

/*
 * ==========================================================================
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method. Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 * ==========================================================================
 */
static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space, switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

static boolean_t
metaslab_df_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	if (max_size >= metaslab_df_alloc_threshold &&
	    free_pct >= metaslab_df_free_pct)
		return (B_FALSE);

	return (B_TRUE);
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_df_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_df_fragmented
};

/*
 * ==========================================================================
 * Other experimental allocators
 * ==========================================================================
 */
static uint64_t
metaslab_cdf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd;
	uint64_t *extent_end = (uint64_t *)sm->sm_ppd + 1;
	uint64_t max_size = metaslab_pp_maxsize(sm);
	uint64_t rsize = size;
	uint64_t offset = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ASSERT3U(*extent_end, >=, *cursor);

	/*
	 * If we're running low on space, switch to using the size
	 * sorted AVL tree (best-fit).
	 */
	if ((*cursor + size) > *extent_end) {
		t = sm->sm_pp_root;
		*cursor = *extent_end = 0;

		if (max_size > 2 * SPA_MAXBLOCKSIZE)
			rsize = MIN(metaslab_min_alloc_size, max_size);
		offset = metaslab_block_picker(t, extent_end, rsize, 1ULL);
		if (offset != -1)
			*cursor = offset + size;
	} else {
		offset = metaslab_block_picker(t, cursor, rsize, 1ULL);
	}
	ASSERT3U(*cursor, <=, *extent_end);
	return (offset);
}

static boolean_t
metaslab_cdf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size * 10))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_cdf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_cdf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_cdf_fragmented
};

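/*
 * Try the segment at the cursor first; if it cannot hold the request,
 * fall back to the size-sorted AVL tree and take a best-fit segment.
 */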
static uint64_t
metaslab_ndf_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	avl_index_t where;
	space_seg_t *ss, ssearch;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd;
	uint64_t max_size = metaslab_pp_maxsize(sm);

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL || (ss->ss_start + size > ss->ss_end)) {
		t = sm->sm_pp_root;

		if (max_size > 2 * SPA_MAXBLOCKSIZE)
			size = MIN(metaslab_min_alloc_size, max_size);

		ssearch.ss_start = 0;
		ssearch.ss_end = size;
		ss = avl_find(t, &ssearch, &where);
		if (ss == NULL)
			ss = avl_nearest(t, where, AVL_AFTER);
		ASSERT(ss != NULL);
	}

	if (ss != NULL) {
		if (ss->ss_start + size <= ss->ss_end) {
			*cursor = ss->ss_start + size;
			return (ss->ss_start);
		}
	}
	return (-1ULL);
}

static boolean_t
metaslab_ndf_fragmented(space_map_t *sm)
{
	uint64_t max_size = metaslab_pp_maxsize(sm);

	if (max_size > (metaslab_min_alloc_size * 10))
		return (B_FALSE);
	return (B_TRUE);
}

static space_map_ops_t metaslab_ndf_ops = {
	metaslab_pp_load,
	metaslab_pp_unload,
	metaslab_ndf_alloc,
	metaslab_pp_claim,
	metaslab_pp_free,
	metaslab_pp_maxsize,
	metaslab_ndf_fragmented
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done(). This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	if (metaslab_debug && smo->smo_object != 0) {
		mutex_enter(&msp->ms_lock);
		VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops,
		    SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
		mutex_exit(&msp->ms_lock);
	}

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, txg);
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;

	vdev_space_update(mg->mg_vd,
	    -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (int t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	for (int t = 0; t < TXG_DEFER_SIZE; t++)
		space_map_destroy(&msp->ms_defermap[t]);

	ASSERT3S(msp->ms_deferspace, ==, 0);

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)

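/*
 * Compute a metaslab's weight, which encodes both how much free space it
 * has and how desirable that space is (lower-offset metaslabs and those
 * in the bonus area are preferred). The high-order bits carry the
 * metaslab's activation state.
 */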
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1. We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	/*
	 * For locality, assign higher weight to metaslabs which have
	 * a lower offset than what we've already activated.
	 */
	if (sm->sm_start <= mg->mg_bonus_area)
		weight *= (metaslab_smo_bonus_pct / 100);
	ASSERT(weight >= space &&
	    weight <= 2 * (metaslab_smo_bonus_pct / 100) * space);

	if (sm->sm_loaded && !sm->sm_ops->smop_fragmented(sm)) {
		/*
		 * If this metaslab is one we're actively using, adjust its
		 * weight to make it preferable to any inactive metaslab so
		 * we'll polish it off.
		 */
		weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
	}
	return (weight);
}

static void
metaslab_prefetch(metaslab_group_t *mg)
{
	spa_t *spa = mg->mg_vd->vdev_spa;
	metaslab_t *msp;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	int m;

	mutex_enter(&mg->mg_lock);

	/*
	 * Prefetch the next potential metaslabs
	 */
	for (msp = avl_first(t), m = 0; msp; msp = AVL_NEXT(t, msp), m++) {
		space_map_t *sm = &msp->ms_map;
		space_map_obj_t *smo = &msp->ms_smo;

		/* If we have reached our prefetch limit then we're done */
		if (m >= metaslab_prefetch_limit)
			break;

		if (!sm->sm_loaded && smo->smo_object != 0) {
			mutex_exit(&mg->mg_lock);
			dmu_prefetch(spa_meta_objset(spa), smo->smo_object,
			    0ULL, smo->smo_objsize);
			mutex_enter(&mg->mg_lock);
		}
	}
	mutex_exit(&mg->mg_lock);
}

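/*
 * Activate a metaslab for allocation: load its space map if necessary,
 * subtract out frees that are still deferred (recorded on disk but not
 * yet available), and tag the metaslab with the requested activation
 * weight. Returns ENOSPC if the metaslab cannot satisfy an allocation
 * of 'size'.
 */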
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		space_map_load_wait(sm);
		if (!sm->sm_loaded) {
			int error = space_map_load(sm, sm_ops, SM_FREE,
			    &msp->ms_smo,
			    spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
			if (error) {
				metaslab_group_sort(msp->ms_group, msp, 0);
				return (error);
			}
			for (int t = 0; t < TXG_DEFER_SIZE; t++)
				space_map_walk(&msp->ms_defermap[t],
				    space_map_claim, sm);
		}

		/*
		 * Track the bonus area as we activate new metaslabs.
		 */
		if (sm->sm_start > mg->mg_bonus_area) {
			mutex_enter(&mg->mg_lock);
			mg->mg_bonus_area = sm->sm_start;
			mutex_exit(&mg->mg_lock);
		}

		/*
		 * If we were able to load the map then make sure
		 * that this map is still able to satisfy our request.
		 */
		if (msp->ms_weight < size)
			return (ENOSPC);

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again. In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa_meta_objset(spa);
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;

	ASSERT(!vd->vdev_ishole);

	if (allocmap->sm_space == 0 && freemap->sm_space == 0)
		return;

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map. No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
	}

	mutex_enter(&msp->ms_lock);

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus deferred frees (ms_defermap[]),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_walk(&msp->ms_defermap[t],
			    space_map_remove, allocmap);

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int64_t alloc_delta, defer_delta;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (int t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}

		for (int t = 0; t < TXG_DEFER_SIZE; t++)
			space_map_create(&msp->ms_defermap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);

		vdev_space_update(vd, 0, 0, sm->sm_size);
	}

	alloc_delta = smosync->smo_alloc - smo->smo_alloc;
	defer_delta = freed_map->sm_space - defer_map->sm_space;

	vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add defer_map (oldest deferred frees) to this map and
	 * transfer freed_map (this txg's frees) to defer_map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
	space_map_vacate(freed_map, space_map_add, defer_map);

	*smo = *smosync;

	msp->ms_deferspace += defer_delta;
	ASSERT3S(msp->ms_deferspace, >=, 0);
	ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
	if (msp->ms_deferspace != 0) {
		/*
		 * Keep syncing this metaslab until all deferred frees
		 * are back in circulation.
		 */
		vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
	}

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced. (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable && !metaslab_debug)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

void
metaslab_sync_reassess(metaslab_group_t *mg)
{
	vdev_t *vd = mg->mg_vd;

	/*
	 * Re-evaluate all metaslabs which have lower offsets than the
	 * bonus area.
	 */
	for (int m = 0; m < vd->vdev_ms_count; m++) {
		metaslab_t *msp = vd->vdev_ms[m];

		if (msp->ms_map.sm_start > mg->mg_bonus_area)
			break;

		mutex_enter(&msp->ms_lock);
		metaslab_group_sort(mg, msp, metaslab_weight(msp));
		mutex_exit(&msp->ms_lock);
	}

	/*
	 * Prefetch the next potential metaslabs
	 */
	metaslab_prefetch(mg);
}

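/*
 * Return the distance, in bytes, between a metaslab and a DVA. DVAs on
 * a different vdev are treated as infinitely far away.
 */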
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}

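/*
 * Allocate 'size' bytes from this metaslab group. Walk the group's
 * metaslabs from highest to lowest weight, skipping any that cannot hold
 * the request or that lie too close to a DVA already allocated for this
 * block, activate the chosen metaslab, and allocate from it.
 * Returns the allocated offset, or -1ULL on failure.
 */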
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < size) {
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request. It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < size || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight, size) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, space_map_maxsize(&msp->ms_map));

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
		return (ENOSPC);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_aliquot because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly. It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs. If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible. If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about. Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data. With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header. That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed). Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong metaslab class, or into a
	 * metaslab group that has been passivated, just follow the rotor.
	 */
	if (mg->mg_class != mc || mg->mg_activation_count <= 0)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		ASSERT(mg->mg_activation_count == 1);

		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}
		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_aliquot == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				int64_t vu, cu;

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				cu = (mc->mc_alloc << 10) / (mc->mc_space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((cu - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}

			if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_aliquot = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_aliquot = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error = 0;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded)
		error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);

	if (error == 0 && !space_map_contains(&msp->ms_map, offset, size))
		error = ENOENT;

	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(&msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

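/*
 * Allocate ndvas DVAs (copies) of psize bytes for the block pointer bp,
 * spreading the copies across vdevs. On failure, any DVAs already
 * allocated for bp are freed and cleared.
 */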
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);
	ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (ENOSPC);
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	BP_SET_BIRTH(bp, txg, txg);

	return (0);
}

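/*
 * Free every DVA in the block pointer. If 'now' is set the space is
 * returned to the free map immediately; otherwise the free is recorded
 * in this txg's freemap and deferred in the usual way.
 */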
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}