/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * Metaslab debugging: when set, keeps all space maps in core to verify frees.
 */
static int metaslab_debug = 0;

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size, it switches to a more aggressive
 * strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 30;
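
/*
 * For example (illustrative numbers, assuming the 128K SPA_MAXBLOCKSIZE
 * default): a 10GB metaslab stays in first-fit mode while its largest
 * free segment is at least 128K and at least 3GB (30%) of it remains
 * free; once either condition fails, metaslab_df_alloc() below switches
 * to best-fit.
 */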

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
        metaslab_class_t *mc;

        mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

        mc->mc_spa = spa;
        mc->mc_rotor = NULL;
        mc->mc_ops = ops;

        return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
        ASSERT(mc->mc_rotor == NULL);
        ASSERT(mc->mc_alloc == 0);
        ASSERT(mc->mc_deferred == 0);
        ASSERT(mc->mc_space == 0);
        ASSERT(mc->mc_dspace == 0);

        kmem_free(mc, sizeof (metaslab_class_t));
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
        metaslab_group_t *mg;
        vdev_t *vd;

        /*
         * Must hold one of the spa_config locks.
         */
        ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
            spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

        if ((mg = mc->mc_rotor) == NULL)
                return (0);

        do {
                vd = mg->mg_vd;
                ASSERT(vd->vdev_mg != NULL);
                ASSERT3P(vd->vdev_top, ==, vd);
                ASSERT3P(mg->mg_class, ==, mc);
                ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
        } while ((mg = mg->mg_next) != mc->mc_rotor);

        return (0);
}

void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
    int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
        atomic_add_64(&mc->mc_alloc, alloc_delta);
        atomic_add_64(&mc->mc_deferred, defer_delta);
        atomic_add_64(&mc->mc_space, space_delta);
        atomic_add_64(&mc->mc_dspace, dspace_delta);
}

uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
        return (mc->mc_alloc);
}

uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
        return (mc->mc_deferred);
}

uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
        return (mc->mc_space);
}

uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
        return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
        const metaslab_t *m1 = x1;
        const metaslab_t *m2 = x2;

        if (m1->ms_weight < m2->ms_weight)
                return (1);
        if (m1->ms_weight > m2->ms_weight)
                return (-1);

        /*
         * If the weights are identical, use the offset to force uniqueness.
         */
        if (m1->ms_map.sm_start < m2->ms_map.sm_start)
                return (-1);
        if (m1->ms_map.sm_start > m2->ms_map.sm_start)
                return (1);

        ASSERT3P(m1, ==, m2);

        return (0);
}
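
/*
 * Note that metaslab_compare() sorts by weight in descending order, so
 * avl_first(&mg->mg_metaslab_tree) is always the heaviest (most
 * desirable) metaslab; the offset comparison is only a tie-breaker to
 * keep entries unique.  For instance, two metaslabs of equal weight
 * simply sort by sm_start.
 */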

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
        metaslab_group_t *mg;

        mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
        mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
        avl_create(&mg->mg_metaslab_tree, metaslab_compare,
            sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
        mg->mg_vd = vd;
        mg->mg_class = mc;
        mg->mg_activation_count = 0;

        return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
        ASSERT(mg->mg_prev == NULL);
        ASSERT(mg->mg_next == NULL);
        /*
         * We may have gone below zero with the activation count
         * either because we never activated in the first place or
         * because we're done, and possibly removing the vdev.
         */
        ASSERT(mg->mg_activation_count <= 0);

        avl_destroy(&mg->mg_metaslab_tree);
        mutex_destroy(&mg->mg_lock);
        kmem_free(mg, sizeof (metaslab_group_t));
}

void
metaslab_group_activate(metaslab_group_t *mg)
{
        metaslab_class_t *mc = mg->mg_class;
        metaslab_group_t *mgprev, *mgnext;

        ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

        ASSERT(mc->mc_rotor != mg);
        ASSERT(mg->mg_prev == NULL);
        ASSERT(mg->mg_next == NULL);
        ASSERT(mg->mg_activation_count <= 0);

        if (++mg->mg_activation_count <= 0)
                return;

        mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);

        if ((mgprev = mc->mc_rotor) == NULL) {
                mg->mg_prev = mg;
                mg->mg_next = mg;
        } else {
                mgnext = mgprev->mg_next;
                mg->mg_prev = mgprev;
                mg->mg_next = mgnext;
                mgprev->mg_next = mg;
                mgnext->mg_prev = mg;
        }
        mc->mc_rotor = mg;
}

void
metaslab_group_passivate(metaslab_group_t *mg)
{
        metaslab_class_t *mc = mg->mg_class;
        metaslab_group_t *mgprev, *mgnext;

        ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));

        if (--mg->mg_activation_count != 0) {
                ASSERT(mc->mc_rotor != mg);
                ASSERT(mg->mg_prev == NULL);
                ASSERT(mg->mg_next == NULL);
                ASSERT(mg->mg_activation_count < 0);
                return;
        }

        mgprev = mg->mg_prev;
        mgnext = mg->mg_next;

        if (mg == mgnext) {
                mc->mc_rotor = NULL;
        } else {
                mc->mc_rotor = mgnext;
                mgprev->mg_next = mgnext;
                mgnext->mg_prev = mgprev;
        }

        mg->mg_prev = NULL;
        mg->mg_next = NULL;
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == NULL);
        msp->ms_group = mg;
        msp->ms_weight = 0;
        avl_add(&mg->mg_metaslab_tree, msp);
        mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == mg);
        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_group = NULL;
        mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
        /*
         * Although in principle the weight can be any value, in
         * practice we do not use values in the range [1, 510].
         */
        ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
        ASSERT(MUTEX_HELD(&msp->ms_lock));

        mutex_enter(&mg->mg_lock);
        ASSERT(msp->ms_group == mg);
        avl_remove(&mg->mg_metaslab_tree, msp);
        msp->ms_weight = weight;
        avl_add(&mg->mg_metaslab_tree, msp);
        mutex_exit(&mg->mg_lock);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate.  This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
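 *
 * For example (illustrative numbers): with *cursor == 0, size == 4K and
 * align == 4K, the search starts at the first segment at or after offset 0
 * and returns the first 4K-aligned offset with 4K of free space, advancing
 * the cursor past it so the next caller resumes the scan from there.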
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
        space_seg_t *ss, ssearch;
        avl_index_t where;

        ssearch.ss_start = *cursor;
        ssearch.ss_end = *cursor + size;

        ss = avl_find(t, &ssearch, &where);
        if (ss == NULL)
                ss = avl_nearest(t, where, AVL_AFTER);

        while (ss != NULL) {
                uint64_t offset = P2ROUNDUP(ss->ss_start, align);

                if (offset + size <= ss->ss_end) {
                        *cursor = offset + size;
                        return (offset);
                }
                ss = AVL_NEXT(t, ss);
        }

        /*
         * If we know we've searched the whole map (*cursor == 0), give up.
         * Otherwise, reset the cursor to the beginning and try again.
         */
        if (*cursor == 0)
                return (-1ULL);

        *cursor = 0;
        return (metaslab_block_picker(t, cursor, size, align));
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static void
metaslab_ff_load(space_map_t *sm)
{
        ASSERT(sm->sm_ppd == NULL);
        sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
        sm->sm_pp_root = NULL;
}

static void
metaslab_ff_unload(space_map_t *sm)
{
        kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
        sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
        avl_tree_t *t = &sm->sm_root;
        uint64_t align = size & -size;
        uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

        return (metaslab_block_picker(t, cursor, size, align));
}

/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
        metaslab_ff_load,
        metaslab_ff_unload,
        metaslab_ff_alloc,
        metaslab_ff_claim,
        metaslab_ff_free,
        NULL    /* maxsize */
};
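
/*
 * A sketch of how the per-power-of-two cursors above are indexed
 * (illustrative only).  An 8K request is naturally 8K-aligned:
 *
 *	align = size & -size;			8192 & -8192 == 8192
 *	cursor = sm_ppd + highbit(align) - 1;	sm_ppd[13]
 *
 * so all 8K allocations share one cursor, all 16K allocations another,
 * and so on up to the 64 slots allocated in metaslab_ff_load().
 */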

/*
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method.  Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 */

uint64_t
metaslab_df_maxsize(space_map_t *sm)
{
        avl_tree_t *t = sm->sm_pp_root;
        space_seg_t *ss;

        if (t == NULL || (ss = avl_last(t)) == NULL)
                return (0ULL);

        return (ss->ss_end - ss->ss_start);
}

static int
metaslab_df_seg_compare(const void *x1, const void *x2)
{
        const space_seg_t *s1 = x1;
        const space_seg_t *s2 = x2;
        uint64_t ss_size1 = s1->ss_end - s1->ss_start;
        uint64_t ss_size2 = s2->ss_end - s2->ss_start;

        if (ss_size1 < ss_size2)
                return (-1);
        if (ss_size1 > ss_size2)
                return (1);

        if (s1->ss_start < s2->ss_start)
                return (-1);
        if (s1->ss_start > s2->ss_start)
                return (1);

        return (0);
}

static void
metaslab_df_load(space_map_t *sm)
{
        space_seg_t *ss;

        ASSERT(sm->sm_ppd == NULL);
        sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

        sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
        avl_create(sm->sm_pp_root, metaslab_df_seg_compare,
            sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

        for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
                avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_df_unload(space_map_t *sm)
{
        void *cookie = NULL;

        kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
        sm->sm_ppd = NULL;

        while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
                /* tear down the tree */
        }

        avl_destroy(sm->sm_pp_root);
        kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
        sm->sm_pp_root = NULL;
}

static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
        avl_tree_t *t = &sm->sm_root;
        uint64_t align = size & -size;
        uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
        uint64_t max_size = metaslab_df_maxsize(sm);
        int free_pct = sm->sm_space * 100 / sm->sm_size;

        ASSERT(MUTEX_HELD(sm->sm_lock));
        ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

        if (max_size < size)
                return (-1ULL);

        /*
         * If we're running low on space, switch to using the
         * size-sorted AVL tree (best-fit).
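         *
         * For example (illustrative): if metaslab_df_free_pct is 30 and
         * this space map is only 25% free, or if its largest segment is
         * smaller than metaslab_df_alloc_threshold, we search the
         * size-sorted tree with the cursor reset, so the picker returns
         * roughly the smallest segment that can satisfy the request.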
         */
        if (max_size < metaslab_df_alloc_threshold ||
            free_pct < metaslab_df_free_pct) {
                t = sm->sm_pp_root;
                *cursor = 0;
        }

        return (metaslab_block_picker(t, cursor, size, 1ULL));
}

/* ARGSUSED */
static void
metaslab_df_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_df_free(space_map_t *sm, uint64_t start, uint64_t size)
{
        /* No need to update cursor */
}

static space_map_ops_t metaslab_df_ops = {
        metaslab_df_load,
        metaslab_df_unload,
        metaslab_df_alloc,
        metaslab_df_claim,
        metaslab_df_free,
        metaslab_df_maxsize
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
        vdev_t *vd = mg->mg_vd;
        metaslab_t *msp;

        msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
        mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

        msp->ms_smo_syncing = *smo;

        /*
         * We create the main space map here, but we don't create the
         * allocmaps and freemaps until metaslab_sync_done().  This serves
         * two purposes: it allows metaslab_sync_done() to detect the
         * addition of new space; and for debugging, it ensures that we'd
         * data fault on any attempt to use this metaslab before it's ready.
         */
        space_map_create(&msp->ms_map, start, size,
            vd->vdev_ashift, &msp->ms_lock);

        metaslab_group_add(mg, msp);

        if (metaslab_debug && smo->smo_object != 0) {
                mutex_enter(&msp->ms_lock);
                VERIFY(space_map_load(&msp->ms_map, mg->mg_class->mc_ops,
                    SM_FREE, smo, spa_meta_objset(vd->vdev_spa)) == 0);
                mutex_exit(&msp->ms_lock);
        }

        /*
         * If we're opening an existing pool (txg == 0) or creating
         * a new one (txg == TXG_INITIAL), all space is available now.
         * If we're adding space to an existing pool, the new space
         * does not become available until after this txg has synced.
         */
        if (txg <= TXG_INITIAL)
                metaslab_sync_done(msp, 0);

        if (txg != 0) {
                vdev_dirty(vd, 0, NULL, txg);
                vdev_dirty(vd, VDD_METASLAB, msp, txg);
        }

        return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;

        vdev_space_update(mg->mg_vd,
            -msp->ms_smo.smo_alloc, 0, -msp->ms_map.sm_size);

        metaslab_group_remove(mg, msp);

        mutex_enter(&msp->ms_lock);

        space_map_unload(&msp->ms_map);
        space_map_destroy(&msp->ms_map);

        for (int t = 0; t < TXG_SIZE; t++) {
                space_map_destroy(&msp->ms_allocmap[t]);
                space_map_destroy(&msp->ms_freemap[t]);
        }

        for (int t = 0; t < TXG_DEFER_SIZE; t++)
                space_map_destroy(&msp->ms_defermap[t]);

        ASSERT3S(msp->ms_deferspace, ==, 0);

        mutex_exit(&msp->ms_lock);
        mutex_destroy(&msp->ms_lock);

        kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2

static uint64_t
metaslab_weight(metaslab_t *msp)
{
        metaslab_group_t *mg = msp->ms_group;
        space_map_t *sm = &msp->ms_map;
        space_map_obj_t *smo = &msp->ms_smo;
        vdev_t *vd = mg->mg_vd;
        uint64_t weight, space;

        ASSERT(MUTEX_HELD(&msp->ms_lock));

        /*
         * The baseline weight is the metaslab's free space.
         */
        space = sm->sm_size - smo->smo_alloc;
        weight = space;

        /*
         * Modern disks have uniform bit density and constant angular velocity.
         * Therefore, the outer recording zones are faster (higher bandwidth)
         * than the inner zones by the ratio of outer to inner track diameter,
         * which is typically around 2:1.  We account for this by assigning
         * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
         * In effect, this means that we'll select the metaslab with the most
         * free bandwidth rather than simply the one with the most free space.
         */
        weight = 2 * weight -
            ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
        ASSERT(weight >= space && weight <= 2 * space);
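
        /*
         * Worked example (illustrative numbers): on a vdev with 100
         * metaslabs, metaslab 0 keeps weight == 2 * space, metaslab 50
         * gets 1.5 * space, and the last one approaches 1 * space --
         * so of two equally empty metaslabs we prefer the outer
         * (faster) one.
         */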

        /*
         * For locality, assign higher weight to metaslabs we've used before.
         */
        if (smo->smo_object != 0)
                weight *= METASLAB_SMO_BONUS_MULTIPLIER;
        ASSERT(weight >= space &&
            weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

        /*
         * If this metaslab is one we're actively using, adjust its weight to
         * make it preferable to any inactive metaslab so we'll polish it off.
         */
        weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

        return (weight);
}

static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
{
        space_map_t *sm = &msp->ms_map;
        space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

        ASSERT(MUTEX_HELD(&msp->ms_lock));

        if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
                space_map_load_wait(sm);
                if (!sm->sm_loaded) {
                        int error = space_map_load(sm, sm_ops, SM_FREE,
                            &msp->ms_smo,
                            spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
                        if (error) {
                                metaslab_group_sort(msp->ms_group, msp, 0);
                                return (error);
                        }
                        for (int t = 0; t < TXG_DEFER_SIZE; t++)
                                space_map_walk(&msp->ms_defermap[t],
                                    space_map_claim, sm);
                }

                /*
                 * If we were able to load the map then make sure
                 * that this map is still able to satisfy our request.
                 */
                if (msp->ms_weight < size)
                        return (ENOSPC);

                metaslab_group_sort(msp->ms_group, msp,
                    msp->ms_weight | activation_weight);
        }
        ASSERT(sm->sm_loaded);
        ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

        return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
        /*
         * If size < SPA_MINBLOCKSIZE, then we will not allocate from
         * this metaslab again.  In that case, it had better be empty,
         * or we would be leaving space on the table.
         */
#if 0
        ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
#endif
        metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
        ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
        vdev_t *vd = msp->ms_group->mg_vd;
        spa_t *spa = vd->vdev_spa;
        objset_t *mos = spa_meta_objset(spa);
        space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
        space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
        space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
        space_map_t *sm = &msp->ms_map;
        space_map_obj_t *smo = &msp->ms_smo_syncing;
        dmu_buf_t *db;
        dmu_tx_t *tx;

        ASSERT(!vd->vdev_ishole);

        if (allocmap->sm_space == 0 && freemap->sm_space == 0)
                return;

        /*
         * The only state that can actually be changing concurrently with
         * metaslab_sync() is the metaslab's ms_map.  No other thread can
         * be modifying this txg's allocmap, freemap, freed_map, or smo.
         * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
         * We drop it whenever we call into the DMU, because the DMU
         * can call down to us (e.g. via zio_free()) at any time.
         */

        tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

        if (smo->smo_object == 0) {
                ASSERT(smo->smo_objsize == 0);
                ASSERT(smo->smo_alloc == 0);
                smo->smo_object = dmu_object_alloc(mos,
                    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
                    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
                ASSERT(smo->smo_object != 0);
                dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
                    (sm->sm_start >> vd->vdev_ms_shift),
                    sizeof (uint64_t), &smo->smo_object, tx);
        }

        mutex_enter(&msp->ms_lock);

        space_map_walk(freemap, space_map_add, freed_map);
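
        /*
         * Illustrative arithmetic for the condense test below: each
         * in-core segment needs at most two 8-byte entries when written
         * out, so a map with 10,000 segments can always be rewritten in
         * roughly 160K.  Once the append-only on-disk object grows past
         * that bound, rewriting it from scratch is a net win.
         */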
        if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
            2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
                /*
                 * The in-core space map representation is twice as compact
                 * as the on-disk one, so it's time to condense the latter
                 * by generating a pure allocmap from first principles.
                 *
                 * This metaslab is 100% allocated,
                 * minus the content of the in-core map (sm),
                 * minus what's been freed this txg (freed_map),
                 * minus deferred frees (ms_defermap[]),
                 * minus allocations from txgs in the future
                 * (because they haven't been committed yet).
                 */
                space_map_vacate(allocmap, NULL, NULL);
                space_map_vacate(freemap, NULL, NULL);

                space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

                space_map_walk(sm, space_map_remove, allocmap);
                space_map_walk(freed_map, space_map_remove, allocmap);

                for (int t = 0; t < TXG_DEFER_SIZE; t++)
                        space_map_walk(&msp->ms_defermap[t],
                            space_map_remove, allocmap);

                for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
                        space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
                            space_map_remove, allocmap);

                mutex_exit(&msp->ms_lock);
                space_map_truncate(smo, mos, tx);
                mutex_enter(&msp->ms_lock);
        }

        space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
        space_map_sync(freemap, SM_FREE, smo, mos, tx);

        mutex_exit(&msp->ms_lock);

        VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
        dmu_buf_will_dirty(db, tx);
        ASSERT3U(db->db_size, >=, sizeof (*smo));
        bcopy(smo, db->db_data, sizeof (*smo));
        dmu_buf_rele(db, FTAG);

        dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
        space_map_obj_t *smo = &msp->ms_smo;
        space_map_obj_t *smosync = &msp->ms_smo_syncing;
        space_map_t *sm = &msp->ms_map;
        space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
        space_map_t *defer_map = &msp->ms_defermap[txg % TXG_DEFER_SIZE];
        metaslab_group_t *mg = msp->ms_group;
        vdev_t *vd = mg->mg_vd;
        int64_t alloc_delta, defer_delta;

        ASSERT(!vd->vdev_ishole);

        mutex_enter(&msp->ms_lock);

        /*
         * If this metaslab is just becoming available, initialize its
         * allocmaps and freemaps and add its capacity to the vdev.
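         * (freed_map->sm_size == 0 here can only mean the maps were
         * never created: the metaslab is zalloc'd in metaslab_init()
         * and sm_size is set as soon as space_map_create() runs below.)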
         */
        if (freed_map->sm_size == 0) {
                for (int t = 0; t < TXG_SIZE; t++) {
                        space_map_create(&msp->ms_allocmap[t], sm->sm_start,
                            sm->sm_size, sm->sm_shift, sm->sm_lock);
                        space_map_create(&msp->ms_freemap[t], sm->sm_start,
                            sm->sm_size, sm->sm_shift, sm->sm_lock);
                }

                for (int t = 0; t < TXG_DEFER_SIZE; t++)
                        space_map_create(&msp->ms_defermap[t], sm->sm_start,
                            sm->sm_size, sm->sm_shift, sm->sm_lock);

                vdev_space_update(vd, 0, 0, sm->sm_size);
        }

        alloc_delta = smosync->smo_alloc - smo->smo_alloc;
        defer_delta = freed_map->sm_space - defer_map->sm_space;

        vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);

        ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
        ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

        /*
         * If there's a space_map_load() in progress, wait for it to complete
         * so that we have a consistent view of the in-core space map.
         * Then, add defer_map (oldest deferred frees) to this map and
         * transfer freed_map (this txg's frees) to defer_map.
         */
        space_map_load_wait(sm);
        space_map_vacate(defer_map, sm->sm_loaded ? space_map_free : NULL, sm);
        space_map_vacate(freed_map, space_map_add, defer_map);

        *smo = *smosync;

        msp->ms_deferspace += defer_delta;
        ASSERT3S(msp->ms_deferspace, >=, 0);
        ASSERT3S(msp->ms_deferspace, <=, sm->sm_size);
        if (msp->ms_deferspace != 0) {
                /*
                 * Keep syncing this metaslab until all deferred frees
                 * are back in circulation.
                 */
                vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
        }

        /*
         * If the map is loaded but no longer active, evict it as soon as all
         * future allocations have synced.  (If we unloaded it now and then
         * loaded a moment later, the map wouldn't reflect those allocations.)
         */
        if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
                int evictable = 1;

                for (int t = 1; t < TXG_CONCURRENT_STATES; t++)
                        if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
                                evictable = 0;

                if (evictable && !metaslab_debug)
                        space_map_unload(sm);
        }

        metaslab_group_sort(mg, msp, metaslab_weight(msp));

        mutex_exit(&msp->ms_lock);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
        uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
        uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
        uint64_t start = msp->ms_map.sm_start >> ms_shift;

        if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
                return (1ULL << 63);

        if (offset < start)
                return ((start - offset) << ms_shift);
        if (offset > start)
                return ((offset - start) << ms_shift);
        return (0);
}
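
/*
 * A note on how metaslab_distance() above is used by metaslab_group_alloc()
 * below (illustrative numbers): with 1GB metaslabs (vdev_ms_shift == 30),
 * a candidate metaslab that starts 5 slots away from a previous DVA is
 * 5GB distant.  For a secondary (ditto) allocation the candidate is
 * accepted only if every prior DVA is at least min_distance away --
 * or 1.5 * min_distance if the candidate metaslab is still empty.
 */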
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
        metaslab_t *msp = NULL;
        uint64_t offset = -1ULL;
        avl_tree_t *t = &mg->mg_metaslab_tree;
        uint64_t activation_weight;
        uint64_t target_distance;
        int i;

        activation_weight = METASLAB_WEIGHT_PRIMARY;
        for (i = 0; i < d; i++) {
                if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
                        activation_weight = METASLAB_WEIGHT_SECONDARY;
                        break;
                }
        }

        for (;;) {
                boolean_t was_active;

                mutex_enter(&mg->mg_lock);
                for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
                        if (msp->ms_weight < size) {
                                mutex_exit(&mg->mg_lock);
                                return (-1ULL);
                        }

                        was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
                        if (activation_weight == METASLAB_WEIGHT_PRIMARY)
                                break;

                        target_distance = min_distance +
                            (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

                        for (i = 0; i < d; i++)
                                if (metaslab_distance(msp, &dva[i]) <
                                    target_distance)
                                        break;
                        if (i == d)
                                break;
                }
                mutex_exit(&mg->mg_lock);
                if (msp == NULL)
                        return (-1ULL);

                mutex_enter(&msp->ms_lock);

                /*
                 * Ensure that the metaslab we have selected is still
                 * capable of handling our request.  It's possible that
                 * another thread may have changed the weight while we
                 * were blocked on the metaslab lock.
                 */
                if (msp->ms_weight < size || (was_active &&
                    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
                    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
                    activation_weight == METASLAB_WEIGHT_PRIMARY) {
                        metaslab_passivate(msp,
                            msp->ms_weight & ~METASLAB_ACTIVE_MASK);
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if (metaslab_activate(msp, activation_weight, size) != 0) {
                        mutex_exit(&msp->ms_lock);
                        continue;
                }

                if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
                        break;

                metaslab_passivate(msp, size - 1);

                mutex_exit(&msp->ms_lock);
        }

        if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
                vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

        space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

        mutex_exit(&msp->ms_lock);

        return (offset);
}

/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
        metaslab_group_t *mg, *rotor;
        vdev_t *vd;
        int dshift = 3;
        int all_zero;
        int zio_lock = B_FALSE;
        boolean_t allocatable;
        uint64_t offset = -1ULL;
        uint64_t asize;
        uint64_t distance;

        ASSERT(!DVA_IS_VALID(&dva[d]));

        /*
         * For testing, make some blocks above a certain size be gang blocks.
         */
        if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
                return (ENOSPC);

        /*
         * Start at the rotor and loop through all mgs until we find something.
         * Note that there's no locking on mc_rotor or mc_aliquot because
         * nothing actually breaks if we miss a few updates -- we just won't
         * allocate quite as evenly.  It all balances out over time.
         *
         * If we are doing ditto or log blocks, try to spread them across
         * consecutive vdevs.  If we're forced to reuse a vdev before we've
         * allocated all of our ditto blocks, then try and spread them out on
         * that vdev as much as possible.  If it turns out to not be possible,
         * gradually lower our standards until anything becomes acceptable.
         * Also, allocating on consecutive vdevs (as opposed to random vdevs)
         * gives us hope of containing our fault domains to something we're
         * able to reason about.  Otherwise, any two top-level vdev failures
         * will guarantee the loss of data.  With consecutive allocation,
         * only two adjacent top-level vdev failures will result in data loss.
         *
         * If we are doing gang blocks (hintdva is non-NULL), try to keep
         * ourselves on the same vdev as our gang block header.  That
         * way, we can hope for locality in vdev_cache, plus it makes our
         * fault domains something tractable.
         */
        if (hintdva) {
                vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

                /*
                 * It's possible the vdev we're using as the hint no
                 * longer exists (i.e. removed).  Consult the rotor when
                 * all else fails.
                 */
                if (vd != NULL) {
                        mg = vd->vdev_mg;

                        if (flags & METASLAB_HINTBP_AVOID &&
                            mg->mg_next != NULL)
                                mg = mg->mg_next;
                } else {
                        mg = mc->mc_rotor;
                }
        } else if (d != 0) {
                vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
                mg = vd->vdev_mg->mg_next;
        } else {
                mg = mc->mc_rotor;
        }

        /*
         * If the hint put us into the wrong metaslab class, or into a
         * metaslab group that has been passivated, just follow the rotor.
         */
        if (mg->mg_class != mc || mg->mg_activation_count <= 0)
                mg = mc->mc_rotor;

        rotor = mg;
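
        /*
         * Illustrative numbers for the retry loop below: on a 1TB vdev,
         * the initial dshift of 3 asks metaslab_group_alloc() to keep
         * ditto copies at least 128GB (vdev_asize >> 3) apart; each time
         * we come up empty the requirement halves, until distance falls
         * within a single metaslab and finally becomes 0 (any offset is
         * acceptable).
         */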
top:
        all_zero = B_TRUE;
        do {
                ASSERT(mg->mg_activation_count == 1);

                vd = mg->mg_vd;

                /*
                 * Don't allocate from faulted devices.
                 */
                if (zio_lock) {
                        spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
                        allocatable = vdev_allocatable(vd);
                        spa_config_exit(spa, SCL_ZIO, FTAG);
                } else {
                        allocatable = vdev_allocatable(vd);
                }
                if (!allocatable)
                        goto next;

                /*
                 * Avoid writing single-copy data to a failing vdev.
                 */
                if ((vd->vdev_stat.vs_write_errors > 0 ||
                    vd->vdev_state < VDEV_STATE_HEALTHY) &&
                    d == 0 && dshift == 3) {
                        all_zero = B_FALSE;
                        goto next;
                }

                ASSERT(mg->mg_class == mc);

                distance = vd->vdev_asize >> dshift;
                if (distance <= (1ULL << vd->vdev_ms_shift))
                        distance = 0;
                else
                        all_zero = B_FALSE;

                asize = vdev_psize_to_asize(vd, psize);
                ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

                offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
                if (offset != -1ULL) {
                        /*
                         * If we've just selected this metaslab group,
                         * figure out whether the corresponding vdev is
                         * over- or under-used relative to the pool,
                         * and set an allocation bias to even it out.
                         */
                        if (mc->mc_aliquot == 0) {
                                vdev_stat_t *vs = &vd->vdev_stat;
                                int64_t vu, cu;

                                /*
                                 * Determine percent used in units of 0..1024.
                                 * (This is just to avoid floating point.)
                                 */
                                vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
                                cu = (mc->mc_alloc << 10) / (mc->mc_space + 1);

                                /*
                                 * Bias by at most +/- 25% of the aliquot.
                                 */
                                mg->mg_bias = ((cu - vu) *
                                    (int64_t)mg->mg_aliquot) / (1024 * 4);
                        }

                        if (atomic_add_64_nv(&mc->mc_aliquot, asize) >=
                            mg->mg_aliquot + mg->mg_bias) {
                                mc->mc_rotor = mg->mg_next;
                                mc->mc_aliquot = 0;
                        }

                        DVA_SET_VDEV(&dva[d], vd->vdev_id);
                        DVA_SET_OFFSET(&dva[d], offset);
                        DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
                        DVA_SET_ASIZE(&dva[d], asize);

                        return (0);
                }
next:
                mc->mc_rotor = mg->mg_next;
                mc->mc_aliquot = 0;
        } while ((mg = mg->mg_next) != rotor);

        if (!all_zero) {
                dshift++;
                ASSERT(dshift < 64);
                goto top;
        }

        if (!allocatable && !zio_lock) {
                dshift = 3;
                zio_lock = B_TRUE;
                goto top;
        }

        bzero(&dva[d], sizeof (dva_t));

        return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
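 *
 * If now is B_TRUE the block was allocated in a txg that has not yet
 * synced (e.g. metaslab_alloc() unwinding after a partial failure), so
 * it is removed from this txg's allocmap and returned to the space map
 * directly; otherwise the free is recorded in this txg's freemap and
 * processed by metaslab_sync()/metaslab_sync_done().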
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd;
        metaslab_t *msp;

        ASSERT(DVA_IS_VALID(dva));

        if (txg > spa_freeze_txg(spa))
                return;

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
            (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
                cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
                    (u_longlong_t)vdev, (u_longlong_t)offset);
                ASSERT(0);
                return;
        }

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        mutex_enter(&msp->ms_lock);

        if (now) {
                space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
                    offset, size);
                space_map_free(&msp->ms_map, offset, size);
        } else {
                if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
                        vdev_dirty(vd, VDD_METASLAB, msp, txg);
                space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
        }

        mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
        uint64_t vdev = DVA_GET_VDEV(dva);
        uint64_t offset = DVA_GET_OFFSET(dva);
        uint64_t size = DVA_GET_ASIZE(dva);
        vdev_t *vd;
        metaslab_t *msp;
        int error = 0;

        ASSERT(DVA_IS_VALID(dva));

        if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
            (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
                return (ENXIO);

        msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

        if (DVA_GET_GANG(dva))
                size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

        mutex_enter(&msp->ms_lock);

        if ((txg != 0 && spa_writeable(spa)) || !msp->ms_map.sm_loaded)
                error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);

        if (error == 0 && !space_map_contains(&msp->ms_map, offset, size))
                error = ENOENT;

        if (error || txg == 0) {        /* txg == 0 indicates dry run */
                mutex_exit(&msp->ms_lock);
                return (error);
        }

        space_map_claim(&msp->ms_map, offset, size);

        if (spa_writeable(spa)) {       /* don't dirty if we're zdb(1M) */
                if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
                        vdev_dirty(vd, VDD_METASLAB, msp, txg);
                space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
        }

        mutex_exit(&msp->ms_lock);

        return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
        dva_t *dva = bp->blk_dva;
        dva_t *hintdva = hintbp->blk_dva;
        int error = 0;

        ASSERT(bp->blk_birth == 0);
        ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        if (mc->mc_rotor == NULL) {     /* no vdevs in this class */
                spa_config_exit(spa, SCL_ALLOC, FTAG);
                return (ENOSPC);
        }

        ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
        ASSERT(BP_GET_NDVAS(bp) == 0);
        ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
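
        /*
         * Allocate one DVA at a time; if any allocation fails, unwind
         * the DVAs already placed (freeing them with now == B_TRUE,
         * since this txg has not synced) so the bp is left untouched
         * on error.
         */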
        for (int d = 0; d < ndvas; d++) {
                error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
                    txg, flags);
                if (error) {
                        for (d--; d >= 0; d--) {
                                metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
                                bzero(&dva[d], sizeof (dva_t));
                        }
                        spa_config_exit(spa, SCL_ALLOC, FTAG);
                        return (error);
                }
        }
        ASSERT(error == 0);
        ASSERT(BP_GET_NDVAS(bp) == ndvas);

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        BP_SET_BIRTH(bp, txg, txg);

        return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);

        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));

        spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++)
                metaslab_free_dva(spa, &dva[d], txg, now);

        spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
        const dva_t *dva = bp->blk_dva;
        int ndvas = BP_GET_NDVAS(bp);
        int error = 0;

        ASSERT(!BP_IS_HOLE(bp));

        if (txg != 0) {
                /*
                 * First do a dry run to make sure all DVAs are claimable,
                 * so we don't have to unwind from partial failures below.
                 */
                if ((error = metaslab_claim(spa, bp, 0)) != 0)
                        return (error);
        }

        spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

        for (int d = 0; d < ndvas; d++)
                if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
                        break;

        spa_config_exit(spa, SCL_ALLOC, FTAG);

        ASSERT(error == 0 || txg == 0);

        return (error);
}