/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * Minimum size which forces the dynamic allocator to change
 * its allocation strategy.  Once the space map cannot satisfy
 * an allocation of this size then it switches to using a more
 * aggressive strategy (i.e., search by size rather than offset).
 */
uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE;

/*
 * The minimum free space, in percent, which must be available
 * in a space map to continue allocations in a first-fit fashion.
 * Once the space_map's free space drops below this level we dynamically
 * switch to using best-fit allocations.
 */
int metaslab_df_free_pct = 30;

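/*
 * Worked example of the two thresholds above (illustrative numbers):
 * with metaslab_df_free_pct at its default of 30, a 10 GB metaslab keeps
 * allocating first-fit until less than 3 GB of it remains free;
 * independently, once its largest free segment drops below
 * metaslab_df_alloc_threshold, it also switches to the size-sorted
 * (best-fit) search.  See metaslab_df_alloc() below.
 */
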
/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(spa_t *spa, space_map_ops_t *ops)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_spa = spa;
	mc->mc_rotor = NULL;
	mc->mc_ops = ops;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	metaslab_group_t *mg;

	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);
	}

	kmem_free(mc, sizeof (metaslab_class_t));
}

void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
	mg->mg_class = mc;
}

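/*
 * The rotor is a circular, doubly-linked list of metaslab groups, and
 * metaslab_class_add() always makes the newest group the rotor.  For
 * illustration: adding groups A, B and C in that order leaves the ring
 * rotating C -> A -> B -> C, so a newly added vdev becomes the starting
 * point for the next pass around the rotor.
 */
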
void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	mg->mg_class = NULL;
}

int
metaslab_class_validate(metaslab_class_t *mc)
{
	metaslab_group_t *mg;
	vdev_t *vd;

	/*
	 * Must hold one of the spa_config locks.
	 */
	ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
	    spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));

	if ((mg = mc->mc_rotor) == NULL)
		return (0);

	do {
		vd = mg->mg_vd;
		ASSERT(vd->vdev_mg != NULL);
		ASSERT3P(vd->vdev_top, ==, vd);
		ASSERT3P(mg->mg_class, ==, mc);
		ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
	} while ((mg = mg->mg_next) != mc->mc_rotor);

	return (0);
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
	mg->mg_vd = vd;
	metaslab_class_add(mc, mg);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * This is a helper function that can be used by the allocator to find
 * a suitable block to allocate.  This will search the specified AVL
 * tree looking for a block that matches the specified criteria.
 */
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
    uint64_t align)
{
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_block_picker(t, cursor, size, align));
}

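/*
 * Example of the cursor walk above (illustrative offsets): with
 * *cursor == 0x10000, size == 0x2000 and align == 0x2000, the search
 * starts at the first free segment at or after 0x10000; a segment
 * [0x13000, 0x16000) is trimmed to the aligned offset 0x14000, the
 * 0x2000-byte block [0x14000, 0x16000) is returned, and the cursor
 * advances to 0x16000.  If no segment past the cursor fits, the cursor
 * wraps to 0 and the map is searched once more from the beginning.
 */
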
/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static void
metaslab_ff_load(space_map_t *sm)
{
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
	sm->sm_pp_root = NULL;
}

static void
metaslab_ff_unload(space_map_t *sm)
{
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;

	return (metaslab_block_picker(t, cursor, size, align));
}

/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free,
	NULL	/* maxsize */
};

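/*
 * Note on the cursor array used by metaslab_ff_alloc(): sm_ppd holds 64
 * cursors, one per power-of-two alignment.  "size & -size" isolates the
 * lowest set bit of the requested size, so (for example) a 0x20000-byte
 * request has align 0x20000 and, since highbit(0x20000) == 18, uses
 * cursor[17].  Requests with different natural alignments therefore keep
 * independent first-fit positions within the same space map.
 */
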
/*
 * Dynamic block allocator -
 * Uses the first-fit allocation scheme until space gets low and then
 * adjusts to a best-fit allocation method.  Uses metaslab_df_alloc_threshold
 * and metaslab_df_free_pct to determine when to switch the allocation scheme.
 */

uint64_t
metaslab_df_maxsize(space_map_t *sm)
{
	avl_tree_t *t = sm->sm_pp_root;
	space_seg_t *ss;

	if (t == NULL || (ss = avl_last(t)) == NULL)
		return (0ULL);

	return (ss->ss_end - ss->ss_start);
}

static int
metaslab_df_seg_compare(const void *x1, const void *x2)
{
	const space_seg_t *s1 = x1;
	const space_seg_t *s2 = x2;
	uint64_t ss_size1 = s1->ss_end - s1->ss_start;
	uint64_t ss_size2 = s2->ss_end - s2->ss_start;

	if (ss_size1 < ss_size2)
		return (-1);
	if (ss_size1 > ss_size2)
		return (1);

	if (s1->ss_start < s2->ss_start)
		return (-1);
	if (s1->ss_start > s2->ss_start)
		return (1);

	return (0);
}

static void
metaslab_df_load(space_map_t *sm)
{
	space_seg_t *ss;

	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);

	sm->sm_pp_root = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
	avl_create(sm->sm_pp_root, metaslab_df_seg_compare,
	    sizeof (space_seg_t), offsetof(struct space_seg, ss_pp_node));

	for (ss = avl_first(&sm->sm_root); ss; ss = AVL_NEXT(&sm->sm_root, ss))
		avl_add(sm->sm_pp_root, ss);
}

static void
metaslab_df_unload(space_map_t *sm)
{
	void *cookie = NULL;

	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;

	while (avl_destroy_nodes(sm->sm_pp_root, &cookie) != NULL) {
		/* tear down the tree */
	}

	avl_destroy(sm->sm_pp_root);
	kmem_free(sm->sm_pp_root, sizeof (avl_tree_t));
	sm->sm_pp_root = NULL;
}

static uint64_t
metaslab_df_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	uint64_t max_size = metaslab_df_maxsize(sm);
	int free_pct = sm->sm_space * 100 / sm->sm_size;

	ASSERT(MUTEX_HELD(sm->sm_lock));
	ASSERT3U(avl_numnodes(&sm->sm_root), ==, avl_numnodes(sm->sm_pp_root));

	if (max_size < size)
		return (-1ULL);

	/*
	 * If we're running low on space, switch to using the size-sorted
	 * AVL tree (best-fit).
	 */
	if (max_size < metaslab_df_alloc_threshold ||
	    free_pct < metaslab_df_free_pct) {
		t = sm->sm_pp_root;
		*cursor = 0;
	}

	return (metaslab_block_picker(t, cursor, size, 1ULL));
}

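/*
 * When metaslab_df_alloc() falls back to the size-sorted tree, the cursor
 * is reset to 0, so metaslab_block_picker() seeds its avl_find() with a
 * segment of exactly the requested size.  The first node at or after that
 * point is the smallest free segment large enough to hold the request
 * (ties broken by offset), which is what makes the fallback behave as a
 * best-fit search.
 */
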
/* ARGSUSED */
static void
metaslab_df_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_df_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_df_ops = {
	metaslab_df_load,
	metaslab_df_unload,
	metaslab_df_alloc,
	metaslab_df_claim,
	metaslab_df_free,
	metaslab_df_maxsize
};

space_map_ops_t *zfs_metaslab_ops = &metaslab_df_ops;

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
	uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		/*
		 * The vdev is dirty, but the metaslab isn't -- it just needs
		 * to have metaslab_sync_done() invoked from vdev_sync_done().
		 * [We could just dirty the metaslab, but that would cause us
		 * to allocate a space map object for it, which is wasteful
		 * and would mess up the locality logic in metaslab_weight().]
		 */
		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	int t;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc, B_TRUE);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2

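/*
 * The two high-order weight bits double as activation flags: because a
 * metaslab's space-derived weight stays far below 1ULL << 62 in practice,
 * OR-ing METASLAB_WEIGHT_PRIMARY or METASLAB_WEIGHT_SECONDARY into
 * ms_weight sorts active metaslabs ahead of every inactive one in the
 * group's AVL tree while preserving their relative ordering.
 */
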
static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
	ASSERT(weight >= space &&
	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (weight);
}

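/*
 * Worked example of the weighting above (illustrative numbers): take a
 * vdev with 100 metaslabs and a metaslab with 1 GB of free space.  If it
 * is metaslab 0 (outermost zone), weight = 2 * 1 GB - 0 = 2 GB; if it is
 * metaslab 99 (innermost), weight = 2 * 1 GB - (99/100) * 1 GB = 1.01 GB.
 * If the metaslab already has a space map object on disk, the result is
 * doubled again, so previously used metaslabs win over untouched ones of
 * similar size and position.
 */
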
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight, uint64_t size)
{
	space_map_t *sm = &msp->ms_map;
	space_map_ops_t *sm_ops = msp->ms_group->mg_class->mc_ops;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int error = space_map_load(sm, sm_ops, SM_FREE, &msp->ms_smo,
		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
		if (error) {
			metaslab_group_sort(msp->ms_group, msp, 0);
			return (error);
		}

		/*
		 * If we were able to load the map then make sure
		 * that this map is still able to satisfy our request.
		 */
		if (msp->ms_weight < size)
			return (ENOSPC);

		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	int t;

	ASSERT(!vd->vdev_ishole);

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
	mutex_enter(&msp->ms_lock);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		mutex_exit(&msp->ms_lock);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

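/*
 * Rough arithmetic behind the condense test in metaslab_sync(): the
 * in-core tree holds one node per free segment, and rewriting the map
 * from scratch costs on the order of one 8-byte entry per segment.  So
 * with, say, 1,000 free segments a condensed object would be roughly
 * 8,000 bytes; once the existing on-disk object has grown to at least
 * twice that (the 2 * sizeof (uint64_t) * avl_numnodes() bound above),
 * truncating and regenerating it is a net win.
 */
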
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int t;

	ASSERT(!vd->vdev_ishole);

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}
		vdev_space_update(vd, sm->sm_size, 0, B_TRUE);
	}

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	*smo = *smosync;

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}

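/*
 * metaslab_distance() measures separation in bytes, rounded down to
 * whole metaslabs.  For example, assuming 1 GB metaslabs (ms_shift of
 * 30), an existing DVA in metaslab 7 and a candidate metaslab starting
 * at metaslab 10 are 3 GB apart; a DVA on a different vdev reports
 * 1ULL << 63 so it never constrains placement on this one.
 */
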
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++) {
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
			activation_weight = METASLAB_WEIGHT_SECONDARY;
			break;
		}
	}

	for (;;) {
		boolean_t was_active;

		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < size) {
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request.  It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < size || (was_active &&
		    !(msp->ms_weight & METASLAB_ACTIVE_MASK) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY)) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight, size) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, size - 1);

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (offset);
}

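/*
 * A note on target_distance in the search above: for secondary
 * allocations (another DVA of this block already lives on this vdev),
 * a candidate metaslab must be at least min_distance from every prior
 * DVA, and an entirely unused metaslab (smo_alloc == 0) must be
 * min_distance plus half again as far, so brand-new metaslabs are only
 * opened for allocations that are genuinely well separated.
 */
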
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	int zio_lock = B_FALSE;
	boolean_t allocatable;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
		return (ENOSPC);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));

		/*
		 * It's possible the vdev we're using as the hint no
		 * longer exists (i.e. removed).  Consult the rotor when
		 * all else fails.
		 */
		if (vd != NULL && vd->vdev_mg != NULL) {
			mg = vd->vdev_mg;

			if (flags & METASLAB_HINTBP_AVOID &&
			    mg->mg_next != NULL)
				mg = mg->mg_next;
		} else {
			mg = mc->mc_rotor;
		}
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong class, just follow the rotor.
	 */
	if (mg->mg_class != mc)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		vd = mg->mg_vd;

		/*
		 * Don't allocate from faulted devices.
		 */
		if (zio_lock) {
			spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
			allocatable = vdev_allocatable(vd);
			spa_config_exit(spa, SCL_ZIO, FTAG);
		} else {
			allocatable = vdev_allocatable(vd);
		}
		if (!allocatable)
			goto next;

		/*
		 * Avoid writing single-copy data to a failing vdev
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_allocated == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				uint64_t alloc, space;
				int64_t vu, su;

				alloc = spa_get_alloc(spa);
				space = spa_get_space(spa);

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				su = (alloc << 10) / (space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((su - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}

			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_allocated = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], !!(flags & METASLAB_GANG_HEADER));
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	if (!allocatable && !zio_lock) {
		dshift = 3;
		zio_lock = B_TRUE;
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}

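/*
 * Worked example of the bias computation above (illustrative numbers):
 * if the chosen vdev is 50% full (vu == 512 in 0..1024 units) while the
 * pool overall is 30% full (su == 307), then
 * mg_bias = (307 - 512) * mg_aliquot / 4096, roughly -5% of the aliquot,
 * so this vdev absorbs slightly less data than its nominal share until
 * utilization evens out.  Since |su - vu| is at most 1024, the bias is
 * bounded by +/- 25% of the aliquot, as the comment above notes.
 */
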
/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY, 0);
	if (error || txg == 0) {	/* txg == 0 indicates dry run */
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	space_map_claim(&msp->ms_map, offset, size);

	if (spa_writeable(spa)) {	/* don't dirty if we're zdb(1M) */
		if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, int flags)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int error = 0;

	ASSERT(bp->blk_birth == 0);

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	if (mc->mc_rotor == NULL) {	/* no vdevs in this class */
		spa_config_exit(spa, SCL_ALLOC, FTAG);
		return (ENOSPC);
	}

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (int d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, flags);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			spa_config_exit(spa, SCL_ALLOC, FTAG);
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	bp->blk_birth = txg;

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(!now || bp->blk_birth >= spa->spa_syncing_txg);

	spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);

	spa_config_exit(spa, SCL_FREE, FTAG);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg != 0) {
		/*
		 * First do a dry run to make sure all DVAs are claimable,
		 * so we don't have to unwind from partial failures below.
		 */
		if ((error = metaslab_claim(spa, bp, 0)) != 0)
			return (error);
	}

	spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);

	for (int d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			break;

	spa_config_exit(spa, SCL_ALLOC, FTAG);

	ASSERT(error == 0 || txg == 0);

	return (error);
}