/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(void)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_rotor = NULL;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	metaslab_group_t *mg;

	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);
	}

	kmem_free(mc, sizeof (metaslab_class_t));
}

void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
	mg->mg_class = mc;
}

void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	mg->mg_class = NULL;
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
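
/*
 * Each group keeps its metaslabs in an AVL tree sorted by weight,
 * heaviest first, so that allocation can simply take the first entry;
 * metaslab_compare() breaks ties by start offset to keep entries unique.
 */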
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}

metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = 2ULL << 20;	/* XXX -- tweak me */
	mg->mg_vd = vd;
	metaslab_class_add(mc, mg);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
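
/*
 * The first-fit allocator keeps one cursor per power-of-two alignment
 * class in sm_ppd (indexed by the highbit of the alignment implied by
 * the allocation size).  Each allocation resumes scanning the tree of
 * free segments at its class's cursor, takes the first aligned fit,
 * and advances the cursor; if the scan reaches the end of the map,
 * the cursor is reset to zero and the search is retried once.
 */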
static void
metaslab_ff_load(space_map_t *sm)
{
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
}

static void
metaslab_ff_unload(space_map_t *sm)
{
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_ff_alloc(sm, size));
}

/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free
};

/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
	uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		/*
		 * The vdev is dirty, but the metaslab isn't -- it just needs
		 * to have metaslab_sync_done() invoked from vdev_sync_done().
		 * [We could just dirty the metaslab, but that would cause us
		 * to allocate a space map object for it, which is wasteful
		 * and would mess up the locality logic in metaslab_weight().]
		 */
		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	int t;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	mutex_exit(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_ACTIVE_WEIGHT	(1ULL << 63)

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = msp->ms_group->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= 2;
	ASSERT(weight >= space && weight <= 4 * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_WEIGHT);

	return (weight);
}
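
/*
 * A metaslab is "active" once its space map has been loaded and the
 * METASLAB_ACTIVE_WEIGHT bit has been set in its weight, which sorts it
 * ahead of every inactive metaslab in its group.  metaslab_passivate()
 * caps the weight below the size that just failed to allocate, clearing
 * the active bit so the metaslab isn't immediately reselected for
 * allocations it cannot satisfy.
 */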
static int
metaslab_activate(metaslab_t *msp)
{
	space_map_t *sm = &msp->ms_map;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if (msp->ms_weight < METASLAB_ACTIVE_WEIGHT) {
		int error = space_map_load(sm, &metaslab_ff_ops,
		    SM_FREE, &msp->ms_smo,
		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
		if (error) {
			metaslab_group_sort(msp->ms_group, msp, 0);
			return (error);
		}
		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | METASLAB_ACTIVE_WEIGHT);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight >= METASLAB_ACTIVE_WEIGHT);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size - 1));
	ASSERT(msp->ms_weight < METASLAB_ACTIVE_WEIGHT);
}

/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	int t;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
	mutex_enter(&msp->ms_lock);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		mutex_exit(&msp->ms_lock);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, ==, sizeof (*smo));
	bcopy(smo, db->db_data, db->db_size);
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}

/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int t;

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}
		vdev_space_update(vd, sm->sm_size, 0);
	}

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	*smo = *smosync;

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && msp->ms_weight < METASLAB_ACTIVE_WEIGHT) {
		int evictable = 1;

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
int
metaslab_claim(spa_t *spa, dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL)
		return (ENXIO);

	if ((offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp);
	if (error) {
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(vd, VDD_METASLAB, msp, txg);

	space_map_claim(&msp->ms_map, offset, size);
	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (0);
}
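
/*
 * Allocate a segment from the given group: take the heaviest metaslab,
 * activate it (loading its space map if necessary), and attempt the
 * allocation.  On failure the metaslab is passivated and the next
 * heaviest one is tried; NULL is returned once no metaslab in the
 * group has sufficient weight.
 */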
static metaslab_t *
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t *offp,
	uint64_t txg)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;

	for (;;) {
		mutex_enter(&mg->mg_lock);
		msp = avl_first(&mg->mg_metaslab_tree);
		if (msp == NULL || msp->ms_weight < size) {
			mutex_exit(&mg->mg_lock);
			return (NULL);
		}
		mutex_exit(&mg->mg_lock);

		mutex_enter(&msp->ms_lock);

		if (metaslab_activate(msp) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, size);

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	*offp = offset;
	return (msp);
}

/*
 * Allocate a block for the specified i/o.
 */
int
metaslab_alloc(spa_t *spa, uint64_t psize, dva_t *dva, uint64_t txg)
{
	metaslab_t *msp;
	metaslab_group_t *mg, *rotor;
	metaslab_class_t *mc;
	vdev_t *vd;
	uint64_t offset = -1ULL;
	uint64_t asize;

	mc = spa_metaslab_class_select(spa);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 */
	mg = rotor = mc->mc_rotor;
	do {
		vd = mg->mg_vd;
		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);

		msp = metaslab_group_alloc(mg, asize, &offset, txg);
		if (msp != NULL) {
			ASSERT(offset != -1ULL);

			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_allocated == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				uint64_t alloc, space;
				int64_t vu, su;

				alloc = spa_get_alloc(spa);
				space = spa_get_space(spa);

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				su = (alloc << 10) / (space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((su - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}

			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_allocated = 0;
			}

			DVA_SET_VDEV(dva, vd->vdev_id);
			DVA_SET_OFFSET(dva, offset);
			DVA_SET_GANG(dva, 0);
			DVA_SET_ASIZE(dva, asize);

			return (0);
		}
		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	DVA_SET_VDEV(dva, 0);
	DVA_SET_OFFSET(dva, 0);
	DVA_SET_GANG(dva, 0);

	return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
void
metaslab_free(spa_t *spa, dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
		cmn_err(CE_WARN, "metaslab_free(): bad vdev %llu",
		    (u_longlong_t)vdev);
		ASSERT(0);
		return;
	}

	if ((offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free(): bad offset %llu",
		    (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
	}

	mutex_exit(&msp->ms_lock);
}