/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>

uint64_t metaslab_aliquot = 512ULL << 10;
uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;	/* force gang blocks */

/*
 * ==========================================================================
 * Metaslab classes
 * ==========================================================================
 */
metaslab_class_t *
metaslab_class_create(void)
{
	metaslab_class_t *mc;

	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);

	mc->mc_rotor = NULL;

	return (mc);
}

void
metaslab_class_destroy(metaslab_class_t *mc)
{
	metaslab_group_t *mg;

	while ((mg = mc->mc_rotor) != NULL) {
		metaslab_class_remove(mc, mg);
		metaslab_group_destroy(mg);
	}

	kmem_free(mc, sizeof (metaslab_class_t));
}

void
metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == NULL);

	if ((mgprev = mc->mc_rotor) == NULL) {
		mg->mg_prev = mg;
		mg->mg_next = mg;
	} else {
		mgnext = mgprev->mg_next;
		mg->mg_prev = mgprev;
		mg->mg_next = mgnext;
		mgprev->mg_next = mg;
		mgnext->mg_prev = mg;
	}
	mc->mc_rotor = mg;
	mg->mg_class = mc;
}

void
metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
{
	metaslab_group_t *mgprev, *mgnext;

	ASSERT(mg->mg_class == mc);

	mgprev = mg->mg_prev;
	mgnext = mg->mg_next;

	if (mg == mgnext) {
		mc->mc_rotor = NULL;
	} else {
		mc->mc_rotor = mgnext;
		mgprev->mg_next = mgnext;
		mgnext->mg_prev = mgprev;
	}

	mg->mg_prev = NULL;
	mg->mg_next = NULL;
	mg->mg_class = NULL;
}

/*
 * ==========================================================================
 * Metaslab groups
 * ==========================================================================
 */
static int
metaslab_compare(const void *x1, const void *x2)
{
	const metaslab_t *m1 = x1;
	const metaslab_t *m2 = x2;

	if (m1->ms_weight < m2->ms_weight)
		return (1);
	if (m1->ms_weight > m2->ms_weight)
		return (-1);

	/*
	 * If the weights are identical, use the offset to force uniqueness.
	 */
	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
		return (-1);
	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
		return (1);

	ASSERT3P(m1, ==, m2);

	return (0);
}
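
/*
 * Note that metaslab_compare() sorts by *descending* weight, with ties
 * broken by ascending start offset, so avl_first() on a group's tree
 * always yields its heaviest (most desirable) metaslab.
 * metaslab_group_alloc() below relies on this ordering when it walks
 * the tree looking for a metaslab that can satisfy a request.
 */
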
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
	metaslab_group_t *mg;

	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
	mg->mg_aliquot = metaslab_aliquot * MAX(1, vd->vdev_children);
	mg->mg_vd = vd;
	metaslab_class_add(mc, mg);

	return (mg);
}

void
metaslab_group_destroy(metaslab_group_t *mg)
{
	avl_destroy(&mg->mg_metaslab_tree);
	mutex_destroy(&mg->mg_lock);
	kmem_free(mg, sizeof (metaslab_group_t));
}

static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == NULL);
	msp->ms_group = mg;
	msp->ms_weight = 0;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_group = NULL;
	mutex_exit(&mg->mg_lock);
}

static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
	/*
	 * Although in principle the weight can be any value, in
	 * practice we do not use values in the range [1, 510].
	 */
	ASSERT(weight >= SPA_MINBLOCKSIZE-1 || weight == 0);
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	mutex_enter(&mg->mg_lock);
	ASSERT(msp->ms_group == mg);
	avl_remove(&mg->mg_metaslab_tree, msp);
	msp->ms_weight = weight;
	avl_add(&mg->mg_metaslab_tree, msp);
	mutex_exit(&mg->mg_lock);
}

/*
 * ==========================================================================
 * The first-fit block allocator
 * ==========================================================================
 */
static void
metaslab_ff_load(space_map_t *sm)
{
	ASSERT(sm->sm_ppd == NULL);
	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
}

static void
metaslab_ff_unload(space_map_t *sm)
{
	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
	sm->sm_ppd = NULL;
}

static uint64_t
metaslab_ff_alloc(space_map_t *sm, uint64_t size)
{
	avl_tree_t *t = &sm->sm_root;
	uint64_t align = size & -size;
	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
	space_seg_t *ss, ssearch;
	avl_index_t where;

	ssearch.ss_start = *cursor;
	ssearch.ss_end = *cursor + size;

	ss = avl_find(t, &ssearch, &where);
	if (ss == NULL)
		ss = avl_nearest(t, where, AVL_AFTER);

	while (ss != NULL) {
		uint64_t offset = P2ROUNDUP(ss->ss_start, align);

		if (offset + size <= ss->ss_end) {
			*cursor = offset + size;
			return (offset);
		}
		ss = AVL_NEXT(t, ss);
	}

	/*
	 * If we know we've searched the whole map (*cursor == 0), give up.
	 * Otherwise, reset the cursor to the beginning and try again.
	 */
	if (*cursor == 0)
		return (-1ULL);

	*cursor = 0;
	return (metaslab_ff_alloc(sm, size));
}
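
/*
 * As an example of the cursor scheme above: a 12K request (size == 0x3000)
 * has natural alignment align == (size & -size) == 0x1000, so it shares
 * the cursor at sm_ppd[highbit(0x1000) - 1] == sm_ppd[12] with every other
 * 4K-aligned request on this map.  Keeping one rotating cursor per
 * power-of-two alignment lets successive same-aligned allocations pack
 * forward from where the previous one ended, rather than rescanning the
 * map from its start on every call.
 */
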
/* ARGSUSED */
static void
metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

/* ARGSUSED */
static void
metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
{
	/* No need to update cursor */
}

static space_map_ops_t metaslab_ff_ops = {
	metaslab_ff_load,
	metaslab_ff_unload,
	metaslab_ff_alloc,
	metaslab_ff_claim,
	metaslab_ff_free
};
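
/*
 * This ops vector is what ties the first-fit allocator to a metaslab's
 * in-core space map: metaslab_activate() below hands &metaslab_ff_ops
 * to space_map_load(), after which space_map_alloc() and friends on
 * that map dispatch to the metaslab_ff_* routines above.
 */
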
/*
 * ==========================================================================
 * Metaslabs
 * ==========================================================================
 */
metaslab_t *
metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
    uint64_t start, uint64_t size, uint64_t txg)
{
	vdev_t *vd = mg->mg_vd;
	metaslab_t *msp;

	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);

	msp->ms_smo_syncing = *smo;

	/*
	 * We create the main space map here, but we don't create the
	 * allocmaps and freemaps until metaslab_sync_done().  This serves
	 * two purposes: it allows metaslab_sync_done() to detect the
	 * addition of new space; and for debugging, it ensures that we'd
	 * data fault on any attempt to use this metaslab before it's ready.
	 */
	space_map_create(&msp->ms_map, start, size,
	    vd->vdev_ashift, &msp->ms_lock);

	metaslab_group_add(mg, msp);

	/*
	 * If we're opening an existing pool (txg == 0) or creating
	 * a new one (txg == TXG_INITIAL), all space is available now.
	 * If we're adding space to an existing pool, the new space
	 * does not become available until after this txg has synced.
	 */
	if (txg <= TXG_INITIAL)
		metaslab_sync_done(msp, 0);

	if (txg != 0) {
		/*
		 * The vdev is dirty, but the metaslab isn't -- it just needs
		 * to have metaslab_sync_done() invoked from vdev_sync_done().
		 * [We could just dirty the metaslab, but that would cause us
		 * to allocate a space map object for it, which is wasteful
		 * and would mess up the locality logic in metaslab_weight().]
		 */
		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
		vdev_dirty(vd, 0, NULL, txg);
		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
	}

	return (msp);
}

void
metaslab_fini(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	int t;

	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
	    -msp->ms_smo.smo_alloc, B_TRUE);

	metaslab_group_remove(mg, msp);

	mutex_enter(&msp->ms_lock);

	space_map_unload(&msp->ms_map);
	space_map_destroy(&msp->ms_map);

	for (t = 0; t < TXG_SIZE; t++) {
		space_map_destroy(&msp->ms_allocmap[t]);
		space_map_destroy(&msp->ms_freemap[t]);
	}

	mutex_exit(&msp->ms_lock);
	mutex_destroy(&msp->ms_lock);

	kmem_free(msp, sizeof (metaslab_t));
}

#define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
#define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
#define	METASLAB_ACTIVE_MASK		\
	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
#define	METASLAB_SMO_BONUS_MULTIPLIER	2

static uint64_t
metaslab_weight(metaslab_t *msp)
{
	metaslab_group_t *mg = msp->ms_group;
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo;
	vdev_t *vd = mg->mg_vd;
	uint64_t weight, space;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	/*
	 * The baseline weight is the metaslab's free space.
	 */
	space = sm->sm_size - smo->smo_alloc;
	weight = space;

	/*
	 * Modern disks have uniform bit density and constant angular velocity.
	 * Therefore, the outer recording zones are faster (higher bandwidth)
	 * than the inner zones by the ratio of outer to inner track diameter,
	 * which is typically around 2:1.  We account for this by assigning
	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
	 * In effect, this means that we'll select the metaslab with the most
	 * free bandwidth rather than simply the one with the most free space.
	 */
	weight = 2 * weight -
	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
	ASSERT(weight >= space && weight <= 2 * space);

	/*
	 * For locality, assign higher weight to metaslabs we've used before.
	 */
	if (smo->smo_object != 0)
		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
	ASSERT(weight >= space &&
	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);

	/*
	 * If this metaslab is one we're actively using, adjust its weight to
	 * make it preferable to any inactive metaslab so we'll polish it off.
	 */
	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (weight);
}
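
/*
 * Worked example of the weight computation: with vdev_ms_count == 100
 * and `space` bytes free, the metaslab at index i gets
 * weight = space * (2 - i/100), so metaslab 0 (outer edge of the disk)
 * weighs 2 * space while metaslab 99 (inner edge) weighs roughly
 * 1 * space.  A previously used metaslab (smo_object != 0) then doubles
 * that, and the METASLAB_WEIGHT_PRIMARY/SECONDARY bits, sitting above
 * any plausible byte count, always sort active metaslabs ahead of
 * inactive ones.
 */
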
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
	space_map_t *sm = &msp->ms_map;

	ASSERT(MUTEX_HELD(&msp->ms_lock));

	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int error = space_map_load(sm, &metaslab_ff_ops,
		    SM_FREE, &msp->ms_smo,
		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
		if (error) {
			metaslab_group_sort(msp->ms_group, msp, 0);
			return (error);
		}
		metaslab_group_sort(msp->ms_group, msp,
		    msp->ms_weight | activation_weight);
	}
	ASSERT(sm->sm_loaded);
	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);

	return (0);
}

static void
metaslab_passivate(metaslab_t *msp, uint64_t size)
{
	/*
	 * If size < SPA_MINBLOCKSIZE, then we will not allocate from
	 * this metaslab again.  In that case, it had better be empty,
	 * or we would be leaving space on the table.
	 */
	ASSERT(size >= SPA_MINBLOCKSIZE || msp->ms_map.sm_space == 0);
	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
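
/*
 * Together these two routines define the activation life cycle used by
 * metaslab_group_alloc(): a metaslab is activated (its space map loaded
 * and a PRIMARY or SECONDARY bit set in its weight) before any allocation
 * is attempted, and when an allocation of `size` fails it is passivated
 * with size - 1, which both clears the active bits (the MIN() with a
 * plain size discards them) and records that no request of `size` or
 * larger can succeed here until more space frees up.
 */
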
/*
 * Write a metaslab to disk in the context of the specified transaction group.
 */
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
	vdev_t *vd = msp->ms_group->mg_vd;
	spa_t *spa = vd->vdev_spa;
	objset_t *mos = spa->spa_meta_objset;
	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	space_map_t *sm = &msp->ms_map;
	space_map_obj_t *smo = &msp->ms_smo_syncing;
	dmu_buf_t *db;
	dmu_tx_t *tx;
	int t;

	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);

	/*
	 * The only state that can actually be changing concurrently with
	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
	 * We drop it whenever we call into the DMU, because the DMU
	 * can call down to us (e.g. via zio_free()) at any time.
	 */
	mutex_enter(&msp->ms_lock);

	if (smo->smo_object == 0) {
		ASSERT(smo->smo_objsize == 0);
		ASSERT(smo->smo_alloc == 0);
		mutex_exit(&msp->ms_lock);
		smo->smo_object = dmu_object_alloc(mos,
		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
		ASSERT(smo->smo_object != 0);
		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
		    (sm->sm_start >> vd->vdev_ms_shift),
		    sizeof (uint64_t), &smo->smo_object, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_walk(freemap, space_map_add, freed_map);

	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
		/*
		 * The in-core space map representation is twice as compact
		 * as the on-disk one, so it's time to condense the latter
		 * by generating a pure allocmap from first principles.
		 *
		 * This metaslab is 100% allocated,
		 * minus the content of the in-core map (sm),
		 * minus what's been freed this txg (freed_map),
		 * minus allocations from txgs in the future
		 * (because they haven't been committed yet).
		 */
		space_map_vacate(allocmap, NULL, NULL);
		space_map_vacate(freemap, NULL, NULL);

		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);

		space_map_walk(sm, space_map_remove, allocmap);
		space_map_walk(freed_map, space_map_remove, allocmap);

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
			    space_map_remove, allocmap);

		mutex_exit(&msp->ms_lock);
		space_map_truncate(smo, mos, tx);
		mutex_enter(&msp->ms_lock);
	}

	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
	space_map_sync(freemap, SM_FREE, smo, mos, tx);

	mutex_exit(&msp->ms_lock);

	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	ASSERT3U(db->db_size, >=, sizeof (*smo));
	bcopy(smo, db->db_data, sizeof (*smo));
	dmu_buf_rele(db, FTAG);

	dmu_tx_commit(tx);
}
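
/*
 * The condense trigger above is a straight size comparison: the in-core
 * AVL tree holds one node per free segment, so 2 * sizeof (uint64_t) *
 * avl_numnodes() approximates what a freshly rewritten map would occupy
 * on disk.  smo_objsize, by contrast, only grows between condenses,
 * since space map updates are appended as a log of alloc/free records;
 * once the log is at least twice the fresh-rewrite estimate, rewriting
 * it from first principles wins over letting it keep growing.
 */
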
/*
 * Called after a transaction group has completely synced to mark
 * all of the metaslab's free space as usable.
 */
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
	space_map_obj_t *smo = &msp->ms_smo;
	space_map_obj_t *smosync = &msp->ms_smo_syncing;
	space_map_t *sm = &msp->ms_map;
	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
	metaslab_group_t *mg = msp->ms_group;
	vdev_t *vd = mg->mg_vd;
	int t;

	mutex_enter(&msp->ms_lock);

	/*
	 * If this metaslab is just becoming available, initialize its
	 * allocmaps and freemaps and add its capacity to the vdev.
	 */
	if (freed_map->sm_size == 0) {
		for (t = 0; t < TXG_SIZE; t++) {
			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
			space_map_create(&msp->ms_freemap[t], sm->sm_start,
			    sm->sm_size, sm->sm_shift, sm->sm_lock);
		}
		vdev_space_update(vd, sm->sm_size, 0, B_TRUE);
	}

	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc, B_TRUE);

	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);

	/*
	 * If there's a space_map_load() in progress, wait for it to complete
	 * so that we have a consistent view of the in-core space map.
	 * Then, add everything we freed in this txg to the map.
	 */
	space_map_load_wait(sm);
	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);

	*smo = *smosync;

	/*
	 * If the map is loaded but no longer active, evict it as soon as all
	 * future allocations have synced.  (If we unloaded it now and then
	 * loaded a moment later, the map wouldn't reflect those allocations.)
	 */
	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
		int evictable = 1;

		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
				evictable = 0;

		if (evictable)
			space_map_unload(sm);
	}

	metaslab_group_sort(mg, msp, metaslab_weight(msp));

	mutex_exit(&msp->ms_lock);
}

static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
	uint64_t start = msp->ms_map.sm_start >> ms_shift;

	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
		return (1ULL << 63);

	if (offset < start)
		return ((start - offset) << ms_shift);
	if (offset > start)
		return ((offset - start) << ms_shift);
	return (0);
}

static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
    uint64_t min_distance, dva_t *dva, int d)
{
	metaslab_t *msp = NULL;
	uint64_t offset = -1ULL;
	avl_tree_t *t = &mg->mg_metaslab_tree;
	uint64_t activation_weight;
	uint64_t target_distance;
	int i;

	activation_weight = METASLAB_WEIGHT_PRIMARY;
	for (i = 0; i < d; i++)
		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
			activation_weight = METASLAB_WEIGHT_SECONDARY;

	for (;;) {
		mutex_enter(&mg->mg_lock);
		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
			if (msp->ms_weight < size) {
				mutex_exit(&mg->mg_lock);
				return (-1ULL);
			}

			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
				break;

			target_distance = min_distance +
			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);

			for (i = 0; i < d; i++)
				if (metaslab_distance(msp, &dva[i]) <
				    target_distance)
					break;
			if (i == d)
				break;
		}
		mutex_exit(&mg->mg_lock);
		if (msp == NULL)
			return (-1ULL);

		mutex_enter(&msp->ms_lock);

		/*
		 * Ensure that the metaslab we have selected is still
		 * capable of handling our request.  It's possible that
		 * another thread may have changed the weight while we
		 * were blocked on the metaslab lock.
		 */
		if (msp->ms_weight < size) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
			metaslab_passivate(msp,
			    msp->ms_weight & ~METASLAB_ACTIVE_MASK);
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if (metaslab_activate(msp, activation_weight) != 0) {
			mutex_exit(&msp->ms_lock);
			continue;
		}

		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
			break;

		metaslab_passivate(msp, size - 1);

		mutex_exit(&msp->ms_lock);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);

	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (offset);
}
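
/*
 * A note on the secondary search above: when an earlier DVA of this
 * block already lives on this vdev, a candidate metaslab must sit at
 * least min_distance away from every earlier copy, and never-used
 * metaslabs (smo_alloc == 0) are held to 1.5 * min_distance, so we
 * don't burn pristine metaslabs on ditto copies unless they are
 * genuinely far from the originals.
 */
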
/*
 * Allocate a block for the specified i/o.
 */
static int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    dva_t *dva, int d, dva_t *hintdva, uint64_t txg, boolean_t hintdva_avoid)
{
	metaslab_group_t *mg, *rotor;
	vdev_t *vd;
	int dshift = 3;
	int all_zero;
	uint64_t offset = -1ULL;
	uint64_t asize;
	uint64_t distance;

	ASSERT(!DVA_IS_VALID(&dva[d]));

	/*
	 * For testing, make some blocks above a certain size be gang blocks.
	 */
	if (psize >= metaslab_gang_bang && (lbolt & 3) == 0)
		return (ENOSPC);

	/*
	 * Start at the rotor and loop through all mgs until we find something.
	 * Note that there's no locking on mc_rotor or mc_allocated because
	 * nothing actually breaks if we miss a few updates -- we just won't
	 * allocate quite as evenly.  It all balances out over time.
	 *
	 * If we are doing ditto or log blocks, try to spread them across
	 * consecutive vdevs.  If we're forced to reuse a vdev before we've
	 * allocated all of our ditto blocks, then try and spread them out on
	 * that vdev as much as possible.  If it turns out to not be possible,
	 * gradually lower our standards until anything becomes acceptable.
	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
	 * gives us hope of containing our fault domains to something we're
	 * able to reason about.  Otherwise, any two top-level vdev failures
	 * will guarantee the loss of data.  With consecutive allocation,
	 * only two adjacent top-level vdev failures will result in data loss.
	 *
	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
	 * ourselves on the same vdev as our gang block header.  That
	 * way, we can hope for locality in vdev_cache, plus it makes our
	 * fault domains something tractable.
	 */
	if (hintdva) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
		if (hintdva_avoid)
			mg = vd->vdev_mg->mg_next;
		else
			mg = vd->vdev_mg;
	} else if (d != 0) {
		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
		mg = vd->vdev_mg->mg_next;
	} else {
		mg = mc->mc_rotor;
	}

	/*
	 * If the hint put us into the wrong class, just follow the rotor.
	 */
	if (mg->mg_class != mc)
		mg = mc->mc_rotor;

	rotor = mg;
top:
	all_zero = B_TRUE;
	do {
		vd = mg->mg_vd;
		/*
		 * Don't allocate from faulted devices.
		 */
		if (!vdev_writeable(vd))
			goto next;
		/*
		 * Avoid writing single-copy data to a failing vdev.
		 */
		if ((vd->vdev_stat.vs_write_errors > 0 ||
		    vd->vdev_state < VDEV_STATE_HEALTHY) &&
		    d == 0 && dshift == 3) {
			all_zero = B_FALSE;
			goto next;
		}

		ASSERT(mg->mg_class == mc);

		distance = vd->vdev_asize >> dshift;
		if (distance <= (1ULL << vd->vdev_ms_shift))
			distance = 0;
		else
			all_zero = B_FALSE;

		asize = vdev_psize_to_asize(vd, psize);
		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
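
		/*
		 * The distance requirement decays geometrically: on the
		 * first pass (dshift == 3) copies must be at least 1/8 of
		 * the vdev apart, then 1/16, 1/32, and so on with each
		 * retry from the `top:` label.  Once the required distance
		 * collapses to a single metaslab or less on every vdev,
		 * all_zero stays true and we stop trying to separate the
		 * copies at all.
		 */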
		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
		if (offset != -1ULL) {
			/*
			 * If we've just selected this metaslab group,
			 * figure out whether the corresponding vdev is
			 * over- or under-used relative to the pool,
			 * and set an allocation bias to even it out.
			 */
			if (mc->mc_allocated == 0) {
				vdev_stat_t *vs = &vd->vdev_stat;
				uint64_t alloc, space;
				int64_t vu, su;

				alloc = spa_get_alloc(spa);
				space = spa_get_space(spa);

				/*
				 * Determine percent used in units of 0..1024.
				 * (This is just to avoid floating point.)
				 */
				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
				su = (alloc << 10) / (space + 1);

				/*
				 * Bias by at most +/- 25% of the aliquot.
				 */
				mg->mg_bias = ((su - vu) *
				    (int64_t)mg->mg_aliquot) / (1024 * 4);
			}

			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
			    mg->mg_aliquot + mg->mg_bias) {
				mc->mc_rotor = mg->mg_next;
				mc->mc_allocated = 0;
			}

			DVA_SET_VDEV(&dva[d], vd->vdev_id);
			DVA_SET_OFFSET(&dva[d], offset);
			DVA_SET_GANG(&dva[d], 0);
			DVA_SET_ASIZE(&dva[d], asize);

			return (0);
		}
next:
		mc->mc_rotor = mg->mg_next;
		mc->mc_allocated = 0;
	} while ((mg = mg->mg_next) != rotor);

	if (!all_zero) {
		dshift++;
		ASSERT(dshift < 64);
		goto top;
	}

	bzero(&dva[d], sizeof (dva_t));

	return (ENOSPC);
}

/*
 * Free the block represented by DVA in the context of the specified
 * transaction group.
 */
static void
metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;

	ASSERT(DVA_IS_VALID(dva));

	if (txg > spa_freeze_txg(spa))
		return;

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
		    (u_longlong_t)vdev, (u_longlong_t)offset);
		ASSERT(0);
		return;
	}

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	if (now) {
		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
		    offset, size);
		space_map_free(&msp->ms_map, offset, size);
	} else {
		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
			vdev_dirty(vd, VDD_METASLAB, msp, txg);
		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);

		/*
		 * Verify that this region is actually allocated in
		 * either a ms_allocmap or the ms_map.
		 */
		if (msp->ms_map.sm_loaded) {
			boolean_t allocd = B_FALSE;
			int i;

			if (!space_map_contains(&msp->ms_map, offset, size)) {
				allocd = B_TRUE;
			} else {
				for (i = 0; i < TXG_CONCURRENT_STATES; i++) {
					space_map_t *sm = &msp->ms_allocmap
					    [(txg - i) & TXG_MASK];
					if (space_map_contains(sm,
					    offset, size)) {
						allocd = B_TRUE;
						break;
					}
				}
			}

			if (!allocd) {
				zfs_panic_recover("freeing free segment "
				    "(vdev=%llu offset=%llx size=%llx)",
				    (longlong_t)vdev, (longlong_t)offset,
				    (longlong_t)size);
			}
		}
	}

	mutex_exit(&msp->ms_lock);
}
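
/*
 * The `now` flag above distinguishes two kinds of free: now == B_TRUE
 * undoes an allocation from the currently open txg (metaslab_alloc()
 * uses it to roll back partially built multi-DVA allocations),
 * returning the space to the in-core map immediately, while
 * now == B_FALSE records the free in this txg's freemap so the space
 * only becomes allocatable again after the txg commits, via
 * metaslab_sync() and metaslab_sync_done().
 */
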
/*
 * Intent log support: upon opening the pool after a crash, notify the SPA
 * of blocks that the intent log has allocated for immediate write, but
 * which are still considered free by the SPA because the last transaction
 * group didn't commit yet.
 */
static int
metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
	uint64_t vdev = DVA_GET_VDEV(dva);
	uint64_t offset = DVA_GET_OFFSET(dva);
	uint64_t size = DVA_GET_ASIZE(dva);
	vdev_t *vd;
	metaslab_t *msp;
	int error;

	ASSERT(DVA_IS_VALID(dva));

	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
		return (ENXIO);

	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];

	if (DVA_GET_GANG(dva))
		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);

	mutex_enter(&msp->ms_lock);

	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
	if (error) {
		mutex_exit(&msp->ms_lock);
		return (error);
	}

	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
		vdev_dirty(vd, VDD_METASLAB, msp, txg);

	space_map_claim(&msp->ms_map, offset, size);
	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);

	mutex_exit(&msp->ms_lock);

	return (0);
}

int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
    int ndvas, uint64_t txg, blkptr_t *hintbp, boolean_t hintbp_avoid)
{
	dva_t *dva = bp->blk_dva;
	dva_t *hintdva = hintbp->blk_dva;
	int d;
	int error = 0;

	if (mc->mc_rotor == NULL)	/* no vdevs in this class */
		return (ENOSPC);

	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
	ASSERT(BP_GET_NDVAS(bp) == 0);
	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));

	for (d = 0; d < ndvas; d++) {
		error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
		    txg, hintbp_avoid);
		if (error) {
			for (d--; d >= 0; d--) {
				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
				bzero(&dva[d], sizeof (dva_t));
			}
			return (error);
		}
	}
	ASSERT(error == 0);
	ASSERT(BP_GET_NDVAS(bp) == ndvas);

	return (0);
}

void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d;

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		metaslab_free_dva(spa, &dva[d], txg, now);
}

int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
	const dva_t *dva = bp->blk_dva;
	int ndvas = BP_GET_NDVAS(bp);
	int d, error;
	int last_error = 0;

	ASSERT(!BP_IS_HOLE(bp));

	for (d = 0; d < ndvas; d++)
		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
			last_error = error;

	return (last_error);
}