/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against SPA_MINREF, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock (per-spa crazy rwlock)
 *
 *	This SPA special is a recursive rwlock, capable of being acquired from
 *	asynchronous threads.  It protects the spa_t from config changes, and
 *	must be held in the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * spa_config_cache_lock (per-spa mutex)
 *
 *	This mutex prevents the spa_config nvlist from being updated.  No
 *	other locks are required to obtain this lock, although implicitly you
 *	must have the namespace lock or non-zero refcount to have any kind
 *	of spa_t pointer at all.
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock and spa_config_cache_lock can be acquired directly
 * and are globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock is manipulated using the following functions:
 *
 *	spa_config_enter()	Acquire the config lock as RW_READER or
 *				RW_WRITER.  At least one reference on the
 *				spa_t must exist.
 *
 *	spa_config_exit()	Release the config lock.
 *
 *	spa_config_held()	Returns true if the config lock is currently
 *				held in the given state.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * The spa_name() function also requires either the spa_namespace_lock
 * or the spa_config_lock, as both are needed to do a rename.  spa_rename() is
 * also implemented within this file since it requires manipulation of the
 * namespace.
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
static int spa_max_replication_override = SPA_DVAS_PER_BP;

static avl_tree_t spa_spare_avl;
static kmutex_t spa_spare_lock;

kmem_cache_t *spa_buffer_pool;
int spa_mode;

#ifdef ZFS_DEBUG
int zfs_flags = ~0;
#else
int zfs_flags = 0;
#endif

#define	SPA_MINREF	5	/* spa_refcnt for an open-but-idle pool */
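/*
 * Illustrative sketch (not part of the original file): the lock ordering
 * described above, in practice.  The function name spa_example_lock_order,
 * the SPA_MISC_EXAMPLES guard, and the pool name 'tank' are hypothetical,
 * introduced only for this example.
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_lock_order(void)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);	/* namespace lock first */
	if ((spa = spa_lookup("tank")) != NULL) {
		spa_open_ref(spa, FTAG);	/* then bump spa_refcount */
		spa_config_enter(spa, RW_READER, FTAG);	/* config lock last */
		/* ... perform I/O against the pool ... */
		spa_config_exit(spa, FTAG);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
#endif	/* SPA_MISC_EXAMPLES */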
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	spa_t search, *spa;
	avl_index_t where;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	search.spa_name = (char *)name;
	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	spa->spa_name = spa_strdup(name);
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	refcount_create(&spa->spa_config_lock.scl_count);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT(spa->spa_scrub_thread == NULL);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	if (spa->spa_name)
		spa_strfree(spa->spa_name);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);
	refcount_destroy(&spa->spa_config_lock.scl_count);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > SPA_MINREF ||
	    MUTEX_HELD(&spa_namespace_lock));

	(void) refcount_remove(&spa->spa_refcount, tag);
}
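/*
 * Illustrative sketch (hypothetical, guarded out of the build): holding a
 * spa_t across an operation with a tag.  The tag passed to spa_close() must
 * match the one passed to spa_open_ref().
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_hold(spa_t *spa)
{
	/* refcount is already non-zero here, so no namespace lock is needed */
	spa_open_ref(spa, FTAG);
	/* ... use the spa_t; it cannot be freed while we hold the ref ... */
	spa_close(spa, FTAG);
}
#endif	/* SPA_MISC_EXAMPLES */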
/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against SPA_MINREF, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == SPA_MINREF);
}

/*
 * ==========================================================================
 * SPA spare tracking
 * ==========================================================================
 */

/*
 * We track spare information on a global basis.  This allows us to do two
 * things: determine when a spare is no longer referenced by any active pool,
 * and (quickly) determine if a spare is currently in use in another pool on
 * the system.
 */
typedef struct spa_spare {
	uint64_t	spare_guid;
	avl_node_t	spare_avl;
	int		spare_count;
} spa_spare_t;

static int
spa_spare_compare(const void *a, const void *b)
{
	const spa_spare_t *sa = a;
	const spa_spare_t *sb = b;

	if (sa->spare_guid < sb->spare_guid)
		return (-1);
	else if (sa->spare_guid > sb->spare_guid)
		return (1);
	else
		return (0);
}

void
spa_spare_add(uint64_t guid)
{
	avl_index_t where;
	spa_spare_t search;
	spa_spare_t *spare;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	if ((spare = avl_find(&spa_spare_avl, &search, &where)) != NULL) {
		spare->spare_count++;
	} else {
		spare = kmem_alloc(sizeof (spa_spare_t), KM_SLEEP);
		spare->spare_guid = guid;
		spare->spare_count = 1;
		avl_insert(&spa_spare_avl, spare, where);
	}

	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(uint64_t guid)
{
	spa_spare_t search;
	spa_spare_t *spare;
	avl_index_t where;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	spare = avl_find(&spa_spare_avl, &search, &where);

	ASSERT(spare != NULL);

	if (--spare->spare_count == 0) {
		avl_remove(&spa_spare_avl, spare);
		kmem_free(spare, sizeof (spa_spare_t));
	}

	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_inuse(uint64_t guid)
{
	spa_spare_t search;
	avl_index_t where;
	boolean_t ret;

	mutex_enter(&spa_spare_lock);

	search.spare_guid = guid;
	ret = (avl_find(&spa_spare_avl, &search, &where) != NULL);

	mutex_exit(&spa_spare_lock);

	return (ret);
}
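/*
 * Illustrative sketch (hypothetical, not compiled): how the global spare
 * table reference-counts a spare shared by two pools.  Both pools adding the
 * same guid bump spare_count to 2; the spare remains "in use" until both
 * have removed it.
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_spare(uint64_t guid)
{
	spa_spare_add(guid);		/* first pool: count = 1 */
	spa_spare_add(guid);		/* second pool: count = 2 */
	ASSERT(spa_spare_inuse(guid));
	spa_spare_remove(guid);		/* count = 1, still in use */
	ASSERT(spa_spare_inuse(guid));
	spa_spare_remove(guid);		/* count = 0, entry freed */
	ASSERT(!spa_spare_inuse(guid));
}
#endif	/* SPA_MISC_EXAMPLES */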
/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */

/*
 * Acquire the config lock.  The config lock is a special rwlock that allows
 * for recursive enters.  Because these enters come from the same thread as
 * well as asynchronous threads working on behalf of the owner, we must
 * unilaterally allow all read access as long as at least one reader is held
 * (even if a write is requested).  This has the side effect of write
 * starvation, but write locks are extremely rare, and a solution to this
 * problem would be significantly more complex (if even possible).
 *
 * We would like to assert that the namespace lock isn't held, but this is a
 * valid use during create.
 */
void
spa_config_enter(spa_t *spa, krw_t rw, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	if (scl->scl_writer != curthread) {
		if (rw == RW_READER) {
			while (scl->scl_writer != NULL)
				cv_wait(&scl->scl_cv, &scl->scl_lock);
		} else {
			while (scl->scl_writer != NULL ||
			    !refcount_is_zero(&scl->scl_count))
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			scl->scl_writer = curthread;
		}
	}

	(void) refcount_add(&scl->scl_count, tag);

	mutex_exit(&scl->scl_lock);
}

/*
 * Release the spa config lock, notifying any waiters in the process.
 */
void
spa_config_exit(spa_t *spa, void *tag)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;

	mutex_enter(&scl->scl_lock);

	ASSERT(!refcount_is_zero(&scl->scl_count));
	if (refcount_remove(&scl->scl_count, tag) == 0) {
		cv_broadcast(&scl->scl_cv);
		scl->scl_writer = NULL;	/* OK in either case */
	}

	mutex_exit(&scl->scl_lock);
}

/*
 * Returns true if the config lock is held in the given manner.
 */
boolean_t
spa_config_held(spa_t *spa, krw_t rw)
{
	spa_config_lock_t *scl = &spa->spa_config_lock;
	boolean_t held;

	mutex_enter(&scl->scl_lock);
	if (rw == RW_WRITER)
		held = (scl->scl_writer == curthread);
	else
		held = !refcount_is_zero(&scl->scl_count);
	mutex_exit(&scl->scl_lock);

	return (held);
}
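/*
 * Illustrative sketch (hypothetical): reader-side use of the config lock.
 * Because the lock is recursive for readers, a thread (or an asynchronous
 * thread working on its behalf) may re-enter as RW_READER while the lock is
 * already held; each enter must be balanced by an exit with the same tag.
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_config_read(spa_t *spa)
{
	spa_config_enter(spa, RW_READER, FTAG);
	ASSERT(spa_config_held(spa, RW_READER));
	/* ... issue I/O; the vdev config cannot change while held ... */
	spa_config_enter(spa, RW_READER, FTAG);	/* recursive enter is legal */
	spa_config_exit(spa, FTAG);
	spa_config_exit(spa, FTAG);
}
#endif	/* SPA_MISC_EXAMPLES */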
/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	/*
	 * Suspend scrub activity while we mess with the config.
	 */
	spa_scrub_suspend(spa);

	mutex_enter(&spa_namespace_lock);

	spa_config_enter(spa, RW_WRITER, spa);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_dirty_list)) {
		config_changed = B_TRUE;
		spa_scrub_restart(spa, txg);
	}

	spa_config_exit(spa, spa);

	/*
	 * Allow scrubbing to resume.
	 */
	spa_scrub_resume(spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync();

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, RW_WRITER, FTAG);

	avl_remove(&spa_namespace_avl, spa);
	spa_strfree(spa->spa_name);
	spa->spa_name = spa_strdup(newname);
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync();

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}
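/*
 * Illustrative sketch (hypothetical): the canonical spa_vdev_enter() /
 * spa_vdev_exit() bracket around a config change, mirroring how callers
 * such as the add/remove/attach/detach paths are structured.
 */
#ifdef SPA_MISC_EXAMPLES
static int
spa_example_vdev_change(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	txg = spa_vdev_enter(spa);	/* namespace + config lock, writer */
	/* ... modify the vdev tree, dirtying the config on success ... */
	return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif	/* SPA_MISC_EXAMPLES */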
/*
 * Determine whether a pool with the given pool_guid exists.  If device_guid
 * is non-zero, determine whether the pool exists *and* contains a device
 * with the specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid && (device_guid == 0 ||
		    vdev_lookup_by_guid(spa->spa_root_vdev, device_guid)))
			break;
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}
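/*
 * Illustrative sketch (hypothetical): spa_strfree() recomputes the
 * allocation size from strlen(), so a string duplicated with spa_strdup()
 * must still be NUL-terminated at its original length when freed.
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_strdup(void)
{
	char *name = spa_strdup("tank");	/* allocates strlen + 1 */

	/* safe: the string's length is unchanged */
	spa_strfree(name);
}
#endif	/* SPA_MISC_EXAMPLES */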
"gang" : "contiguous", 750 (u_longlong_t)bp->blk_birth, 751 (u_longlong_t)bp->blk_fill, 752 (u_longlong_t)bp->blk_cksum.zc_word[0], 753 (u_longlong_t)bp->blk_cksum.zc_word[1], 754 (u_longlong_t)bp->blk_cksum.zc_word[2], 755 (u_longlong_t)bp->blk_cksum.zc_word[3]); 756 } 757 758 void 759 spa_freeze(spa_t *spa) 760 { 761 uint64_t freeze_txg = 0; 762 763 spa_config_enter(spa, RW_WRITER, FTAG); 764 if (spa->spa_freeze_txg == UINT64_MAX) { 765 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 766 spa->spa_freeze_txg = freeze_txg; 767 } 768 spa_config_exit(spa, FTAG); 769 if (freeze_txg != 0) 770 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 771 } 772 773 /* 774 * ========================================================================== 775 * Accessor functions 776 * ========================================================================== 777 */ 778 779 krwlock_t * 780 spa_traverse_rwlock(spa_t *spa) 781 { 782 return (&spa->spa_traverse_lock); 783 } 784 785 int 786 spa_traverse_wanted(spa_t *spa) 787 { 788 return (spa->spa_traverse_wanted); 789 } 790 791 dsl_pool_t * 792 spa_get_dsl(spa_t *spa) 793 { 794 return (spa->spa_dsl_pool); 795 } 796 797 blkptr_t * 798 spa_get_rootblkptr(spa_t *spa) 799 { 800 return (&spa->spa_ubsync.ub_rootbp); 801 } 802 803 void 804 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 805 { 806 spa->spa_uberblock.ub_rootbp = *bp; 807 } 808 809 void 810 spa_altroot(spa_t *spa, char *buf, size_t buflen) 811 { 812 if (spa->spa_root == NULL) 813 buf[0] = '\0'; 814 else 815 (void) strncpy(buf, spa->spa_root, buflen); 816 } 817 818 int 819 spa_sync_pass(spa_t *spa) 820 { 821 return (spa->spa_sync_pass); 822 } 823 824 char * 825 spa_name(spa_t *spa) 826 { 827 /* 828 * Accessing the name requires holding either the namespace lock or the 829 * config lock, both of which are required to do a rename. 830 */ 831 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 832 spa_config_held(spa, RW_READER) || spa_config_held(spa, RW_WRITER)); 833 834 return (spa->spa_name); 835 } 836 837 uint64_t 838 spa_guid(spa_t *spa) 839 { 840 /* 841 * If we fail to parse the config during spa_load(), we can go through 842 * the error path (which posts an ereport) and end up here with no root 843 * vdev. We stash the original pool guid in 'spa_load_guid' to handle 844 * this case. 845 */ 846 if (spa->spa_root_vdev != NULL) 847 return (spa->spa_root_vdev->vdev_guid); 848 else 849 return (spa->spa_load_guid); 850 } 851 852 uint64_t 853 spa_last_synced_txg(spa_t *spa) 854 { 855 return (spa->spa_ubsync.ub_txg); 856 } 857 858 uint64_t 859 spa_first_txg(spa_t *spa) 860 { 861 return (spa->spa_first_txg); 862 } 863 864 int 865 spa_state(spa_t *spa) 866 { 867 return (spa->spa_state); 868 } 869 870 uint64_t 871 spa_freeze_txg(spa_t *spa) 872 { 873 return (spa->spa_freeze_txg); 874 } 875 876 /* 877 * In the future, this may select among different metaslab classes 878 * depending on the zdp. For now, there's no such distinction. 879 */ 880 metaslab_class_t * 881 spa_metaslab_class_select(spa_t *spa) 882 { 883 return (spa->spa_normal_class); 884 } 885 886 /* 887 * Return how much space is allocated in the pool (ie. sum of all asize) 888 */ 889 uint64_t 890 spa_get_alloc(spa_t *spa) 891 { 892 return (spa->spa_root_vdev->vdev_stat.vs_alloc); 893 } 894 895 /* 896 * Return how much (raid-z inflated) space there is in the pool. 897 */ 898 uint64_t 899 spa_get_space(spa_t *spa) 900 { 901 return (spa->spa_root_vdev->vdev_stat.vs_space); 902 } 903 904 /* 905 * Return the amount of raid-z-deflated space in the pool. 
void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, RW_WRITER, FTAG);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

int
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	/*
	 * Accessing the name requires holding either the namespace lock or
	 * the config lock, both of which are required to do a rename.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
	    spa_config_held(spa, RW_READER) || spa_config_held(spa, RW_WRITER));

	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * In the future, this may select among different metaslab classes
 * depending on the zdp.  For now, there's no such distinction.
 */
metaslab_class_t *
spa_metaslab_class_select(spa_t *spa)
{
	return (spa->spa_normal_class);
}

/*
 * Return how much space is allocated in the pool (i.e. sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of ZFS_VERSION == ZFS_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < ZFS_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >> SPA_MINBLOCKSHIFT) *
		    vd->vdev_deflate_ratio;
	}
	return (sz);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_spare_t),
	    offsetof(spa_spare_t, spare_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	spa_config_load();
}

void
spa_fini(void)
{
	spa_evict_all();

	zil_fini();
	dmu_fini();
	zio_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
}
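/*
 * Illustrative sketch (hypothetical): module lifecycle.  The kernel module's
 * _init path calls spa_init() with the global open mode before any pool can
 * be used, and spa_fini() tears everything down; FREAD/FWRITE are the usual
 * mode bits from sys/file.h, assumed available here.
 */
#ifdef SPA_MISC_EXAMPLES
static void
spa_example_lifecycle(void)
{
	spa_init(FREAD | FWRITE);	/* builds namespace AVL, loads config */
	ASSERT(!spa_busy());		/* no pools with alternate roots yet */
	spa_fini();			/* evicts all pools, destroys locks */
}
#endif	/* SPA_MISC_EXAMPLES */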