/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/sunddi.h>
#include <sys/arc.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *      This lock must be acquired to do any of the following:
 *
 *              - Lookup a spa_t by name
 *              - Add or remove a spa_t from the namespace
 *              - Increase spa_refcount from non-zero
 *              - Check if spa_refcount is zero
 *              - Rename a spa_t
 *              - add/remove/attach/detach devices
 *              - Held for the duration of create/destroy/import/export
 *
 *      It does not need to handle recursion.  A create or destroy may
 *      reference objects (files or zvols) in other pools, but by
 *      definition they must have an existing reference, and will never need
 *      to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *      This reference count keeps track of any active users of the spa_t.
 *      The spa_t cannot be destroyed or freed while this is non-zero.
 *      Internally, the refcount is never really 'zero' - opening a pool
 *      implicitly keeps some references in the DMU.  Internally we check
 *      against spa_minref, but present the image of a zero/non-zero value
 *      to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *      This protects the spa_t from config changes, and must be held in
 *      the following circumstances:
 *
 *              - RW_READER to perform I/O to the spa
 *              - RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *              spa_namespace_lock      ->      spa_refcount
 *
 *      The namespace lock must be acquired to increase the refcount from 0
 *      or to check if it is zero.
 *
 *              spa_refcount            ->      spa_config_lock[]
 *
 *      There must be at least one valid reference on the spa_t to acquire
 *      the config lock.
 *
 *              spa_namespace_lock      ->      spa_config_lock[]
 *
 *      The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *      spa_lookup()            Lookup a spa_t by name.
 *
 *      spa_add()               Create a new spa_t in the namespace.
 *
 *      spa_remove()            Remove a spa_t from the namespace.  This also
 *                              frees up any memory associated with the spa_t.
 *
 *      spa_next()              Returns the next spa_t in the system, or the
 *                              first if NULL is passed.
 *
 *      spa_evict_all()         Shutdown and remove all spa_t structures in
 *                              the system.
 *
 *      spa_guid_exists()       Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *      spa_open_ref()          Adds a reference to the given spa_t.  Must be
 *                              called with spa_namespace_lock held if the
 *                              refcount is currently zero.
 *
 *      spa_close()             Remove a reference from the spa_t.  This will
 *                              not free the spa_t or remove it from the
 *                              namespace.  No locking is required.
 *
 *      spa_refcount_zero()     Returns true if the refcount is currently
 *                              zero.  Must be called with spa_namespace_lock
 *                              held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *      Protects changes to the vdev tree topology, such as vdev
 *      add/remove/attach/detach.  Protects the dirty config list
 *      (spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *      Protects changes to pool state and vdev state, such as vdev
 *      online/offline/fault/degrade/clear.  Protects the dirty state list
 *      (spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *      Held by bp-level zios (those which have no io_vd upon entry)
 *      to prevent changes to the vdev tree.  The bp-level zio implicitly
 *      protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *      Protects changes to metaslab groups and classes.
 *      Held as reader by metaslab_free().
 *      SCL_FREE is distinct from
 *      SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *      blocks in zio_done() while another i/o that holds either
 *      SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *      Held as reader to prevent changes to the vdev tree during trivial
 *      inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *      other locks, and lower than all of them, to ensure that it's safe
 *      to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)  spa_props_lock protects pool properties, spa_config and
 *      spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)  I/O operations on leaf vdevs.  For any zio operation that takes
 *      an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *      or zio_write_phys() -- the caller must ensure that the config cannot
 *      change in the interim, and that the vdev cannot be reopened.
 *      SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *      spa_vdev_enter()        Acquire the namespace lock and the config lock
 *                              for writing.
 *
 *      spa_vdev_exit()         Release the config lock, wait for all I/O
 *                              to complete, sync the updated configs to the
 *                              cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
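
/*
 * Illustrative sketch (not part of the original source): a typical
 * reader-side use of the config lock described above.  A trivial inquiry
 * takes SCL_VDEV as reader around the vdev-tree access, exactly as
 * bp_get_dsize() does later in this file.  The guard macro and function
 * name are hypothetical; the sketch assumes an open, healthy pool.
 */
#ifdef SPA_USAGE_EXAMPLES
static uint64_t
spa_example_root_guid(spa_t *spa)
{
        uint64_t guid;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
        guid = spa->spa_root_vdev->vdev_guid;   /* tree can't change here */
        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (guid);
}
#endif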

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;


/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
                cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
                refcount_create(&scl->scl_count);
                scl->scl_writer = NULL;
                scl->scl_write_wanted = 0;
        }
}

static void
spa_config_lock_destroy(spa_t *spa)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_destroy(&scl->scl_lock);
                cv_destroy(&scl->scl_cv);
                refcount_destroy(&scl->scl_count);
                ASSERT(scl->scl_writer == NULL);
                ASSERT(scl->scl_write_wanted == 0);
        }
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        if (scl->scl_writer || scl->scl_write_wanted) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks ^ (1 << i), tag);
                                return (0);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        if (!refcount_is_zero(&scl->scl_count)) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks ^ (1 << i), tag);
                                return (0);
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        int wlocks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (scl->scl_writer == curthread)
                        wlocks_held |= (1 << i);
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        while (scl->scl_writer || scl->scl_write_wanted) {
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        while (!refcount_is_zero(&scl->scl_count)) {
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
        for (int i = SCL_LOCKS - 1; i >= 0; i--) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                ASSERT(!refcount_is_zero(&scl->scl_count));
                if (refcount_remove(&scl->scl_count, tag) == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL; /* OK in either case */
                        cv_broadcast(&scl->scl_cv);
                }
                mutex_exit(&scl->scl_lock);
        }
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
        int locks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
                    (rw == RW_WRITER && scl->scl_writer == curthread))
                        locks_held |= 1 << i;
        }

        return (locks_held);
}
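
/*
 * Illustrative sketch (not part of the original source): the non-blocking
 * variant above is useful from contexts that must not stall behind a
 * writer.  On failure, spa_config_tryenter() has already dropped anything
 * it acquired, so the caller simply defers its work.  The guard macro and
 * function name are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static boolean_t
spa_example_try_inquiry(spa_t *spa)
{
        if (!spa_config_tryenter(spa, SCL_VDEV, FTAG, RW_READER))
                return (B_FALSE);       /* a writer holds or wants the lock */

        /* ... inspect the vdev tree ... */

        spa_config_exit(spa, SCL_VDEV, FTAG);
        return (B_TRUE);
}
#endif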

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
        static spa_t search;    /* spa_t is large; don't allocate on stack */
        spa_t *spa;
        avl_index_t where;
        char c;
        char *cp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /*
         * If it's a full dataset name, figure out the pool name and
         * just use that.
         */
        cp = strpbrk(name, "/@");
        if (cp) {
                c = *cp;
                *cp = '\0';
        }

        (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
        spa = avl_find(&spa_namespace_avl, &search, &where);

        if (cp)
                *cp = c;

        return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
        spa_t *spa;
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_init(&spa->spa_free_bplist[t]);
        bplist_init(&spa->spa_deferred_bplist);

        (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
        spa->spa_state = POOL_STATE_UNINITIALIZED;
        spa->spa_freeze_txg = UINT64_MAX;
        spa->spa_final_txg = UINT64_MAX;
        spa->spa_load_max_txg = UINT64_MAX;

        refcount_create(&spa->spa_refcount);
        spa_config_lock_init(spa);

        avl_add(&spa_namespace_avl, spa);

        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);

        /*
         * Set the alternate root, if there is one.
         */
        if (altroot) {
                spa->spa_root = spa_strdup(altroot);
                spa_active_count++;
        }

        /*
         * Every pool starts with the default cachefile
         */
        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));

        dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
        dp->scd_path = spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);

        if (config != NULL)
                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);

        return (spa);
}
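
/*
 * Illustrative sketch (not part of the original source): the canonical
 * create-time pattern described above -- check for an existing spa_t with
 * spa_lookup() before adding one, all under spa_namespace_lock.  The guard
 * macro and function name are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static int
spa_example_create_entry(const char *pool, const char *altroot)
{
        spa_t *spa;

        mutex_enter(&spa_namespace_lock);
        if (spa_lookup(pool) != NULL) {
                mutex_exit(&spa_namespace_lock);
                return (EEXIST);
        }
        spa = spa_add(pool, NULL, altroot);
        ASSERT(spa != NULL);

        /* ... activate and configure the new pool ... */

        mutex_exit(&spa_namespace_lock);
        return (0);
}
#endif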
/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        avl_remove(&spa_namespace_avl, spa);
        cv_broadcast(&spa_namespace_cv);

        if (spa->spa_root) {
                spa_strfree(spa->spa_root);
                spa_active_count--;
        }

        while ((dp = list_head(&spa->spa_config_list)) != NULL) {
                list_remove(&spa->spa_config_list, dp);
                if (dp->scd_path != NULL)
                        spa_strfree(dp->scd_path);
                kmem_free(dp, sizeof (spa_config_dirent_t));
        }

        list_destroy(&spa->spa_config_list);

        spa_config_set(spa, NULL);

        refcount_destroy(&spa->spa_refcount);

        spa_config_lock_destroy(spa);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_fini(&spa->spa_free_bplist[t]);
        bplist_fini(&spa->spa_deferred_bplist);

        cv_destroy(&spa->spa_async_cv);
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);

        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_scrub_lock);
        mutex_destroy(&spa->spa_errlog_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_history_lock);
        mutex_destroy(&spa->spa_props_lock);
        mutex_destroy(&spa->spa_suspend_lock);

        kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        if (prev)
                return (AVL_NEXT(&spa_namespace_avl, prev));
        else
                return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
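
/*
 * Illustrative sketch (not part of the original source): holding a
 * reference across pool access.  Because the refcount may be at its floor
 * (spa_minref), the first reference is taken with the namespace lock held;
 * dropping it later needs no locking.  The guard macro and function name
 * are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static int
spa_example_with_ref(const char *name)
{
        spa_t *spa;

        mutex_enter(&spa_namespace_lock);
        if ((spa = spa_lookup(name)) == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (ENOENT);
        }
        spa_open_ref(spa, FTAG);
        mutex_exit(&spa_namespace_lock);

        /* ... the spa_t cannot be freed while we hold the reference ... */

        spa_close(spa, FTAG);
        return (0);
}
#endif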

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
        uint64_t        aux_guid;
        uint64_t        aux_pool;
        avl_node_t      aux_avl;
        int             aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
        const spa_aux_t *sa = a;
        const spa_aux_t *sb = b;

        if (sa->aux_guid < sb->aux_guid)
                return (-1);
        else if (sa->aux_guid > sb->aux_guid)
                return (1);
        else
                return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
        avl_index_t where;
        spa_aux_t search;
        spa_aux_t *aux;

        search.aux_guid = vd->vdev_guid;
        if ((aux = avl_find(avl, &search, &where)) != NULL) {
                aux->aux_count++;
        } else {
                aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
                aux->aux_guid = vd->vdev_guid;
                aux->aux_count = 1;
                avl_insert(avl, aux, where);
        }
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search;
        spa_aux_t *aux;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        aux = avl_find(avl, &search, &where);

        ASSERT(aux != NULL);

        if (--aux->aux_count == 0) {
                avl_remove(avl, aux);
                kmem_free(aux, sizeof (spa_aux_t));
        } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
                aux->aux_pool = 0ULL;
        }
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
        spa_aux_t search, *found;

        search.aux_guid = guid;
        found = avl_find(avl, &search, NULL);

        if (pool) {
                if (found)
                        *pool = found->aux_pool;
                else
                        *pool = 0ULL;
        }

        if (refcnt) {
                if (found)
                        *refcnt = found->aux_count;
                else
                        *refcnt = 0;
        }

        return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search, *found;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        found = avl_find(avl, &search, &where);
        ASSERT(found != NULL);
        ASSERT(found->aux_pool == 0ULL);

        found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *      - A spare may be part of multiple pools.
 *      - A spare may be added to a pool even if it's actively in use within
 *        another pool.
 *      - A spare in use in any pool can only be the source of a replacement
 *        if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare (active
 * or inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(!vd->vdev_isspare);
        spa_aux_add(vd, &spa_spare_avl);
        vd->vdev_isspare = B_TRUE;
        mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_remove(vd, &spa_spare_avl);
        vd->vdev_isspare = B_FALSE;
        mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
        boolean_t found;

        mutex_enter(&spa_spare_lock);
        found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);

        return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_activate(vd, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);
}
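
/*
 * Illustrative sketch (not part of the original source): the status-query
 * path mentioned above can check whether a guid is a known spare, and
 * whether some pool is actively using it, without spa_namespace_lock.  The
 * guard macro and function name are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static boolean_t
spa_example_spare_in_use(uint64_t guid)
{
        uint64_t pool;

        if (!spa_spare_exists(guid, &pool, NULL))
                return (B_FALSE);       /* not a spare at all */

        return (pool != 0ULL);          /* nonzero: active in that pool */
}
#endif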

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(!vd->vdev_isl2cache);
        spa_aux_add(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_TRUE;
        mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_remove(vd, &spa_l2cache_avl);
        vd->vdev_isl2cache = B_FALSE;
        mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
        boolean_t found;

        mutex_enter(&spa_l2cache_lock);
        found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);

        return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
        mutex_enter(&spa_l2cache_lock);
        ASSERT(vd->vdev_isl2cache);
        spa_aux_activate(vd, &spa_l2cache_avl);
        mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
        mutex_enter(&spa_namespace_lock);
        return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

        return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    char *tag)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        int config_changed = B_FALSE;

        ASSERT(txg > spa_last_synced_txg(spa));

        spa->spa_pending_vdev = NULL;

        /*
         * Reassess the DTLs.
         */
        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

        /*
         * If the config changed, notify the scrub thread that it must
         * restart.
         */
        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                dsl_pool_scrub_restart(spa->spa_dsl_pool);
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /*
         * Verify the metaslab classes.
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

        spa_config_exit(spa, SCL_ALL, spa);

        /*
         * Panic the system if the specified tag requires it.  This
         * is useful for ensuring that configurations are updated
         * transactionally.
         */
        if (zio_injection_enabled)
                zio_handle_panic_injection(spa, tag);

        /*
         * Note: this txg_wait_synced() is important because it ensures
         * that there won't be more than one config change per txg.
         * This allows us to use the txg as the generation number.
         */
        if (error == 0)
                txg_wait_synced(spa->spa_dsl_pool, txg);

        if (vd != NULL) {
                ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
                spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
                vdev_free(vd);
                spa_config_exit(spa, SCL_ALL, spa);
        }

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed)
                spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
        spa_vdev_config_exit(spa, vd, txg, error, FTAG);
        mutex_exit(&spa_namespace_lock);

        return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
        int locks = SCL_STATE_ALL | oplocks;

        spa_config_enter(spa, locks, spa, RW_WRITER);
        spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
        if (vd != NULL || error == 0)
                vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
                    0, 0, B_FALSE);

        if (vd != NULL) {
                vdev_state_dirty(vd->vdev_top);
                spa->spa_config_generation++;
        }

        ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
        spa_config_exit(spa, spa->spa_vdev_locks, spa);

        /*
         * If anything changed, wait for it to sync.  This ensures that,
         * from the system administrator's perspective, zpool(1M) commands
         * are synchronous.  This is important for things like zpool offline:
         * when the command completes, you expect no further I/O from ZFS.
         */
        if (vd != NULL)
                txg_wait_synced(spa->spa_dsl_pool, 0);

        return (error);
}
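
/*
 * Illustrative sketch (not part of the original source): the standard
 * shape of a vdev add/remove operation.  spa_vdev_enter() returns the txg
 * in which the change will commit; spa_vdev_exit() syncs that txg and
 * updates the config cache before returning the caller's error.  The guard
 * macro and function name are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static int
spa_example_vdev_change(spa_t *spa)
{
        uint64_t txg;
        int error = 0;

        txg = spa_vdev_enter(spa);

        /* ... modify the vdev tree; set 'error' on failure ... */

        return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif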

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
        spa_t *spa;
        int err;

        /*
         * Lookup the spa_t and grab the config lock for writing.  We need to
         * actually open the pool so that we can sync out the necessary
         * labels.  It's OK to call spa_open() with the namespace lock held
         * because we allow recursive calls for other reasons.
         */
        mutex_enter(&spa_namespace_lock);
        if ((err = spa_open(name, &spa, FTAG)) != 0) {
                mutex_exit(&spa_namespace_lock);
                return (err);
        }

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        avl_remove(&spa_namespace_avl, spa);
        (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
        avl_add(&spa_namespace_avl, spa);

        /*
         * Sync all labels to disk with the new names by marking the root
         * vdev dirty and waiting for it to sync.  It will pick up the new
         * pool name during the sync.
         */
        vdev_config_dirty(spa->spa_root_vdev);

        spa_config_exit(spa, SCL_ALL, FTAG);

        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * Sync the updated config cache.
         */
        spa_config_sync(spa, B_FALSE, B_TRUE);

        spa_close(spa, FTAG);

        mutex_exit(&spa_namespace_lock);

        return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with
 * the specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
        spa_t *spa;
        avl_tree_t *t = &spa_namespace_avl;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
                if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                        continue;
                if (spa->spa_root_vdev == NULL)
                        continue;
                if (spa_guid(spa) == pool_guid) {
                        if (device_guid == 0)
                                break;

                        if (vdev_lookup_by_guid(spa->spa_root_vdev,
                            device_guid) != NULL)
                                break;

                        /*
                         * Check any devices we may be in the process of
                         * adding.
                         */
                        if (spa->spa_pending_vdev) {
                                if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                                    device_guid) != NULL)
                                        break;
                        }
                }
        }

        return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
        size_t len;
        char *new;

        len = strlen(s);
        new = kmem_alloc(len + 1, KM_SLEEP);
        bcopy(s, new, len);
        new[len] = '\0';

        return (new);
}

void
spa_strfree(char *s)
{
        kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
        uint64_t r;

        ASSERT(range != 0);

        (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

        return (r % range);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
        char *type = dmu_ot[BP_GET_TYPE(bp)].ot_name;
        char *checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
        char *compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;

        SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
        uint64_t freeze_txg = 0;

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
        if (spa->spa_freeze_txg == UINT64_MAX) {
                freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
                spa->spa_freeze_txg = freeze_txg;
        }
        spa_config_exit(spa, SCL_ALL, FTAG);
        if (freeze_txg != 0)
                txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
        va_list adx;

        va_start(adx, fmt);
        vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
        va_end(adx);
}
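
/*
 * Illustrative sketch (not part of the original source): callers use
 * zfs_panic_recover() at points where on-disk corruption would otherwise
 * be fatal; with the zfs_recover tunable set, the same call merely logs a
 * warning and execution continues.  The guard macro, function name, and
 * message are hypothetical.
 */
#ifdef SPA_USAGE_EXAMPLES
static void
spa_example_validate(uint64_t value, uint64_t limit)
{
        if (value > limit)
                zfs_panic_recover("value %llu exceeds limit %llu",
                    (u_longlong_t)value, (u_longlong_t)limit);
}
#endif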

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
        return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
        return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
        return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
        spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
        if (spa->spa_root == NULL)
                buf[0] = '\0';
        else
                (void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
        return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
        return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
        /*
         * If we fail to parse the config during spa_load(), we can go
         * through the error path (which posts an ereport) and end up here
         * with no root vdev.  We stash the original pool guid in
         * 'spa_load_guid' to handle this case.
         */
        if (spa->spa_root_vdev != NULL)
                return (spa->spa_root_vdev->vdev_guid);
        else
                return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
        return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
        return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
        return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
        return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
        return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
        /*
         * The worst case is single-sector max-parity RAID-Z blocks, in
         * which case the space requirement is exactly
         * (VDEV_RAIDZ_MAXPARITY + 1) times the size; so just assume that.
         * Add to this the fact that we can have up to 3 DVAs per bp, and
         * one more factor of 2 because the block may be dittoed with up to
         * 3 DVAs by ddt_sync().
         */
        return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
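
/*
 * Worked example (illustrative, assuming the era's constants
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3): a 512-byte logical
 * block is charged 512 * (3 + 1) * 3 * 2 = 12288 bytes by spa_get_asize(),
 * a worst-case inflation of 24x.
 */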

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
        return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
        return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
        return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
        return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
        return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
        return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
        /*
         * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
         * handle BPs with more than one DVA allocated.  Set our max
         * replication level accordingly.
         */
        if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
                return (1);
        return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
        uint64_t asize = DVA_GET_ASIZE(dva);
        uint64_t dsize = asize;

        ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

        if (asize != 0 && spa->spa_deflate) {
                vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
                dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
        }

        return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
        uint64_t dsize = 0;

        spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

        for (int d = 0; d < SPA_DVAS_PER_BP; d++)
                dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        spa_config_exit(spa, SCL_VDEV, FTAG);

        return (dsize);
}
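
/*
 * Worked example (illustrative, with a hypothetical deflate ratio): for a
 * DVA with asize = 131072 (128K) on a vdev whose vdev_deflate_ratio is
 * 200, dva_get_dsize_sync() computes
 * dsize = (131072 >> SPA_MINBLOCKSHIFT) * 200 = 256 * 200 = 51200.
 * With deflation disabled, dsize is simply asize.  Note that
 * bp_get_dsize() takes SCL_VDEV itself, whereas the _sync() variants
 * assert that the caller already holds a config lock.
 */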

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
        const spa_t *s1 = a1;
        const spa_t *s2 = a2;
        int s;

        s = strcmp(s1->spa_name, s2->spa_name);
        if (s > 0)
                return (1);
        if (s < 0)
                return (-1);
        return (0);
}

int
spa_busy(void)
{
        return (spa_active_count);
}

void
spa_boot_init()
{
        spa_config_load();
}

void
spa_init(int mode)
{
        mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

        avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
            offsetof(spa_t, spa_avl));

        avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
            offsetof(spa_aux_t, aux_avl));

        spa_mode_global = mode;

        refcount_init();
        unique_init();
        zio_init();
        dmu_init();
        zil_init();
        vdev_cache_stat_init();
        zfs_prop_init();
        zpool_prop_init();
        spa_config_load();
        l2arc_start();
}

void
spa_fini(void)
{
        l2arc_stop();

        spa_evict_all();

        vdev_cache_stat_fini();
        zil_fini();
        dmu_fini();
        zio_fini();
        unique_fini();
        refcount_fini();

        avl_destroy(&spa_namespace_avl);
        avl_destroy(&spa_spare_avl);
        avl_destroy(&spa_l2cache_avl);

        cv_destroy(&spa_namespace_cv);
        mutex_destroy(&spa_namespace_lock);
        mutex_destroy(&spa_spare_lock);
        mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
        return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
        return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
        spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
        return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
        return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
        return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
        return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
        return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
        return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
        return (spa->spa_dedup_checksum);
}