/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/sunddi.h>
#include <sys/arc.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
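 *
 *	As an illustrative sketch only (a hypothetical caller; FTAG is the
 *	usual tag-by-function convention), acquiring both locks in the
 *	required order looks like:
 *
 *		mutex_enter(&spa_namespace_lock);
 *		spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 *		...
 *		spa_config_exit(spa, SCL_ALL, FTAG);
 *		mutex_exit(&spa_namespace_lock);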
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dasize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

static avl_tree_t spa_namespace_avl;

kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
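
/*
 * Illustrative sketch only (hypothetical caller; FTAG is the standard
 * tag-by-caller convention): a typical reader-side use of the config
 * locks, e.g. to inspect the vdev tree during a trivial inquiry.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vdev_t *rvd = spa->spa_root_vdev;
 *	...read-only inspection of rvd...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * spa_config_tryenter() follows the same pattern but returns 0 instead of
 * blocking, so callers must check its return value and retry or bail out.
 */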
/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_async_root_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_async_root_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	return (spa);
}
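
/*
 * Illustrative sketch only (hypothetical caller): the lookup-then-add
 * pattern the comment above prescribes.  'name' and 'altroot' are
 * placeholders.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (EEXIST);
 *	}
 *	spa = spa_add(name, altroot);
 *	...
 *	mutex_exit(&spa_namespace_lock);
 */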
/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	rw_destroy(&spa->spa_traverse_lock);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_async_root_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_async_root_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_suspend_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
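
/*
 * Illustrative sketch only (hypothetical caller): the reference pattern the
 * functions above implement.  The namespace lock is needed only when the
 * refcount may be at its floor (spa_minref).
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	...use spa...
 *	spa_close(spa, FTAG);
 */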
/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
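
/*
 * Illustrative sketch only (hypothetical caller): querying the aux trees
 * through the spare wrappers defined below.  A nonzero 'pool' out-parameter
 * means the spare is currently active in that pool.
 *
 *	uint64_t pool;
 *	int refcnt;
 *	if (spa_spare_exists(guid, &pool, &refcnt) && pool != 0)
 *		...the device is an active spare in pool 'pool'...
 */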
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare
 * (active or inactive).  When a spare is made active (used to replace a
 * device in the pool), we also keep track of which pool it's been made a
 * part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
{
	vdev_space_update(vd, space, alloc, B_FALSE);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}
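
/*
 * Illustrative sketch only (hypothetical caller): the canonical
 * reconfiguration pattern built on spa_vdev_enter() above and
 * spa_vdev_exit() below.  'newvd' and the body are placeholders.
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	error = ...modify the vdev tree, possibly creating newvd...;
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */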
/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		dsl_pool_scrub_restart(spa->spa_dsl_pool);
		config_changed = B_TRUE;
	}

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl.smo_object == 0);
		vdev_free(vd);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa)
{
	spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	if (vd != NULL)
		vdev_state_dirty(vd->vdev_top);

	spa_config_exit(spa, SCL_STATE_ALL, spa);

	return (error);
}
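
/*
 * Illustrative sketch only (hypothetical caller): the state-change analogue
 * of the spa_vdev_enter/exit pattern above, used for things like taking a
 * vdev offline without changing the tree's topology.
 *
 *	spa_vdev_state_enter(spa);
 *	...change vd's state, e.g. set vd->vdev_offline...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */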
/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with
 * the specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ? "gang" : "contiguous",
	    (u_longlong_t)bp->blk_birth,
	    (u_longlong_t)bp->blk_fill,
	    (u_longlong_t)bp->blk_cksum.zc_word[0],
	    (u_longlong_t)bp->blk_cksum.zc_word[1],
	    (u_longlong_t)bp->blk_cksum.zc_word[2],
	    (u_longlong_t)bp->blk_cksum.zc_word[3]);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
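
/*
 * Illustrative sketch only (hypothetical caller): formatting a block
 * pointer with sprintf_blkptr() above for debug output.  BP_SPRINTF_LEN is
 * assumed here to be the usual buffer-size constant from spa.h.
 *
 *	char blkbuf[BP_SPRINTF_LEN];
 *	sprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
 *	dprintf("bp = %s\n", blkbuf);
 */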
void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

krwlock_t *
spa_traverse_rwlock(spa_t *spa)
{
	return (&spa->spa_traverse_lock);
}

boolean_t
spa_traverse_wanted(spa_t *spa)
{
	return (spa->spa_traverse_wanted);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_load_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev != NULL)
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

int
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return how much space is allocated in the pool (i.e. sum of all asize).
 */
uint64_t
spa_get_alloc(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_alloc);
}

/*
 * Return how much (raid-z inflated) space there is in the pool.
 */
uint64_t
spa_get_space(spa_t *spa)
{
	return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/*
 * Return the amount of raid-z-deflated space in the pool.
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}
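
/*
 * Illustrative arithmetic for the 6x bound above: each DVA can inflate to
 * at most 2x its logical size under worst-case RAID-Z (512-byte blocks),
 * and a bp can carry up to 3 DVAs, so 3 * 2x = 6x.  For example, reserving
 * space for an 8K logical write budgets 48K.
 */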
/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		if (vd)
			sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
			    SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
	return (sz);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
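
/*
 * Illustrative sketch only (hypothetical caller): spa_init()/spa_fini()
 * bracket the life of the module, e.g. from the zfs module's _init/_fini
 * entry points.  The exact mode flags passed are an assumption here.
 *
 *	spa_init(FREAD | FWRITE);	(at module load)
 *	...
 *	spa_fini();			(at module unload)
 */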
/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

/*
 * Return whether this pool is the root pool.
 */
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}