/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/sunddi.h>
#include <sys/arc.h>
#include "zfs_prop.h"

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
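 *
 * As an illustrative sketch (the pool name "tank" is hypothetical, not a
 * real caller), a thread that performs I/O against a named pool honors
 * all three orderings above:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa = spa_lookup("tank");
 *	spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
 *	... perform I/O ...
 *	spa_config_exit(spa, SCL_ZIO, FTAG);
 *	spa_close(spa, FTAG);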
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().
 *	SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dasize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
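 *
 * An illustrative (hypothetical) call site, which panics by default but
 * merely warns when zfs_recover is set:
 *
 *	zfs_panic_recover("freeing free segment (offset=%llu size=%llu)",
 *	    (u_longlong_t)offset, (u_longlong_t)size);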
 */
int zfs_recover = 0;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				/* Back out only the locks acquired so far */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				/* Back out only the locks acquired so far */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted)
				cv_wait(&scl->scl_cv, &scl->scl_lock);
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char c;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(name, "/@");
	if (cp) {
		c = *cp;
		*cp = '\0';
	}

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
	spa = avl_find(&spa_namespace_avl, &search, &where);

	if (cp)
		*cp = c;

	return (spa);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_sync_bplist.bpl_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
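 *
 * A minimal sketch of a caller (illustrative only; 'name' is assumed):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if ((spa = spa_lookup(name)) != NULL && spa_refcount_zero(spa))
 *		spa_remove(spa);
 *	mutex_exit(&spa_namespace_lock);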
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	spa_config_set(spa, NULL);

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_suspend_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
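 *
 * Each auxiliary device is keyed by vdev guid in a reference-counted AVL
 * tree, so registering the same device twice (a sketch, not a real caller)
 * simply bumps its count:
 *
 *	spa_aux_add(vd, &spa_spare_avl);	aux_count == 1
 *	spa_aux_add(vd, &spa_spare_avl);	aux_count == 2
 *	spa_aux_remove(vd, &spa_spare_avl);	aux_count == 1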
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare (active
 * or inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
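 *
 * For example (an illustrative sketch), a status query can ask whether a
 * guid is a known spare, and if so which pool is using it, without taking
 * the namespace lock:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(guid, &pool, NULL) && pool != 0ULL)
 *		(the spare is currently active in pool 'pool')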
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_space_update(vdev_t *vd, int64_t space, int64_t alloc)
{
	vdev_space_update(vd, space, alloc, B_FALSE);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
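 *
 * These are typically used in a bracketing pattern (a sketch; real callers
 * live elsewhere, e.g. in the vdev add/remove paths):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));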
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	/*
	 * If the config changed, notify the scrub thread that it must restart.
	 */
	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		dsl_pool_scrub_restart(spa->spa_dsl_pool);
		config_changed = B_TRUE;
	}

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);

	mutex_exit(&spa_namespace_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa)
{
	spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	if (vd != NULL)
		vdev_state_dirty(vd->vdev_top);

	spa_config_exit(spa, SCL_STATE_ALL, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Determine whether a pool with given pool_guid exists.  If device_guid is
 * non-zero, determine whether the pool exists *and* contains a device with the
 * specified device_guid.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

void
sprintf_blkptr(char *buf, int len, const blkptr_t *bp)
{
	int d;

	if (bp == NULL) {
		(void) snprintf(buf, len, "<NULL>");
		return;
	}

	if (BP_IS_HOLE(bp)) {
		(void) snprintf(buf, len, "<hole>");
		return;
	}

	(void) snprintf(buf, len, "[L%llu %s] %llxL/%llxP ",
	    (u_longlong_t)BP_GET_LEVEL(bp),
	    dmu_ot[BP_GET_TYPE(bp)].ot_name,
	    (u_longlong_t)BP_GET_LSIZE(bp),
	    (u_longlong_t)BP_GET_PSIZE(bp));

	for (d = 0; d < BP_GET_NDVAS(bp); d++) {
		const dva_t *dva = &bp->blk_dva[d];
		(void) snprintf(buf + strlen(buf), len - strlen(buf),
		    "DVA[%d]=<%llu:%llx:%llx> ", d,
		    (u_longlong_t)DVA_GET_VDEV(dva),
		    (u_longlong_t)DVA_GET_OFFSET(dva),
		    (u_longlong_t)DVA_GET_ASIZE(dva));
	}

	(void) snprintf(buf + strlen(buf), len - strlen(buf),
	    "%s %s %s %s birth=%llu fill=%llu cksum=%llx:%llx:%llx:%llx",
	    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name,
	    zio_compress_table[BP_GET_COMPRESS(bp)].ci_name,
	    BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",
	    BP_IS_GANG(bp) ?
"gang" : "contiguous", 1090 (u_longlong_t)bp->blk_birth, 1091 (u_longlong_t)bp->blk_fill, 1092 (u_longlong_t)bp->blk_cksum.zc_word[0], 1093 (u_longlong_t)bp->blk_cksum.zc_word[1], 1094 (u_longlong_t)bp->blk_cksum.zc_word[2], 1095 (u_longlong_t)bp->blk_cksum.zc_word[3]); 1096 } 1097 1098 void 1099 spa_freeze(spa_t *spa) 1100 { 1101 uint64_t freeze_txg = 0; 1102 1103 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1104 if (spa->spa_freeze_txg == UINT64_MAX) { 1105 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1106 spa->spa_freeze_txg = freeze_txg; 1107 } 1108 spa_config_exit(spa, SCL_ALL, FTAG); 1109 if (freeze_txg != 0) 1110 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1111 } 1112 1113 void 1114 zfs_panic_recover(const char *fmt, ...) 1115 { 1116 va_list adx; 1117 1118 va_start(adx, fmt); 1119 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1120 va_end(adx); 1121 } 1122 1123 /* 1124 * ========================================================================== 1125 * Accessor functions 1126 * ========================================================================== 1127 */ 1128 1129 boolean_t 1130 spa_shutting_down(spa_t *spa) 1131 { 1132 return (spa->spa_async_suspended); 1133 } 1134 1135 dsl_pool_t * 1136 spa_get_dsl(spa_t *spa) 1137 { 1138 return (spa->spa_dsl_pool); 1139 } 1140 1141 blkptr_t * 1142 spa_get_rootblkptr(spa_t *spa) 1143 { 1144 return (&spa->spa_ubsync.ub_rootbp); 1145 } 1146 1147 void 1148 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1149 { 1150 spa->spa_uberblock.ub_rootbp = *bp; 1151 } 1152 1153 void 1154 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1155 { 1156 if (spa->spa_root == NULL) 1157 buf[0] = '\0'; 1158 else 1159 (void) strncpy(buf, spa->spa_root, buflen); 1160 } 1161 1162 int 1163 spa_sync_pass(spa_t *spa) 1164 { 1165 return (spa->spa_sync_pass); 1166 } 1167 1168 char * 1169 spa_name(spa_t *spa) 1170 { 1171 return (spa->spa_name); 1172 } 1173 1174 uint64_t 1175 spa_guid(spa_t *spa) 1176 { 1177 /* 1178 * If we fail to parse the config during spa_load(), we can go through 1179 * the error path (which posts an ereport) and end up here with no root 1180 * vdev. We stash the original pool guid in 'spa_load_guid' to handle 1181 * this case. 1182 */ 1183 if (spa->spa_root_vdev != NULL) 1184 return (spa->spa_root_vdev->vdev_guid); 1185 else 1186 return (spa->spa_load_guid); 1187 } 1188 1189 uint64_t 1190 spa_last_synced_txg(spa_t *spa) 1191 { 1192 return (spa->spa_ubsync.ub_txg); 1193 } 1194 1195 uint64_t 1196 spa_first_txg(spa_t *spa) 1197 { 1198 return (spa->spa_first_txg); 1199 } 1200 1201 pool_state_t 1202 spa_state(spa_t *spa) 1203 { 1204 return (spa->spa_state); 1205 } 1206 1207 uint64_t 1208 spa_freeze_txg(spa_t *spa) 1209 { 1210 return (spa->spa_freeze_txg); 1211 } 1212 1213 /* 1214 * Return how much space is allocated in the pool (ie. sum of all asize) 1215 */ 1216 uint64_t 1217 spa_get_alloc(spa_t *spa) 1218 { 1219 return (spa->spa_root_vdev->vdev_stat.vs_alloc); 1220 } 1221 1222 /* 1223 * Return how much (raid-z inflated) space there is in the pool. 1224 */ 1225 uint64_t 1226 spa_get_space(spa_t *spa) 1227 { 1228 return (spa->spa_root_vdev->vdev_stat.vs_space); 1229 } 1230 1231 /* 1232 * Return the amount of raid-z-deflated space in the pool. 
 */
uint64_t
spa_get_dspace(spa_t *spa)
{
	if (spa->spa_deflate)
		return (spa->spa_root_vdev->vdev_stat.vs_dspace);
	else
		return (spa->spa_root_vdev->vdev_stat.vs_space);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * For now, the worst case is 512-byte RAID-Z blocks, in which
	 * case the space requirement is exactly 2x; so just assume that.
	 * Add to this the fact that we can have up to 3 DVAs per bp, and
	 * we have to multiply by a total of 6x.
	 */
	return (lsize * 6);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

uint64_t
bp_get_dasize(spa_t *spa, const blkptr_t *bp)
{
	int sz = 0, i;

	if (!spa->spa_deflate)
		return (BP_GET_ASIZE(bp));

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		vdev_t *vd =
		    vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[i]));
		if (vd)
			sz += (DVA_GET_ASIZE(&bp->blk_dva[i]) >>
			    SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);
	return (sz);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

	refcount_init();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	spa_config_load();
	l2arc_start();
}
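
/*
 * Illustrative note: spa_init() must be called once before any pool can be
 * used, and spa_fini() tears everything down in reverse order.  A
 * hypothetical module init/fini pair (not a verbatim caller) might do:
 *
 *	spa_init(FREAD | FWRITE);
 *	...
 *	spa_fini();
 */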

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

/*
 * Return whether this pool is the root pool.
 */
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}