1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright (c) 2013 by Delphix. All rights reserved. 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27 #include <sys/zfs_context.h> 28 #include <sys/spa_impl.h> 29 #include <sys/spa_boot.h> 30 #include <sys/zio.h> 31 #include <sys/zio_checksum.h> 32 #include <sys/zio_compress.h> 33 #include <sys/dmu.h> 34 #include <sys/dmu_tx.h> 35 #include <sys/zap.h> 36 #include <sys/zil.h> 37 #include <sys/vdev_impl.h> 38 #include <sys/metaslab.h> 39 #include <sys/uberblock_impl.h> 40 #include <sys/txg.h> 41 #include <sys/avl.h> 42 #include <sys/unique.h> 43 #include <sys/dsl_pool.h> 44 #include <sys/dsl_dir.h> 45 #include <sys/dsl_prop.h> 46 #include <sys/dsl_scan.h> 47 #include <sys/fs/zfs.h> 48 #include <sys/metaslab_impl.h> 49 #include <sys/arc.h> 50 #include <sys/ddt.h> 51 #include "zfs_prop.h" 52 #include "zfeature_common.h" 53 54 /* 55 * SPA locking 56 * 57 * There are four basic locks for managing spa_t structures: 58 * 59 * spa_namespace_lock (global mutex) 60 * 61 * This lock must be acquired to do any of the following: 62 * 63 * - Lookup a spa_t by name 64 * - Add or remove a spa_t from the namespace 65 * - Increase spa_refcount from non-zero 66 * - Check if spa_refcount is zero 67 * - Rename a spa_t 68 * - add/remove/attach/detach devices 69 * - Held for the duration of create/destroy/import/export 70 * 71 * It does not need to handle recursion. A create or destroy may 72 * reference objects (files or zvols) in other pools, but by 73 * definition they must have an existing reference, and will never need 74 * to lookup a spa_t by name. 75 * 76 * spa_refcount (per-spa refcount_t protected by mutex) 77 * 78 * This reference count keep track of any active users of the spa_t. The 79 * spa_t cannot be destroyed or freed while this is non-zero. Internally, 80 * the refcount is never really 'zero' - opening a pool implicitly keeps 81 * some references in the DMU. Internally we check against spa_minref, but 82 * present the image of a zero/non-zero value to consumers. 83 * 84 * spa_config_lock[] (per-spa array of rwlocks) 85 * 86 * This protects the spa_t from config changes, and must be held in 87 * the following circumstances: 88 * 89 * - RW_READER to perform I/O to the spa 90 * - RW_WRITER to change the vdev config 91 * 92 * The locking order is fairly straightforward: 93 * 94 * spa_namespace_lock -> spa_refcount 95 * 96 * The namespace lock must be acquired to increase the refcount from 0 97 * or to check if it is zero. 98 * 99 * spa_refcount -> spa_config_lock[] 100 * 101 * There must be at least one valid reference on the spa_t to acquire 102 * the config lock. 103 * 104 * spa_namespace_lock -> spa_config_lock[] 105 * 106 * The namespace lock must always be taken before the config lock. 107 * 108 * 109 * The spa_namespace_lock can be acquired directly and is globally visible. 110 * 111 * The namespace is manipulated using the following functions, all of which 112 * require the spa_namespace_lock to be held. 113 * 114 * spa_lookup() Lookup a spa_t by name. 115 * 116 * spa_add() Create a new spa_t in the namespace. 117 * 118 * spa_remove() Remove a spa_t from the namespace. This also 119 * frees up any memory associated with the spa_t. 120 * 121 * spa_next() Returns the next spa_t in the system, or the 122 * first if NULL is passed. 123 * 124 * spa_evict_all() Shutdown and remove all spa_t structures in 125 * the system. 126 * 127 * spa_guid_exists() Determine whether a pool/device guid exists. 128 * 129 * The spa_refcount is manipulated using the following functions: 130 * 131 * spa_open_ref() Adds a reference to the given spa_t. Must be 132 * called with spa_namespace_lock held if the 133 * refcount is currently zero. 134 * 135 * spa_close() Remove a reference from the spa_t. This will 136 * not free the spa_t or remove it from the 137 * namespace. No locking is required. 138 * 139 * spa_refcount_zero() Returns true if the refcount is currently 140 * zero. Must be called with spa_namespace_lock 141 * held. 142 * 143 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 144 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 145 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 146 * 147 * To read the configuration, it suffices to hold one of these locks as reader. 148 * To modify the configuration, you must hold all locks as writer. To modify 149 * vdev state without altering the vdev tree's topology (e.g. online/offline), 150 * you must hold SCL_STATE and SCL_ZIO as writer. 151 * 152 * We use these distinct config locks to avoid recursive lock entry. 153 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 154 * block allocations (SCL_ALLOC), which may require reading space maps 155 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 156 * 157 * The spa config locks cannot be normal rwlocks because we need the 158 * ability to hand off ownership. For example, SCL_ZIO is acquired 159 * by the issuing thread and later released by an interrupt thread. 160 * They do, however, obey the usual write-wanted semantics to prevent 161 * writer (i.e. system administrator) starvation. 162 * 163 * The lock acquisition rules are as follows: 164 * 165 * SCL_CONFIG 166 * Protects changes to the vdev tree topology, such as vdev 167 * add/remove/attach/detach. Protects the dirty config list 168 * (spa_config_dirty_list) and the set of spares and l2arc devices. 169 * 170 * SCL_STATE 171 * Protects changes to pool state and vdev state, such as vdev 172 * online/offline/fault/degrade/clear. Protects the dirty state list 173 * (spa_state_dirty_list) and global pool state (spa_state). 174 * 175 * SCL_ALLOC 176 * Protects changes to metaslab groups and classes. 177 * Held as reader by metaslab_alloc() and metaslab_claim(). 178 * 179 * SCL_ZIO 180 * Held by bp-level zios (those which have no io_vd upon entry) 181 * to prevent changes to the vdev tree. The bp-level zio implicitly 182 * protects all of its vdev child zios, which do not hold SCL_ZIO. 183 * 184 * SCL_FREE 185 * Protects changes to metaslab groups and classes. 186 * Held as reader by metaslab_free(). SCL_FREE is distinct from 187 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free 188 * blocks in zio_done() while another i/o that holds either 189 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. 190 * 191 * SCL_VDEV 192 * Held as reader to prevent changes to the vdev tree during trivial 193 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the 194 * other locks, and lower than all of them, to ensure that it's safe 195 * to acquire regardless of caller context. 196 * 197 * In addition, the following rules apply: 198 * 199 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. 200 * The lock ordering is SCL_CONFIG > spa_props_lock. 201 * 202 * (b) I/O operations on leaf vdevs. For any zio operation that takes 203 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), 204 * or zio_write_phys() -- the caller must ensure that the config cannot 205 * cannot change in the interim, and that the vdev cannot be reopened. 206 * SCL_STATE as reader suffices for both. 207 * 208 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). 209 * 210 * spa_vdev_enter() Acquire the namespace lock and the config lock 211 * for writing. 212 * 213 * spa_vdev_exit() Release the config lock, wait for all I/O 214 * to complete, sync the updated configs to the 215 * cache, and release the namespace lock. 216 * 217 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). 218 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual 219 * locking is, always, based on spa_namespace_lock and spa_config_lock[]. 220 * 221 * spa_rename() is also implemented within this file since it requires 222 * manipulation of the namespace. 223 */ 224 225 static avl_tree_t spa_namespace_avl; 226 kmutex_t spa_namespace_lock; 227 static kcondvar_t spa_namespace_cv; 228 static int spa_active_count; 229 int spa_max_replication_override = SPA_DVAS_PER_BP; 230 231 static kmutex_t spa_spare_lock; 232 static avl_tree_t spa_spare_avl; 233 static kmutex_t spa_l2cache_lock; 234 static avl_tree_t spa_l2cache_avl; 235 236 kmem_cache_t *spa_buffer_pool; 237 int spa_mode_global; 238 239 #ifdef ZFS_DEBUG 240 /* Everything except dprintf and spa is on by default in debug builds */ 241 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA); 242 #else 243 int zfs_flags = 0; 244 #endif 245 246 /* 247 * zfs_recover can be set to nonzero to attempt to recover from 248 * otherwise-fatal errors, typically caused by on-disk corruption. When 249 * set, calls to zfs_panic_recover() will turn into warning messages. 250 * This should only be used as a last resort, as it typically results 251 * in leaked space, or worse. 252 */ 253 int zfs_recover = 0; 254 255 /* 256 * Expiration time in milliseconds. This value has two meanings. First it is 257 * used to determine when the spa_deadman() logic should fire. By default the 258 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds. 259 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 260 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 261 * in a system panic. 262 */ 263 uint64_t zfs_deadman_synctime_ms = 1000000ULL; 264 265 /* 266 * Check time in milliseconds. This defines the frequency at which we check 267 * for hung I/O. 268 */ 269 uint64_t zfs_deadman_checktime_ms = 5000ULL; 270 271 /* 272 * Override the zfs deadman behavior via /etc/system. By default the 273 * deadman is enabled except on VMware and sparc deployments. 274 */ 275 int zfs_deadman_enabled = -1; 276 277 /* 278 * The worst case is single-sector max-parity RAID-Z blocks, in which 279 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 280 * times the size; so just assume that. Add to this the fact that 281 * we can have up to 3 DVAs per bp, and one more factor of 2 because 282 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, 283 * the worst case is: 284 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 285 */ 286 int spa_asize_inflation = 24; 287 288 /* 289 * ========================================================================== 290 * SPA config locking 291 * ========================================================================== 292 */ 293 static void 294 spa_config_lock_init(spa_t *spa) 295 { 296 for (int i = 0; i < SCL_LOCKS; i++) { 297 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 298 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 299 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 300 refcount_create_untracked(&scl->scl_count); 301 scl->scl_writer = NULL; 302 scl->scl_write_wanted = 0; 303 } 304 } 305 306 static void 307 spa_config_lock_destroy(spa_t *spa) 308 { 309 for (int i = 0; i < SCL_LOCKS; i++) { 310 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 311 mutex_destroy(&scl->scl_lock); 312 cv_destroy(&scl->scl_cv); 313 refcount_destroy(&scl->scl_count); 314 ASSERT(scl->scl_writer == NULL); 315 ASSERT(scl->scl_write_wanted == 0); 316 } 317 } 318 319 int 320 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 321 { 322 for (int i = 0; i < SCL_LOCKS; i++) { 323 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 324 if (!(locks & (1 << i))) 325 continue; 326 mutex_enter(&scl->scl_lock); 327 if (rw == RW_READER) { 328 if (scl->scl_writer || scl->scl_write_wanted) { 329 mutex_exit(&scl->scl_lock); 330 spa_config_exit(spa, locks ^ (1 << i), tag); 331 return (0); 332 } 333 } else { 334 ASSERT(scl->scl_writer != curthread); 335 if (!refcount_is_zero(&scl->scl_count)) { 336 mutex_exit(&scl->scl_lock); 337 spa_config_exit(spa, locks ^ (1 << i), tag); 338 return (0); 339 } 340 scl->scl_writer = curthread; 341 } 342 (void) refcount_add(&scl->scl_count, tag); 343 mutex_exit(&scl->scl_lock); 344 } 345 return (1); 346 } 347 348 void 349 spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) 350 { 351 int wlocks_held = 0; 352 353 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 354 355 for (int i = 0; i < SCL_LOCKS; i++) { 356 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 357 if (scl->scl_writer == curthread) 358 wlocks_held |= (1 << i); 359 if (!(locks & (1 << i))) 360 continue; 361 mutex_enter(&scl->scl_lock); 362 if (rw == RW_READER) { 363 while (scl->scl_writer || scl->scl_write_wanted) { 364 cv_wait(&scl->scl_cv, &scl->scl_lock); 365 } 366 } else { 367 ASSERT(scl->scl_writer != curthread); 368 while (!refcount_is_zero(&scl->scl_count)) { 369 scl->scl_write_wanted++; 370 cv_wait(&scl->scl_cv, &scl->scl_lock); 371 scl->scl_write_wanted--; 372 } 373 scl->scl_writer = curthread; 374 } 375 (void) refcount_add(&scl->scl_count, tag); 376 mutex_exit(&scl->scl_lock); 377 } 378 ASSERT(wlocks_held <= locks); 379 } 380 381 void 382 spa_config_exit(spa_t *spa, int locks, void *tag) 383 { 384 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 385 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 386 if (!(locks & (1 << i))) 387 continue; 388 mutex_enter(&scl->scl_lock); 389 ASSERT(!refcount_is_zero(&scl->scl_count)); 390 if (refcount_remove(&scl->scl_count, tag) == 0) { 391 ASSERT(scl->scl_writer == NULL || 392 scl->scl_writer == curthread); 393 scl->scl_writer = NULL; /* OK in either case */ 394 cv_broadcast(&scl->scl_cv); 395 } 396 mutex_exit(&scl->scl_lock); 397 } 398 } 399 400 int 401 spa_config_held(spa_t *spa, int locks, krw_t rw) 402 { 403 int locks_held = 0; 404 405 for (int i = 0; i < SCL_LOCKS; i++) { 406 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 407 if (!(locks & (1 << i))) 408 continue; 409 if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || 410 (rw == RW_WRITER && scl->scl_writer == curthread)) 411 locks_held |= 1 << i; 412 } 413 414 return (locks_held); 415 } 416 417 /* 418 * ========================================================================== 419 * SPA namespace functions 420 * ========================================================================== 421 */ 422 423 /* 424 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 425 * Returns NULL if no matching spa_t is found. 426 */ 427 spa_t * 428 spa_lookup(const char *name) 429 { 430 static spa_t search; /* spa_t is large; don't allocate on stack */ 431 spa_t *spa; 432 avl_index_t where; 433 char *cp; 434 435 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 436 437 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 438 439 /* 440 * If it's a full dataset name, figure out the pool name and 441 * just use that. 442 */ 443 cp = strpbrk(search.spa_name, "/@#"); 444 if (cp != NULL) 445 *cp = '\0'; 446 447 spa = avl_find(&spa_namespace_avl, &search, &where); 448 449 return (spa); 450 } 451 452 /* 453 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 454 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 455 * looking for potentially hung I/Os. 456 */ 457 void 458 spa_deadman(void *arg) 459 { 460 spa_t *spa = arg; 461 462 /* 463 * Disable the deadman timer if the pool is suspended. 464 */ 465 if (spa_suspended(spa)) { 466 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 467 return; 468 } 469 470 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 471 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 472 ++spa->spa_deadman_calls); 473 if (zfs_deadman_enabled) 474 vdev_deadman(spa->spa_root_vdev); 475 } 476 477 /* 478 * Create an uninitialized spa_t with the given name. Requires 479 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 480 * exist by calling spa_lookup() first. 481 */ 482 spa_t * 483 spa_add(const char *name, nvlist_t *config, const char *altroot) 484 { 485 spa_t *spa; 486 spa_config_dirent_t *dp; 487 cyc_handler_t hdlr; 488 cyc_time_t when; 489 490 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 491 492 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 493 494 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 495 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 496 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 497 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 498 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 499 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 500 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 501 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 502 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 503 mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL); 504 505 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 506 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 507 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 508 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 509 510 for (int t = 0; t < TXG_SIZE; t++) 511 bplist_create(&spa->spa_free_bplist[t]); 512 513 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 514 spa->spa_state = POOL_STATE_UNINITIALIZED; 515 spa->spa_freeze_txg = UINT64_MAX; 516 spa->spa_final_txg = UINT64_MAX; 517 spa->spa_load_max_txg = UINT64_MAX; 518 spa->spa_proc = &p0; 519 spa->spa_proc_state = SPA_PROC_NONE; 520 521 hdlr.cyh_func = spa_deadman; 522 hdlr.cyh_arg = spa; 523 hdlr.cyh_level = CY_LOW_LEVEL; 524 525 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 526 527 /* 528 * This determines how often we need to check for hung I/Os after 529 * the cyclic has already fired. Since checking for hung I/Os is 530 * an expensive operation we don't want to check too frequently. 531 * Instead wait for 5 seconds before checking again. 532 */ 533 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms); 534 when.cyt_when = CY_INFINITY; 535 mutex_enter(&cpu_lock); 536 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when); 537 mutex_exit(&cpu_lock); 538 539 refcount_create(&spa->spa_refcount); 540 spa_config_lock_init(spa); 541 542 avl_add(&spa_namespace_avl, spa); 543 544 /* 545 * Set the alternate root, if there is one. 546 */ 547 if (altroot) { 548 spa->spa_root = spa_strdup(altroot); 549 spa_active_count++; 550 } 551 552 /* 553 * Every pool starts with the default cachefile 554 */ 555 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 556 offsetof(spa_config_dirent_t, scd_link)); 557 558 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 559 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 560 list_insert_head(&spa->spa_config_list, dp); 561 562 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 563 KM_SLEEP) == 0); 564 565 if (config != NULL) { 566 nvlist_t *features; 567 568 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 569 &features) == 0) { 570 VERIFY(nvlist_dup(features, &spa->spa_label_features, 571 0) == 0); 572 } 573 574 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 575 } 576 577 if (spa->spa_label_features == NULL) { 578 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 579 KM_SLEEP) == 0); 580 } 581 582 spa->spa_iokstat = kstat_create("zfs", 0, name, 583 "disk", KSTAT_TYPE_IO, 1, 0); 584 if (spa->spa_iokstat) { 585 spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock; 586 kstat_install(spa->spa_iokstat); 587 } 588 589 spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0); 590 591 /* 592 * As a pool is being created, treat all features as disabled by 593 * setting SPA_FEATURE_DISABLED for all entries in the feature 594 * refcount cache. 595 */ 596 for (int i = 0; i < SPA_FEATURES; i++) { 597 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 598 } 599 600 return (spa); 601 } 602 603 /* 604 * Removes a spa_t from the namespace, freeing up any memory used. Requires 605 * spa_namespace_lock. This is called only after the spa_t has been closed and 606 * deactivated. 607 */ 608 void 609 spa_remove(spa_t *spa) 610 { 611 spa_config_dirent_t *dp; 612 613 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 614 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 615 616 nvlist_free(spa->spa_config_splitting); 617 618 avl_remove(&spa_namespace_avl, spa); 619 cv_broadcast(&spa_namespace_cv); 620 621 if (spa->spa_root) { 622 spa_strfree(spa->spa_root); 623 spa_active_count--; 624 } 625 626 while ((dp = list_head(&spa->spa_config_list)) != NULL) { 627 list_remove(&spa->spa_config_list, dp); 628 if (dp->scd_path != NULL) 629 spa_strfree(dp->scd_path); 630 kmem_free(dp, sizeof (spa_config_dirent_t)); 631 } 632 633 list_destroy(&spa->spa_config_list); 634 635 nvlist_free(spa->spa_label_features); 636 nvlist_free(spa->spa_load_info); 637 spa_config_set(spa, NULL); 638 639 mutex_enter(&cpu_lock); 640 if (spa->spa_deadman_cycid != CYCLIC_NONE) 641 cyclic_remove(spa->spa_deadman_cycid); 642 mutex_exit(&cpu_lock); 643 spa->spa_deadman_cycid = CYCLIC_NONE; 644 645 refcount_destroy(&spa->spa_refcount); 646 647 spa_config_lock_destroy(spa); 648 649 kstat_delete(spa->spa_iokstat); 650 spa->spa_iokstat = NULL; 651 652 for (int t = 0; t < TXG_SIZE; t++) 653 bplist_destroy(&spa->spa_free_bplist[t]); 654 655 cv_destroy(&spa->spa_async_cv); 656 cv_destroy(&spa->spa_proc_cv); 657 cv_destroy(&spa->spa_scrub_io_cv); 658 cv_destroy(&spa->spa_suspend_cv); 659 660 mutex_destroy(&spa->spa_async_lock); 661 mutex_destroy(&spa->spa_errlist_lock); 662 mutex_destroy(&spa->spa_errlog_lock); 663 mutex_destroy(&spa->spa_history_lock); 664 mutex_destroy(&spa->spa_proc_lock); 665 mutex_destroy(&spa->spa_props_lock); 666 mutex_destroy(&spa->spa_scrub_lock); 667 mutex_destroy(&spa->spa_suspend_lock); 668 mutex_destroy(&spa->spa_vdev_top_lock); 669 mutex_destroy(&spa->spa_iokstat_lock); 670 671 kmem_free(spa, sizeof (spa_t)); 672 } 673 674 /* 675 * Given a pool, return the next pool in the namespace, or NULL if there is 676 * none. If 'prev' is NULL, return the first pool. 677 */ 678 spa_t * 679 spa_next(spa_t *prev) 680 { 681 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 682 683 if (prev) 684 return (AVL_NEXT(&spa_namespace_avl, prev)); 685 else 686 return (avl_first(&spa_namespace_avl)); 687 } 688 689 /* 690 * ========================================================================== 691 * SPA refcount functions 692 * ========================================================================== 693 */ 694 695 /* 696 * Add a reference to the given spa_t. Must have at least one reference, or 697 * have the namespace lock held. 698 */ 699 void 700 spa_open_ref(spa_t *spa, void *tag) 701 { 702 ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || 703 MUTEX_HELD(&spa_namespace_lock)); 704 (void) refcount_add(&spa->spa_refcount, tag); 705 } 706 707 /* 708 * Remove a reference to the given spa_t. Must have at least one reference, or 709 * have the namespace lock held. 710 */ 711 void 712 spa_close(spa_t *spa, void *tag) 713 { 714 ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || 715 MUTEX_HELD(&spa_namespace_lock)); 716 (void) refcount_remove(&spa->spa_refcount, tag); 717 } 718 719 /* 720 * Check to see if the spa refcount is zero. Must be called with 721 * spa_namespace_lock held. We really compare against spa_minref, which is the 722 * number of references acquired when opening a pool 723 */ 724 boolean_t 725 spa_refcount_zero(spa_t *spa) 726 { 727 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 728 729 return (refcount_count(&spa->spa_refcount) == spa->spa_minref); 730 } 731 732 /* 733 * ========================================================================== 734 * SPA spare and l2cache tracking 735 * ========================================================================== 736 */ 737 738 /* 739 * Hot spares and cache devices are tracked using the same code below, 740 * for 'auxiliary' devices. 741 */ 742 743 typedef struct spa_aux { 744 uint64_t aux_guid; 745 uint64_t aux_pool; 746 avl_node_t aux_avl; 747 int aux_count; 748 } spa_aux_t; 749 750 static int 751 spa_aux_compare(const void *a, const void *b) 752 { 753 const spa_aux_t *sa = a; 754 const spa_aux_t *sb = b; 755 756 if (sa->aux_guid < sb->aux_guid) 757 return (-1); 758 else if (sa->aux_guid > sb->aux_guid) 759 return (1); 760 else 761 return (0); 762 } 763 764 void 765 spa_aux_add(vdev_t *vd, avl_tree_t *avl) 766 { 767 avl_index_t where; 768 spa_aux_t search; 769 spa_aux_t *aux; 770 771 search.aux_guid = vd->vdev_guid; 772 if ((aux = avl_find(avl, &search, &where)) != NULL) { 773 aux->aux_count++; 774 } else { 775 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 776 aux->aux_guid = vd->vdev_guid; 777 aux->aux_count = 1; 778 avl_insert(avl, aux, where); 779 } 780 } 781 782 void 783 spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 784 { 785 spa_aux_t search; 786 spa_aux_t *aux; 787 avl_index_t where; 788 789 search.aux_guid = vd->vdev_guid; 790 aux = avl_find(avl, &search, &where); 791 792 ASSERT(aux != NULL); 793 794 if (--aux->aux_count == 0) { 795 avl_remove(avl, aux); 796 kmem_free(aux, sizeof (spa_aux_t)); 797 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 798 aux->aux_pool = 0ULL; 799 } 800 } 801 802 boolean_t 803 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 804 { 805 spa_aux_t search, *found; 806 807 search.aux_guid = guid; 808 found = avl_find(avl, &search, NULL); 809 810 if (pool) { 811 if (found) 812 *pool = found->aux_pool; 813 else 814 *pool = 0ULL; 815 } 816 817 if (refcnt) { 818 if (found) 819 *refcnt = found->aux_count; 820 else 821 *refcnt = 0; 822 } 823 824 return (found != NULL); 825 } 826 827 void 828 spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 829 { 830 spa_aux_t search, *found; 831 avl_index_t where; 832 833 search.aux_guid = vd->vdev_guid; 834 found = avl_find(avl, &search, &where); 835 ASSERT(found != NULL); 836 ASSERT(found->aux_pool == 0ULL); 837 838 found->aux_pool = spa_guid(vd->vdev_spa); 839 } 840 841 /* 842 * Spares are tracked globally due to the following constraints: 843 * 844 * - A spare may be part of multiple pools. 845 * - A spare may be added to a pool even if it's actively in use within 846 * another pool. 847 * - A spare in use in any pool can only be the source of a replacement if 848 * the target is a spare in the same pool. 849 * 850 * We keep track of all spares on the system through the use of a reference 851 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 852 * spare, then we bump the reference count in the AVL tree. In addition, we set 853 * the 'vdev_isspare' member to indicate that the device is a spare (active or 854 * inactive). When a spare is made active (used to replace a device in the 855 * pool), we also keep track of which pool its been made a part of. 856 * 857 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 858 * called under the spa_namespace lock as part of vdev reconfiguration. The 859 * separate spare lock exists for the status query path, which does not need to 860 * be completely consistent with respect to other vdev configuration changes. 861 */ 862 863 static int 864 spa_spare_compare(const void *a, const void *b) 865 { 866 return (spa_aux_compare(a, b)); 867 } 868 869 void 870 spa_spare_add(vdev_t *vd) 871 { 872 mutex_enter(&spa_spare_lock); 873 ASSERT(!vd->vdev_isspare); 874 spa_aux_add(vd, &spa_spare_avl); 875 vd->vdev_isspare = B_TRUE; 876 mutex_exit(&spa_spare_lock); 877 } 878 879 void 880 spa_spare_remove(vdev_t *vd) 881 { 882 mutex_enter(&spa_spare_lock); 883 ASSERT(vd->vdev_isspare); 884 spa_aux_remove(vd, &spa_spare_avl); 885 vd->vdev_isspare = B_FALSE; 886 mutex_exit(&spa_spare_lock); 887 } 888 889 boolean_t 890 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 891 { 892 boolean_t found; 893 894 mutex_enter(&spa_spare_lock); 895 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 896 mutex_exit(&spa_spare_lock); 897 898 return (found); 899 } 900 901 void 902 spa_spare_activate(vdev_t *vd) 903 { 904 mutex_enter(&spa_spare_lock); 905 ASSERT(vd->vdev_isspare); 906 spa_aux_activate(vd, &spa_spare_avl); 907 mutex_exit(&spa_spare_lock); 908 } 909 910 /* 911 * Level 2 ARC devices are tracked globally for the same reasons as spares. 912 * Cache devices currently only support one pool per cache device, and so 913 * for these devices the aux reference count is currently unused beyond 1. 914 */ 915 916 static int 917 spa_l2cache_compare(const void *a, const void *b) 918 { 919 return (spa_aux_compare(a, b)); 920 } 921 922 void 923 spa_l2cache_add(vdev_t *vd) 924 { 925 mutex_enter(&spa_l2cache_lock); 926 ASSERT(!vd->vdev_isl2cache); 927 spa_aux_add(vd, &spa_l2cache_avl); 928 vd->vdev_isl2cache = B_TRUE; 929 mutex_exit(&spa_l2cache_lock); 930 } 931 932 void 933 spa_l2cache_remove(vdev_t *vd) 934 { 935 mutex_enter(&spa_l2cache_lock); 936 ASSERT(vd->vdev_isl2cache); 937 spa_aux_remove(vd, &spa_l2cache_avl); 938 vd->vdev_isl2cache = B_FALSE; 939 mutex_exit(&spa_l2cache_lock); 940 } 941 942 boolean_t 943 spa_l2cache_exists(uint64_t guid, uint64_t *pool) 944 { 945 boolean_t found; 946 947 mutex_enter(&spa_l2cache_lock); 948 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 949 mutex_exit(&spa_l2cache_lock); 950 951 return (found); 952 } 953 954 void 955 spa_l2cache_activate(vdev_t *vd) 956 { 957 mutex_enter(&spa_l2cache_lock); 958 ASSERT(vd->vdev_isl2cache); 959 spa_aux_activate(vd, &spa_l2cache_avl); 960 mutex_exit(&spa_l2cache_lock); 961 } 962 963 /* 964 * ========================================================================== 965 * SPA vdev locking 966 * ========================================================================== 967 */ 968 969 /* 970 * Lock the given spa_t for the purpose of adding or removing a vdev. 971 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 972 * It returns the next transaction group for the spa_t. 973 */ 974 uint64_t 975 spa_vdev_enter(spa_t *spa) 976 { 977 mutex_enter(&spa->spa_vdev_top_lock); 978 mutex_enter(&spa_namespace_lock); 979 return (spa_vdev_config_enter(spa)); 980 } 981 982 /* 983 * Internal implementation for spa_vdev_enter(). Used when a vdev 984 * operation requires multiple syncs (i.e. removing a device) while 985 * keeping the spa_namespace_lock held. 986 */ 987 uint64_t 988 spa_vdev_config_enter(spa_t *spa) 989 { 990 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 991 992 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 993 994 return (spa_last_synced_txg(spa) + 1); 995 } 996 997 /* 998 * Used in combination with spa_vdev_config_enter() to allow the syncing 999 * of multiple transactions without releasing the spa_namespace_lock. 1000 */ 1001 void 1002 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) 1003 { 1004 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1005 1006 int config_changed = B_FALSE; 1007 1008 ASSERT(txg > spa_last_synced_txg(spa)); 1009 1010 spa->spa_pending_vdev = NULL; 1011 1012 /* 1013 * Reassess the DTLs. 1014 */ 1015 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); 1016 1017 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1018 config_changed = B_TRUE; 1019 spa->spa_config_generation++; 1020 } 1021 1022 /* 1023 * Verify the metaslab classes. 1024 */ 1025 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1026 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1027 1028 spa_config_exit(spa, SCL_ALL, spa); 1029 1030 /* 1031 * Panic the system if the specified tag requires it. This 1032 * is useful for ensuring that configurations are updated 1033 * transactionally. 1034 */ 1035 if (zio_injection_enabled) 1036 zio_handle_panic_injection(spa, tag, 0); 1037 1038 /* 1039 * Note: this txg_wait_synced() is important because it ensures 1040 * that there won't be more than one config change per txg. 1041 * This allows us to use the txg as the generation number. 1042 */ 1043 if (error == 0) 1044 txg_wait_synced(spa->spa_dsl_pool, txg); 1045 1046 if (vd != NULL) { 1047 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1048 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1049 vdev_free(vd); 1050 spa_config_exit(spa, SCL_ALL, spa); 1051 } 1052 1053 /* 1054 * If the config changed, update the config cache. 1055 */ 1056 if (config_changed) 1057 spa_config_sync(spa, B_FALSE, B_TRUE); 1058 } 1059 1060 /* 1061 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1062 * locking of spa_vdev_enter(), we also want make sure the transactions have 1063 * synced to disk, and then update the global configuration cache with the new 1064 * information. 1065 */ 1066 int 1067 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1068 { 1069 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1070 mutex_exit(&spa_namespace_lock); 1071 mutex_exit(&spa->spa_vdev_top_lock); 1072 1073 return (error); 1074 } 1075 1076 /* 1077 * Lock the given spa_t for the purpose of changing vdev state. 1078 */ 1079 void 1080 spa_vdev_state_enter(spa_t *spa, int oplocks) 1081 { 1082 int locks = SCL_STATE_ALL | oplocks; 1083 1084 /* 1085 * Root pools may need to read of the underlying devfs filesystem 1086 * when opening up a vdev. Unfortunately if we're holding the 1087 * SCL_ZIO lock it will result in a deadlock when we try to issue 1088 * the read from the root filesystem. Instead we "prefetch" 1089 * the associated vnodes that we need prior to opening the 1090 * underlying devices and cache them so that we can prevent 1091 * any I/O when we are doing the actual open. 1092 */ 1093 if (spa_is_root(spa)) { 1094 int low = locks & ~(SCL_ZIO - 1); 1095 int high = locks & ~low; 1096 1097 spa_config_enter(spa, high, spa, RW_WRITER); 1098 vdev_hold(spa->spa_root_vdev); 1099 spa_config_enter(spa, low, spa, RW_WRITER); 1100 } else { 1101 spa_config_enter(spa, locks, spa, RW_WRITER); 1102 } 1103 spa->spa_vdev_locks = locks; 1104 } 1105 1106 int 1107 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1108 { 1109 boolean_t config_changed = B_FALSE; 1110 1111 if (vd != NULL || error == 0) 1112 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev, 1113 0, 0, B_FALSE); 1114 1115 if (vd != NULL) { 1116 vdev_state_dirty(vd->vdev_top); 1117 config_changed = B_TRUE; 1118 spa->spa_config_generation++; 1119 } 1120 1121 if (spa_is_root(spa)) 1122 vdev_rele(spa->spa_root_vdev); 1123 1124 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1125 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1126 1127 /* 1128 * If anything changed, wait for it to sync. This ensures that, 1129 * from the system administrator's perspective, zpool(1M) commands 1130 * are synchronous. This is important for things like zpool offline: 1131 * when the command completes, you expect no further I/O from ZFS. 1132 */ 1133 if (vd != NULL) 1134 txg_wait_synced(spa->spa_dsl_pool, 0); 1135 1136 /* 1137 * If the config changed, update the config cache. 1138 */ 1139 if (config_changed) { 1140 mutex_enter(&spa_namespace_lock); 1141 spa_config_sync(spa, B_FALSE, B_TRUE); 1142 mutex_exit(&spa_namespace_lock); 1143 } 1144 1145 return (error); 1146 } 1147 1148 /* 1149 * ========================================================================== 1150 * Miscellaneous functions 1151 * ========================================================================== 1152 */ 1153 1154 void 1155 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1156 { 1157 if (!nvlist_exists(spa->spa_label_features, feature)) { 1158 fnvlist_add_boolean(spa->spa_label_features, feature); 1159 /* 1160 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1161 * dirty the vdev config because lock SCL_CONFIG is not held. 1162 * Thankfully, in this case we don't need to dirty the config 1163 * because it will be written out anyway when we finish 1164 * creating the pool. 1165 */ 1166 if (tx->tx_txg != TXG_INITIAL) 1167 vdev_config_dirty(spa->spa_root_vdev); 1168 } 1169 } 1170 1171 void 1172 spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1173 { 1174 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1175 vdev_config_dirty(spa->spa_root_vdev); 1176 } 1177 1178 /* 1179 * Rename a spa_t. 1180 */ 1181 int 1182 spa_rename(const char *name, const char *newname) 1183 { 1184 spa_t *spa; 1185 int err; 1186 1187 /* 1188 * Lookup the spa_t and grab the config lock for writing. We need to 1189 * actually open the pool so that we can sync out the necessary labels. 1190 * It's OK to call spa_open() with the namespace lock held because we 1191 * allow recursive calls for other reasons. 1192 */ 1193 mutex_enter(&spa_namespace_lock); 1194 if ((err = spa_open(name, &spa, FTAG)) != 0) { 1195 mutex_exit(&spa_namespace_lock); 1196 return (err); 1197 } 1198 1199 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1200 1201 avl_remove(&spa_namespace_avl, spa); 1202 (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); 1203 avl_add(&spa_namespace_avl, spa); 1204 1205 /* 1206 * Sync all labels to disk with the new names by marking the root vdev 1207 * dirty and waiting for it to sync. It will pick up the new pool name 1208 * during the sync. 1209 */ 1210 vdev_config_dirty(spa->spa_root_vdev); 1211 1212 spa_config_exit(spa, SCL_ALL, FTAG); 1213 1214 txg_wait_synced(spa->spa_dsl_pool, 0); 1215 1216 /* 1217 * Sync the updated config cache. 1218 */ 1219 spa_config_sync(spa, B_FALSE, B_TRUE); 1220 1221 spa_close(spa, FTAG); 1222 1223 mutex_exit(&spa_namespace_lock); 1224 1225 return (0); 1226 } 1227 1228 /* 1229 * Return the spa_t associated with given pool_guid, if it exists. If 1230 * device_guid is non-zero, determine whether the pool exists *and* contains 1231 * a device with the specified device_guid. 1232 */ 1233 spa_t * 1234 spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1235 { 1236 spa_t *spa; 1237 avl_tree_t *t = &spa_namespace_avl; 1238 1239 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1240 1241 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1242 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1243 continue; 1244 if (spa->spa_root_vdev == NULL) 1245 continue; 1246 if (spa_guid(spa) == pool_guid) { 1247 if (device_guid == 0) 1248 break; 1249 1250 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1251 device_guid) != NULL) 1252 break; 1253 1254 /* 1255 * Check any devices we may be in the process of adding. 1256 */ 1257 if (spa->spa_pending_vdev) { 1258 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1259 device_guid) != NULL) 1260 break; 1261 } 1262 } 1263 } 1264 1265 return (spa); 1266 } 1267 1268 /* 1269 * Determine whether a pool with the given pool_guid exists. 1270 */ 1271 boolean_t 1272 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1273 { 1274 return (spa_by_guid(pool_guid, device_guid) != NULL); 1275 } 1276 1277 char * 1278 spa_strdup(const char *s) 1279 { 1280 size_t len; 1281 char *new; 1282 1283 len = strlen(s); 1284 new = kmem_alloc(len + 1, KM_SLEEP); 1285 bcopy(s, new, len); 1286 new[len] = '\0'; 1287 1288 return (new); 1289 } 1290 1291 void 1292 spa_strfree(char *s) 1293 { 1294 kmem_free(s, strlen(s) + 1); 1295 } 1296 1297 uint64_t 1298 spa_get_random(uint64_t range) 1299 { 1300 uint64_t r; 1301 1302 ASSERT(range != 0); 1303 1304 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1305 1306 return (r % range); 1307 } 1308 1309 uint64_t 1310 spa_generate_guid(spa_t *spa) 1311 { 1312 uint64_t guid = spa_get_random(-1ULL); 1313 1314 if (spa != NULL) { 1315 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 1316 guid = spa_get_random(-1ULL); 1317 } else { 1318 while (guid == 0 || spa_guid_exists(guid, 0)) 1319 guid = spa_get_random(-1ULL); 1320 } 1321 1322 return (guid); 1323 } 1324 1325 void 1326 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1327 { 1328 char type[256]; 1329 char *checksum = NULL; 1330 char *compress = NULL; 1331 1332 if (bp != NULL) { 1333 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1334 dmu_object_byteswap_t bswap = 1335 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1336 (void) snprintf(type, sizeof (type), "bswap %s %s", 1337 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1338 "metadata" : "data", 1339 dmu_ot_byteswap[bswap].ob_name); 1340 } else { 1341 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1342 sizeof (type)); 1343 } 1344 checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1345 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1346 } 1347 1348 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1349 compress); 1350 } 1351 1352 void 1353 spa_freeze(spa_t *spa) 1354 { 1355 uint64_t freeze_txg = 0; 1356 1357 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1358 if (spa->spa_freeze_txg == UINT64_MAX) { 1359 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1360 spa->spa_freeze_txg = freeze_txg; 1361 } 1362 spa_config_exit(spa, SCL_ALL, FTAG); 1363 if (freeze_txg != 0) 1364 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1365 } 1366 1367 void 1368 zfs_panic_recover(const char *fmt, ...) 1369 { 1370 va_list adx; 1371 1372 va_start(adx, fmt); 1373 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1374 va_end(adx); 1375 } 1376 1377 /* 1378 * This is a stripped-down version of strtoull, suitable only for converting 1379 * lowercase hexadecimal numbers that don't overflow. 1380 */ 1381 uint64_t 1382 strtonum(const char *str, char **nptr) 1383 { 1384 uint64_t val = 0; 1385 char c; 1386 int digit; 1387 1388 while ((c = *str) != '\0') { 1389 if (c >= '0' && c <= '9') 1390 digit = c - '0'; 1391 else if (c >= 'a' && c <= 'f') 1392 digit = 10 + c - 'a'; 1393 else 1394 break; 1395 1396 val *= 16; 1397 val += digit; 1398 1399 str++; 1400 } 1401 1402 if (nptr) 1403 *nptr = (char *)str; 1404 1405 return (val); 1406 } 1407 1408 /* 1409 * ========================================================================== 1410 * Accessor functions 1411 * ========================================================================== 1412 */ 1413 1414 boolean_t 1415 spa_shutting_down(spa_t *spa) 1416 { 1417 return (spa->spa_async_suspended); 1418 } 1419 1420 dsl_pool_t * 1421 spa_get_dsl(spa_t *spa) 1422 { 1423 return (spa->spa_dsl_pool); 1424 } 1425 1426 boolean_t 1427 spa_is_initializing(spa_t *spa) 1428 { 1429 return (spa->spa_is_initializing); 1430 } 1431 1432 blkptr_t * 1433 spa_get_rootblkptr(spa_t *spa) 1434 { 1435 return (&spa->spa_ubsync.ub_rootbp); 1436 } 1437 1438 void 1439 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1440 { 1441 spa->spa_uberblock.ub_rootbp = *bp; 1442 } 1443 1444 void 1445 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1446 { 1447 if (spa->spa_root == NULL) 1448 buf[0] = '\0'; 1449 else 1450 (void) strncpy(buf, spa->spa_root, buflen); 1451 } 1452 1453 int 1454 spa_sync_pass(spa_t *spa) 1455 { 1456 return (spa->spa_sync_pass); 1457 } 1458 1459 char * 1460 spa_name(spa_t *spa) 1461 { 1462 return (spa->spa_name); 1463 } 1464 1465 uint64_t 1466 spa_guid(spa_t *spa) 1467 { 1468 dsl_pool_t *dp = spa_get_dsl(spa); 1469 uint64_t guid; 1470 1471 /* 1472 * If we fail to parse the config during spa_load(), we can go through 1473 * the error path (which posts an ereport) and end up here with no root 1474 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1475 * this case. 1476 */ 1477 if (spa->spa_root_vdev == NULL) 1478 return (spa->spa_config_guid); 1479 1480 guid = spa->spa_last_synced_guid != 0 ? 1481 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1482 1483 /* 1484 * Return the most recently synced out guid unless we're 1485 * in syncing context. 1486 */ 1487 if (dp && dsl_pool_sync_context(dp)) 1488 return (spa->spa_root_vdev->vdev_guid); 1489 else 1490 return (guid); 1491 } 1492 1493 uint64_t 1494 spa_load_guid(spa_t *spa) 1495 { 1496 /* 1497 * This is a GUID that exists solely as a reference for the 1498 * purposes of the arc. It is generated at load time, and 1499 * is never written to persistent storage. 1500 */ 1501 return (spa->spa_load_guid); 1502 } 1503 1504 uint64_t 1505 spa_last_synced_txg(spa_t *spa) 1506 { 1507 return (spa->spa_ubsync.ub_txg); 1508 } 1509 1510 uint64_t 1511 spa_first_txg(spa_t *spa) 1512 { 1513 return (spa->spa_first_txg); 1514 } 1515 1516 uint64_t 1517 spa_syncing_txg(spa_t *spa) 1518 { 1519 return (spa->spa_syncing_txg); 1520 } 1521 1522 pool_state_t 1523 spa_state(spa_t *spa) 1524 { 1525 return (spa->spa_state); 1526 } 1527 1528 spa_load_state_t 1529 spa_load_state(spa_t *spa) 1530 { 1531 return (spa->spa_load_state); 1532 } 1533 1534 uint64_t 1535 spa_freeze_txg(spa_t *spa) 1536 { 1537 return (spa->spa_freeze_txg); 1538 } 1539 1540 /* ARGSUSED */ 1541 uint64_t 1542 spa_get_asize(spa_t *spa, uint64_t lsize) 1543 { 1544 return (lsize * spa_asize_inflation); 1545 } 1546 1547 uint64_t 1548 spa_get_dspace(spa_t *spa) 1549 { 1550 return (spa->spa_dspace); 1551 } 1552 1553 void 1554 spa_update_dspace(spa_t *spa) 1555 { 1556 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1557 ddt_get_dedup_dspace(spa); 1558 } 1559 1560 /* 1561 * Return the failure mode that has been set to this pool. The default 1562 * behavior will be to block all I/Os when a complete failure occurs. 1563 */ 1564 uint8_t 1565 spa_get_failmode(spa_t *spa) 1566 { 1567 return (spa->spa_failmode); 1568 } 1569 1570 boolean_t 1571 spa_suspended(spa_t *spa) 1572 { 1573 return (spa->spa_suspended); 1574 } 1575 1576 uint64_t 1577 spa_version(spa_t *spa) 1578 { 1579 return (spa->spa_ubsync.ub_version); 1580 } 1581 1582 boolean_t 1583 spa_deflate(spa_t *spa) 1584 { 1585 return (spa->spa_deflate); 1586 } 1587 1588 metaslab_class_t * 1589 spa_normal_class(spa_t *spa) 1590 { 1591 return (spa->spa_normal_class); 1592 } 1593 1594 metaslab_class_t * 1595 spa_log_class(spa_t *spa) 1596 { 1597 return (spa->spa_log_class); 1598 } 1599 1600 int 1601 spa_max_replication(spa_t *spa) 1602 { 1603 /* 1604 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 1605 * handle BPs with more than one DVA allocated. Set our max 1606 * replication level accordingly. 1607 */ 1608 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 1609 return (1); 1610 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 1611 } 1612 1613 int 1614 spa_prev_software_version(spa_t *spa) 1615 { 1616 return (spa->spa_prev_software_version); 1617 } 1618 1619 uint64_t 1620 spa_deadman_synctime(spa_t *spa) 1621 { 1622 return (spa->spa_deadman_synctime); 1623 } 1624 1625 uint64_t 1626 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 1627 { 1628 uint64_t asize = DVA_GET_ASIZE(dva); 1629 uint64_t dsize = asize; 1630 1631 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 1632 1633 if (asize != 0 && spa->spa_deflate) { 1634 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 1635 dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; 1636 } 1637 1638 return (dsize); 1639 } 1640 1641 uint64_t 1642 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 1643 { 1644 uint64_t dsize = 0; 1645 1646 for (int d = 0; d < SPA_DVAS_PER_BP; d++) 1647 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1648 1649 return (dsize); 1650 } 1651 1652 uint64_t 1653 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 1654 { 1655 uint64_t dsize = 0; 1656 1657 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1658 1659 for (int d = 0; d < SPA_DVAS_PER_BP; d++) 1660 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 1661 1662 spa_config_exit(spa, SCL_VDEV, FTAG); 1663 1664 return (dsize); 1665 } 1666 1667 /* 1668 * ========================================================================== 1669 * Initialization and Termination 1670 * ========================================================================== 1671 */ 1672 1673 static int 1674 spa_name_compare(const void *a1, const void *a2) 1675 { 1676 const spa_t *s1 = a1; 1677 const spa_t *s2 = a2; 1678 int s; 1679 1680 s = strcmp(s1->spa_name, s2->spa_name); 1681 if (s > 0) 1682 return (1); 1683 if (s < 0) 1684 return (-1); 1685 return (0); 1686 } 1687 1688 int 1689 spa_busy(void) 1690 { 1691 return (spa_active_count); 1692 } 1693 1694 void 1695 spa_boot_init() 1696 { 1697 spa_config_load(); 1698 } 1699 1700 void 1701 spa_init(int mode) 1702 { 1703 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 1704 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 1705 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 1706 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 1707 1708 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 1709 offsetof(spa_t, spa_avl)); 1710 1711 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 1712 offsetof(spa_aux_t, aux_avl)); 1713 1714 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 1715 offsetof(spa_aux_t, aux_avl)); 1716 1717 spa_mode_global = mode; 1718 1719 #ifdef _KERNEL 1720 spa_arch_init(); 1721 #else 1722 if (spa_mode_global != FREAD && dprintf_find_string("watch")) { 1723 arc_procfd = open("/proc/self/ctl", O_WRONLY); 1724 if (arc_procfd == -1) { 1725 perror("could not enable watchpoints: " 1726 "opening /proc/self/ctl failed: "); 1727 } else { 1728 arc_watch = B_TRUE; 1729 } 1730 } 1731 #endif 1732 1733 refcount_init(); 1734 unique_init(); 1735 range_tree_init(); 1736 zio_init(); 1737 dmu_init(); 1738 zil_init(); 1739 vdev_cache_stat_init(); 1740 zfs_prop_init(); 1741 zpool_prop_init(); 1742 zpool_feature_init(); 1743 spa_config_load(); 1744 l2arc_start(); 1745 } 1746 1747 void 1748 spa_fini(void) 1749 { 1750 l2arc_stop(); 1751 1752 spa_evict_all(); 1753 1754 vdev_cache_stat_fini(); 1755 zil_fini(); 1756 dmu_fini(); 1757 zio_fini(); 1758 range_tree_fini(); 1759 unique_fini(); 1760 refcount_fini(); 1761 1762 avl_destroy(&spa_namespace_avl); 1763 avl_destroy(&spa_spare_avl); 1764 avl_destroy(&spa_l2cache_avl); 1765 1766 cv_destroy(&spa_namespace_cv); 1767 mutex_destroy(&spa_namespace_lock); 1768 mutex_destroy(&spa_spare_lock); 1769 mutex_destroy(&spa_l2cache_lock); 1770 } 1771 1772 /* 1773 * Return whether this pool has slogs. No locking needed. 1774 * It's not a problem if the wrong answer is returned as it's only for 1775 * performance and not correctness 1776 */ 1777 boolean_t 1778 spa_has_slogs(spa_t *spa) 1779 { 1780 return (spa->spa_log_class->mc_rotor != NULL); 1781 } 1782 1783 spa_log_state_t 1784 spa_get_log_state(spa_t *spa) 1785 { 1786 return (spa->spa_log_state); 1787 } 1788 1789 void 1790 spa_set_log_state(spa_t *spa, spa_log_state_t state) 1791 { 1792 spa->spa_log_state = state; 1793 } 1794 1795 boolean_t 1796 spa_is_root(spa_t *spa) 1797 { 1798 return (spa->spa_is_root); 1799 } 1800 1801 boolean_t 1802 spa_writeable(spa_t *spa) 1803 { 1804 return (!!(spa->spa_mode & FWRITE)); 1805 } 1806 1807 int 1808 spa_mode(spa_t *spa) 1809 { 1810 return (spa->spa_mode); 1811 } 1812 1813 uint64_t 1814 spa_bootfs(spa_t *spa) 1815 { 1816 return (spa->spa_bootfs); 1817 } 1818 1819 uint64_t 1820 spa_delegation(spa_t *spa) 1821 { 1822 return (spa->spa_delegation); 1823 } 1824 1825 objset_t * 1826 spa_meta_objset(spa_t *spa) 1827 { 1828 return (spa->spa_meta_objset); 1829 } 1830 1831 enum zio_checksum 1832 spa_dedup_checksum(spa_t *spa) 1833 { 1834 return (spa->spa_dedup_checksum); 1835 } 1836 1837 /* 1838 * Reset pool scan stat per scan pass (or reboot). 1839 */ 1840 void 1841 spa_scan_stat_init(spa_t *spa) 1842 { 1843 /* data not stored on disk */ 1844 spa->spa_scan_pass_start = gethrestime_sec(); 1845 spa->spa_scan_pass_exam = 0; 1846 vdev_scan_stat_init(spa->spa_root_vdev); 1847 } 1848 1849 /* 1850 * Get scan stats for zpool status reports 1851 */ 1852 int 1853 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 1854 { 1855 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 1856 1857 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 1858 return (SET_ERROR(ENOENT)); 1859 bzero(ps, sizeof (pool_scan_stat_t)); 1860 1861 /* data stored on disk */ 1862 ps->pss_func = scn->scn_phys.scn_func; 1863 ps->pss_start_time = scn->scn_phys.scn_start_time; 1864 ps->pss_end_time = scn->scn_phys.scn_end_time; 1865 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 1866 ps->pss_examined = scn->scn_phys.scn_examined; 1867 ps->pss_to_process = scn->scn_phys.scn_to_process; 1868 ps->pss_processed = scn->scn_phys.scn_processed; 1869 ps->pss_errors = scn->scn_phys.scn_errors; 1870 ps->pss_state = scn->scn_phys.scn_state; 1871 1872 /* data not stored on disk */ 1873 ps->pss_pass_start = spa->spa_scan_pass_start; 1874 ps->pss_pass_exam = spa->spa_scan_pass_exam; 1875 1876 return (0); 1877 } 1878 1879 boolean_t 1880 spa_debug_enabled(spa_t *spa) 1881 { 1882 return (spa->spa_debug); 1883 } 1884