/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.
 *	Protects the dirty state list (spa_state_dirty_list) and global
 *	pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.
 * While in this stalled state, all remaining space to free from the
 * error-encountering filesystem is "temporarily leaked".  Set this flag
 * to cause it to ignore the EIO, permanently leak the space from indirect
 * blocks that can not be read, and continue to free everything else that
 * it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
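/*
 * Worked example (illustrative arithmetic only, not a code path in this
 * file): with the default spa_asize_inflation of 24, a 128K logical write
 * is charged a worst-case allocated size of 24 * 128K = 3M by
 * spa_get_worst_case_asize() below, e.g.:
 *
 *	uint64_t worst = spa_get_worst_case_asize(spa, 128 * 1024);
 *	ASSERT3U(worst, ==, 3 * 1024 * 1024);
 */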
/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool.  These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;

int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}
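/*
 * Illustrative sketch (not a function in this file): the lookup/hold pattern
 * implied by the SPA locking comment at the top of this file.  A consumer
 * resolves a pool name to a spa_t under the namespace lock, takes a
 * reference so the spa_t cannot be freed out from under it, and drops the
 * reference when done:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup(name);
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	if (spa != NULL) {
 *		... use the pool ...
 *		spa_close(spa, FTAG);
 *	}
 */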
/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}
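/*
 * Illustrative sketch (not a caller in this file): a typical query against
 * the shared aux trees built on spa_aux_compare().  Import or attach code
 * can ask whether a guid is already known as a spare and, if so, which pool
 * has it active:
 *
 *	uint64_t pool_guid;
 *	int refcnt;
 *	if (spa_spare_exists(vd->vdev_guid, &pool_guid, &refcnt) &&
 *	    pool_guid != 0 && pool_guid != spa_guid(spa)) {
 *		... the device is actively spared into another pool ...
 *	}
 */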
/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a
 * replacement spare, then we bump the reference count in the AVL tree.  In
 * addition, we set the 'vdev_isspare' member to indicate that the device is
 * a spare (active or inactive).  When a spare is made active (used to
 * replace a device in the pool), we also keep track of which pool it's been
 * made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED);
			mutex_exit(&vd->vdev_initialize_lock);
		}

		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
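/*
 * Illustrative sketch (not a function in this file): the enter/exit pairing
 * described above.  A vdev reconfiguration caller typically looks like:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree, dirty the config ...
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 *
 * spa_vdev_exit() releases the config lock, waits for the txg to sync and
 * rewrites the cachefile if the configuration changed, then drops the
 * namespace lock.
 */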
/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is not
		 * held.  Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we
		 * finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	if (range == 1)
		return (0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no
	 * root vdev.  We stash the original pool guid in 'spa_config_guid'
	 * to handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new
		 * device).
		 */
		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
		vdev_t *vd =
		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
		spa_config_exit(spa, SCL_VDEV, FTAG);
	}
}
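/*
 * Worked example (illustrative numbers only) of spa_get_slop_space() above
 * with the default spa_slop_shift of 5 and spa_min_slop of 128MB:
 *
 *	1TB of dspace:   MAX(1TB >> 5, MIN(1TB >> 1, 128MB))  = 32GB of slop
 *	1GB of dspace:   MAX(1GB >> 5, MIN(1GB >> 1, 128MB))  = 128MB of slop
 *	100MB of dspace: MAX(100MB >> 5, MIN(50MB, 128MB))    = 50MB of slop
 *
 * i.e. large pools reserve 1/32 of their space, small pools reserve at
 * least spa_min_slop, and tiny pools never reserve more than half their
 * space.
 */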
/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	zfs_refcount_init();
	unique_init();
	range_tree_init();
	metaslab_alloc_trace_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	metaslab_alloc_trace_fini();
	range_tree_fini();
	unique_fini();
	zfs_refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
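
/*
 * Illustrative sketch, not part of the original file: a userland consumer
 * (for example a libzpool-based test harness; the exact caller is an
 * assumption here) is expected to bracket all SPA usage with spa_init()
 * and spa_fini().  The mode argument becomes spa_mode_global; FREAD |
 * FWRITE is assumed for a read-write consumer:
 *
 *	spa_init(FREAD | FWRITE);
 *	... open or import pools and do work ...
 *	spa_fini();
 */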
/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for 'zpool status' reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}
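
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * userland status reporter calling spa_scan_get_stats() should treat
 * ENOENT as "no scan has ever run on this pool" rather than a failure:
 *
 *	pool_scan_stat_t ps;
 *
 *	if (spa_scan_get_stats(spa, &ps) == 0 &&
 *	    ps.pss_state == DSS_SCANNING) {
 *		(void) printf("examined %llu of %llu bytes\n",
 *		    (u_longlong_t)ps.pss_examined,
 *		    (u_longlong_t)ps.pss_to_examine);
 *	}
 */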
int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

unsigned long
spa_get_hostid(void)
{
	unsigned long myhostid;

#ifdef _KERNEL
	myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so
	 * we can't use zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */

	return (myhostid);
}

/*
 * Returns the txg in which the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == FREAD);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}
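
/*
 * Illustrative sketch, not part of the original file: the async-destroy
 * machinery (assumed here to be a caller in the DSL scan/destroy path)
 * can consult spa_suspend_async_destroy() each txg and simply skip its
 * work while a checkpoint leaves no usable free space:
 *
 *	if (spa_suspend_async_destroy(spa))
 *		return;		(try again in a later txg)
 */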