/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
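 *
 *	For example, a typical consumer might take a hold roughly as
 *	follows (an illustrative sketch, not a verbatim call site):
 *
 *		mutex_enter(&spa_namespace_lock);
 *		spa_t *spa = spa_lookup(name);	(NULL if no such pool)
 *		if (spa != NULL)
 *			spa_open_ref(spa, FTAG);
 *		mutex_exit(&spa_namespace_lock);
 *		... use the spa_t ...
 *		spa_close(spa, FTAG);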
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
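 *
 *	A typical reader-side use (illustrative sketch):
 *
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		... walk or inspect the vdev tree ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);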
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is always based on spa_namespace_lock and spa_config_lock[].
 */
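
/*
 * Illustrative vdev-reconfiguration pattern (a sketch; real call sites
 * add error handling and make their changes between enter and exit):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... add/remove/attach/detach ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */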

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
    ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO. By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";
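
/*
 * On Linux builds these deadman tunables are typically exposed as module
 * parameters and can be adjusted at runtime, e.g. (an illustrative note,
 * not an exhaustive reference):
 *
 *	echo 300000 > /sys/module/zfs/parameters/zfs_deadman_ziotime_ms
 */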

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop). This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS). It also limits the worst-case time to allocate space. If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
static const int spa_allocators = 4;
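
/*
 * Worked example (illustrative): on a 10 TiB pool with the default
 * spa_slop_shift = 5, the raw slop is 10 TiB / 32 = 320 GiB, which is
 * then capped at spa_max_slop (128 GiB). A 1 GiB pool instead gets the
 * 128 MiB spa_min_slop floor; on even smaller pools that floor is
 * itself limited to half the pool size.
 */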
"trusted" : "untrusted", buf); 419 } 420 421 /* 422 * By default dedup and user data indirects land in the special class 423 */ 424 static int zfs_ddt_data_is_special = B_TRUE; 425 static int zfs_user_indirect_is_special = B_TRUE; 426 427 /* 428 * The percentage of special class final space reserved for metadata only. 429 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 430 * let metadata into the class. 431 */ 432 static uint_t zfs_special_class_metadata_reserve_pct = 25; 433 434 /* 435 * ========================================================================== 436 * SPA config locking 437 * ========================================================================== 438 */ 439 static void 440 spa_config_lock_init(spa_t *spa) 441 { 442 for (int i = 0; i < SCL_LOCKS; i++) { 443 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 444 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 445 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 446 scl->scl_writer = NULL; 447 scl->scl_write_wanted = 0; 448 scl->scl_count = 0; 449 } 450 } 451 452 static void 453 spa_config_lock_destroy(spa_t *spa) 454 { 455 for (int i = 0; i < SCL_LOCKS; i++) { 456 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 457 mutex_destroy(&scl->scl_lock); 458 cv_destroy(&scl->scl_cv); 459 ASSERT(scl->scl_writer == NULL); 460 ASSERT(scl->scl_write_wanted == 0); 461 ASSERT(scl->scl_count == 0); 462 } 463 } 464 465 int 466 spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw) 467 { 468 for (int i = 0; i < SCL_LOCKS; i++) { 469 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 470 if (!(locks & (1 << i))) 471 continue; 472 mutex_enter(&scl->scl_lock); 473 if (rw == RW_READER) { 474 if (scl->scl_writer || scl->scl_write_wanted) { 475 mutex_exit(&scl->scl_lock); 476 spa_config_exit(spa, locks & ((1 << i) - 1), 477 tag); 478 return (0); 479 } 480 } else { 481 ASSERT(scl->scl_writer != curthread); 482 if (scl->scl_count != 0) { 483 mutex_exit(&scl->scl_lock); 484 spa_config_exit(spa, locks & ((1 << i) - 1), 485 tag); 486 return (0); 487 } 488 scl->scl_writer = curthread; 489 } 490 scl->scl_count++; 491 mutex_exit(&scl->scl_lock); 492 } 493 return (1); 494 } 495 496 void 497 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) 498 { 499 (void) tag; 500 int wlocks_held = 0; 501 502 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 503 504 for (int i = 0; i < SCL_LOCKS; i++) { 505 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 506 if (scl->scl_writer == curthread) 507 wlocks_held |= (1 << i); 508 if (!(locks & (1 << i))) 509 continue; 510 mutex_enter(&scl->scl_lock); 511 if (rw == RW_READER) { 512 while (scl->scl_writer || scl->scl_write_wanted) { 513 cv_wait(&scl->scl_cv, &scl->scl_lock); 514 } 515 } else { 516 ASSERT(scl->scl_writer != curthread); 517 while (scl->scl_count != 0) { 518 scl->scl_write_wanted++; 519 cv_wait(&scl->scl_cv, &scl->scl_lock); 520 scl->scl_write_wanted--; 521 } 522 scl->scl_writer = curthread; 523 } 524 scl->scl_count++; 525 mutex_exit(&scl->scl_lock); 526 } 527 ASSERT3U(wlocks_held, <=, locks); 528 } 529 530 void 531 spa_config_exit(spa_t *spa, int locks, const void *tag) 532 { 533 (void) tag; 534 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 535 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 536 if (!(locks & (1 << i))) 537 continue; 538 mutex_enter(&scl->scl_lock); 539 ASSERT(scl->scl_count > 0); 540 if (--scl->scl_count == 0) { 541 ASSERT(scl->scl_writer == NULL || 542 scl->scl_writer == curthread); 543 scl->scl_writer = NULL; /* OK in either 

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
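
/*
 * Because spa_lookup() truncates at the first '/', '@' or '#', it also
 * accepts full dataset names, e.g. (illustrative)
 * spa_lookup("tank/home@snap") returns the spa_t for the pool "tank".
 */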

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	spa->spa_alloc_count = spa_allocators;
	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}
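
/*
 * spa_add() and spa_remove() bracket a pool's lifetime in the namespace
 * (an illustrative sketch; both require the namespace lock):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_add(name, config, NULL);
 *	... the pool is activated, used, then closed and deactivated ...
 *	spa_remove(spa);
 *	mutex_exit(&spa_namespace_lock);
 */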

/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}
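
/*
 * Illustrative iteration over every pool in the namespace (a sketch;
 * the namespace lock must be held across the walk):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	for (spa_t *s = spa_next(NULL); s != NULL; s = spa_next(s))
 *		... inspect s ...
 *	mutex_exit(&spa_namespace_lock);
 */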

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released. Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs. The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
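
/*
 * Illustrative query (a sketch): determine whether a device is a known
 * spare and, if so, whether some pool currently has it active:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) && pool != 0)
 *		... the spare is active in the pool with guid 'pool' ...
 */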

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached. When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}
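
/*
 * Illustrative multi-sync pattern (a sketch): operations such as device
 * removal re-enter the config lock between syncs while keeping the
 * namespace lock held throughout:
 *
 *	uint64_t txg = spa_vdev_config_enter(spa);
 *	... first batch of changes ...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
 *	txg = spa_vdev_config_enter(spa);
 *	... second batch ...
 *	spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
 */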

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    const char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
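
/*
 * Illustrative state-change pattern (a sketch, assuming no extra
 * oplocks are needed), as used by operations like vdev online/offline:
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change vdev state ...
 *	return (spa_vdev_state_exit(spa, vd, error));
 */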

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	const char *checksum = NULL;
	const char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
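 *
 * For example (illustrative): zfs_strtonum("ff", NULL) returns 255, and
 * zfs_strtonum("10x", &end) returns 16 with end left pointing at "x".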
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
}

uint32_t
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev. We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc. It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied. The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes. This is used by the
 * DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on this
 * pool we use its value instead, since the write will end up using the whole
 * block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
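
/*
 * Worked example (illustrative): with spa_max_ashift = 12 (4 KiB
 * allocations) and the default spa_asize_inflation of 24, a 512-byte
 * logical write is charged MAX(512, 4096) * 24 = 96 KiB of worst-case
 * allocated space.
 */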
If there are 1844 * no snapshots of the file, the available space should remain 1845 * the same. The old blocks could be freed from the 1846 * non-allocating vdev, but the new blocks must be allocated on 1847 * other (allocating) vdevs. By reserving the entire size of 1848 * the non-allocating vdevs (including allocated space), we 1849 * ensure that there will be enough space on the allocating 1850 * vdevs for this file overwrite to succeed. 1851 * 1852 * Note that the DMU/DSL doesn't actually know or care 1853 * how much space is allocated (it does its own tracking 1854 * of how much space has been logically used). So it 1855 * doesn't matter that the data we are moving may be 1856 * allocated twice (on the old device and the new device). 1857 */ 1858 ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); 1859 spa->spa_dspace -= spa->spa_nonallocating_dspace; 1860 } 1861 } 1862 1863 /* 1864 * Return the failure mode that has been set to this pool. The default 1865 * behavior will be to block all I/Os when a complete failure occurs. 1866 */ 1867 uint64_t 1868 spa_get_failmode(spa_t *spa) 1869 { 1870 return (spa->spa_failmode); 1871 } 1872 1873 boolean_t 1874 spa_suspended(spa_t *spa) 1875 { 1876 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1877 } 1878 1879 uint64_t 1880 spa_version(spa_t *spa) 1881 { 1882 return (spa->spa_ubsync.ub_version); 1883 } 1884 1885 boolean_t 1886 spa_deflate(spa_t *spa) 1887 { 1888 return (spa->spa_deflate); 1889 } 1890 1891 metaslab_class_t * 1892 spa_normal_class(spa_t *spa) 1893 { 1894 return (spa->spa_normal_class); 1895 } 1896 1897 metaslab_class_t * 1898 spa_log_class(spa_t *spa) 1899 { 1900 return (spa->spa_log_class); 1901 } 1902 1903 metaslab_class_t * 1904 spa_embedded_log_class(spa_t *spa) 1905 { 1906 return (spa->spa_embedded_log_class); 1907 } 1908 1909 metaslab_class_t * 1910 spa_special_class(spa_t *spa) 1911 { 1912 return (spa->spa_special_class); 1913 } 1914 1915 metaslab_class_t * 1916 spa_dedup_class(spa_t *spa) 1917 { 1918 return (spa->spa_dedup_class); 1919 } 1920 1921 /* 1922 * Locate an appropriate allocation class 1923 */ 1924 metaslab_class_t * 1925 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1926 uint_t level, uint_t special_smallblk) 1927 { 1928 /* 1929 * ZIL allocations determine their class in zio_alloc_zil(). 1930 */ 1931 ASSERT(objtype != DMU_OT_INTENT_LOG); 1932 1933 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1934 1935 if (DMU_OT_IS_DDT(objtype)) { 1936 if (spa->spa_dedup_class->mc_groups != 0) 1937 return (spa_dedup_class(spa)); 1938 else if (has_special_class && zfs_ddt_data_is_special) 1939 return (spa_special_class(spa)); 1940 else 1941 return (spa_normal_class(spa)); 1942 } 1943 1944 /* Indirect blocks for user data can land in special if allowed */ 1945 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1946 if (has_special_class && zfs_user_indirect_is_special) 1947 return (spa_special_class(spa)); 1948 else 1949 return (spa_normal_class(spa)); 1950 } 1951 1952 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1953 if (has_special_class) 1954 return (spa_special_class(spa)); 1955 else 1956 return (spa_normal_class(spa)); 1957 } 1958 1959 /* 1960 * Allow small file blocks in special class in some cases (like 1961 * for the dRAID vdev feature). But always leave a reserve of 1962 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 
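For example, with the default zfs_special_class_metadata_reserve_pct of 25, small file blocks are placed in the special class only while its allocated space remains below 75% of its capacity.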
1963 */ 1964 if (DMU_OT_IS_FILE(objtype) && 1965 has_special_class && size <= special_smallblk) { 1966 metaslab_class_t *special = spa_special_class(spa); 1967 uint64_t alloc = metaslab_class_get_alloc(special); 1968 uint64_t space = metaslab_class_get_space(special); 1969 uint64_t limit = 1970 (space * (100 - zfs_special_class_metadata_reserve_pct)) 1971 / 100; 1972 1973 if (alloc < limit) 1974 return (special); 1975 } 1976 1977 return (spa_normal_class(spa)); 1978 } 1979 1980 void 1981 spa_evicting_os_register(spa_t *spa, objset_t *os) 1982 { 1983 mutex_enter(&spa->spa_evicting_os_lock); 1984 list_insert_head(&spa->spa_evicting_os_list, os); 1985 mutex_exit(&spa->spa_evicting_os_lock); 1986 } 1987 1988 void 1989 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1990 { 1991 mutex_enter(&spa->spa_evicting_os_lock); 1992 list_remove(&spa->spa_evicting_os_list, os); 1993 cv_broadcast(&spa->spa_evicting_os_cv); 1994 mutex_exit(&spa->spa_evicting_os_lock); 1995 } 1996 1997 void 1998 spa_evicting_os_wait(spa_t *spa) 1999 { 2000 mutex_enter(&spa->spa_evicting_os_lock); 2001 while (!list_is_empty(&spa->spa_evicting_os_list)) 2002 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2003 mutex_exit(&spa->spa_evicting_os_lock); 2004 2005 dmu_buf_user_evict_wait(); 2006 } 2007 2008 int 2009 spa_max_replication(spa_t *spa) 2010 { 2011 /* 2012 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2013 * handle BPs with more than one DVA allocated. Set our max 2014 * replication level accordingly. 2015 */ 2016 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2017 return (1); 2018 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2019 } 2020 2021 int 2022 spa_prev_software_version(spa_t *spa) 2023 { 2024 return (spa->spa_prev_software_version); 2025 } 2026 2027 uint64_t 2028 spa_deadman_synctime(spa_t *spa) 2029 { 2030 return (spa->spa_deadman_synctime); 2031 } 2032 2033 spa_autotrim_t 2034 spa_get_autotrim(spa_t *spa) 2035 { 2036 return (spa->spa_autotrim); 2037 } 2038 2039 uint64_t 2040 spa_deadman_ziotime(spa_t *spa) 2041 { 2042 return (spa->spa_deadman_ziotime); 2043 } 2044 2045 uint64_t 2046 spa_get_deadman_failmode(spa_t *spa) 2047 { 2048 return (spa->spa_deadman_failmode); 2049 } 2050 2051 void 2052 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2053 { 2054 if (strcmp(failmode, "wait") == 0) 2055 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2056 else if (strcmp(failmode, "continue") == 0) 2057 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2058 else if (strcmp(failmode, "panic") == 0) 2059 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2060 else 2061 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2062 } 2063 2064 void 2065 spa_set_deadman_ziotime(hrtime_t ns) 2066 { 2067 spa_t *spa = NULL; 2068 2069 if (spa_mode_global != SPA_MODE_UNINIT) { 2070 mutex_enter(&spa_namespace_lock); 2071 while ((spa = spa_next(spa)) != NULL) 2072 spa->spa_deadman_ziotime = ns; 2073 mutex_exit(&spa_namespace_lock); 2074 } 2075 } 2076 2077 void 2078 spa_set_deadman_synctime(hrtime_t ns) 2079 { 2080 spa_t *spa = NULL; 2081 2082 if (spa_mode_global != SPA_MODE_UNINIT) { 2083 mutex_enter(&spa_namespace_lock); 2084 while ((spa = spa_next(spa)) != NULL) 2085 spa->spa_deadman_synctime = ns; 2086 mutex_exit(&spa_namespace_lock); 2087 } 2088 } 2089 2090 uint64_t 2091 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2092 { 2093 uint64_t asize = DVA_GET_ASIZE(dva); 2094 uint64_t dsize = asize; 2095 2096 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 
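/* * When pool-version deflate is in effect, scale the allocated size by * the top-level vdev's deflate ratio, which is expressed per * SPA_MINBLOCKSHIFT-sized (512-byte) unit. This deflates replication * overhead such as raidz parity, so the returned dsize reflects * deflated space accounting rather than raw allocation. */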
2097 2098 if (asize != 0 && spa->spa_deflate) { 2099 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2100 if (vd != NULL) 2101 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2102 vd->vdev_deflate_ratio; 2103 } 2104 2105 return (dsize); 2106 } 2107 2108 uint64_t 2109 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2110 { 2111 uint64_t dsize = 0; 2112 2113 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2114 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2115 2116 return (dsize); 2117 } 2118 2119 uint64_t 2120 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2121 { 2122 uint64_t dsize = 0; 2123 2124 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2125 2126 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2127 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2128 2129 spa_config_exit(spa, SCL_VDEV, FTAG); 2130 2131 return (dsize); 2132 } 2133 2134 uint64_t 2135 spa_dirty_data(spa_t *spa) 2136 { 2137 return (spa->spa_dsl_pool->dp_dirty_total); 2138 } 2139 2140 /* 2141 * ========================================================================== 2142 * SPA Import Progress Routines 2143 * ========================================================================== 2144 */ 2145 2146 typedef struct spa_import_progress { 2147 uint64_t pool_guid; /* unique id for updates */ 2148 char *pool_name; 2149 spa_load_state_t spa_load_state; 2150 uint64_t mmp_sec_remaining; /* MMP activity check */ 2151 uint64_t spa_load_max_txg; /* rewind txg */ 2152 procfs_list_node_t smh_node; 2153 } spa_import_progress_t; 2154 2155 spa_history_list_t *spa_import_progress_list = NULL; 2156 2157 static int 2158 spa_import_progress_show_header(struct seq_file *f) 2159 { 2160 seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2161 "load_state", "multihost_secs", "max_txg", 2162 "pool_name"); 2163 return (0); 2164 } 2165 2166 static int 2167 spa_import_progress_show(struct seq_file *f, void *data) 2168 { 2169 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2170 2171 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2172 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2173 (u_longlong_t)sip->mmp_sec_remaining, 2174 (u_longlong_t)sip->spa_load_max_txg, 2175 (sip->pool_name ? 
sip->pool_name : "-")); 2176 2177 return (0); 2178 } 2179 2180 /* Remove oldest elements from list until there are no more than 'size' left */ 2181 static void 2182 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2183 { 2184 spa_import_progress_t *sip; 2185 while (shl->size > size) { 2186 sip = list_remove_head(&shl->procfs_list.pl_list); 2187 if (sip->pool_name) 2188 spa_strfree(sip->pool_name); 2189 kmem_free(sip, sizeof (spa_import_progress_t)); 2190 shl->size--; 2191 } 2192 2193 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2194 } 2195 2196 static void 2197 spa_import_progress_init(void) 2198 { 2199 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2200 KM_SLEEP); 2201 2202 spa_import_progress_list->size = 0; 2203 2204 spa_import_progress_list->procfs_list.pl_private = 2205 spa_import_progress_list; 2206 2207 procfs_list_install("zfs", 2208 NULL, 2209 "import_progress", 2210 0644, 2211 &spa_import_progress_list->procfs_list, 2212 spa_import_progress_show, 2213 spa_import_progress_show_header, 2214 NULL, 2215 offsetof(spa_import_progress_t, smh_node)); 2216 } 2217 2218 static void 2219 spa_import_progress_destroy(void) 2220 { 2221 spa_history_list_t *shl = spa_import_progress_list; 2222 procfs_list_uninstall(&shl->procfs_list); 2223 spa_import_progress_truncate(shl, 0); 2224 procfs_list_destroy(&shl->procfs_list); 2225 kmem_free(shl, sizeof (spa_history_list_t)); 2226 } 2227 2228 int 2229 spa_import_progress_set_state(uint64_t pool_guid, 2230 spa_load_state_t load_state) 2231 { 2232 spa_history_list_t *shl = spa_import_progress_list; 2233 spa_import_progress_t *sip; 2234 int error = ENOENT; 2235 2236 if (shl->size == 0) 2237 return (0); 2238 2239 mutex_enter(&shl->procfs_list.pl_lock); 2240 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2241 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2242 if (sip->pool_guid == pool_guid) { 2243 sip->spa_load_state = load_state; 2244 error = 0; 2245 break; 2246 } 2247 } 2248 mutex_exit(&shl->procfs_list.pl_lock); 2249 2250 return (error); 2251 } 2252 2253 int 2254 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2255 { 2256 spa_history_list_t *shl = spa_import_progress_list; 2257 spa_import_progress_t *sip; 2258 int error = ENOENT; 2259 2260 if (shl->size == 0) 2261 return (0); 2262 2263 mutex_enter(&shl->procfs_list.pl_lock); 2264 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2265 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2266 if (sip->pool_guid == pool_guid) { 2267 sip->spa_load_max_txg = load_max_txg; 2268 error = 0; 2269 break; 2270 } 2271 } 2272 mutex_exit(&shl->procfs_list.pl_lock); 2273 2274 return (error); 2275 } 2276 2277 int 2278 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2279 uint64_t mmp_sec_remaining) 2280 { 2281 spa_history_list_t *shl = spa_import_progress_list; 2282 spa_import_progress_t *sip; 2283 int error = ENOENT; 2284 2285 if (shl->size == 0) 2286 return (0); 2287 2288 mutex_enter(&shl->procfs_list.pl_lock); 2289 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2290 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2291 if (sip->pool_guid == pool_guid) { 2292 sip->mmp_sec_remaining = mmp_sec_remaining; 2293 error = 0; 2294 break; 2295 } 2296 } 2297 mutex_exit(&shl->procfs_list.pl_lock); 2298 2299 return (error); 2300 } 2301 2302 /* 2303 * A new import is in progress, add an entry. 
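Entries are updated as the import proceeds via the spa_import_progress_set_*() setters above, and are removed by spa_import_progress_remove() when the import completes or fails.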
2304 */ 2305 void 2306 spa_import_progress_add(spa_t *spa) 2307 { 2308 spa_history_list_t *shl = spa_import_progress_list; 2309 spa_import_progress_t *sip; 2310 const char *poolname = NULL; 2311 2312 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2313 sip->pool_guid = spa_guid(spa); 2314 2315 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2316 &poolname); 2317 if (poolname == NULL) 2318 poolname = spa_name(spa); 2319 sip->pool_name = spa_strdup(poolname); 2320 sip->spa_load_state = spa_load_state(spa); 2321 2322 mutex_enter(&shl->procfs_list.pl_lock); 2323 procfs_list_add(&shl->procfs_list, sip); 2324 shl->size++; 2325 mutex_exit(&shl->procfs_list.pl_lock); 2326 } 2327 2328 void 2329 spa_import_progress_remove(uint64_t pool_guid) 2330 { 2331 spa_history_list_t *shl = spa_import_progress_list; 2332 spa_import_progress_t *sip; 2333 2334 mutex_enter(&shl->procfs_list.pl_lock); 2335 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2336 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2337 if (sip->pool_guid == pool_guid) { 2338 if (sip->pool_name) 2339 spa_strfree(sip->pool_name); 2340 list_remove(&shl->procfs_list.pl_list, sip); 2341 shl->size--; 2342 kmem_free(sip, sizeof (spa_import_progress_t)); 2343 break; 2344 } 2345 } 2346 mutex_exit(&shl->procfs_list.pl_lock); 2347 } 2348 2349 /* 2350 * ========================================================================== 2351 * Initialization and Termination 2352 * ========================================================================== 2353 */ 2354 2355 static int 2356 spa_name_compare(const void *a1, const void *a2) 2357 { 2358 const spa_t *s1 = a1; 2359 const spa_t *s2 = a2; 2360 int s; 2361 2362 s = strcmp(s1->spa_name, s2->spa_name); 2363 2364 return (TREE_ISIGN(s)); 2365 } 2366 2367 void 2368 spa_boot_init(void) 2369 { 2370 spa_config_load(); 2371 } 2372 2373 void 2374 spa_init(spa_mode_t mode) 2375 { 2376 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2377 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2378 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2379 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2380 2381 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2382 offsetof(spa_t, spa_avl)); 2383 2384 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2385 offsetof(spa_aux_t, aux_avl)); 2386 2387 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2388 offsetof(spa_aux_t, aux_avl)); 2389 2390 spa_mode_global = mode; 2391 2392 #ifndef _KERNEL 2393 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2394 struct sigaction sa; 2395 2396 sa.sa_flags = SA_SIGINFO; 2397 sigemptyset(&sa.sa_mask); 2398 sa.sa_sigaction = arc_buf_sigsegv; 2399 2400 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2401 perror("could not enable watchpoints: " 2402 "sigaction(SIGSEGV, ...) 
= "); 2403 } else { 2404 arc_watch = B_TRUE; 2405 } 2406 } 2407 #endif 2408 2409 fm_init(); 2410 zfs_refcount_init(); 2411 unique_init(); 2412 zfs_btree_init(); 2413 metaslab_stat_init(); 2414 brt_init(); 2415 ddt_init(); 2416 zio_init(); 2417 dmu_init(); 2418 zil_init(); 2419 vdev_cache_stat_init(); 2420 vdev_mirror_stat_init(); 2421 vdev_raidz_math_init(); 2422 vdev_file_init(); 2423 zfs_prop_init(); 2424 chksum_init(); 2425 zpool_prop_init(); 2426 zpool_feature_init(); 2427 spa_config_load(); 2428 vdev_prop_init(); 2429 l2arc_start(); 2430 scan_init(); 2431 qat_init(); 2432 spa_import_progress_init(); 2433 } 2434 2435 void 2436 spa_fini(void) 2437 { 2438 l2arc_stop(); 2439 2440 spa_evict_all(); 2441 2442 vdev_file_fini(); 2443 vdev_cache_stat_fini(); 2444 vdev_mirror_stat_fini(); 2445 vdev_raidz_math_fini(); 2446 chksum_fini(); 2447 zil_fini(); 2448 dmu_fini(); 2449 zio_fini(); 2450 ddt_fini(); 2451 brt_fini(); 2452 metaslab_stat_fini(); 2453 zfs_btree_fini(); 2454 unique_fini(); 2455 zfs_refcount_fini(); 2456 fm_fini(); 2457 scan_fini(); 2458 qat_fini(); 2459 spa_import_progress_destroy(); 2460 2461 avl_destroy(&spa_namespace_avl); 2462 avl_destroy(&spa_spare_avl); 2463 avl_destroy(&spa_l2cache_avl); 2464 2465 cv_destroy(&spa_namespace_cv); 2466 mutex_destroy(&spa_namespace_lock); 2467 mutex_destroy(&spa_spare_lock); 2468 mutex_destroy(&spa_l2cache_lock); 2469 } 2470 2471 /* 2472 * Return whether this pool has a dedicated slog device. No locking needed. 2473 * It's not a problem if the wrong answer is returned as it's only for 2474 * performance and not correctness. 2475 */ 2476 boolean_t 2477 spa_has_slogs(spa_t *spa) 2478 { 2479 return (spa->spa_log_class->mc_groups != 0); 2480 } 2481 2482 spa_log_state_t 2483 spa_get_log_state(spa_t *spa) 2484 { 2485 return (spa->spa_log_state); 2486 } 2487 2488 void 2489 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2490 { 2491 spa->spa_log_state = state; 2492 } 2493 2494 boolean_t 2495 spa_is_root(spa_t *spa) 2496 { 2497 return (spa->spa_is_root); 2498 } 2499 2500 boolean_t 2501 spa_writeable(spa_t *spa) 2502 { 2503 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2504 } 2505 2506 /* 2507 * Returns true if there is a pending sync task in any of the current 2508 * syncing txg, the current quiescing txg, or the current open txg. 2509 */ 2510 boolean_t 2511 spa_has_pending_synctask(spa_t *spa) 2512 { 2513 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2514 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2515 } 2516 2517 spa_mode_t 2518 spa_mode(spa_t *spa) 2519 { 2520 return (spa->spa_mode); 2521 } 2522 2523 uint64_t 2524 spa_bootfs(spa_t *spa) 2525 { 2526 return (spa->spa_bootfs); 2527 } 2528 2529 uint64_t 2530 spa_delegation(spa_t *spa) 2531 { 2532 return (spa->spa_delegation); 2533 } 2534 2535 objset_t * 2536 spa_meta_objset(spa_t *spa) 2537 { 2538 return (spa->spa_meta_objset); 2539 } 2540 2541 enum zio_checksum 2542 spa_dedup_checksum(spa_t *spa) 2543 { 2544 return (spa->spa_dedup_checksum); 2545 } 2546 2547 /* 2548 * Reset pool scan stat per scan pass (or reboot). 
2549 */ 2550 void 2551 spa_scan_stat_init(spa_t *spa) 2552 { 2553 /* data not stored on disk */ 2554 spa->spa_scan_pass_start = gethrestime_sec(); 2555 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2556 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2557 else 2558 spa->spa_scan_pass_scrub_pause = 0; 2559 spa->spa_scan_pass_scrub_spent_paused = 0; 2560 spa->spa_scan_pass_exam = 0; 2561 spa->spa_scan_pass_issued = 0; 2562 } 2563 2564 /* 2565 * Get scan stats for zpool status reports 2566 */ 2567 int 2568 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2569 { 2570 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2571 2572 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 2573 return (SET_ERROR(ENOENT)); 2574 memset(ps, 0, sizeof (pool_scan_stat_t)); 2575 2576 /* data stored on disk */ 2577 ps->pss_func = scn->scn_phys.scn_func; 2578 ps->pss_state = scn->scn_phys.scn_state; 2579 ps->pss_start_time = scn->scn_phys.scn_start_time; 2580 ps->pss_end_time = scn->scn_phys.scn_end_time; 2581 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2582 ps->pss_examined = scn->scn_phys.scn_examined; 2583 ps->pss_to_process = scn->scn_phys.scn_to_process; 2584 ps->pss_processed = scn->scn_phys.scn_processed; 2585 ps->pss_errors = scn->scn_phys.scn_errors; 2586 2587 /* data not stored on disk */ 2588 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2589 ps->pss_pass_start = spa->spa_scan_pass_start; 2590 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2591 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2592 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2593 ps->pss_issued = 2594 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2595 2596 return (0); 2597 } 2598 2599 int 2600 spa_maxblocksize(spa_t *spa) 2601 { 2602 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2603 return (SPA_MAXBLOCKSIZE); 2604 else 2605 return (SPA_OLD_MAXBLOCKSIZE); 2606 } 2607 2608 2609 /* 2610 * Returns the txg that the last device removal completed. No indirect mappings 2611 * have been added since this txg. 2612 */ 2613 uint64_t 2614 spa_get_last_removal_txg(spa_t *spa) 2615 { 2616 uint64_t vdevid; 2617 uint64_t ret = -1ULL; 2618 2619 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2620 /* 2621 * sr_prev_indirect_vdev is only modified while holding all the 2622 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2623 * examining it. 2624 */ 2625 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2626 2627 while (vdevid != -1ULL) { 2628 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2629 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2630 2631 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2632 2633 /* 2634 * If the removal did not remap any data, we don't care. 2635 */ 2636 if (vdev_indirect_births_count(vib) != 0) { 2637 ret = vdev_indirect_births_last_entry_txg(vib); 2638 break; 2639 } 2640 2641 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2642 } 2643 spa_config_exit(spa, SCL_VDEV, FTAG); 2644 2645 IMPLY(ret != -1ULL, 2646 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2647 2648 return (ret); 2649 } 2650 2651 int 2652 spa_maxdnodesize(spa_t *spa) 2653 { 2654 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2655 return (DNODE_MAX_SIZE); 2656 else 2657 return (DNODE_MIN_SIZE); 2658 } 2659 2660 boolean_t 2661 spa_multihost(spa_t *spa) 2662 { 2663 return (spa->spa_multihost ? 
B_TRUE : B_FALSE); 2664 } 2665 2666 uint32_t 2667 spa_get_hostid(spa_t *spa) 2668 { 2669 return (spa->spa_hostid); 2670 } 2671 2672 boolean_t 2673 spa_trust_config(spa_t *spa) 2674 { 2675 return (spa->spa_trust_config); 2676 } 2677 2678 uint64_t 2679 spa_missing_tvds_allowed(spa_t *spa) 2680 { 2681 return (spa->spa_missing_tvds_allowed); 2682 } 2683 2684 space_map_t * 2685 spa_syncing_log_sm(spa_t *spa) 2686 { 2687 return (spa->spa_syncing_log_sm); 2688 } 2689 2690 void 2691 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2692 { 2693 spa->spa_missing_tvds = missing; 2694 } 2695 2696 /* 2697 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2698 */ 2699 const char * 2700 spa_state_to_name(spa_t *spa) 2701 { 2702 ASSERT3P(spa, !=, NULL); 2703 2704 /* 2705 * it is possible for the spa to exist, without root vdev 2706 * as the spa transitions during import/export 2707 */ 2708 vdev_t *rvd = spa->spa_root_vdev; 2709 if (rvd == NULL) { 2710 return ("TRANSITIONING"); 2711 } 2712 vdev_state_t state = rvd->vdev_state; 2713 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2714 2715 if (spa_suspended(spa) && 2716 (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)) 2717 return ("SUSPENDED"); 2718 2719 switch (state) { 2720 case VDEV_STATE_CLOSED: 2721 case VDEV_STATE_OFFLINE: 2722 return ("OFFLINE"); 2723 case VDEV_STATE_REMOVED: 2724 return ("REMOVED"); 2725 case VDEV_STATE_CANT_OPEN: 2726 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2727 return ("FAULTED"); 2728 else if (aux == VDEV_AUX_SPLIT_POOL) 2729 return ("SPLIT"); 2730 else 2731 return ("UNAVAIL"); 2732 case VDEV_STATE_FAULTED: 2733 return ("FAULTED"); 2734 case VDEV_STATE_DEGRADED: 2735 return ("DEGRADED"); 2736 case VDEV_STATE_HEALTHY: 2737 return ("ONLINE"); 2738 default: 2739 break; 2740 } 2741 2742 return ("UNKNOWN"); 2743 } 2744 2745 boolean_t 2746 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2747 { 2748 vdev_t *rvd = spa->spa_root_vdev; 2749 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2750 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2751 return (B_FALSE); 2752 } 2753 return (B_TRUE); 2754 } 2755 2756 boolean_t 2757 spa_has_checkpoint(spa_t *spa) 2758 { 2759 return (spa->spa_checkpoint_txg != 0); 2760 } 2761 2762 boolean_t 2763 spa_importing_readonly_checkpoint(spa_t *spa) 2764 { 2765 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2766 spa->spa_mode == SPA_MODE_READ); 2767 } 2768 2769 uint64_t 2770 spa_min_claim_txg(spa_t *spa) 2771 { 2772 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2773 2774 if (checkpoint_txg != 0) 2775 return (checkpoint_txg + 1); 2776 2777 return (spa->spa_first_txg); 2778 } 2779 2780 /* 2781 * If there is a checkpoint, async destroys may consume more space from 2782 * the pool instead of freeing it. In an attempt to save the pool from 2783 * getting suspended when it is about to run out of space, we stop 2784 * processing async destroys. 2785 */ 2786 boolean_t 2787 spa_suspend_async_destroy(spa_t *spa) 2788 { 2789 dsl_pool_t *dp = spa_get_dsl(spa); 2790 2791 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2792 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2793 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2794 uint64_t avail = (unreserved > used) ? 
(unreserved - used) : 0; 2795 2796 if (spa_has_checkpoint(spa) && avail == 0) 2797 return (B_TRUE); 2798 2799 return (B_FALSE); 2800 } 2801 2802 #if defined(_KERNEL) 2803 2804 int 2805 param_set_deadman_failmode_common(const char *val) 2806 { 2807 spa_t *spa = NULL; 2808 char *p; 2809 2810 if (val == NULL) 2811 return (SET_ERROR(EINVAL)); 2812 2813 if ((p = strchr(val, '\n')) != NULL) 2814 *p = '\0'; 2815 2816 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2817 strcmp(val, "panic")) 2818 return (SET_ERROR(EINVAL)); 2819 2820 if (spa_mode_global != SPA_MODE_UNINIT) { 2821 mutex_enter(&spa_namespace_lock); 2822 while ((spa = spa_next(spa)) != NULL) 2823 spa_set_deadman_failmode(spa, val); 2824 mutex_exit(&spa_namespace_lock); 2825 } 2826 2827 return (0); 2828 } 2829 #endif 2830 2831 /* Namespace manipulation */ 2832 EXPORT_SYMBOL(spa_lookup); 2833 EXPORT_SYMBOL(spa_add); 2834 EXPORT_SYMBOL(spa_remove); 2835 EXPORT_SYMBOL(spa_next); 2836 2837 /* Refcount functions */ 2838 EXPORT_SYMBOL(spa_open_ref); 2839 EXPORT_SYMBOL(spa_close); 2840 EXPORT_SYMBOL(spa_refcount_zero); 2841 2842 /* Pool configuration lock */ 2843 EXPORT_SYMBOL(spa_config_tryenter); 2844 EXPORT_SYMBOL(spa_config_enter); 2845 EXPORT_SYMBOL(spa_config_exit); 2846 EXPORT_SYMBOL(spa_config_held); 2847 2848 /* Pool vdev add/remove lock */ 2849 EXPORT_SYMBOL(spa_vdev_enter); 2850 EXPORT_SYMBOL(spa_vdev_exit); 2851 2852 /* Pool vdev state change lock */ 2853 EXPORT_SYMBOL(spa_vdev_state_enter); 2854 EXPORT_SYMBOL(spa_vdev_state_exit); 2855 2856 /* Accessor functions */ 2857 EXPORT_SYMBOL(spa_shutting_down); 2858 EXPORT_SYMBOL(spa_get_dsl); 2859 EXPORT_SYMBOL(spa_get_rootblkptr); 2860 EXPORT_SYMBOL(spa_set_rootblkptr); 2861 EXPORT_SYMBOL(spa_altroot); 2862 EXPORT_SYMBOL(spa_sync_pass); 2863 EXPORT_SYMBOL(spa_name); 2864 EXPORT_SYMBOL(spa_guid); 2865 EXPORT_SYMBOL(spa_last_synced_txg); 2866 EXPORT_SYMBOL(spa_first_txg); 2867 EXPORT_SYMBOL(spa_syncing_txg); 2868 EXPORT_SYMBOL(spa_version); 2869 EXPORT_SYMBOL(spa_state); 2870 EXPORT_SYMBOL(spa_load_state); 2871 EXPORT_SYMBOL(spa_freeze_txg); 2872 EXPORT_SYMBOL(spa_get_dspace); 2873 EXPORT_SYMBOL(spa_update_dspace); 2874 EXPORT_SYMBOL(spa_deflate); 2875 EXPORT_SYMBOL(spa_normal_class); 2876 EXPORT_SYMBOL(spa_log_class); 2877 EXPORT_SYMBOL(spa_special_class); 2878 EXPORT_SYMBOL(spa_preferred_class); 2879 EXPORT_SYMBOL(spa_max_replication); 2880 EXPORT_SYMBOL(spa_prev_software_version); 2881 EXPORT_SYMBOL(spa_get_failmode); 2882 EXPORT_SYMBOL(spa_suspended); 2883 EXPORT_SYMBOL(spa_bootfs); 2884 EXPORT_SYMBOL(spa_delegation); 2885 EXPORT_SYMBOL(spa_meta_objset); 2886 EXPORT_SYMBOL(spa_maxblocksize); 2887 EXPORT_SYMBOL(spa_maxdnodesize); 2888 2889 /* Miscellaneous support routines */ 2890 EXPORT_SYMBOL(spa_guid_exists); 2891 EXPORT_SYMBOL(spa_strdup); 2892 EXPORT_SYMBOL(spa_strfree); 2893 EXPORT_SYMBOL(spa_generate_guid); 2894 EXPORT_SYMBOL(snprintf_blkptr); 2895 EXPORT_SYMBOL(spa_freeze); 2896 EXPORT_SYMBOL(spa_upgrade); 2897 EXPORT_SYMBOL(spa_evict_all); 2898 EXPORT_SYMBOL(spa_lookup_by_guid); 2899 EXPORT_SYMBOL(spa_has_spare); 2900 EXPORT_SYMBOL(dva_get_dsize_sync); 2901 EXPORT_SYMBOL(bp_get_dsize_sync); 2902 EXPORT_SYMBOL(bp_get_dsize); 2903 EXPORT_SYMBOL(spa_has_slogs); 2904 EXPORT_SYMBOL(spa_is_root); 2905 EXPORT_SYMBOL(spa_writeable); 2906 EXPORT_SYMBOL(spa_mode); 2907 EXPORT_SYMBOL(spa_namespace_lock); 2908 EXPORT_SYMBOL(spa_trust_config); 2909 EXPORT_SYMBOL(spa_missing_tvds_allowed); 2910 EXPORT_SYMBOL(spa_set_missing_tvds); 2911 EXPORT_SYMBOL(spa_state_to_name); 
2912 EXPORT_SYMBOL(spa_importing_readonly_checkpoint); 2913 EXPORT_SYMBOL(spa_min_claim_txg); 2914 EXPORT_SYMBOL(spa_suspend_async_destroy); 2915 EXPORT_SYMBOL(spa_has_checkpoint); 2916 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); 2917 2918 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, 2919 "Set additional debugging flags"); 2920 2921 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, 2922 "Set to attempt to recover from fatal errors"); 2923 2924 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, 2925 "Set to ignore I/O errors during free and permanently leak the space"); 2926 2927 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW, 2928 "Dead I/O check interval in milliseconds"); 2929 2930 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, 2931 "Enable deadman timer"); 2932 2933 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, 2934 "SPA size estimate multiplication factor"); 2935 2936 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, 2937 "Place DDT data into the special class"); 2938 2939 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW, 2940 "Place user data indirect blocks into the special class"); 2941 2942 /* BEGIN CSTYLED */ 2943 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode, 2944 param_set_deadman_failmode, param_get_charp, ZMOD_RW, 2945 "Failmode for deadman timer"); 2946 2947 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms, 2948 param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW, 2949 "Pool sync expiration time in milliseconds"); 2950 2951 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, 2952 param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW, 2953 "I/O expiration time in milliseconds"); 2954 2955 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW, 2956 "Small file blocks in special vdevs depend on this much " 2957 "free space being available"); 2958 /* END CSTYLED */ 2959 2960 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, 2961 param_get_uint, ZMOD_RW, "Reserved free space in pool"); 2962
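/* * Illustrative sketch (not part of this file): writes to the * zfs_deadman_failmode module parameter above are funneled through * param_set_deadman_failmode_common(), which validates the string and * then applies it to every imported pool under spa_namespace_lock. * For example, on Linux, * * echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode * * has the same effect as calling spa_set_deadman_failmode(spa, * "continue") on each spa_t, while an unrecognized string is rejected * with EINVAL. */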