/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2023, Klara Inc.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.
Internally we check against spa_minref, but 98 * present the image of a zero/non-zero value to consumers. 99 * 100 * spa_config_lock[] (per-spa array of rwlocks) 101 * 102 * This protects the spa_t from config changes, and must be held in 103 * the following circumstances: 104 * 105 * - RW_READER to perform I/O to the spa 106 * - RW_WRITER to change the vdev config 107 * 108 * The locking order is fairly straightforward: 109 * 110 * spa_namespace_lock -> spa_refcount 111 * 112 * The namespace lock must be acquired to increase the refcount from 0 113 * or to check if it is zero. 114 * 115 * spa_refcount -> spa_config_lock[] 116 * 117 * There must be at least one valid reference on the spa_t to acquire 118 * the config lock. 119 * 120 * spa_namespace_lock -> spa_config_lock[] 121 * 122 * The namespace lock must always be taken before the config lock. 123 * 124 * 125 * The spa_namespace_lock can be acquired directly and is globally visible. 126 * 127 * The namespace is manipulated using the following functions, all of which 128 * require the spa_namespace_lock to be held. 129 * 130 * spa_lookup() Lookup a spa_t by name. 131 * 132 * spa_add() Create a new spa_t in the namespace. 133 * 134 * spa_remove() Remove a spa_t from the namespace. This also 135 * frees up any memory associated with the spa_t. 136 * 137 * spa_next() Returns the next spa_t in the system, or the 138 * first if NULL is passed. 139 * 140 * spa_evict_all() Shutdown and remove all spa_t structures in 141 * the system. 142 * 143 * spa_guid_exists() Determine whether a pool/device guid exists. 144 * 145 * The spa_refcount is manipulated using the following functions: 146 * 147 * spa_open_ref() Adds a reference to the given spa_t. Must be 148 * called with spa_namespace_lock held if the 149 * refcount is currently zero. 150 * 151 * spa_close() Remove a reference from the spa_t. This will 152 * not free the spa_t or remove it from the 153 * namespace. No locking is required. 154 * 155 * spa_refcount_zero() Returns true if the refcount is currently 156 * zero. Must be called with spa_namespace_lock 157 * held. 158 * 159 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 160 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 161 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 162 * 163 * To read the configuration, it suffices to hold one of these locks as reader. 164 * To modify the configuration, you must hold all locks as writer. To modify 165 * vdev state without altering the vdev tree's topology (e.g. online/offline), 166 * you must hold SCL_STATE and SCL_ZIO as writer. 167 * 168 * We use these distinct config locks to avoid recursive lock entry. 169 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 170 * block allocations (SCL_ALLOC), which may require reading space maps 171 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 172 * 173 * The spa config locks cannot be normal rwlocks because we need the 174 * ability to hand off ownership. For example, SCL_ZIO is acquired 175 * by the issuing thread and later released by an interrupt thread. 176 * They do, however, obey the usual write-wanted semantics to prevent 177 * writer (i.e. system administrator) starvation. 178 * 179 * The lock acquisition rules are as follows: 180 * 181 * SCL_CONFIG 182 * Protects changes to the vdev tree topology, such as vdev 183 * add/remove/attach/detach. Protects the dirty config list 184 * (spa_config_dirty_list) and the set of spares and l2arc devices. 
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b) I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 * spa_vdev_enter()	Acquire the namespace lock and the config lock
 *			for writing.
 *
 * spa_vdev_exit()	Release the config lock, wait for all I/O
 *			to complete, sync the updated configs to the
 *			cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g.
indirect 272 * blocks), space referenced by the missing metadata can not be freed. 273 * Normally this causes the background destroy to become "stalled", as 274 * it is unable to make forward progress. While in this stalled state, 275 * all remaining space to free from the error-encountering filesystem is 276 * "temporarily leaked". Set this flag to cause it to ignore the EIO, 277 * permanently leak the space from indirect blocks that can not be read, 278 * and continue to free everything else that it can. 279 * 280 * The default, "stalling" behavior is useful if the storage partially 281 * fails (i.e. some but not all i/os fail), and then later recovers. In 282 * this case, we will be able to continue pool operations while it is 283 * partially failed, and when it recovers, we can continue to free the 284 * space, with no leaks. However, note that this case is actually 285 * fairly rare. 286 * 287 * Typically pools either (a) fail completely (but perhaps temporarily, 288 * e.g. a top-level vdev going offline), or (b) have localized, 289 * permanent errors (e.g. disk returns the wrong data due to bit flip or 290 * firmware bug). In case (a), this setting does not matter because the 291 * pool will be suspended and the sync thread will not be able to make 292 * forward progress regardless. In case (b), because the error is 293 * permanent, the best we can do is leak the minimum amount of space, 294 * which is what setting this flag will do. Therefore, it is reasonable 295 * for this flag to normally be set, but we chose the more conservative 296 * approach of not setting it, so that there is no possibility of 297 * leaking space in the "partial temporary" failure case. 298 */ 299 int zfs_free_leak_on_eio = B_FALSE; 300 301 /* 302 * Expiration time in milliseconds. This value has two meanings. First it is 303 * used to determine when the spa_deadman() logic should fire. By default the 304 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds. 305 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 306 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 307 * in one of three behaviors controlled by zfs_deadman_failmode. 308 */ 309 uint64_t zfs_deadman_synctime_ms = 600000UL; /* 10 min. */ 310 311 /* 312 * This value controls the maximum amount of time zio_wait() will block for an 313 * outstanding IO. By default this is 300 seconds at which point the "hung" 314 * behavior will be applied as described for zfs_deadman_synctime_ms. 315 */ 316 uint64_t zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */ 317 318 /* 319 * Check time in milliseconds. This defines the frequency at which we check 320 * for hung I/O. 321 */ 322 uint64_t zfs_deadman_checktime_ms = 60000UL; /* 1 min. */ 323 324 /* 325 * By default the deadman is enabled. 326 */ 327 int zfs_deadman_enabled = B_TRUE; 328 329 /* 330 * Controls the behavior of the deadman when it detects a "hung" I/O. 331 * Valid values are zfs_deadman_failmode=<wait|continue|panic>. 332 * 333 * wait - Wait for the "hung" I/O (default) 334 * continue - Attempt to recover from a "hung" I/O 335 * panic - Panic the system 336 */ 337 const char *zfs_deadman_failmode = "wait"; 338 339 /* 340 * The worst case is single-sector max-parity RAID-Z blocks, in which 341 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 342 * times the size; so just assume that. 
Add to this the fact that 343 * we can have up to 3 DVAs per bp, and one more factor of 2 because 344 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, 345 * the worst case is: 346 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 347 */ 348 uint_t spa_asize_inflation = 24; 349 350 /* 351 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 352 * the pool to be consumed (bounded by spa_max_slop). This ensures that we 353 * don't run the pool completely out of space, due to unaccounted changes (e.g. 354 * to the MOS). It also limits the worst-case time to allocate space. If we 355 * have less than this amount of free space, most ZPL operations (e.g. write, 356 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are 357 * also part of this 3.2% of space which can't be consumed by normal writes; 358 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded 359 * log space. 360 * 361 * Certain operations (e.g. file removal, most administrative actions) can 362 * use half the slop space. They will only return ENOSPC if less than half 363 * the slop space is free. Typically, once the pool has less than the slop 364 * space free, the user will use these operations to free up space in the pool. 365 * These are the operations that call dsl_pool_adjustedsize() with the netfree 366 * argument set to TRUE. 367 * 368 * Operations that are almost guaranteed to free up space in the absence of 369 * a pool checkpoint can use up to three quarters of the slop space 370 * (e.g zfs destroy). 371 * 372 * A very restricted set of operations are always permitted, regardless of 373 * the amount of free space. These are the operations that call 374 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net 375 * increase in the amount of space used, it is possible to run the pool 376 * completely out of space, causing it to be permanently read-only. 377 * 378 * Note that on very small pools, the slop space will be larger than 379 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 380 * but we never allow it to be more than half the pool size. 381 * 382 * Further, on very large pools, the slop space will be smaller than 383 * 3.2%, to avoid reserving much more space than we actually need; bounded 384 * by spa_max_slop (128GB). 385 * 386 * See also the comments in zfs_space_check_t. 387 */ 388 uint_t spa_slop_shift = 5; 389 static const uint64_t spa_min_slop = 128ULL * 1024 * 1024; 390 static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; 391 static const int spa_allocators = 4; 392 393 /* 394 * Spa active allocator. 395 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>. 396 */ 397 const char *zfs_active_allocator = "dynamic"; 398 399 void 400 spa_load_failed(spa_t *spa, const char *fmt, ...) 401 { 402 va_list adx; 403 char buf[256]; 404 405 va_start(adx, fmt); 406 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 407 va_end(adx); 408 409 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name, 410 spa->spa_trust_config ? "trusted" : "untrusted", buf); 411 } 412 413 void 414 spa_load_note(spa_t *spa, const char *fmt, ...) 415 { 416 va_list adx; 417 char buf[256]; 418 419 va_start(adx, fmt); 420 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 421 va_end(adx); 422 423 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name, 424 spa->spa_trust_config ? 
"trusted" : "untrusted", buf); 425 } 426 427 /* 428 * By default dedup and user data indirects land in the special class 429 */ 430 static int zfs_ddt_data_is_special = B_TRUE; 431 static int zfs_user_indirect_is_special = B_TRUE; 432 433 /* 434 * The percentage of special class final space reserved for metadata only. 435 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 436 * let metadata into the class. 437 */ 438 static uint_t zfs_special_class_metadata_reserve_pct = 25; 439 440 /* 441 * ========================================================================== 442 * SPA config locking 443 * ========================================================================== 444 */ 445 static void 446 spa_config_lock_init(spa_t *spa) 447 { 448 for (int i = 0; i < SCL_LOCKS; i++) { 449 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 450 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 451 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 452 scl->scl_writer = NULL; 453 scl->scl_write_wanted = 0; 454 scl->scl_count = 0; 455 } 456 } 457 458 static void 459 spa_config_lock_destroy(spa_t *spa) 460 { 461 for (int i = 0; i < SCL_LOCKS; i++) { 462 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 463 mutex_destroy(&scl->scl_lock); 464 cv_destroy(&scl->scl_cv); 465 ASSERT(scl->scl_writer == NULL); 466 ASSERT(scl->scl_write_wanted == 0); 467 ASSERT(scl->scl_count == 0); 468 } 469 } 470 471 int 472 spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw) 473 { 474 for (int i = 0; i < SCL_LOCKS; i++) { 475 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 476 if (!(locks & (1 << i))) 477 continue; 478 mutex_enter(&scl->scl_lock); 479 if (rw == RW_READER) { 480 if (scl->scl_writer || scl->scl_write_wanted) { 481 mutex_exit(&scl->scl_lock); 482 spa_config_exit(spa, locks & ((1 << i) - 1), 483 tag); 484 return (0); 485 } 486 } else { 487 ASSERT(scl->scl_writer != curthread); 488 if (scl->scl_count != 0) { 489 mutex_exit(&scl->scl_lock); 490 spa_config_exit(spa, locks & ((1 << i) - 1), 491 tag); 492 return (0); 493 } 494 scl->scl_writer = curthread; 495 } 496 scl->scl_count++; 497 mutex_exit(&scl->scl_lock); 498 } 499 return (1); 500 } 501 502 static void 503 spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw, 504 int mmp_flag) 505 { 506 (void) tag; 507 int wlocks_held = 0; 508 509 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 510 511 for (int i = 0; i < SCL_LOCKS; i++) { 512 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 513 if (scl->scl_writer == curthread) 514 wlocks_held |= (1 << i); 515 if (!(locks & (1 << i))) 516 continue; 517 mutex_enter(&scl->scl_lock); 518 if (rw == RW_READER) { 519 while (scl->scl_writer || 520 (!mmp_flag && scl->scl_write_wanted)) { 521 cv_wait(&scl->scl_cv, &scl->scl_lock); 522 } 523 } else { 524 ASSERT(scl->scl_writer != curthread); 525 while (scl->scl_count != 0) { 526 scl->scl_write_wanted++; 527 cv_wait(&scl->scl_cv, &scl->scl_lock); 528 scl->scl_write_wanted--; 529 } 530 scl->scl_writer = curthread; 531 } 532 scl->scl_count++; 533 mutex_exit(&scl->scl_lock); 534 } 535 ASSERT3U(wlocks_held, <=, locks); 536 } 537 538 void 539 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) 540 { 541 spa_config_enter_impl(spa, locks, tag, rw, 0); 542 } 543 544 /* 545 * The spa_config_enter_mmp() allows the mmp thread to cut in front of 546 * outstanding write lock requests. 
This is needed since the mmp updates are 547 * time sensitive and failure to service them promptly will result in a 548 * suspended pool. This pool suspension has been seen in practice when there is 549 * a single disk in a pool that is responding slowly and presumably about to 550 * fail. 551 */ 552 553 void 554 spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw) 555 { 556 spa_config_enter_impl(spa, locks, tag, rw, 1); 557 } 558 559 void 560 spa_config_exit(spa_t *spa, int locks, const void *tag) 561 { 562 (void) tag; 563 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 564 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 565 if (!(locks & (1 << i))) 566 continue; 567 mutex_enter(&scl->scl_lock); 568 ASSERT(scl->scl_count > 0); 569 if (--scl->scl_count == 0) { 570 ASSERT(scl->scl_writer == NULL || 571 scl->scl_writer == curthread); 572 scl->scl_writer = NULL; /* OK in either case */ 573 cv_broadcast(&scl->scl_cv); 574 } 575 mutex_exit(&scl->scl_lock); 576 } 577 } 578 579 int 580 spa_config_held(spa_t *spa, int locks, krw_t rw) 581 { 582 int locks_held = 0; 583 584 for (int i = 0; i < SCL_LOCKS; i++) { 585 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 586 if (!(locks & (1 << i))) 587 continue; 588 if ((rw == RW_READER && scl->scl_count != 0) || 589 (rw == RW_WRITER && scl->scl_writer == curthread)) 590 locks_held |= 1 << i; 591 } 592 593 return (locks_held); 594 } 595 596 /* 597 * ========================================================================== 598 * SPA namespace functions 599 * ========================================================================== 600 */ 601 602 /* 603 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 604 * Returns NULL if no matching spa_t is found. 605 */ 606 spa_t * 607 spa_lookup(const char *name) 608 { 609 static spa_t search; /* spa_t is large; don't allocate on stack */ 610 spa_t *spa; 611 avl_index_t where; 612 char *cp; 613 614 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 615 616 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 617 618 /* 619 * If it's a full dataset name, figure out the pool name and 620 * just use that. 621 */ 622 cp = strpbrk(search.spa_name, "/@#"); 623 if (cp != NULL) 624 *cp = '\0'; 625 626 spa = avl_find(&spa_namespace_avl, &search, &where); 627 628 return (spa); 629 } 630 631 /* 632 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 633 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 634 * looking for potentially hung I/Os. 635 */ 636 void 637 spa_deadman(void *arg) 638 { 639 spa_t *spa = arg; 640 641 /* Disable the deadman if the pool is suspended. */ 642 if (spa_suspended(spa)) 643 return; 644 645 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 646 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 647 (u_longlong_t)++spa->spa_deadman_calls); 648 if (zfs_deadman_enabled) 649 vdev_deadman(spa->spa_root_vdev, FTAG); 650 651 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 652 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 653 MSEC_TO_TICK(zfs_deadman_checktime_ms)); 654 } 655 656 static int 657 spa_log_sm_sort_by_txg(const void *va, const void *vb) 658 { 659 const spa_log_sm_t *a = va; 660 const spa_log_sm_t *b = vb; 661 662 return (TREE_CMP(a->sls_txg, b->sls_txg)); 663 } 664 665 /* 666 * Create an uninitialized spa_t with the given name. Requires 667 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 668 * exist by calling spa_lookup() first. 
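 *
 * A minimal usage sketch (illustrative only; real callers such as
 * spa_create() and spa_import() do considerably more setup around this):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) == NULL)
 *		spa = spa_add(name, config, NULL);
 *	mutex_exit(&spa_namespace_lock);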
669 */ 670 spa_t * 671 spa_add(const char *name, nvlist_t *config, const char *altroot) 672 { 673 spa_t *spa; 674 spa_config_dirent_t *dp; 675 676 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 677 678 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 679 680 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 681 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 682 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 683 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 684 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 685 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 686 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 687 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 688 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 689 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 690 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 691 mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL); 692 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL); 693 mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL); 694 695 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 696 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 697 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 698 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 699 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 700 cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL); 701 cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL); 702 703 for (int t = 0; t < TXG_SIZE; t++) 704 bplist_create(&spa->spa_free_bplist[t]); 705 706 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 707 spa->spa_state = POOL_STATE_UNINITIALIZED; 708 spa->spa_freeze_txg = UINT64_MAX; 709 spa->spa_final_txg = UINT64_MAX; 710 spa->spa_load_max_txg = UINT64_MAX; 711 spa->spa_proc = &p0; 712 spa->spa_proc_state = SPA_PROC_NONE; 713 spa->spa_trust_config = B_TRUE; 714 spa->spa_hostid = zone_get_hostid(NULL); 715 716 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 717 spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms); 718 spa_set_deadman_failmode(spa, zfs_deadman_failmode); 719 spa_set_allocator(spa, zfs_active_allocator); 720 721 zfs_refcount_create(&spa->spa_refcount); 722 spa_config_lock_init(spa); 723 spa_stats_init(spa); 724 725 avl_add(&spa_namespace_avl, spa); 726 727 /* 728 * Set the alternate root, if there is one. 
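 * (An alternate root, when set, is prepended to every mount point in the
 * pool for as long as it remains imported -- e.g. "zpool import -R /mnt" --
 * which is useful when repairing a system from alternate media.)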
729 */ 730 if (altroot) 731 spa->spa_root = spa_strdup(altroot); 732 733 spa->spa_alloc_count = spa_allocators; 734 spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count * 735 sizeof (spa_alloc_t), KM_SLEEP); 736 for (int i = 0; i < spa->spa_alloc_count; i++) { 737 mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT, 738 NULL); 739 avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare, 740 sizeof (zio_t), offsetof(zio_t, io_queue_node.a)); 741 } 742 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed, 743 sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node)); 744 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg, 745 sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node)); 746 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t), 747 offsetof(log_summary_entry_t, lse_node)); 748 749 /* 750 * Every pool starts with the default cachefile 751 */ 752 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 753 offsetof(spa_config_dirent_t, scd_link)); 754 755 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 756 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 757 list_insert_head(&spa->spa_config_list, dp); 758 759 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 760 KM_SLEEP) == 0); 761 762 if (config != NULL) { 763 nvlist_t *features; 764 765 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 766 &features) == 0) { 767 VERIFY(nvlist_dup(features, &spa->spa_label_features, 768 0) == 0); 769 } 770 771 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 772 } 773 774 if (spa->spa_label_features == NULL) { 775 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 776 KM_SLEEP) == 0); 777 } 778 779 spa->spa_min_ashift = INT_MAX; 780 spa->spa_max_ashift = 0; 781 spa->spa_min_alloc = INT_MAX; 782 spa->spa_gcd_alloc = INT_MAX; 783 784 /* Reset cached value */ 785 spa->spa_dedup_dspace = ~0ULL; 786 787 /* 788 * As a pool is being created, treat all features as disabled by 789 * setting SPA_FEATURE_DISABLED for all entries in the feature 790 * refcount cache. 791 */ 792 for (int i = 0; i < SPA_FEATURES; i++) { 793 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 794 } 795 796 list_create(&spa->spa_leaf_list, sizeof (vdev_t), 797 offsetof(vdev_t, vdev_leaf_node)); 798 799 return (spa); 800 } 801 802 /* 803 * Removes a spa_t from the namespace, freeing up any memory used. Requires 804 * spa_namespace_lock. This is called only after the spa_t has been closed and 805 * deactivated. 
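 * (In practice this is reached from the pool export/destroy paths and from
 * spa_evict_all().)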
806 */ 807 void 808 spa_remove(spa_t *spa) 809 { 810 spa_config_dirent_t *dp; 811 812 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 813 ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED); 814 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0); 815 ASSERT0(spa->spa_waiters); 816 817 nvlist_free(spa->spa_config_splitting); 818 819 avl_remove(&spa_namespace_avl, spa); 820 cv_broadcast(&spa_namespace_cv); 821 822 if (spa->spa_root) 823 spa_strfree(spa->spa_root); 824 825 while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) { 826 if (dp->scd_path != NULL) 827 spa_strfree(dp->scd_path); 828 kmem_free(dp, sizeof (spa_config_dirent_t)); 829 } 830 831 for (int i = 0; i < spa->spa_alloc_count; i++) { 832 avl_destroy(&spa->spa_allocs[i].spaa_tree); 833 mutex_destroy(&spa->spa_allocs[i].spaa_lock); 834 } 835 kmem_free(spa->spa_allocs, spa->spa_alloc_count * 836 sizeof (spa_alloc_t)); 837 838 avl_destroy(&spa->spa_metaslabs_by_flushed); 839 avl_destroy(&spa->spa_sm_logs_by_txg); 840 list_destroy(&spa->spa_log_summary); 841 list_destroy(&spa->spa_config_list); 842 list_destroy(&spa->spa_leaf_list); 843 844 nvlist_free(spa->spa_label_features); 845 nvlist_free(spa->spa_load_info); 846 nvlist_free(spa->spa_feat_stats); 847 spa_config_set(spa, NULL); 848 849 zfs_refcount_destroy(&spa->spa_refcount); 850 851 spa_stats_destroy(spa); 852 spa_config_lock_destroy(spa); 853 854 for (int t = 0; t < TXG_SIZE; t++) 855 bplist_destroy(&spa->spa_free_bplist[t]); 856 857 zio_checksum_templates_free(spa); 858 859 cv_destroy(&spa->spa_async_cv); 860 cv_destroy(&spa->spa_evicting_os_cv); 861 cv_destroy(&spa->spa_proc_cv); 862 cv_destroy(&spa->spa_scrub_io_cv); 863 cv_destroy(&spa->spa_suspend_cv); 864 cv_destroy(&spa->spa_activities_cv); 865 cv_destroy(&spa->spa_waiters_cv); 866 867 mutex_destroy(&spa->spa_flushed_ms_lock); 868 mutex_destroy(&spa->spa_async_lock); 869 mutex_destroy(&spa->spa_errlist_lock); 870 mutex_destroy(&spa->spa_errlog_lock); 871 mutex_destroy(&spa->spa_evicting_os_lock); 872 mutex_destroy(&spa->spa_history_lock); 873 mutex_destroy(&spa->spa_proc_lock); 874 mutex_destroy(&spa->spa_props_lock); 875 mutex_destroy(&spa->spa_cksum_tmpls_lock); 876 mutex_destroy(&spa->spa_scrub_lock); 877 mutex_destroy(&spa->spa_suspend_lock); 878 mutex_destroy(&spa->spa_vdev_top_lock); 879 mutex_destroy(&spa->spa_feat_stats_lock); 880 mutex_destroy(&spa->spa_activities_lock); 881 882 kmem_free(spa, sizeof (spa_t)); 883 } 884 885 /* 886 * Given a pool, return the next pool in the namespace, or NULL if there is 887 * none. If 'prev' is NULL, return the first pool. 888 */ 889 spa_t * 890 spa_next(spa_t *prev) 891 { 892 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 893 894 if (prev) 895 return (AVL_NEXT(&spa_namespace_avl, prev)); 896 else 897 return (avl_first(&spa_namespace_avl)); 898 } 899 900 /* 901 * ========================================================================== 902 * SPA refcount functions 903 * ========================================================================== 904 */ 905 906 /* 907 * Add a reference to the given spa_t. Must have at least one reference, or 908 * have the namespace lock held. 909 */ 910 void 911 spa_open_ref(spa_t *spa, const void *tag) 912 { 913 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref || 914 MUTEX_HELD(&spa_namespace_lock)); 915 (void) zfs_refcount_add(&spa->spa_refcount, tag); 916 } 917 918 /* 919 * Remove a reference to the given spa_t. Must have at least one reference, or 920 * have the namespace lock held. 
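 *
 * A sketch of the usual pairing (the tag passed here should match the one
 * given to spa_open_ref(), since references are tracked by tag):
 *
 *	spa_open_ref(spa, FTAG);
 *	... use the spa_t ...
 *	spa_close(spa, FTAG);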
921 */ 922 void 923 spa_close(spa_t *spa, const void *tag) 924 { 925 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref || 926 MUTEX_HELD(&spa_namespace_lock)); 927 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 928 } 929 930 /* 931 * Remove a reference to the given spa_t held by a dsl dir that is 932 * being asynchronously released. Async releases occur from a taskq 933 * performing eviction of dsl datasets and dirs. The namespace lock 934 * isn't held and the hold by the object being evicted may contribute to 935 * spa_minref (e.g. dataset or directory released during pool export), 936 * so the asserts in spa_close() do not apply. 937 */ 938 void 939 spa_async_close(spa_t *spa, const void *tag) 940 { 941 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 942 } 943 944 /* 945 * Check to see if the spa refcount is zero. Must be called with 946 * spa_namespace_lock held. We really compare against spa_minref, which is the 947 * number of references acquired when opening a pool 948 */ 949 boolean_t 950 spa_refcount_zero(spa_t *spa) 951 { 952 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 953 954 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref); 955 } 956 957 /* 958 * ========================================================================== 959 * SPA spare and l2cache tracking 960 * ========================================================================== 961 */ 962 963 /* 964 * Hot spares and cache devices are tracked using the same code below, 965 * for 'auxiliary' devices. 966 */ 967 968 typedef struct spa_aux { 969 uint64_t aux_guid; 970 uint64_t aux_pool; 971 avl_node_t aux_avl; 972 int aux_count; 973 } spa_aux_t; 974 975 static inline int 976 spa_aux_compare(const void *a, const void *b) 977 { 978 const spa_aux_t *sa = (const spa_aux_t *)a; 979 const spa_aux_t *sb = (const spa_aux_t *)b; 980 981 return (TREE_CMP(sa->aux_guid, sb->aux_guid)); 982 } 983 984 static void 985 spa_aux_add(vdev_t *vd, avl_tree_t *avl) 986 { 987 avl_index_t where; 988 spa_aux_t search; 989 spa_aux_t *aux; 990 991 search.aux_guid = vd->vdev_guid; 992 if ((aux = avl_find(avl, &search, &where)) != NULL) { 993 aux->aux_count++; 994 } else { 995 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 996 aux->aux_guid = vd->vdev_guid; 997 aux->aux_count = 1; 998 avl_insert(avl, aux, where); 999 } 1000 } 1001 1002 static void 1003 spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 1004 { 1005 spa_aux_t search; 1006 spa_aux_t *aux; 1007 avl_index_t where; 1008 1009 search.aux_guid = vd->vdev_guid; 1010 aux = avl_find(avl, &search, &where); 1011 1012 ASSERT(aux != NULL); 1013 1014 if (--aux->aux_count == 0) { 1015 avl_remove(avl, aux); 1016 kmem_free(aux, sizeof (spa_aux_t)); 1017 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 1018 aux->aux_pool = 0ULL; 1019 } 1020 } 1021 1022 static boolean_t 1023 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 1024 { 1025 spa_aux_t search, *found; 1026 1027 search.aux_guid = guid; 1028 found = avl_find(avl, &search, NULL); 1029 1030 if (pool) { 1031 if (found) 1032 *pool = found->aux_pool; 1033 else 1034 *pool = 0ULL; 1035 } 1036 1037 if (refcnt) { 1038 if (found) 1039 *refcnt = found->aux_count; 1040 else 1041 *refcnt = 0; 1042 } 1043 1044 return (found != NULL); 1045 } 1046 1047 static void 1048 spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1049 { 1050 spa_aux_t search, *found; 1051 avl_index_t where; 1052 1053 search.aux_guid = vd->vdev_guid; 1054 found = avl_find(avl, &search, &where); 1055 ASSERT(found != NULL); 1056 
ASSERT(found->aux_pool == 0ULL); 1057 1058 found->aux_pool = spa_guid(vd->vdev_spa); 1059 } 1060 1061 /* 1062 * Spares are tracked globally due to the following constraints: 1063 * 1064 * - A spare may be part of multiple pools. 1065 * - A spare may be added to a pool even if it's actively in use within 1066 * another pool. 1067 * - A spare in use in any pool can only be the source of a replacement if 1068 * the target is a spare in the same pool. 1069 * 1070 * We keep track of all spares on the system through the use of a reference 1071 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 1072 * spare, then we bump the reference count in the AVL tree. In addition, we set 1073 * the 'vdev_isspare' member to indicate that the device is a spare (active or 1074 * inactive). When a spare is made active (used to replace a device in the 1075 * pool), we also keep track of which pool its been made a part of. 1076 * 1077 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 1078 * called under the spa_namespace lock as part of vdev reconfiguration. The 1079 * separate spare lock exists for the status query path, which does not need to 1080 * be completely consistent with respect to other vdev configuration changes. 1081 */ 1082 1083 static int 1084 spa_spare_compare(const void *a, const void *b) 1085 { 1086 return (spa_aux_compare(a, b)); 1087 } 1088 1089 void 1090 spa_spare_add(vdev_t *vd) 1091 { 1092 mutex_enter(&spa_spare_lock); 1093 ASSERT(!vd->vdev_isspare); 1094 spa_aux_add(vd, &spa_spare_avl); 1095 vd->vdev_isspare = B_TRUE; 1096 mutex_exit(&spa_spare_lock); 1097 } 1098 1099 void 1100 spa_spare_remove(vdev_t *vd) 1101 { 1102 mutex_enter(&spa_spare_lock); 1103 ASSERT(vd->vdev_isspare); 1104 spa_aux_remove(vd, &spa_spare_avl); 1105 vd->vdev_isspare = B_FALSE; 1106 mutex_exit(&spa_spare_lock); 1107 } 1108 1109 boolean_t 1110 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1111 { 1112 boolean_t found; 1113 1114 mutex_enter(&spa_spare_lock); 1115 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1116 mutex_exit(&spa_spare_lock); 1117 1118 return (found); 1119 } 1120 1121 void 1122 spa_spare_activate(vdev_t *vd) 1123 { 1124 mutex_enter(&spa_spare_lock); 1125 ASSERT(vd->vdev_isspare); 1126 spa_aux_activate(vd, &spa_spare_avl); 1127 mutex_exit(&spa_spare_lock); 1128 } 1129 1130 /* 1131 * Level 2 ARC devices are tracked globally for the same reasons as spares. 1132 * Cache devices currently only support one pool per cache device, and so 1133 * for these devices the aux reference count is currently unused beyond 1. 
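 * As with the spare functions above, the l2cache functions below are thin
 * wrappers around the shared spa_aux_*() helpers, serialized by
 * spa_l2cache_lock instead of spa_spare_lock.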
1134 */ 1135 1136 static int 1137 spa_l2cache_compare(const void *a, const void *b) 1138 { 1139 return (spa_aux_compare(a, b)); 1140 } 1141 1142 void 1143 spa_l2cache_add(vdev_t *vd) 1144 { 1145 mutex_enter(&spa_l2cache_lock); 1146 ASSERT(!vd->vdev_isl2cache); 1147 spa_aux_add(vd, &spa_l2cache_avl); 1148 vd->vdev_isl2cache = B_TRUE; 1149 mutex_exit(&spa_l2cache_lock); 1150 } 1151 1152 void 1153 spa_l2cache_remove(vdev_t *vd) 1154 { 1155 mutex_enter(&spa_l2cache_lock); 1156 ASSERT(vd->vdev_isl2cache); 1157 spa_aux_remove(vd, &spa_l2cache_avl); 1158 vd->vdev_isl2cache = B_FALSE; 1159 mutex_exit(&spa_l2cache_lock); 1160 } 1161 1162 boolean_t 1163 spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1164 { 1165 boolean_t found; 1166 1167 mutex_enter(&spa_l2cache_lock); 1168 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1169 mutex_exit(&spa_l2cache_lock); 1170 1171 return (found); 1172 } 1173 1174 void 1175 spa_l2cache_activate(vdev_t *vd) 1176 { 1177 mutex_enter(&spa_l2cache_lock); 1178 ASSERT(vd->vdev_isl2cache); 1179 spa_aux_activate(vd, &spa_l2cache_avl); 1180 mutex_exit(&spa_l2cache_lock); 1181 } 1182 1183 /* 1184 * ========================================================================== 1185 * SPA vdev locking 1186 * ========================================================================== 1187 */ 1188 1189 /* 1190 * Lock the given spa_t for the purpose of adding or removing a vdev. 1191 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1192 * It returns the next transaction group for the spa_t. 1193 */ 1194 uint64_t 1195 spa_vdev_enter(spa_t *spa) 1196 { 1197 mutex_enter(&spa->spa_vdev_top_lock); 1198 mutex_enter(&spa_namespace_lock); 1199 1200 vdev_autotrim_stop_all(spa); 1201 1202 return (spa_vdev_config_enter(spa)); 1203 } 1204 1205 /* 1206 * The same as spa_vdev_enter() above but additionally takes the guid of 1207 * the vdev being detached. When there is a rebuild in process it will be 1208 * suspended while the vdev tree is modified then resumed by spa_vdev_exit(). 1209 * The rebuild is canceled if only a single child remains after the detach. 1210 */ 1211 uint64_t 1212 spa_vdev_detach_enter(spa_t *spa, uint64_t guid) 1213 { 1214 mutex_enter(&spa->spa_vdev_top_lock); 1215 mutex_enter(&spa_namespace_lock); 1216 1217 vdev_autotrim_stop_all(spa); 1218 1219 if (guid != 0) { 1220 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 1221 if (vd) { 1222 vdev_rebuild_stop_wait(vd->vdev_top); 1223 } 1224 } 1225 1226 return (spa_vdev_config_enter(spa)); 1227 } 1228 1229 /* 1230 * Internal implementation for spa_vdev_enter(). Used when a vdev 1231 * operation requires multiple syncs (i.e. removing a device) while 1232 * keeping the spa_namespace_lock held. 1233 */ 1234 uint64_t 1235 spa_vdev_config_enter(spa_t *spa) 1236 { 1237 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1238 1239 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1240 1241 return (spa_last_synced_txg(spa) + 1); 1242 } 1243 1244 /* 1245 * Used in combination with spa_vdev_config_enter() to allow the syncing 1246 * of multiple transactions without releasing the spa_namespace_lock. 1247 */ 1248 void 1249 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, 1250 const char *tag) 1251 { 1252 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1253 1254 int config_changed = B_FALSE; 1255 1256 ASSERT(txg > spa_last_synced_txg(spa)); 1257 1258 spa->spa_pending_vdev = NULL; 1259 1260 /* 1261 * Reassess the DTLs. 
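 * (The DTLs are the per-vdev "dirty time logs", which record the txgs for
 * which a vdev may be missing data; they are recomputed here because the
 * vdev operation just completed may have changed them.)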
1262 */ 1263 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE); 1264 1265 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1266 config_changed = B_TRUE; 1267 spa->spa_config_generation++; 1268 } 1269 1270 /* 1271 * Verify the metaslab classes. 1272 */ 1273 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1274 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1275 ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0); 1276 ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0); 1277 ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0); 1278 1279 spa_config_exit(spa, SCL_ALL, spa); 1280 1281 /* 1282 * Panic the system if the specified tag requires it. This 1283 * is useful for ensuring that configurations are updated 1284 * transactionally. 1285 */ 1286 if (zio_injection_enabled) 1287 zio_handle_panic_injection(spa, tag, 0); 1288 1289 /* 1290 * Note: this txg_wait_synced() is important because it ensures 1291 * that there won't be more than one config change per txg. 1292 * This allows us to use the txg as the generation number. 1293 */ 1294 if (error == 0) 1295 txg_wait_synced(spa->spa_dsl_pool, txg); 1296 1297 if (vd != NULL) { 1298 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1299 if (vd->vdev_ops->vdev_op_leaf) { 1300 mutex_enter(&vd->vdev_initialize_lock); 1301 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, 1302 NULL); 1303 mutex_exit(&vd->vdev_initialize_lock); 1304 1305 mutex_enter(&vd->vdev_trim_lock); 1306 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL); 1307 mutex_exit(&vd->vdev_trim_lock); 1308 } 1309 1310 /* 1311 * The vdev may be both a leaf and top-level device. 1312 */ 1313 vdev_autotrim_stop_wait(vd); 1314 1315 spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER); 1316 vdev_free(vd); 1317 spa_config_exit(spa, SCL_STATE_ALL, spa); 1318 } 1319 1320 /* 1321 * If the config changed, update the config cache. 1322 */ 1323 if (config_changed) 1324 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 1325 } 1326 1327 /* 1328 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1329 * locking of spa_vdev_enter(), we also want make sure the transactions have 1330 * synced to disk, and then update the global configuration cache with the new 1331 * information. 1332 */ 1333 int 1334 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1335 { 1336 vdev_autotrim_restart(spa); 1337 vdev_rebuild_restart(spa); 1338 1339 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1340 mutex_exit(&spa_namespace_lock); 1341 mutex_exit(&spa->spa_vdev_top_lock); 1342 1343 return (error); 1344 } 1345 1346 /* 1347 * Lock the given spa_t for the purpose of changing vdev state. 1348 */ 1349 void 1350 spa_vdev_state_enter(spa_t *spa, int oplocks) 1351 { 1352 int locks = SCL_STATE_ALL | oplocks; 1353 1354 /* 1355 * Root pools may need to read of the underlying devfs filesystem 1356 * when opening up a vdev. Unfortunately if we're holding the 1357 * SCL_ZIO lock it will result in a deadlock when we try to issue 1358 * the read from the root filesystem. Instead we "prefetch" 1359 * the associated vnodes that we need prior to opening the 1360 * underlying devices and cache them so that we can prevent 1361 * any I/O when we are doing the actual open. 
1362 */ 1363 if (spa_is_root(spa)) { 1364 int low = locks & ~(SCL_ZIO - 1); 1365 int high = locks & ~low; 1366 1367 spa_config_enter(spa, high, spa, RW_WRITER); 1368 vdev_hold(spa->spa_root_vdev); 1369 spa_config_enter(spa, low, spa, RW_WRITER); 1370 } else { 1371 spa_config_enter(spa, locks, spa, RW_WRITER); 1372 } 1373 spa->spa_vdev_locks = locks; 1374 } 1375 1376 int 1377 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1378 { 1379 boolean_t config_changed = B_FALSE; 1380 vdev_t *vdev_top; 1381 1382 if (vd == NULL || vd == spa->spa_root_vdev) { 1383 vdev_top = spa->spa_root_vdev; 1384 } else { 1385 vdev_top = vd->vdev_top; 1386 } 1387 1388 if (vd != NULL || error == 0) 1389 vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE); 1390 1391 if (vd != NULL) { 1392 if (vd != spa->spa_root_vdev) 1393 vdev_state_dirty(vdev_top); 1394 1395 config_changed = B_TRUE; 1396 spa->spa_config_generation++; 1397 } 1398 1399 if (spa_is_root(spa)) 1400 vdev_rele(spa->spa_root_vdev); 1401 1402 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1403 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1404 1405 /* 1406 * If anything changed, wait for it to sync. This ensures that, 1407 * from the system administrator's perspective, zpool(8) commands 1408 * are synchronous. This is important for things like zpool offline: 1409 * when the command completes, you expect no further I/O from ZFS. 1410 */ 1411 if (vd != NULL) 1412 txg_wait_synced(spa->spa_dsl_pool, 0); 1413 1414 /* 1415 * If the config changed, update the config cache. 1416 */ 1417 if (config_changed) { 1418 mutex_enter(&spa_namespace_lock); 1419 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE); 1420 mutex_exit(&spa_namespace_lock); 1421 } 1422 1423 return (error); 1424 } 1425 1426 /* 1427 * ========================================================================== 1428 * Miscellaneous functions 1429 * ========================================================================== 1430 */ 1431 1432 void 1433 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1434 { 1435 if (!nvlist_exists(spa->spa_label_features, feature)) { 1436 fnvlist_add_boolean(spa->spa_label_features, feature); 1437 /* 1438 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1439 * dirty the vdev config because lock SCL_CONFIG is not held. 1440 * Thankfully, in this case we don't need to dirty the config 1441 * because it will be written out anyway when we finish 1442 * creating the pool. 1443 */ 1444 if (tx->tx_txg != TXG_INITIAL) 1445 vdev_config_dirty(spa->spa_root_vdev); 1446 } 1447 } 1448 1449 void 1450 spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1451 { 1452 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1453 vdev_config_dirty(spa->spa_root_vdev); 1454 } 1455 1456 /* 1457 * Return the spa_t associated with given pool_guid, if it exists. If 1458 * device_guid is non-zero, determine whether the pool exists *and* contains 1459 * a device with the specified device_guid. 
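 * For example, spa_generate_guid() below relies on the spa_guid_exists()
 * wrapper, retrying while spa_guid_exists(guid, 0) reports that a randomly
 * generated guid already belongs to some pool.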
1460 */ 1461 spa_t * 1462 spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1463 { 1464 spa_t *spa; 1465 avl_tree_t *t = &spa_namespace_avl; 1466 1467 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1468 1469 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1470 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1471 continue; 1472 if (spa->spa_root_vdev == NULL) 1473 continue; 1474 if (spa_guid(spa) == pool_guid) { 1475 if (device_guid == 0) 1476 break; 1477 1478 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1479 device_guid) != NULL) 1480 break; 1481 1482 /* 1483 * Check any devices we may be in the process of adding. 1484 */ 1485 if (spa->spa_pending_vdev) { 1486 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1487 device_guid) != NULL) 1488 break; 1489 } 1490 } 1491 } 1492 1493 return (spa); 1494 } 1495 1496 /* 1497 * Determine whether a pool with the given pool_guid exists. 1498 */ 1499 boolean_t 1500 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1501 { 1502 return (spa_by_guid(pool_guid, device_guid) != NULL); 1503 } 1504 1505 char * 1506 spa_strdup(const char *s) 1507 { 1508 size_t len; 1509 char *new; 1510 1511 len = strlen(s); 1512 new = kmem_alloc(len + 1, KM_SLEEP); 1513 memcpy(new, s, len + 1); 1514 1515 return (new); 1516 } 1517 1518 void 1519 spa_strfree(char *s) 1520 { 1521 kmem_free(s, strlen(s) + 1); 1522 } 1523 1524 uint64_t 1525 spa_generate_guid(spa_t *spa) 1526 { 1527 uint64_t guid; 1528 1529 if (spa != NULL) { 1530 do { 1531 (void) random_get_pseudo_bytes((void *)&guid, 1532 sizeof (guid)); 1533 } while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)); 1534 } else { 1535 do { 1536 (void) random_get_pseudo_bytes((void *)&guid, 1537 sizeof (guid)); 1538 } while (guid == 0 || spa_guid_exists(guid, 0)); 1539 } 1540 1541 return (guid); 1542 } 1543 1544 void 1545 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1546 { 1547 char type[256]; 1548 const char *checksum = NULL; 1549 const char *compress = NULL; 1550 1551 if (bp != NULL) { 1552 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1553 dmu_object_byteswap_t bswap = 1554 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1555 (void) snprintf(type, sizeof (type), "bswap %s %s", 1556 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1557 "metadata" : "data", 1558 dmu_ot_byteswap[bswap].ob_name); 1559 } else { 1560 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1561 sizeof (type)); 1562 } 1563 if (!BP_IS_EMBEDDED(bp)) { 1564 checksum = 1565 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1566 } 1567 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1568 } 1569 1570 SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum, 1571 compress); 1572 } 1573 1574 void 1575 spa_freeze(spa_t *spa) 1576 { 1577 uint64_t freeze_txg = 0; 1578 1579 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1580 if (spa->spa_freeze_txg == UINT64_MAX) { 1581 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1582 spa->spa_freeze_txg = freeze_txg; 1583 } 1584 spa_config_exit(spa, SCL_ALL, FTAG); 1585 if (freeze_txg != 0) 1586 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1587 } 1588 1589 void 1590 zfs_panic_recover(const char *fmt, ...) 1591 { 1592 va_list adx; 1593 1594 va_start(adx, fmt); 1595 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1596 va_end(adx); 1597 } 1598 1599 /* 1600 * This is a stripped-down version of strtoull, suitable only for converting 1601 * lowercase hexadecimal numbers that don't overflow. 
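 * For example, zfs_strtonum("1a2b", &end) returns 0x1a2b and leaves end
 * pointing at the terminating NUL; parsing stops at the first character
 * outside [0-9a-f], which is how callers detect trailing garbage.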
1602 */ 1603 uint64_t 1604 zfs_strtonum(const char *str, char **nptr) 1605 { 1606 uint64_t val = 0; 1607 char c; 1608 int digit; 1609 1610 while ((c = *str) != '\0') { 1611 if (c >= '0' && c <= '9') 1612 digit = c - '0'; 1613 else if (c >= 'a' && c <= 'f') 1614 digit = 10 + c - 'a'; 1615 else 1616 break; 1617 1618 val *= 16; 1619 val += digit; 1620 1621 str++; 1622 } 1623 1624 if (nptr) 1625 *nptr = (char *)str; 1626 1627 return (val); 1628 } 1629 1630 void 1631 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1632 { 1633 /* 1634 * We bump the feature refcount for each special vdev added to the pool 1635 */ 1636 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1637 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1638 } 1639 1640 /* 1641 * ========================================================================== 1642 * Accessor functions 1643 * ========================================================================== 1644 */ 1645 1646 boolean_t 1647 spa_shutting_down(spa_t *spa) 1648 { 1649 return (spa->spa_async_suspended); 1650 } 1651 1652 dsl_pool_t * 1653 spa_get_dsl(spa_t *spa) 1654 { 1655 return (spa->spa_dsl_pool); 1656 } 1657 1658 boolean_t 1659 spa_is_initializing(spa_t *spa) 1660 { 1661 return (spa->spa_is_initializing); 1662 } 1663 1664 boolean_t 1665 spa_indirect_vdevs_loaded(spa_t *spa) 1666 { 1667 return (spa->spa_indirect_vdevs_loaded); 1668 } 1669 1670 blkptr_t * 1671 spa_get_rootblkptr(spa_t *spa) 1672 { 1673 return (&spa->spa_ubsync.ub_rootbp); 1674 } 1675 1676 void 1677 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1678 { 1679 spa->spa_uberblock.ub_rootbp = *bp; 1680 } 1681 1682 void 1683 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1684 { 1685 if (spa->spa_root == NULL) 1686 buf[0] = '\0'; 1687 else 1688 (void) strlcpy(buf, spa->spa_root, buflen); 1689 } 1690 1691 uint32_t 1692 spa_sync_pass(spa_t *spa) 1693 { 1694 return (spa->spa_sync_pass); 1695 } 1696 1697 char * 1698 spa_name(spa_t *spa) 1699 { 1700 return (spa->spa_name); 1701 } 1702 1703 uint64_t 1704 spa_guid(spa_t *spa) 1705 { 1706 dsl_pool_t *dp = spa_get_dsl(spa); 1707 uint64_t guid; 1708 1709 /* 1710 * If we fail to parse the config during spa_load(), we can go through 1711 * the error path (which posts an ereport) and end up here with no root 1712 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1713 * this case. 1714 */ 1715 if (spa->spa_root_vdev == NULL) 1716 return (spa->spa_config_guid); 1717 1718 guid = spa->spa_last_synced_guid != 0 ? 1719 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1720 1721 /* 1722 * Return the most recently synced out guid unless we're 1723 * in syncing context. 1724 */ 1725 if (dp && dsl_pool_sync_context(dp)) 1726 return (spa->spa_root_vdev->vdev_guid); 1727 else 1728 return (guid); 1729 } 1730 1731 uint64_t 1732 spa_load_guid(spa_t *spa) 1733 { 1734 /* 1735 * This is a GUID that exists solely as a reference for the 1736 * purposes of the arc. It is generated at load time, and 1737 * is never written to persistent storage. 1738 */ 1739 return (spa->spa_load_guid); 1740 } 1741 1742 uint64_t 1743 spa_last_synced_txg(spa_t *spa) 1744 { 1745 return (spa->spa_ubsync.ub_txg); 1746 } 1747 1748 uint64_t 1749 spa_first_txg(spa_t *spa) 1750 { 1751 return (spa->spa_first_txg); 1752 } 1753 1754 uint64_t 1755 spa_syncing_txg(spa_t *spa) 1756 { 1757 return (spa->spa_syncing_txg); 1758 } 1759 1760 /* 1761 * Return the last txg where data can be dirtied. 
The final txgs 1762 * will be used to just clear out any deferred frees that remain. 1763 */ 1764 uint64_t 1765 spa_final_dirty_txg(spa_t *spa) 1766 { 1767 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1768 } 1769 1770 pool_state_t 1771 spa_state(spa_t *spa) 1772 { 1773 return (spa->spa_state); 1774 } 1775 1776 spa_load_state_t 1777 spa_load_state(spa_t *spa) 1778 { 1779 return (spa->spa_load_state); 1780 } 1781 1782 uint64_t 1783 spa_freeze_txg(spa_t *spa) 1784 { 1785 return (spa->spa_freeze_txg); 1786 } 1787 1788 /* 1789 * Return the inflated asize for a logical write in bytes. This is used by the 1790 * DMU to calculate the space a logical write will require on disk. 1791 * If lsize is smaller than the largest physical block size allocatable on this 1792 * pool we use its value instead, since the write will end up using the whole 1793 * block anyway. 1794 */ 1795 uint64_t 1796 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1797 { 1798 if (lsize == 0) 1799 return (0); /* No inflation needed */ 1800 return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation); 1801 } 1802 1803 /* 1804 * Return the amount of slop space in bytes. It is typically 1/32 of the pool 1805 * (3.2%), minus the embedded log space. On very small pools, it may be 1806 * slightly larger than this. On very large pools, it will be capped to 1807 * the value of spa_max_slop. The embedded log space is not included in 1808 * spa_dspace. By subtracting it, the usable space (per "zfs list") is a 1809 * constant 97% of the total space, regardless of metaslab size (assuming the 1810 * default spa_slop_shift=5 and a non-tiny pool). 1811 * 1812 * See the comment above spa_slop_shift for more details. 1813 */ 1814 uint64_t 1815 spa_get_slop_space(spa_t *spa) 1816 { 1817 uint64_t space = 0; 1818 uint64_t slop = 0; 1819 1820 /* 1821 * Make sure spa_dedup_dspace has been set. 1822 */ 1823 if (spa->spa_dedup_dspace == ~0ULL) 1824 spa_update_dspace(spa); 1825 1826 /* 1827 * spa_get_dspace() includes the space only logically "used" by 1828 * deduplicated data, so since it's not useful to reserve more 1829 * space with more deduplicated data, we subtract that out here. 1830 */ 1831 space = spa_get_dspace(spa) - spa->spa_dedup_dspace; 1832 slop = MIN(space >> spa_slop_shift, spa_max_slop); 1833 1834 /* 1835 * Subtract the embedded log space, but no more than half the (3.2%) 1836 * unusable space. Note, the "no more than half" is only relevant if 1837 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by 1838 * default. 1839 */ 1840 uint64_t embedded_log = 1841 metaslab_class_get_dspace(spa_embedded_log_class(spa)); 1842 slop -= MIN(embedded_log, slop >> 1); 1843 1844 /* 1845 * Slop space should be at least spa_min_slop, but no more than half 1846 * the entire pool. 1847 */ 1848 slop = MAX(slop, MIN(space >> 1, spa_min_slop)); 1849 return (slop); 1850 } 1851 1852 uint64_t 1853 spa_get_dspace(spa_t *spa) 1854 { 1855 return (spa->spa_dspace); 1856 } 1857 1858 uint64_t 1859 spa_get_checkpoint_space(spa_t *spa) 1860 { 1861 return (spa->spa_checkpoint_info.sci_dspace); 1862 } 1863 1864 void 1865 spa_update_dspace(spa_t *spa) 1866 { 1867 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1868 ddt_get_dedup_dspace(spa) + brt_get_dspace(spa); 1869 if (spa->spa_nonallocating_dspace > 0) { 1870 /* 1871 * Subtract the space provided by all non-allocating vdevs that 1872 * contribute to dspace. If a file is overwritten, its old 1873 * blocks are freed and new blocks are allocated. 
If there are 1874 * no snapshots of the file, the available space should remain 1875 * the same. The old blocks could be freed from the 1876 * non-allocating vdev, but the new blocks must be allocated on 1877 * other (allocating) vdevs. By reserving the entire size of 1878 * the non-allocating vdevs (including allocated space), we 1879 * ensure that there will be enough space on the allocating 1880 * vdevs for this file overwrite to succeed. 1881 * 1882 * Note that the DMU/DSL doesn't actually know or care 1883 * how much space is allocated (it does its own tracking 1884 * of how much space has been logically used). So it 1885 * doesn't matter that the data we are moving may be 1886 * allocated twice (on the old device and the new device). 1887 */ 1888 ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); 1889 spa->spa_dspace -= spa->spa_nonallocating_dspace; 1890 } 1891 } 1892 1893 /* 1894 * Return the failure mode that has been set to this pool. The default 1895 * behavior will be to block all I/Os when a complete failure occurs. 1896 */ 1897 uint64_t 1898 spa_get_failmode(spa_t *spa) 1899 { 1900 return (spa->spa_failmode); 1901 } 1902 1903 boolean_t 1904 spa_suspended(spa_t *spa) 1905 { 1906 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1907 } 1908 1909 uint64_t 1910 spa_version(spa_t *spa) 1911 { 1912 return (spa->spa_ubsync.ub_version); 1913 } 1914 1915 boolean_t 1916 spa_deflate(spa_t *spa) 1917 { 1918 return (spa->spa_deflate); 1919 } 1920 1921 metaslab_class_t * 1922 spa_normal_class(spa_t *spa) 1923 { 1924 return (spa->spa_normal_class); 1925 } 1926 1927 metaslab_class_t * 1928 spa_log_class(spa_t *spa) 1929 { 1930 return (spa->spa_log_class); 1931 } 1932 1933 metaslab_class_t * 1934 spa_embedded_log_class(spa_t *spa) 1935 { 1936 return (spa->spa_embedded_log_class); 1937 } 1938 1939 metaslab_class_t * 1940 spa_special_class(spa_t *spa) 1941 { 1942 return (spa->spa_special_class); 1943 } 1944 1945 metaslab_class_t * 1946 spa_dedup_class(spa_t *spa) 1947 { 1948 return (spa->spa_dedup_class); 1949 } 1950 1951 /* 1952 * Locate an appropriate allocation class 1953 */ 1954 metaslab_class_t * 1955 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1956 uint_t level, uint_t special_smallblk) 1957 { 1958 /* 1959 * ZIL allocations determine their class in zio_alloc_zil(). 1960 */ 1961 ASSERT(objtype != DMU_OT_INTENT_LOG); 1962 1963 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1964 1965 if (DMU_OT_IS_DDT(objtype)) { 1966 if (spa->spa_dedup_class->mc_groups != 0) 1967 return (spa_dedup_class(spa)); 1968 else if (has_special_class && zfs_ddt_data_is_special) 1969 return (spa_special_class(spa)); 1970 else 1971 return (spa_normal_class(spa)); 1972 } 1973 1974 /* Indirect blocks for user data can land in special if allowed */ 1975 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1976 if (has_special_class && zfs_user_indirect_is_special) 1977 return (spa_special_class(spa)); 1978 else 1979 return (spa_normal_class(spa)); 1980 } 1981 1982 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1983 if (has_special_class) 1984 return (spa_special_class(spa)); 1985 else 1986 return (spa_normal_class(spa)); 1987 } 1988 1989 /* 1990 * Allow small file blocks in special class in some cases (like 1991 * for the dRAID vdev feature). But always leave a reserve of 1992 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 
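 * Concretely, the code below only places a small file block in the special
 * class while the class's allocated space stays under
 * space * (100 - zfs_special_class_metadata_reserve_pct) / 100.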
1993 */ 1994 if (DMU_OT_IS_FILE(objtype) && 1995 has_special_class && size <= special_smallblk) { 1996 metaslab_class_t *special = spa_special_class(spa); 1997 uint64_t alloc = metaslab_class_get_alloc(special); 1998 uint64_t space = metaslab_class_get_space(special); 1999 uint64_t limit = 2000 (space * (100 - zfs_special_class_metadata_reserve_pct)) 2001 / 100; 2002 2003 if (alloc < limit) 2004 return (special); 2005 } 2006 2007 return (spa_normal_class(spa)); 2008 } 2009 2010 void 2011 spa_evicting_os_register(spa_t *spa, objset_t *os) 2012 { 2013 mutex_enter(&spa->spa_evicting_os_lock); 2014 list_insert_head(&spa->spa_evicting_os_list, os); 2015 mutex_exit(&spa->spa_evicting_os_lock); 2016 } 2017 2018 void 2019 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 2020 { 2021 mutex_enter(&spa->spa_evicting_os_lock); 2022 list_remove(&spa->spa_evicting_os_list, os); 2023 cv_broadcast(&spa->spa_evicting_os_cv); 2024 mutex_exit(&spa->spa_evicting_os_lock); 2025 } 2026 2027 void 2028 spa_evicting_os_wait(spa_t *spa) 2029 { 2030 mutex_enter(&spa->spa_evicting_os_lock); 2031 while (!list_is_empty(&spa->spa_evicting_os_list)) 2032 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2033 mutex_exit(&spa->spa_evicting_os_lock); 2034 2035 dmu_buf_user_evict_wait(); 2036 } 2037 2038 int 2039 spa_max_replication(spa_t *spa) 2040 { 2041 /* 2042 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2043 * handle BPs with more than one DVA allocated. Set our max 2044 * replication level accordingly. 2045 */ 2046 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2047 return (1); 2048 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2049 } 2050 2051 int 2052 spa_prev_software_version(spa_t *spa) 2053 { 2054 return (spa->spa_prev_software_version); 2055 } 2056 2057 uint64_t 2058 spa_deadman_synctime(spa_t *spa) 2059 { 2060 return (spa->spa_deadman_synctime); 2061 } 2062 2063 spa_autotrim_t 2064 spa_get_autotrim(spa_t *spa) 2065 { 2066 return (spa->spa_autotrim); 2067 } 2068 2069 uint64_t 2070 spa_deadman_ziotime(spa_t *spa) 2071 { 2072 return (spa->spa_deadman_ziotime); 2073 } 2074 2075 uint64_t 2076 spa_get_deadman_failmode(spa_t *spa) 2077 { 2078 return (spa->spa_deadman_failmode); 2079 } 2080 2081 void 2082 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2083 { 2084 if (strcmp(failmode, "wait") == 0) 2085 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2086 else if (strcmp(failmode, "continue") == 0) 2087 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2088 else if (strcmp(failmode, "panic") == 0) 2089 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2090 else 2091 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2092 } 2093 2094 void 2095 spa_set_deadman_ziotime(hrtime_t ns) 2096 { 2097 spa_t *spa = NULL; 2098 2099 if (spa_mode_global != SPA_MODE_UNINIT) { 2100 mutex_enter(&spa_namespace_lock); 2101 while ((spa = spa_next(spa)) != NULL) 2102 spa->spa_deadman_ziotime = ns; 2103 mutex_exit(&spa_namespace_lock); 2104 } 2105 } 2106 2107 void 2108 spa_set_deadman_synctime(hrtime_t ns) 2109 { 2110 spa_t *spa = NULL; 2111 2112 if (spa_mode_global != SPA_MODE_UNINIT) { 2113 mutex_enter(&spa_namespace_lock); 2114 while ((spa = spa_next(spa)) != NULL) 2115 spa->spa_deadman_synctime = ns; 2116 mutex_exit(&spa_namespace_lock); 2117 } 2118 } 2119 2120 uint64_t 2121 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2122 { 2123 uint64_t asize = DVA_GET_ASIZE(dva); 2124 uint64_t dsize = asize; 2125 2126 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 
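	/*
	 * For pools with raidz deflation enabled (spa_deflate), convert the
	 * allocated size to 512-byte sectors and scale it by the top-level
	 * vdev's deflate ratio, yielding the deflated size used for space
	 * accounting.
	 */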
2127 2128 if (asize != 0 && spa->spa_deflate) { 2129 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2130 if (vd != NULL) 2131 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2132 vd->vdev_deflate_ratio; 2133 } 2134 2135 return (dsize); 2136 } 2137 2138 uint64_t 2139 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2140 { 2141 uint64_t dsize = 0; 2142 2143 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2144 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2145 2146 return (dsize); 2147 } 2148 2149 uint64_t 2150 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2151 { 2152 uint64_t dsize = 0; 2153 2154 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2155 2156 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2157 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2158 2159 spa_config_exit(spa, SCL_VDEV, FTAG); 2160 2161 return (dsize); 2162 } 2163 2164 uint64_t 2165 spa_dirty_data(spa_t *spa) 2166 { 2167 return (spa->spa_dsl_pool->dp_dirty_total); 2168 } 2169 2170 /* 2171 * ========================================================================== 2172 * SPA Import Progress Routines 2173 * ========================================================================== 2174 */ 2175 2176 typedef struct spa_import_progress { 2177 uint64_t pool_guid; /* unique id for updates */ 2178 char *pool_name; 2179 spa_load_state_t spa_load_state; 2180 uint64_t mmp_sec_remaining; /* MMP activity check */ 2181 uint64_t spa_load_max_txg; /* rewind txg */ 2182 procfs_list_node_t smh_node; 2183 } spa_import_progress_t; 2184 2185 spa_history_list_t *spa_import_progress_list = NULL; 2186 2187 static int 2188 spa_import_progress_show_header(struct seq_file *f) 2189 { 2190 seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2191 "load_state", "multihost_secs", "max_txg", 2192 "pool_name"); 2193 return (0); 2194 } 2195 2196 static int 2197 spa_import_progress_show(struct seq_file *f, void *data) 2198 { 2199 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2200 2201 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2202 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2203 (u_longlong_t)sip->mmp_sec_remaining, 2204 (u_longlong_t)sip->spa_load_max_txg, 2205 (sip->pool_name ? 
sip->pool_name : "-")); 2206 2207 return (0); 2208 } 2209 2210 /* Remove oldest elements from list until there are no more than 'size' left */ 2211 static void 2212 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2213 { 2214 spa_import_progress_t *sip; 2215 while (shl->size > size) { 2216 sip = list_remove_head(&shl->procfs_list.pl_list); 2217 if (sip->pool_name) 2218 spa_strfree(sip->pool_name); 2219 kmem_free(sip, sizeof (spa_import_progress_t)); 2220 shl->size--; 2221 } 2222 2223 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2224 } 2225 2226 static void 2227 spa_import_progress_init(void) 2228 { 2229 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2230 KM_SLEEP); 2231 2232 spa_import_progress_list->size = 0; 2233 2234 spa_import_progress_list->procfs_list.pl_private = 2235 spa_import_progress_list; 2236 2237 procfs_list_install("zfs", 2238 NULL, 2239 "import_progress", 2240 0644, 2241 &spa_import_progress_list->procfs_list, 2242 spa_import_progress_show, 2243 spa_import_progress_show_header, 2244 NULL, 2245 offsetof(spa_import_progress_t, smh_node)); 2246 } 2247 2248 static void 2249 spa_import_progress_destroy(void) 2250 { 2251 spa_history_list_t *shl = spa_import_progress_list; 2252 procfs_list_uninstall(&shl->procfs_list); 2253 spa_import_progress_truncate(shl, 0); 2254 procfs_list_destroy(&shl->procfs_list); 2255 kmem_free(shl, sizeof (spa_history_list_t)); 2256 } 2257 2258 int 2259 spa_import_progress_set_state(uint64_t pool_guid, 2260 spa_load_state_t load_state) 2261 { 2262 spa_history_list_t *shl = spa_import_progress_list; 2263 spa_import_progress_t *sip; 2264 int error = ENOENT; 2265 2266 if (shl->size == 0) 2267 return (0); 2268 2269 mutex_enter(&shl->procfs_list.pl_lock); 2270 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2271 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2272 if (sip->pool_guid == pool_guid) { 2273 sip->spa_load_state = load_state; 2274 error = 0; 2275 break; 2276 } 2277 } 2278 mutex_exit(&shl->procfs_list.pl_lock); 2279 2280 return (error); 2281 } 2282 2283 int 2284 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2285 { 2286 spa_history_list_t *shl = spa_import_progress_list; 2287 spa_import_progress_t *sip; 2288 int error = ENOENT; 2289 2290 if (shl->size == 0) 2291 return (0); 2292 2293 mutex_enter(&shl->procfs_list.pl_lock); 2294 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2295 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2296 if (sip->pool_guid == pool_guid) { 2297 sip->spa_load_max_txg = load_max_txg; 2298 error = 0; 2299 break; 2300 } 2301 } 2302 mutex_exit(&shl->procfs_list.pl_lock); 2303 2304 return (error); 2305 } 2306 2307 int 2308 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2309 uint64_t mmp_sec_remaining) 2310 { 2311 spa_history_list_t *shl = spa_import_progress_list; 2312 spa_import_progress_t *sip; 2313 int error = ENOENT; 2314 2315 if (shl->size == 0) 2316 return (0); 2317 2318 mutex_enter(&shl->procfs_list.pl_lock); 2319 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2320 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2321 if (sip->pool_guid == pool_guid) { 2322 sip->mmp_sec_remaining = mmp_sec_remaining; 2323 error = 0; 2324 break; 2325 } 2326 } 2327 mutex_exit(&shl->procfs_list.pl_lock); 2328 2329 return (error); 2330 } 2331 2332 /* 2333 * A new import is in progress, add an entry. 
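 * Each entry is keyed by the pool GUID and remains visible through the
 * import_progress procfs list until spa_import_progress_remove() drops it.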
2334 */ 2335 void 2336 spa_import_progress_add(spa_t *spa) 2337 { 2338 spa_history_list_t *shl = spa_import_progress_list; 2339 spa_import_progress_t *sip; 2340 const char *poolname = NULL; 2341 2342 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2343 sip->pool_guid = spa_guid(spa); 2344 2345 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2346 &poolname); 2347 if (poolname == NULL) 2348 poolname = spa_name(spa); 2349 sip->pool_name = spa_strdup(poolname); 2350 sip->spa_load_state = spa_load_state(spa); 2351 2352 mutex_enter(&shl->procfs_list.pl_lock); 2353 procfs_list_add(&shl->procfs_list, sip); 2354 shl->size++; 2355 mutex_exit(&shl->procfs_list.pl_lock); 2356 } 2357 2358 void 2359 spa_import_progress_remove(uint64_t pool_guid) 2360 { 2361 spa_history_list_t *shl = spa_import_progress_list; 2362 spa_import_progress_t *sip; 2363 2364 mutex_enter(&shl->procfs_list.pl_lock); 2365 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2366 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2367 if (sip->pool_guid == pool_guid) { 2368 if (sip->pool_name) 2369 spa_strfree(sip->pool_name); 2370 list_remove(&shl->procfs_list.pl_list, sip); 2371 shl->size--; 2372 kmem_free(sip, sizeof (spa_import_progress_t)); 2373 break; 2374 } 2375 } 2376 mutex_exit(&shl->procfs_list.pl_lock); 2377 } 2378 2379 /* 2380 * ========================================================================== 2381 * Initialization and Termination 2382 * ========================================================================== 2383 */ 2384 2385 static int 2386 spa_name_compare(const void *a1, const void *a2) 2387 { 2388 const spa_t *s1 = a1; 2389 const spa_t *s2 = a2; 2390 int s; 2391 2392 s = strcmp(s1->spa_name, s2->spa_name); 2393 2394 return (TREE_ISIGN(s)); 2395 } 2396 2397 void 2398 spa_boot_init(void) 2399 { 2400 spa_config_load(); 2401 } 2402 2403 void 2404 spa_init(spa_mode_t mode) 2405 { 2406 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2407 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2408 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2409 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2410 2411 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2412 offsetof(spa_t, spa_avl)); 2413 2414 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2415 offsetof(spa_aux_t, aux_avl)); 2416 2417 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2418 offsetof(spa_aux_t, aux_avl)); 2419 2420 spa_mode_global = mode; 2421 2422 #ifndef _KERNEL 2423 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2424 struct sigaction sa; 2425 2426 sa.sa_flags = SA_SIGINFO; 2427 sigemptyset(&sa.sa_mask); 2428 sa.sa_sigaction = arc_buf_sigsegv; 2429 2430 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2431 perror("could not enable watchpoints: " 2432 "sigaction(SIGSEGV, ...) 
= "); 2433 } else { 2434 arc_watch = B_TRUE; 2435 } 2436 } 2437 #endif 2438 2439 fm_init(); 2440 zfs_refcount_init(); 2441 unique_init(); 2442 zfs_btree_init(); 2443 metaslab_stat_init(); 2444 brt_init(); 2445 ddt_init(); 2446 zio_init(); 2447 dmu_init(); 2448 zil_init(); 2449 vdev_mirror_stat_init(); 2450 vdev_raidz_math_init(); 2451 vdev_file_init(); 2452 zfs_prop_init(); 2453 chksum_init(); 2454 zpool_prop_init(); 2455 zpool_feature_init(); 2456 spa_config_load(); 2457 vdev_prop_init(); 2458 l2arc_start(); 2459 scan_init(); 2460 qat_init(); 2461 spa_import_progress_init(); 2462 } 2463 2464 void 2465 spa_fini(void) 2466 { 2467 l2arc_stop(); 2468 2469 spa_evict_all(); 2470 2471 vdev_file_fini(); 2472 vdev_mirror_stat_fini(); 2473 vdev_raidz_math_fini(); 2474 chksum_fini(); 2475 zil_fini(); 2476 dmu_fini(); 2477 zio_fini(); 2478 ddt_fini(); 2479 brt_fini(); 2480 metaslab_stat_fini(); 2481 zfs_btree_fini(); 2482 unique_fini(); 2483 zfs_refcount_fini(); 2484 fm_fini(); 2485 scan_fini(); 2486 qat_fini(); 2487 spa_import_progress_destroy(); 2488 2489 avl_destroy(&spa_namespace_avl); 2490 avl_destroy(&spa_spare_avl); 2491 avl_destroy(&spa_l2cache_avl); 2492 2493 cv_destroy(&spa_namespace_cv); 2494 mutex_destroy(&spa_namespace_lock); 2495 mutex_destroy(&spa_spare_lock); 2496 mutex_destroy(&spa_l2cache_lock); 2497 } 2498 2499 /* 2500 * Return whether this pool has a dedicated slog device. No locking needed. 2501 * It's not a problem if the wrong answer is returned as it's only for 2502 * performance and not correctness. 2503 */ 2504 boolean_t 2505 spa_has_slogs(spa_t *spa) 2506 { 2507 return (spa->spa_log_class->mc_groups != 0); 2508 } 2509 2510 spa_log_state_t 2511 spa_get_log_state(spa_t *spa) 2512 { 2513 return (spa->spa_log_state); 2514 } 2515 2516 void 2517 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2518 { 2519 spa->spa_log_state = state; 2520 } 2521 2522 boolean_t 2523 spa_is_root(spa_t *spa) 2524 { 2525 return (spa->spa_is_root); 2526 } 2527 2528 boolean_t 2529 spa_writeable(spa_t *spa) 2530 { 2531 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2532 } 2533 2534 /* 2535 * Returns true if there is a pending sync task in any of the current 2536 * syncing txg, the current quiescing txg, or the current open txg. 2537 */ 2538 boolean_t 2539 spa_has_pending_synctask(spa_t *spa) 2540 { 2541 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2542 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2543 } 2544 2545 spa_mode_t 2546 spa_mode(spa_t *spa) 2547 { 2548 return (spa->spa_mode); 2549 } 2550 2551 uint64_t 2552 spa_bootfs(spa_t *spa) 2553 { 2554 return (spa->spa_bootfs); 2555 } 2556 2557 uint64_t 2558 spa_delegation(spa_t *spa) 2559 { 2560 return (spa->spa_delegation); 2561 } 2562 2563 objset_t * 2564 spa_meta_objset(spa_t *spa) 2565 { 2566 return (spa->spa_meta_objset); 2567 } 2568 2569 enum zio_checksum 2570 spa_dedup_checksum(spa_t *spa) 2571 { 2572 return (spa->spa_dedup_checksum); 2573 } 2574 2575 /* 2576 * Reset pool scan stat per scan pass (or reboot). 
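 * Only the in-memory, per-pass counters (spa_scan_pass_*) are cleared here;
 * the on-disk scan state in scn_phys is left untouched.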
2577 */ 2578 void 2579 spa_scan_stat_init(spa_t *spa) 2580 { 2581 /* data not stored on disk */ 2582 spa->spa_scan_pass_start = gethrestime_sec(); 2583 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2584 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2585 else 2586 spa->spa_scan_pass_scrub_pause = 0; 2587 2588 if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) 2589 spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start; 2590 else 2591 spa->spa_scan_pass_errorscrub_pause = 0; 2592 2593 spa->spa_scan_pass_scrub_spent_paused = 0; 2594 spa->spa_scan_pass_exam = 0; 2595 spa->spa_scan_pass_issued = 0; 2596 2597 // error scrub stats 2598 spa->spa_scan_pass_errorscrub_spent_paused = 0; 2599 } 2600 2601 /* 2602 * Get scan stats for zpool status reports 2603 */ 2604 int 2605 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2606 { 2607 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2608 2609 if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE && 2610 scn->errorscrub_phys.dep_func == POOL_SCAN_NONE)) 2611 return (SET_ERROR(ENOENT)); 2612 2613 memset(ps, 0, sizeof (pool_scan_stat_t)); 2614 2615 /* data stored on disk */ 2616 ps->pss_func = scn->scn_phys.scn_func; 2617 ps->pss_state = scn->scn_phys.scn_state; 2618 ps->pss_start_time = scn->scn_phys.scn_start_time; 2619 ps->pss_end_time = scn->scn_phys.scn_end_time; 2620 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2621 ps->pss_examined = scn->scn_phys.scn_examined; 2622 ps->pss_skipped = scn->scn_phys.scn_skipped; 2623 ps->pss_processed = scn->scn_phys.scn_processed; 2624 ps->pss_errors = scn->scn_phys.scn_errors; 2625 2626 /* data not stored on disk */ 2627 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2628 ps->pss_pass_start = spa->spa_scan_pass_start; 2629 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2630 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2631 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2632 ps->pss_issued = 2633 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2634 2635 /* error scrub data stored on disk */ 2636 ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func; 2637 ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state; 2638 ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time; 2639 ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time; 2640 ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined; 2641 ps->pss_error_scrub_to_be_examined = 2642 scn->errorscrub_phys.dep_to_examine; 2643 2644 /* error scrub data not stored on disk */ 2645 ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause; 2646 2647 return (0); 2648 } 2649 2650 int 2651 spa_maxblocksize(spa_t *spa) 2652 { 2653 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2654 return (SPA_MAXBLOCKSIZE); 2655 else 2656 return (SPA_OLD_MAXBLOCKSIZE); 2657 } 2658 2659 2660 /* 2661 * Returns the txg that the last device removal completed. No indirect mappings 2662 * have been added since this txg. 2663 */ 2664 uint64_t 2665 spa_get_last_removal_txg(spa_t *spa) 2666 { 2667 uint64_t vdevid; 2668 uint64_t ret = -1ULL; 2669 2670 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2671 /* 2672 * sr_prev_indirect_vdev is only modified while holding all the 2673 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2674 * examining it. 
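 * Starting from that vdev, walk backwards through the chain of removed
 * top-level vdevs (vic_prev_indirect_vdev) until one with a non-empty
 * indirect births list is found.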
2675 */ 2676 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2677 2678 while (vdevid != -1ULL) { 2679 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2680 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2681 2682 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2683 2684 /* 2685 * If the removal did not remap any data, we don't care. 2686 */ 2687 if (vdev_indirect_births_count(vib) != 0) { 2688 ret = vdev_indirect_births_last_entry_txg(vib); 2689 break; 2690 } 2691 2692 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2693 } 2694 spa_config_exit(spa, SCL_VDEV, FTAG); 2695 2696 IMPLY(ret != -1ULL, 2697 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2698 2699 return (ret); 2700 } 2701 2702 int 2703 spa_maxdnodesize(spa_t *spa) 2704 { 2705 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2706 return (DNODE_MAX_SIZE); 2707 else 2708 return (DNODE_MIN_SIZE); 2709 } 2710 2711 boolean_t 2712 spa_multihost(spa_t *spa) 2713 { 2714 return (spa->spa_multihost ? B_TRUE : B_FALSE); 2715 } 2716 2717 uint32_t 2718 spa_get_hostid(spa_t *spa) 2719 { 2720 return (spa->spa_hostid); 2721 } 2722 2723 boolean_t 2724 spa_trust_config(spa_t *spa) 2725 { 2726 return (spa->spa_trust_config); 2727 } 2728 2729 uint64_t 2730 spa_missing_tvds_allowed(spa_t *spa) 2731 { 2732 return (spa->spa_missing_tvds_allowed); 2733 } 2734 2735 space_map_t * 2736 spa_syncing_log_sm(spa_t *spa) 2737 { 2738 return (spa->spa_syncing_log_sm); 2739 } 2740 2741 void 2742 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2743 { 2744 spa->spa_missing_tvds = missing; 2745 } 2746 2747 /* 2748 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2749 */ 2750 const char * 2751 spa_state_to_name(spa_t *spa) 2752 { 2753 ASSERT3P(spa, !=, NULL); 2754 2755 /* 2756 * it is possible for the spa to exist, without root vdev 2757 * as the spa transitions during import/export 2758 */ 2759 vdev_t *rvd = spa->spa_root_vdev; 2760 if (rvd == NULL) { 2761 return ("TRANSITIONING"); 2762 } 2763 vdev_state_t state = rvd->vdev_state; 2764 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2765 2766 if (spa_suspended(spa)) 2767 return ("SUSPENDED"); 2768 2769 switch (state) { 2770 case VDEV_STATE_CLOSED: 2771 case VDEV_STATE_OFFLINE: 2772 return ("OFFLINE"); 2773 case VDEV_STATE_REMOVED: 2774 return ("REMOVED"); 2775 case VDEV_STATE_CANT_OPEN: 2776 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2777 return ("FAULTED"); 2778 else if (aux == VDEV_AUX_SPLIT_POOL) 2779 return ("SPLIT"); 2780 else 2781 return ("UNAVAIL"); 2782 case VDEV_STATE_FAULTED: 2783 return ("FAULTED"); 2784 case VDEV_STATE_DEGRADED: 2785 return ("DEGRADED"); 2786 case VDEV_STATE_HEALTHY: 2787 return ("ONLINE"); 2788 default: 2789 break; 2790 } 2791 2792 return ("UNKNOWN"); 2793 } 2794 2795 boolean_t 2796 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2797 { 2798 vdev_t *rvd = spa->spa_root_vdev; 2799 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2800 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2801 return (B_FALSE); 2802 } 2803 return (B_TRUE); 2804 } 2805 2806 boolean_t 2807 spa_has_checkpoint(spa_t *spa) 2808 { 2809 return (spa->spa_checkpoint_txg != 0); 2810 } 2811 2812 boolean_t 2813 spa_importing_readonly_checkpoint(spa_t *spa) 2814 { 2815 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2816 spa->spa_mode == SPA_MODE_READ); 2817 } 2818 2819 uint64_t 2820 spa_min_claim_txg(spa_t *spa) 2821 { 2822 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2823 2824 if (checkpoint_txg != 0) 2825 return 
(checkpoint_txg + 1); 2826 2827 return (spa->spa_first_txg); 2828 } 2829 2830 /* 2831 * If there is a checkpoint, async destroys may consume more space from 2832 * the pool instead of freeing it. In an attempt to save the pool from 2833 * getting suspended when it is about to run out of space, we stop 2834 * processing async destroys. 2835 */ 2836 boolean_t 2837 spa_suspend_async_destroy(spa_t *spa) 2838 { 2839 dsl_pool_t *dp = spa_get_dsl(spa); 2840 2841 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2842 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2843 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2844 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0; 2845 2846 if (spa_has_checkpoint(spa) && avail == 0) 2847 return (B_TRUE); 2848 2849 return (B_FALSE); 2850 } 2851 2852 #if defined(_KERNEL) 2853 2854 int 2855 param_set_deadman_failmode_common(const char *val) 2856 { 2857 spa_t *spa = NULL; 2858 char *p; 2859 2860 if (val == NULL) 2861 return (SET_ERROR(EINVAL)); 2862 2863 if ((p = strchr(val, '\n')) != NULL) 2864 *p = '\0'; 2865 2866 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2867 strcmp(val, "panic")) 2868 return (SET_ERROR(EINVAL)); 2869 2870 if (spa_mode_global != SPA_MODE_UNINIT) { 2871 mutex_enter(&spa_namespace_lock); 2872 while ((spa = spa_next(spa)) != NULL) 2873 spa_set_deadman_failmode(spa, val); 2874 mutex_exit(&spa_namespace_lock); 2875 } 2876 2877 return (0); 2878 } 2879 #endif 2880 2881 /* Namespace manipulation */ 2882 EXPORT_SYMBOL(spa_lookup); 2883 EXPORT_SYMBOL(spa_add); 2884 EXPORT_SYMBOL(spa_remove); 2885 EXPORT_SYMBOL(spa_next); 2886 2887 /* Refcount functions */ 2888 EXPORT_SYMBOL(spa_open_ref); 2889 EXPORT_SYMBOL(spa_close); 2890 EXPORT_SYMBOL(spa_refcount_zero); 2891 2892 /* Pool configuration lock */ 2893 EXPORT_SYMBOL(spa_config_tryenter); 2894 EXPORT_SYMBOL(spa_config_enter); 2895 EXPORT_SYMBOL(spa_config_exit); 2896 EXPORT_SYMBOL(spa_config_held); 2897 2898 /* Pool vdev add/remove lock */ 2899 EXPORT_SYMBOL(spa_vdev_enter); 2900 EXPORT_SYMBOL(spa_vdev_exit); 2901 2902 /* Pool vdev state change lock */ 2903 EXPORT_SYMBOL(spa_vdev_state_enter); 2904 EXPORT_SYMBOL(spa_vdev_state_exit); 2905 2906 /* Accessor functions */ 2907 EXPORT_SYMBOL(spa_shutting_down); 2908 EXPORT_SYMBOL(spa_get_dsl); 2909 EXPORT_SYMBOL(spa_get_rootblkptr); 2910 EXPORT_SYMBOL(spa_set_rootblkptr); 2911 EXPORT_SYMBOL(spa_altroot); 2912 EXPORT_SYMBOL(spa_sync_pass); 2913 EXPORT_SYMBOL(spa_name); 2914 EXPORT_SYMBOL(spa_guid); 2915 EXPORT_SYMBOL(spa_last_synced_txg); 2916 EXPORT_SYMBOL(spa_first_txg); 2917 EXPORT_SYMBOL(spa_syncing_txg); 2918 EXPORT_SYMBOL(spa_version); 2919 EXPORT_SYMBOL(spa_state); 2920 EXPORT_SYMBOL(spa_load_state); 2921 EXPORT_SYMBOL(spa_freeze_txg); 2922 EXPORT_SYMBOL(spa_get_dspace); 2923 EXPORT_SYMBOL(spa_update_dspace); 2924 EXPORT_SYMBOL(spa_deflate); 2925 EXPORT_SYMBOL(spa_normal_class); 2926 EXPORT_SYMBOL(spa_log_class); 2927 EXPORT_SYMBOL(spa_special_class); 2928 EXPORT_SYMBOL(spa_preferred_class); 2929 EXPORT_SYMBOL(spa_max_replication); 2930 EXPORT_SYMBOL(spa_prev_software_version); 2931 EXPORT_SYMBOL(spa_get_failmode); 2932 EXPORT_SYMBOL(spa_suspended); 2933 EXPORT_SYMBOL(spa_bootfs); 2934 EXPORT_SYMBOL(spa_delegation); 2935 EXPORT_SYMBOL(spa_meta_objset); 2936 EXPORT_SYMBOL(spa_maxblocksize); 2937 EXPORT_SYMBOL(spa_maxdnodesize); 2938 2939 /* Miscellaneous support routines */ 2940 EXPORT_SYMBOL(spa_guid_exists); 2941 EXPORT_SYMBOL(spa_strdup); 2942 EXPORT_SYMBOL(spa_strfree); 2943 EXPORT_SYMBOL(spa_generate_guid); 2944 
EXPORT_SYMBOL(snprintf_blkptr); 2945 EXPORT_SYMBOL(spa_freeze); 2946 EXPORT_SYMBOL(spa_upgrade); 2947 EXPORT_SYMBOL(spa_evict_all); 2948 EXPORT_SYMBOL(spa_lookup_by_guid); 2949 EXPORT_SYMBOL(spa_has_spare); 2950 EXPORT_SYMBOL(dva_get_dsize_sync); 2951 EXPORT_SYMBOL(bp_get_dsize_sync); 2952 EXPORT_SYMBOL(bp_get_dsize); 2953 EXPORT_SYMBOL(spa_has_slogs); 2954 EXPORT_SYMBOL(spa_is_root); 2955 EXPORT_SYMBOL(spa_writeable); 2956 EXPORT_SYMBOL(spa_mode); 2957 EXPORT_SYMBOL(spa_namespace_lock); 2958 EXPORT_SYMBOL(spa_trust_config); 2959 EXPORT_SYMBOL(spa_missing_tvds_allowed); 2960 EXPORT_SYMBOL(spa_set_missing_tvds); 2961 EXPORT_SYMBOL(spa_state_to_name); 2962 EXPORT_SYMBOL(spa_importing_readonly_checkpoint); 2963 EXPORT_SYMBOL(spa_min_claim_txg); 2964 EXPORT_SYMBOL(spa_suspend_async_destroy); 2965 EXPORT_SYMBOL(spa_has_checkpoint); 2966 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); 2967 2968 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, 2969 "Set additional debugging flags"); 2970 2971 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, 2972 "Set to attempt to recover from fatal errors"); 2973 2974 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, 2975 "Set to ignore IO errors during free and permanently leak the space"); 2976 2977 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW, 2978 "Dead I/O check interval in milliseconds"); 2979 2980 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, 2981 "Enable deadman timer"); 2982 2983 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, 2984 "SPA size estimate multiplication factor"); 2985 2986 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, 2987 "Place DDT data into the special class"); 2988 2989 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW, 2990 "Place user data indirect blocks into the special class"); 2991 2992 /* BEGIN CSTYLED */ 2993 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode, 2994 param_set_deadman_failmode, param_get_charp, ZMOD_RW, 2995 "Failmode for deadman timer"); 2996 2997 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms, 2998 param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW, 2999 "Pool sync expiration time in milliseconds"); 3000 3001 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, 3002 param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW, 3003 "IO expiration time in milliseconds"); 3004 3005 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW, 3006 "Small file blocks in special vdevs depends on this much " 3007 "free space available"); 3008 /* END CSTYLED */ 3009 3010 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, 3011 param_get_uint, ZMOD_RW, "Reserved free space in pool"); 3012
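/*
 * Illustrative sketch, kept out of the build with #if 0: a minimal userspace
 * mirror of the zfs_strtonum() parser defined earlier in this file. It shows
 * that the parser accepts only lowercase hex digits and hands back a pointer
 * to the first character it did not consume. The example_strtonum() and
 * main() names below are hypothetical and not part of ZFS.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
example_strtonum(const char *str, const char **nptr)
{
	uint64_t val = 0;

	for (; *str != '\0'; str++) {
		char c = *str;
		int digit;

		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;		/* stop at the first non-hex character */

		val = val * 16 + digit;
	}

	if (nptr != NULL)
		*nptr = str;
	return (val);
}

int
main(void)
{
	const char *rest;
	uint64_t v;

	/* Whole string is lowercase hex: prints 956397711257 and "". */
	v = example_strtonum("deadbeef99", &rest);
	printf("%llu \"%s\"\n", (unsigned long long)v, rest);

	/* Parsing stops at the uppercase 'X': prints 43794 and "Xff". */
	v = example_strtonum("ab12Xff", &rest);
	printf("%llu \"%s\"\n", (unsigned long long)v, rest);

	return (0);
}
#endif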