/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_chksum.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/brt.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to look up a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
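
/*
 * Illustrative sketch (hypothetical caller, not code from this file):
 * a trivial inquiry on the vdev tree takes the lowest-order lock as
 * reader, per the ordering rules above:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	dsize = bp_get_dsize_sync(spa, bp);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * A non-blocking caller may use spa_config_tryenter() instead; it
 * returns nonzero only if every requested lock was acquired (backing
 * out any partial holds itself), and a successful try is paired with
 * the same spa_config_exit().  Topology changes take SCL_ALL as writer,
 * normally through the spa_vdev_enter()/spa_vdev_exit() wrappers
 * defined later in this file.
 */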

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung", resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
uint64_t zfs_deadman_synctime_ms = 600000UL;	/* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
uint64_t zfs_deadman_ziotime_ms = 300000UL;	/* 5 min. */

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 60000UL;	/* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  Altogether,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
uint_t spa_asize_inflation = 24;
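
/*
 * Worked example of the bound above (derivation only): with
 * VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3, the product is
 * (3 + 1) * 3 * 2 == 24, so a 4K logical write is charged at most
 * 24 * 4K == 96K of inflated asize.
 */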

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
uint_t spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
static const int spa_allocators = 4;

/*
 * Spa active allocator.
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
 */
const char *zfs_active_allocator = "dynamic";

void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
static int zfs_ddt_data_is_special = B_TRUE;
static int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * let metadata into the class.
 */
static uint_t zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
		scl->scl_count = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
		ASSERT(scl->scl_count == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (scl->scl_count != 0) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

static void
spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw,
    int mmp_flag)
{
	(void) tag;
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer ||
			    (!mmp_flag && scl->scl_write_wanted)) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (scl->scl_count != 0) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		scl->scl_count++;
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 0);
}

/*
 * The spa_config_enter_mmp() allows the mmp thread to cut in front of
 * outstanding write lock requests.  This is needed since the mmp updates are
 * time-sensitive and failure to service them promptly will result in a
 * suspended pool.  This pool suspension has been seen in practice when there
 * is a single disk in a pool that is responding slowly and presumably about
 * to fail.
 */
void
spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw)
{
	spa_config_enter_impl(spa, locks, tag, rw, 1);
}

void
spa_config_exit(spa_t *spa, int locks, const void *tag)
{
	(void) tag;
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(scl->scl_count > 0);
		if (--scl->scl_count == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
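
/*
 * Example: because spa_lookup() truncates at the first '/', '@' or '#',
 * passing a full dataset name such as "tank/home@yesterday" returns the
 * spa_t for pool "tank".
 */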

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);
	spa_set_allocator(spa, zfs_active_allocator);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	spa->spa_alloc_count = spa_allocators;
	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_queue_node.a));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;
	spa->spa_gcd_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, const void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}
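
/*
 * Illustrative sketch (hypothetical caller) of the reference protocol
 * described above: the namespace lock is needed to take the first
 * reference, but not to drop one.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	(use the pool)
 *	spa_close(spa, FTAG);
 */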

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, const void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which
 * is the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare (active
 * or inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
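
/*
 * Illustrative sketch (hypothetical caller): configuration code can ask
 * whether a device is a known spare, and if so which pool (if any) has
 * it active:
 *
 *	uint64_t pool;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, NULL) && pool != 0ULL)
 *		(the spare is actively in use by another pool)
 */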

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached.  When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    const char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
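
/*
 * Illustrative sketch (hypothetical caller, modeled on the vdev
 * add/attach paths): spa_vdev_exit() returns the error passed to it,
 * so callers conventionally return through it.
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	(modify the vdev tree)
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */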

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
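
/*
 * Illustrative sketch (hypothetical caller, modeled on the vdev
 * online/offline paths): state changes bracket their work the same
 * way, returning through spa_vdev_state_exit().
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	vdev_t *vd = spa_lookup_by_guid(spa, guid, B_TRUE);
 *	if (vd == NULL)
 *		return (spa_vdev_state_exit(spa, NULL, SET_ERROR(ENODEV)));
 *	(change vd's state)
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */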

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	const char *checksum = NULL;
	const char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
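
/*
 * Example: zfs_strtonum("1a2b", NULL) returns 0x1a2b.  Parsing stops at
 * the first character outside [0-9a-f]; when nptr is non-NULL, *nptr is
 * left pointing at that character.
 */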

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the pool
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strlcpy(buf, spa->spa_root, buflen);
}

uint32_t
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes.  This is used by
 * the DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on
 * this pool we use its value instead, since the write will end up using the
 * whole block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
 * (3.2%), minus the embedded log space.  On very small pools, it may be
 * slightly larger than this.  On very large pools, it will be capped to
 * the value of spa_max_slop.  The embedded log space is not included in
 * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming the
 * default spa_slop_shift=5 and a non-tiny pool).
 *
 * See the comment above spa_slop_shift for more details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = 0;
	uint64_t slop = 0;

	/*
	 * Make sure spa_dedup_dspace has been set.
	 */
	if (spa->spa_dedup_dspace == ~0ULL)
		spa_update_dspace(spa);

	/*
	 * spa_get_dspace() includes the space only logically "used" by
	 * deduplicated data, so since it's not useful to reserve more
	 * space with more deduplicated data, we subtract that out here.
	 */
	space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
	slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/*
	 * Subtract the embedded log space, but no more than half the (3.2%)
	 * unusable space.  Note, the "no more than half" is only relevant if
	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
	 * default.
	 */
	uint64_t embedded_log =
	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
	slop -= MIN(embedded_log, slop >> 1);

	/*
	 * Slop space should be at least spa_min_slop, but no more than half
	 * the entire pool.
	 */
	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
	return (slop);
}
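
/*
 * Worked example (illustrative numbers, embedded log space ignored): on
 * a 10 TiB pool with the default spa_slop_shift of 5, the raw slop is
 * 10 TiB / 32 = 320 GiB, which spa_max_slop caps at 128 GiB.  On a
 * 1 GiB pool the shift yields 32 MiB, and the final clamp raises it to
 * MIN(space / 2, spa_min_slop) == 128 MiB.
 */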
If there are 1873 * no snapshots of the file, the available space should remain 1874 * the same. The old blocks could be freed from the 1875 * non-allocating vdev, but the new blocks must be allocated on 1876 * other (allocating) vdevs. By reserving the entire size of 1877 * the non-allocating vdevs (including allocated space), we 1878 * ensure that there will be enough space on the allocating 1879 * vdevs for this file overwrite to succeed. 1880 * 1881 * Note that the DMU/DSL doesn't actually know or care 1882 * how much space is allocated (it does its own tracking 1883 * of how much space has been logically used). So it 1884 * doesn't matter that the data we are moving may be 1885 * allocated twice (on the old device and the new device). 1886 */ 1887 ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); 1888 spa->spa_dspace -= spa->spa_nonallocating_dspace; 1889 } 1890 } 1891 1892 /* 1893 * Return the failure mode that has been set to this pool. The default 1894 * behavior will be to block all I/Os when a complete failure occurs. 1895 */ 1896 uint64_t 1897 spa_get_failmode(spa_t *spa) 1898 { 1899 return (spa->spa_failmode); 1900 } 1901 1902 boolean_t 1903 spa_suspended(spa_t *spa) 1904 { 1905 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1906 } 1907 1908 uint64_t 1909 spa_version(spa_t *spa) 1910 { 1911 return (spa->spa_ubsync.ub_version); 1912 } 1913 1914 boolean_t 1915 spa_deflate(spa_t *spa) 1916 { 1917 return (spa->spa_deflate); 1918 } 1919 1920 metaslab_class_t * 1921 spa_normal_class(spa_t *spa) 1922 { 1923 return (spa->spa_normal_class); 1924 } 1925 1926 metaslab_class_t * 1927 spa_log_class(spa_t *spa) 1928 { 1929 return (spa->spa_log_class); 1930 } 1931 1932 metaslab_class_t * 1933 spa_embedded_log_class(spa_t *spa) 1934 { 1935 return (spa->spa_embedded_log_class); 1936 } 1937 1938 metaslab_class_t * 1939 spa_special_class(spa_t *spa) 1940 { 1941 return (spa->spa_special_class); 1942 } 1943 1944 metaslab_class_t * 1945 spa_dedup_class(spa_t *spa) 1946 { 1947 return (spa->spa_dedup_class); 1948 } 1949 1950 /* 1951 * Locate an appropriate allocation class 1952 */ 1953 metaslab_class_t * 1954 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1955 uint_t level, uint_t special_smallblk) 1956 { 1957 /* 1958 * ZIL allocations determine their class in zio_alloc_zil(). 1959 */ 1960 ASSERT(objtype != DMU_OT_INTENT_LOG); 1961 1962 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1963 1964 if (DMU_OT_IS_DDT(objtype)) { 1965 if (spa->spa_dedup_class->mc_groups != 0) 1966 return (spa_dedup_class(spa)); 1967 else if (has_special_class && zfs_ddt_data_is_special) 1968 return (spa_special_class(spa)); 1969 else 1970 return (spa_normal_class(spa)); 1971 } 1972 1973 /* Indirect blocks for user data can land in special if allowed */ 1974 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1975 if (has_special_class && zfs_user_indirect_is_special) 1976 return (spa_special_class(spa)); 1977 else 1978 return (spa_normal_class(spa)); 1979 } 1980 1981 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1982 if (has_special_class) 1983 return (spa_special_class(spa)); 1984 else 1985 return (spa_normal_class(spa)); 1986 } 1987 1988 /* 1989 * Allow small file blocks in special class in some cases (like 1990 * for the dRAID vdev feature). But always leave a reserve of 1991 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 
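	 *
	 * To make the reserve concrete (an illustrative restatement of
	 * the arithmetic below, not an extra constraint): with
	 * zfs_special_class_metadata_reserve_pct at its default of 25
	 * and a special class providing 1 TiB of space, small file
	 * blocks are only placed in the special class while its
	 * allocations are below (1 TiB * (100 - 25)) / 100 = 768 GiB;
	 * past that limit they fall back to the normal class.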
1992 */ 1993 if (DMU_OT_IS_FILE(objtype) && 1994 has_special_class && size <= special_smallblk) { 1995 metaslab_class_t *special = spa_special_class(spa); 1996 uint64_t alloc = metaslab_class_get_alloc(special); 1997 uint64_t space = metaslab_class_get_space(special); 1998 uint64_t limit = 1999 (space * (100 - zfs_special_class_metadata_reserve_pct)) 2000 / 100; 2001 2002 if (alloc < limit) 2003 return (special); 2004 } 2005 2006 return (spa_normal_class(spa)); 2007 } 2008 2009 void 2010 spa_evicting_os_register(spa_t *spa, objset_t *os) 2011 { 2012 mutex_enter(&spa->spa_evicting_os_lock); 2013 list_insert_head(&spa->spa_evicting_os_list, os); 2014 mutex_exit(&spa->spa_evicting_os_lock); 2015 } 2016 2017 void 2018 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 2019 { 2020 mutex_enter(&spa->spa_evicting_os_lock); 2021 list_remove(&spa->spa_evicting_os_list, os); 2022 cv_broadcast(&spa->spa_evicting_os_cv); 2023 mutex_exit(&spa->spa_evicting_os_lock); 2024 } 2025 2026 void 2027 spa_evicting_os_wait(spa_t *spa) 2028 { 2029 mutex_enter(&spa->spa_evicting_os_lock); 2030 while (!list_is_empty(&spa->spa_evicting_os_list)) 2031 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2032 mutex_exit(&spa->spa_evicting_os_lock); 2033 2034 dmu_buf_user_evict_wait(); 2035 } 2036 2037 int 2038 spa_max_replication(spa_t *spa) 2039 { 2040 /* 2041 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2042 * handle BPs with more than one DVA allocated. Set our max 2043 * replication level accordingly. 2044 */ 2045 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2046 return (1); 2047 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2048 } 2049 2050 int 2051 spa_prev_software_version(spa_t *spa) 2052 { 2053 return (spa->spa_prev_software_version); 2054 } 2055 2056 uint64_t 2057 spa_deadman_synctime(spa_t *spa) 2058 { 2059 return (spa->spa_deadman_synctime); 2060 } 2061 2062 spa_autotrim_t 2063 spa_get_autotrim(spa_t *spa) 2064 { 2065 return (spa->spa_autotrim); 2066 } 2067 2068 uint64_t 2069 spa_deadman_ziotime(spa_t *spa) 2070 { 2071 return (spa->spa_deadman_ziotime); 2072 } 2073 2074 uint64_t 2075 spa_get_deadman_failmode(spa_t *spa) 2076 { 2077 return (spa->spa_deadman_failmode); 2078 } 2079 2080 void 2081 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2082 { 2083 if (strcmp(failmode, "wait") == 0) 2084 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2085 else if (strcmp(failmode, "continue") == 0) 2086 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2087 else if (strcmp(failmode, "panic") == 0) 2088 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2089 else 2090 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2091 } 2092 2093 void 2094 spa_set_deadman_ziotime(hrtime_t ns) 2095 { 2096 spa_t *spa = NULL; 2097 2098 if (spa_mode_global != SPA_MODE_UNINIT) { 2099 mutex_enter(&spa_namespace_lock); 2100 while ((spa = spa_next(spa)) != NULL) 2101 spa->spa_deadman_ziotime = ns; 2102 mutex_exit(&spa_namespace_lock); 2103 } 2104 } 2105 2106 void 2107 spa_set_deadman_synctime(hrtime_t ns) 2108 { 2109 spa_t *spa = NULL; 2110 2111 if (spa_mode_global != SPA_MODE_UNINIT) { 2112 mutex_enter(&spa_namespace_lock); 2113 while ((spa = spa_next(spa)) != NULL) 2114 spa->spa_deadman_synctime = ns; 2115 mutex_exit(&spa_namespace_lock); 2116 } 2117 } 2118 2119 uint64_t 2120 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2121 { 2122 uint64_t asize = DVA_GET_ASIZE(dva); 2123 uint64_t dsize = asize; 2124 2125 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 
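	/*
	 * A sketch of the adjustment below, under the usual definition
	 * of vdev_deflate_ratio (see vdev.c for the authoritative one):
	 * the allocated size is converted into SPA_MINBLOCKSIZE
	 * (512-byte) units and scaled by the top-level vdev's deflate
	 * ratio, which discounts parity/replication overhead so that
	 * the returned dsize reflects "deflated" space as reported to
	 * users rather than raw allocated bytes.
	 */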
2126 2127 if (asize != 0 && spa->spa_deflate) { 2128 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2129 if (vd != NULL) 2130 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2131 vd->vdev_deflate_ratio; 2132 } 2133 2134 return (dsize); 2135 } 2136 2137 uint64_t 2138 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2139 { 2140 uint64_t dsize = 0; 2141 2142 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2143 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2144 2145 return (dsize); 2146 } 2147 2148 uint64_t 2149 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2150 { 2151 uint64_t dsize = 0; 2152 2153 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2154 2155 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2156 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2157 2158 spa_config_exit(spa, SCL_VDEV, FTAG); 2159 2160 return (dsize); 2161 } 2162 2163 uint64_t 2164 spa_dirty_data(spa_t *spa) 2165 { 2166 return (spa->spa_dsl_pool->dp_dirty_total); 2167 } 2168 2169 /* 2170 * ========================================================================== 2171 * SPA Import Progress Routines 2172 * ========================================================================== 2173 */ 2174 2175 typedef struct spa_import_progress { 2176 uint64_t pool_guid; /* unique id for updates */ 2177 char *pool_name; 2178 spa_load_state_t spa_load_state; 2179 uint64_t mmp_sec_remaining; /* MMP activity check */ 2180 uint64_t spa_load_max_txg; /* rewind txg */ 2181 procfs_list_node_t smh_node; 2182 } spa_import_progress_t; 2183 2184 spa_history_list_t *spa_import_progress_list = NULL; 2185 2186 static int 2187 spa_import_progress_show_header(struct seq_file *f) 2188 { 2189 seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2190 "load_state", "multihost_secs", "max_txg", 2191 "pool_name"); 2192 return (0); 2193 } 2194 2195 static int 2196 spa_import_progress_show(struct seq_file *f, void *data) 2197 { 2198 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2199 2200 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2201 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2202 (u_longlong_t)sip->mmp_sec_remaining, 2203 (u_longlong_t)sip->spa_load_max_txg, 2204 (sip->pool_name ? 
sip->pool_name : "-")); 2205 2206 return (0); 2207 } 2208 2209 /* Remove oldest elements from list until there are no more than 'size' left */ 2210 static void 2211 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2212 { 2213 spa_import_progress_t *sip; 2214 while (shl->size > size) { 2215 sip = list_remove_head(&shl->procfs_list.pl_list); 2216 if (sip->pool_name) 2217 spa_strfree(sip->pool_name); 2218 kmem_free(sip, sizeof (spa_import_progress_t)); 2219 shl->size--; 2220 } 2221 2222 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2223 } 2224 2225 static void 2226 spa_import_progress_init(void) 2227 { 2228 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2229 KM_SLEEP); 2230 2231 spa_import_progress_list->size = 0; 2232 2233 spa_import_progress_list->procfs_list.pl_private = 2234 spa_import_progress_list; 2235 2236 procfs_list_install("zfs", 2237 NULL, 2238 "import_progress", 2239 0644, 2240 &spa_import_progress_list->procfs_list, 2241 spa_import_progress_show, 2242 spa_import_progress_show_header, 2243 NULL, 2244 offsetof(spa_import_progress_t, smh_node)); 2245 } 2246 2247 static void 2248 spa_import_progress_destroy(void) 2249 { 2250 spa_history_list_t *shl = spa_import_progress_list; 2251 procfs_list_uninstall(&shl->procfs_list); 2252 spa_import_progress_truncate(shl, 0); 2253 procfs_list_destroy(&shl->procfs_list); 2254 kmem_free(shl, sizeof (spa_history_list_t)); 2255 } 2256 2257 int 2258 spa_import_progress_set_state(uint64_t pool_guid, 2259 spa_load_state_t load_state) 2260 { 2261 spa_history_list_t *shl = spa_import_progress_list; 2262 spa_import_progress_t *sip; 2263 int error = ENOENT; 2264 2265 if (shl->size == 0) 2266 return (0); 2267 2268 mutex_enter(&shl->procfs_list.pl_lock); 2269 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2270 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2271 if (sip->pool_guid == pool_guid) { 2272 sip->spa_load_state = load_state; 2273 error = 0; 2274 break; 2275 } 2276 } 2277 mutex_exit(&shl->procfs_list.pl_lock); 2278 2279 return (error); 2280 } 2281 2282 int 2283 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2284 { 2285 spa_history_list_t *shl = spa_import_progress_list; 2286 spa_import_progress_t *sip; 2287 int error = ENOENT; 2288 2289 if (shl->size == 0) 2290 return (0); 2291 2292 mutex_enter(&shl->procfs_list.pl_lock); 2293 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2294 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2295 if (sip->pool_guid == pool_guid) { 2296 sip->spa_load_max_txg = load_max_txg; 2297 error = 0; 2298 break; 2299 } 2300 } 2301 mutex_exit(&shl->procfs_list.pl_lock); 2302 2303 return (error); 2304 } 2305 2306 int 2307 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2308 uint64_t mmp_sec_remaining) 2309 { 2310 spa_history_list_t *shl = spa_import_progress_list; 2311 spa_import_progress_t *sip; 2312 int error = ENOENT; 2313 2314 if (shl->size == 0) 2315 return (0); 2316 2317 mutex_enter(&shl->procfs_list.pl_lock); 2318 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2319 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2320 if (sip->pool_guid == pool_guid) { 2321 sip->mmp_sec_remaining = mmp_sec_remaining; 2322 error = 0; 2323 break; 2324 } 2325 } 2326 mutex_exit(&shl->procfs_list.pl_lock); 2327 2328 return (error); 2329 } 2330 2331 /* 2332 * A new import is in progress, add an entry. 
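 *
 * Entries added here surface through the procfs list installed by
 * spa_import_progress_init(); on Linux that list is typically read as
 * /proc/spl/kstat/zfs/import_progress (path inferred from the
 * "zfs"/"import_progress" arguments to procfs_list_install(), so
 * verify on your platform). Illustrative output while a pool named
 * "tank" is being imported:
 *
 *	pool_guid            load_state     multihost_secs max_txg      pool_name
 *	16183746677171052918 3              0              0            tank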
2333 */ 2334 void 2335 spa_import_progress_add(spa_t *spa) 2336 { 2337 spa_history_list_t *shl = spa_import_progress_list; 2338 spa_import_progress_t *sip; 2339 const char *poolname = NULL; 2340 2341 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2342 sip->pool_guid = spa_guid(spa); 2343 2344 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2345 &poolname); 2346 if (poolname == NULL) 2347 poolname = spa_name(spa); 2348 sip->pool_name = spa_strdup(poolname); 2349 sip->spa_load_state = spa_load_state(spa); 2350 2351 mutex_enter(&shl->procfs_list.pl_lock); 2352 procfs_list_add(&shl->procfs_list, sip); 2353 shl->size++; 2354 mutex_exit(&shl->procfs_list.pl_lock); 2355 } 2356 2357 void 2358 spa_import_progress_remove(uint64_t pool_guid) 2359 { 2360 spa_history_list_t *shl = spa_import_progress_list; 2361 spa_import_progress_t *sip; 2362 2363 mutex_enter(&shl->procfs_list.pl_lock); 2364 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2365 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2366 if (sip->pool_guid == pool_guid) { 2367 if (sip->pool_name) 2368 spa_strfree(sip->pool_name); 2369 list_remove(&shl->procfs_list.pl_list, sip); 2370 shl->size--; 2371 kmem_free(sip, sizeof (spa_import_progress_t)); 2372 break; 2373 } 2374 } 2375 mutex_exit(&shl->procfs_list.pl_lock); 2376 } 2377 2378 /* 2379 * ========================================================================== 2380 * Initialization and Termination 2381 * ========================================================================== 2382 */ 2383 2384 static int 2385 spa_name_compare(const void *a1, const void *a2) 2386 { 2387 const spa_t *s1 = a1; 2388 const spa_t *s2 = a2; 2389 int s; 2390 2391 s = strcmp(s1->spa_name, s2->spa_name); 2392 2393 return (TREE_ISIGN(s)); 2394 } 2395 2396 void 2397 spa_boot_init(void) 2398 { 2399 spa_config_load(); 2400 } 2401 2402 void 2403 spa_init(spa_mode_t mode) 2404 { 2405 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2406 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2407 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2408 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2409 2410 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2411 offsetof(spa_t, spa_avl)); 2412 2413 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2414 offsetof(spa_aux_t, aux_avl)); 2415 2416 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2417 offsetof(spa_aux_t, aux_avl)); 2418 2419 spa_mode_global = mode; 2420 2421 #ifndef _KERNEL 2422 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2423 struct sigaction sa; 2424 2425 sa.sa_flags = SA_SIGINFO; 2426 sigemptyset(&sa.sa_mask); 2427 sa.sa_sigaction = arc_buf_sigsegv; 2428 2429 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2430 perror("could not enable watchpoints: " 2431 "sigaction(SIGSEGV, ...) 
= "); 2432 } else { 2433 arc_watch = B_TRUE; 2434 } 2435 } 2436 #endif 2437 2438 fm_init(); 2439 zfs_refcount_init(); 2440 unique_init(); 2441 zfs_btree_init(); 2442 metaslab_stat_init(); 2443 brt_init(); 2444 ddt_init(); 2445 zio_init(); 2446 dmu_init(); 2447 zil_init(); 2448 vdev_mirror_stat_init(); 2449 vdev_raidz_math_init(); 2450 vdev_file_init(); 2451 zfs_prop_init(); 2452 chksum_init(); 2453 zpool_prop_init(); 2454 zpool_feature_init(); 2455 spa_config_load(); 2456 vdev_prop_init(); 2457 l2arc_start(); 2458 scan_init(); 2459 qat_init(); 2460 spa_import_progress_init(); 2461 } 2462 2463 void 2464 spa_fini(void) 2465 { 2466 l2arc_stop(); 2467 2468 spa_evict_all(); 2469 2470 vdev_file_fini(); 2471 vdev_mirror_stat_fini(); 2472 vdev_raidz_math_fini(); 2473 chksum_fini(); 2474 zil_fini(); 2475 dmu_fini(); 2476 zio_fini(); 2477 ddt_fini(); 2478 brt_fini(); 2479 metaslab_stat_fini(); 2480 zfs_btree_fini(); 2481 unique_fini(); 2482 zfs_refcount_fini(); 2483 fm_fini(); 2484 scan_fini(); 2485 qat_fini(); 2486 spa_import_progress_destroy(); 2487 2488 avl_destroy(&spa_namespace_avl); 2489 avl_destroy(&spa_spare_avl); 2490 avl_destroy(&spa_l2cache_avl); 2491 2492 cv_destroy(&spa_namespace_cv); 2493 mutex_destroy(&spa_namespace_lock); 2494 mutex_destroy(&spa_spare_lock); 2495 mutex_destroy(&spa_l2cache_lock); 2496 } 2497 2498 /* 2499 * Return whether this pool has a dedicated slog device. No locking needed. 2500 * It's not a problem if the wrong answer is returned as it's only for 2501 * performance and not correctness. 2502 */ 2503 boolean_t 2504 spa_has_slogs(spa_t *spa) 2505 { 2506 return (spa->spa_log_class->mc_groups != 0); 2507 } 2508 2509 spa_log_state_t 2510 spa_get_log_state(spa_t *spa) 2511 { 2512 return (spa->spa_log_state); 2513 } 2514 2515 void 2516 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2517 { 2518 spa->spa_log_state = state; 2519 } 2520 2521 boolean_t 2522 spa_is_root(spa_t *spa) 2523 { 2524 return (spa->spa_is_root); 2525 } 2526 2527 boolean_t 2528 spa_writeable(spa_t *spa) 2529 { 2530 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2531 } 2532 2533 /* 2534 * Returns true if there is a pending sync task in any of the current 2535 * syncing txg, the current quiescing txg, or the current open txg. 2536 */ 2537 boolean_t 2538 spa_has_pending_synctask(spa_t *spa) 2539 { 2540 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2541 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2542 } 2543 2544 spa_mode_t 2545 spa_mode(spa_t *spa) 2546 { 2547 return (spa->spa_mode); 2548 } 2549 2550 uint64_t 2551 spa_bootfs(spa_t *spa) 2552 { 2553 return (spa->spa_bootfs); 2554 } 2555 2556 uint64_t 2557 spa_delegation(spa_t *spa) 2558 { 2559 return (spa->spa_delegation); 2560 } 2561 2562 objset_t * 2563 spa_meta_objset(spa_t *spa) 2564 { 2565 return (spa->spa_meta_objset); 2566 } 2567 2568 enum zio_checksum 2569 spa_dedup_checksum(spa_t *spa) 2570 { 2571 return (spa->spa_dedup_checksum); 2572 } 2573 2574 /* 2575 * Reset pool scan stat per scan pass (or reboot). 
2576 */ 2577 void 2578 spa_scan_stat_init(spa_t *spa) 2579 { 2580 /* data not stored on disk */ 2581 spa->spa_scan_pass_start = gethrestime_sec(); 2582 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2583 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2584 else 2585 spa->spa_scan_pass_scrub_pause = 0; 2586 2587 if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) 2588 spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start; 2589 else 2590 spa->spa_scan_pass_errorscrub_pause = 0; 2591 2592 spa->spa_scan_pass_scrub_spent_paused = 0; 2593 spa->spa_scan_pass_exam = 0; 2594 spa->spa_scan_pass_issued = 0; 2595 2596 // error scrub stats 2597 spa->spa_scan_pass_errorscrub_spent_paused = 0; 2598 } 2599 2600 /* 2601 * Get scan stats for zpool status reports 2602 */ 2603 int 2604 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2605 { 2606 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2607 2608 if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE && 2609 scn->errorscrub_phys.dep_func == POOL_SCAN_NONE)) 2610 return (SET_ERROR(ENOENT)); 2611 2612 memset(ps, 0, sizeof (pool_scan_stat_t)); 2613 2614 /* data stored on disk */ 2615 ps->pss_func = scn->scn_phys.scn_func; 2616 ps->pss_state = scn->scn_phys.scn_state; 2617 ps->pss_start_time = scn->scn_phys.scn_start_time; 2618 ps->pss_end_time = scn->scn_phys.scn_end_time; 2619 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2620 ps->pss_examined = scn->scn_phys.scn_examined; 2621 ps->pss_skipped = scn->scn_phys.scn_skipped; 2622 ps->pss_processed = scn->scn_phys.scn_processed; 2623 ps->pss_errors = scn->scn_phys.scn_errors; 2624 2625 /* data not stored on disk */ 2626 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2627 ps->pss_pass_start = spa->spa_scan_pass_start; 2628 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2629 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2630 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2631 ps->pss_issued = 2632 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2633 2634 /* error scrub data stored on disk */ 2635 ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func; 2636 ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state; 2637 ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time; 2638 ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time; 2639 ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined; 2640 ps->pss_error_scrub_to_be_examined = 2641 scn->errorscrub_phys.dep_to_examine; 2642 2643 /* error scrub data not stored on disk */ 2644 ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause; 2645 2646 return (0); 2647 } 2648 2649 int 2650 spa_maxblocksize(spa_t *spa) 2651 { 2652 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2653 return (SPA_MAXBLOCKSIZE); 2654 else 2655 return (SPA_OLD_MAXBLOCKSIZE); 2656 } 2657 2658 2659 /* 2660 * Returns the txg that the last device removal completed. No indirect mappings 2661 * have been added since this txg. 2662 */ 2663 uint64_t 2664 spa_get_last_removal_txg(spa_t *spa) 2665 { 2666 uint64_t vdevid; 2667 uint64_t ret = -1ULL; 2668 2669 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2670 /* 2671 * sr_prev_indirect_vdev is only modified while holding all the 2672 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2673 * examining it. 
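	 *
	 * The walk below then follows the chain of removed vdevs from
	 * the most recent removal backwards: each indirect vdev names
	 * its predecessor in
	 * vdev_indirect_config.vic_prev_indirect_vdev, and the chain
	 * is terminated by -1ULL.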
2674 */ 2675 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2676 2677 while (vdevid != -1ULL) { 2678 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2679 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2680 2681 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2682 2683 /* 2684 * If the removal did not remap any data, we don't care. 2685 */ 2686 if (vdev_indirect_births_count(vib) != 0) { 2687 ret = vdev_indirect_births_last_entry_txg(vib); 2688 break; 2689 } 2690 2691 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2692 } 2693 spa_config_exit(spa, SCL_VDEV, FTAG); 2694 2695 IMPLY(ret != -1ULL, 2696 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2697 2698 return (ret); 2699 } 2700 2701 int 2702 spa_maxdnodesize(spa_t *spa) 2703 { 2704 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2705 return (DNODE_MAX_SIZE); 2706 else 2707 return (DNODE_MIN_SIZE); 2708 } 2709 2710 boolean_t 2711 spa_multihost(spa_t *spa) 2712 { 2713 return (spa->spa_multihost ? B_TRUE : B_FALSE); 2714 } 2715 2716 uint32_t 2717 spa_get_hostid(spa_t *spa) 2718 { 2719 return (spa->spa_hostid); 2720 } 2721 2722 boolean_t 2723 spa_trust_config(spa_t *spa) 2724 { 2725 return (spa->spa_trust_config); 2726 } 2727 2728 uint64_t 2729 spa_missing_tvds_allowed(spa_t *spa) 2730 { 2731 return (spa->spa_missing_tvds_allowed); 2732 } 2733 2734 space_map_t * 2735 spa_syncing_log_sm(spa_t *spa) 2736 { 2737 return (spa->spa_syncing_log_sm); 2738 } 2739 2740 void 2741 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2742 { 2743 spa->spa_missing_tvds = missing; 2744 } 2745 2746 /* 2747 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2748 */ 2749 const char * 2750 spa_state_to_name(spa_t *spa) 2751 { 2752 ASSERT3P(spa, !=, NULL); 2753 2754 /* 2755 * it is possible for the spa to exist, without root vdev 2756 * as the spa transitions during import/export 2757 */ 2758 vdev_t *rvd = spa->spa_root_vdev; 2759 if (rvd == NULL) { 2760 return ("TRANSITIONING"); 2761 } 2762 vdev_state_t state = rvd->vdev_state; 2763 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2764 2765 if (spa_suspended(spa) && 2766 (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)) 2767 return ("SUSPENDED"); 2768 2769 switch (state) { 2770 case VDEV_STATE_CLOSED: 2771 case VDEV_STATE_OFFLINE: 2772 return ("OFFLINE"); 2773 case VDEV_STATE_REMOVED: 2774 return ("REMOVED"); 2775 case VDEV_STATE_CANT_OPEN: 2776 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2777 return ("FAULTED"); 2778 else if (aux == VDEV_AUX_SPLIT_POOL) 2779 return ("SPLIT"); 2780 else 2781 return ("UNAVAIL"); 2782 case VDEV_STATE_FAULTED: 2783 return ("FAULTED"); 2784 case VDEV_STATE_DEGRADED: 2785 return ("DEGRADED"); 2786 case VDEV_STATE_HEALTHY: 2787 return ("ONLINE"); 2788 default: 2789 break; 2790 } 2791 2792 return ("UNKNOWN"); 2793 } 2794 2795 boolean_t 2796 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2797 { 2798 vdev_t *rvd = spa->spa_root_vdev; 2799 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2800 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2801 return (B_FALSE); 2802 } 2803 return (B_TRUE); 2804 } 2805 2806 boolean_t 2807 spa_has_checkpoint(spa_t *spa) 2808 { 2809 return (spa->spa_checkpoint_txg != 0); 2810 } 2811 2812 boolean_t 2813 spa_importing_readonly_checkpoint(spa_t *spa) 2814 { 2815 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2816 spa->spa_mode == SPA_MODE_READ); 2817 } 2818 2819 uint64_t 2820 spa_min_claim_txg(spa_t *spa) 2821 { 2822 uint64_t checkpoint_txg = 
spa->spa_uberblock.ub_checkpoint_txg; 2823 2824 if (checkpoint_txg != 0) 2825 return (checkpoint_txg + 1); 2826 2827 return (spa->spa_first_txg); 2828 } 2829 2830 /* 2831 * If there is a checkpoint, async destroys may consume more space from 2832 * the pool instead of freeing it. In an attempt to save the pool from 2833 * getting suspended when it is about to run out of space, we stop 2834 * processing async destroys. 2835 */ 2836 boolean_t 2837 spa_suspend_async_destroy(spa_t *spa) 2838 { 2839 dsl_pool_t *dp = spa_get_dsl(spa); 2840 2841 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2842 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2843 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2844 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0; 2845 2846 if (spa_has_checkpoint(spa) && avail == 0) 2847 return (B_TRUE); 2848 2849 return (B_FALSE); 2850 } 2851 2852 #if defined(_KERNEL) 2853 2854 int 2855 param_set_deadman_failmode_common(const char *val) 2856 { 2857 spa_t *spa = NULL; 2858 char *p; 2859 2860 if (val == NULL) 2861 return (SET_ERROR(EINVAL)); 2862 2863 if ((p = strchr(val, '\n')) != NULL) 2864 *p = '\0'; 2865 2866 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2867 strcmp(val, "panic")) 2868 return (SET_ERROR(EINVAL)); 2869 2870 if (spa_mode_global != SPA_MODE_UNINIT) { 2871 mutex_enter(&spa_namespace_lock); 2872 while ((spa = spa_next(spa)) != NULL) 2873 spa_set_deadman_failmode(spa, val); 2874 mutex_exit(&spa_namespace_lock); 2875 } 2876 2877 return (0); 2878 } 2879 #endif 2880 2881 /* Namespace manipulation */ 2882 EXPORT_SYMBOL(spa_lookup); 2883 EXPORT_SYMBOL(spa_add); 2884 EXPORT_SYMBOL(spa_remove); 2885 EXPORT_SYMBOL(spa_next); 2886 2887 /* Refcount functions */ 2888 EXPORT_SYMBOL(spa_open_ref); 2889 EXPORT_SYMBOL(spa_close); 2890 EXPORT_SYMBOL(spa_refcount_zero); 2891 2892 /* Pool configuration lock */ 2893 EXPORT_SYMBOL(spa_config_tryenter); 2894 EXPORT_SYMBOL(spa_config_enter); 2895 EXPORT_SYMBOL(spa_config_exit); 2896 EXPORT_SYMBOL(spa_config_held); 2897 2898 /* Pool vdev add/remove lock */ 2899 EXPORT_SYMBOL(spa_vdev_enter); 2900 EXPORT_SYMBOL(spa_vdev_exit); 2901 2902 /* Pool vdev state change lock */ 2903 EXPORT_SYMBOL(spa_vdev_state_enter); 2904 EXPORT_SYMBOL(spa_vdev_state_exit); 2905 2906 /* Accessor functions */ 2907 EXPORT_SYMBOL(spa_shutting_down); 2908 EXPORT_SYMBOL(spa_get_dsl); 2909 EXPORT_SYMBOL(spa_get_rootblkptr); 2910 EXPORT_SYMBOL(spa_set_rootblkptr); 2911 EXPORT_SYMBOL(spa_altroot); 2912 EXPORT_SYMBOL(spa_sync_pass); 2913 EXPORT_SYMBOL(spa_name); 2914 EXPORT_SYMBOL(spa_guid); 2915 EXPORT_SYMBOL(spa_last_synced_txg); 2916 EXPORT_SYMBOL(spa_first_txg); 2917 EXPORT_SYMBOL(spa_syncing_txg); 2918 EXPORT_SYMBOL(spa_version); 2919 EXPORT_SYMBOL(spa_state); 2920 EXPORT_SYMBOL(spa_load_state); 2921 EXPORT_SYMBOL(spa_freeze_txg); 2922 EXPORT_SYMBOL(spa_get_dspace); 2923 EXPORT_SYMBOL(spa_update_dspace); 2924 EXPORT_SYMBOL(spa_deflate); 2925 EXPORT_SYMBOL(spa_normal_class); 2926 EXPORT_SYMBOL(spa_log_class); 2927 EXPORT_SYMBOL(spa_special_class); 2928 EXPORT_SYMBOL(spa_preferred_class); 2929 EXPORT_SYMBOL(spa_max_replication); 2930 EXPORT_SYMBOL(spa_prev_software_version); 2931 EXPORT_SYMBOL(spa_get_failmode); 2932 EXPORT_SYMBOL(spa_suspended); 2933 EXPORT_SYMBOL(spa_bootfs); 2934 EXPORT_SYMBOL(spa_delegation); 2935 EXPORT_SYMBOL(spa_meta_objset); 2936 EXPORT_SYMBOL(spa_maxblocksize); 2937 EXPORT_SYMBOL(spa_maxdnodesize); 2938 2939 /* Miscellaneous support routines */ 2940 EXPORT_SYMBOL(spa_guid_exists); 2941 
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
EXPORT_SYMBOL(spa_trust_config);
EXPORT_SYMBOL(spa_missing_tvds_allowed);
EXPORT_SYMBOL(spa_set_missing_tvds);
EXPORT_SYMBOL(spa_state_to_name);
EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
EXPORT_SYMBOL(spa_min_claim_txg);
EXPORT_SYMBOL(spa_suspend_async_destroy);
EXPORT_SYMBOL(spa_has_checkpoint);
EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);

ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
	"Set additional debugging flags");

ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
	"Set to attempt to recover from fatal errors");

ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
	"Set to ignore I/O errors during free and permanently leak the space");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW,
	"Dead I/O check interval in milliseconds");

ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
	"Enable deadman timer");

ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW,
	"SPA size estimate multiplication factor");

ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
	"Place DDT data into the special class");

ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
	"Place user data indirect blocks into the special class");

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
	"Failmode for deadman timer");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
	param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW,
	"Pool sync expiration time in milliseconds");

ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
	param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW,
	"I/O expiration time in milliseconds");

ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW,
	"Small file blocks in special vdevs depend on this much "
	"free space being available");
/* END CSTYLED */

ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
	param_get_uint, ZMOD_RW, "Reserved free space in pool");
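/*
 * Usage sketch for the deadman tunables above (Linux sysfs paths are
 * assumed from the ZFS_MODULE_PARAM names; verify on your platform):
 *
 *	# switch the deadman behavior for all imported pools at runtime
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 *	# raise the pool sync expiration to ten minutes
 *	echo 600000 > /sys/module/zfs/parameters/zfs_deadman_synctime_ms
 *
 * Writes to zfs_deadman_failmode funnel through
 * param_set_deadman_failmode() -> param_set_deadman_failmode_common(),
 * which validates the string ("wait", "continue", or "panic") and
 * applies it to every imported pool under spa_namespace_lock.
 */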