1 // SPDX-License-Identifier: CDDL-1.0 2 /* 3 * CDDL HEADER START 4 * 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or https://opensource.org/licenses/CDDL-1.0. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2024 by Delphix. All rights reserved. 25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2017 Datto Inc. 29 * Copyright (c) 2017, Intel Corporation. 30 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved. 31 * Copyright (c) 2023, 2024, Klara Inc. 32 */ 33 34 #include <sys/zfs_context.h> 35 #include <sys/zfs_chksum.h> 36 #include <sys/spa_impl.h> 37 #include <sys/zio.h> 38 #include <sys/zio_checksum.h> 39 #include <sys/zio_compress.h> 40 #include <sys/dmu.h> 41 #include <sys/dmu_tx.h> 42 #include <sys/zap.h> 43 #include <sys/zil.h> 44 #include <sys/vdev_impl.h> 45 #include <sys/vdev_initialize.h> 46 #include <sys/vdev_trim.h> 47 #include <sys/vdev_file.h> 48 #include <sys/vdev_raidz.h> 49 #include <sys/metaslab.h> 50 #include <sys/uberblock_impl.h> 51 #include <sys/txg.h> 52 #include <sys/avl.h> 53 #include <sys/unique.h> 54 #include <sys/dsl_pool.h> 55 #include <sys/dsl_dir.h> 56 #include <sys/dsl_prop.h> 57 #include <sys/fm/util.h> 58 #include <sys/dsl_scan.h> 59 #include <sys/fs/zfs.h> 60 #include <sys/metaslab_impl.h> 61 #include <sys/arc.h> 62 #include <sys/brt.h> 63 #include <sys/ddt.h> 64 #include <sys/kstat.h> 65 #include "zfs_prop.h" 66 #include <sys/btree.h> 67 #include <sys/zfeature.h> 68 #include <sys/qat.h> 69 #include <sys/zstd/zstd.h> 70 71 /* 72 * SPA locking 73 * 74 * There are three basic locks for managing spa_t structures: 75 * 76 * spa_namespace_lock (global mutex) 77 * 78 * This lock must be acquired to do any of the following: 79 * 80 * - Lookup a spa_t by name 81 * - Add or remove a spa_t from the namespace 82 * - Increase spa_refcount from non-zero 83 * - Check if spa_refcount is zero 84 * - Rename a spa_t 85 * - add/remove/attach/detach devices 86 * - Held for the duration of create/destroy 87 * - Held at the start and end of import and export 88 * 89 * It does not need to handle recursion. A create or destroy may 90 * reference objects (files or zvols) in other pools, but by 91 * definition they must have an existing reference, and will never need 92 * to lookup a spa_t by name. 93 * 94 * spa_refcount (per-spa zfs_refcount_t protected by mutex) 95 * 96 * This reference count keep track of any active users of the spa_t. The 97 * spa_t cannot be destroyed or freed while this is non-zero. Internally, 98 * the refcount is never really 'zero' - opening a pool implicitly keeps 99 * some references in the DMU. Internally we check against spa_minref, but 100 * present the image of a zero/non-zero value to consumers. 101 * 102 * spa_config_lock[] (per-spa array of rwlocks) 103 * 104 * This protects the spa_t from config changes, and must be held in 105 * the following circumstances: 106 * 107 * - RW_READER to perform I/O to the spa 108 * - RW_WRITER to change the vdev config 109 * 110 * The locking order is fairly straightforward: 111 * 112 * spa_namespace_lock -> spa_refcount 113 * 114 * The namespace lock must be acquired to increase the refcount from 0 115 * or to check if it is zero. 116 * 117 * spa_refcount -> spa_config_lock[] 118 * 119 * There must be at least one valid reference on the spa_t to acquire 120 * the config lock. 121 * 122 * spa_namespace_lock -> spa_config_lock[] 123 * 124 * The namespace lock must always be taken before the config lock. 125 * 126 * 127 * The spa_namespace_lock can be acquired directly and is globally visible. 128 * 129 * The namespace is manipulated using the following functions, all of which 130 * require the spa_namespace_lock to be held. 131 * 132 * spa_lookup() Lookup a spa_t by name. 133 * 134 * spa_add() Create a new spa_t in the namespace. 135 * 136 * spa_remove() Remove a spa_t from the namespace. This also 137 * frees up any memory associated with the spa_t. 138 * 139 * spa_next() Returns the next spa_t in the system, or the 140 * first if NULL is passed. 141 * 142 * spa_evict_all() Shutdown and remove all spa_t structures in 143 * the system. 144 * 145 * spa_guid_exists() Determine whether a pool/device guid exists. 146 * 147 * The spa_refcount is manipulated using the following functions: 148 * 149 * spa_open_ref() Adds a reference to the given spa_t. Must be 150 * called with spa_namespace_lock held if the 151 * refcount is currently zero. 152 * 153 * spa_close() Remove a reference from the spa_t. This will 154 * not free the spa_t or remove it from the 155 * namespace. No locking is required. 156 * 157 * spa_refcount_zero() Returns true if the refcount is currently 158 * zero. Must be called with spa_namespace_lock 159 * held. 160 * 161 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 162 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 163 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 164 * 165 * To read the configuration, it suffices to hold one of these locks as reader. 166 * To modify the configuration, you must hold all locks as writer. To modify 167 * vdev state without altering the vdev tree's topology (e.g. online/offline), 168 * you must hold SCL_STATE and SCL_ZIO as writer. 169 * 170 * We use these distinct config locks to avoid recursive lock entry. 171 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 172 * block allocations (SCL_ALLOC), which may require reading space maps 173 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 174 * 175 * The spa config locks cannot be normal rwlocks because we need the 176 * ability to hand off ownership. For example, SCL_ZIO is acquired 177 * by the issuing thread and later released by an interrupt thread. 178 * They do, however, obey the usual write-wanted semantics to prevent 179 * writer (i.e. system administrator) starvation. 180 * 181 * The lock acquisition rules are as follows: 182 * 183 * SCL_CONFIG 184 * Protects changes to the vdev tree topology, such as vdev 185 * add/remove/attach/detach. Protects the dirty config list 186 * (spa_config_dirty_list) and the set of spares and l2arc devices. 187 * 188 * SCL_STATE 189 * Protects changes to pool state and vdev state, such as vdev 190 * online/offline/fault/degrade/clear. Protects the dirty state list 191 * (spa_state_dirty_list) and global pool state (spa_state). 192 * 193 * SCL_ALLOC 194 * Protects changes to metaslab groups and classes. 195 * Held as reader by metaslab_alloc() and metaslab_claim(). 196 * 197 * SCL_ZIO 198 * Held by bp-level zios (those which have no io_vd upon entry) 199 * to prevent changes to the vdev tree. The bp-level zio implicitly 200 * protects all of its vdev child zios, which do not hold SCL_ZIO. 201 * 202 * SCL_FREE 203 * Protects changes to metaslab groups and classes. 204 * Held as reader by metaslab_free(). SCL_FREE is distinct from 205 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free 206 * blocks in zio_done() while another i/o that holds either 207 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. 208 * 209 * SCL_VDEV 210 * Held as reader to prevent changes to the vdev tree during trivial 211 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the 212 * other locks, and lower than all of them, to ensure that it's safe 213 * to acquire regardless of caller context. 214 * 215 * In addition, the following rules apply: 216 * 217 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. 218 * The lock ordering is SCL_CONFIG > spa_props_lock. 219 * 220 * (b) I/O operations on leaf vdevs. For any zio operation that takes 221 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), 222 * or zio_write_phys() -- the caller must ensure that the config cannot 223 * cannot change in the interim, and that the vdev cannot be reopened. 224 * SCL_STATE as reader suffices for both. 225 * 226 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). 227 * 228 * spa_vdev_enter() Acquire the namespace lock and the config lock 229 * for writing. 230 * 231 * spa_vdev_exit() Release the config lock, wait for all I/O 232 * to complete, sync the updated configs to the 233 * cache, and release the namespace lock. 234 * 235 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). 236 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual 237 * locking is, always, based on spa_namespace_lock and spa_config_lock[]. 238 */ 239 240 avl_tree_t spa_namespace_avl; 241 kmutex_t spa_namespace_lock; 242 kcondvar_t spa_namespace_cv; 243 static const int spa_max_replication_override = SPA_DVAS_PER_BP; 244 245 static kmutex_t spa_spare_lock; 246 static avl_tree_t spa_spare_avl; 247 static kmutex_t spa_l2cache_lock; 248 static avl_tree_t spa_l2cache_avl; 249 250 spa_mode_t spa_mode_global = SPA_MODE_UNINIT; 251 252 #ifdef ZFS_DEBUG 253 /* 254 * Everything except dprintf, set_error, spa, and indirect_remap is on 255 * by default in debug builds. 256 */ 257 int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR | 258 ZFS_DEBUG_INDIRECT_REMAP); 259 #else 260 int zfs_flags = 0; 261 #endif 262 263 /* 264 * zfs_recover can be set to nonzero to attempt to recover from 265 * otherwise-fatal errors, typically caused by on-disk corruption. When 266 * set, calls to zfs_panic_recover() will turn into warning messages. 267 * This should only be used as a last resort, as it typically results 268 * in leaked space, or worse. 269 */ 270 int zfs_recover = B_FALSE; 271 272 /* 273 * If destroy encounters an EIO while reading metadata (e.g. indirect 274 * blocks), space referenced by the missing metadata can not be freed. 275 * Normally this causes the background destroy to become "stalled", as 276 * it is unable to make forward progress. While in this stalled state, 277 * all remaining space to free from the error-encountering filesystem is 278 * "temporarily leaked". Set this flag to cause it to ignore the EIO, 279 * permanently leak the space from indirect blocks that can not be read, 280 * and continue to free everything else that it can. 281 * 282 * The default, "stalling" behavior is useful if the storage partially 283 * fails (i.e. some but not all i/os fail), and then later recovers. In 284 * this case, we will be able to continue pool operations while it is 285 * partially failed, and when it recovers, we can continue to free the 286 * space, with no leaks. However, note that this case is actually 287 * fairly rare. 288 * 289 * Typically pools either (a) fail completely (but perhaps temporarily, 290 * e.g. a top-level vdev going offline), or (b) have localized, 291 * permanent errors (e.g. disk returns the wrong data due to bit flip or 292 * firmware bug). In case (a), this setting does not matter because the 293 * pool will be suspended and the sync thread will not be able to make 294 * forward progress regardless. In case (b), because the error is 295 * permanent, the best we can do is leak the minimum amount of space, 296 * which is what setting this flag will do. Therefore, it is reasonable 297 * for this flag to normally be set, but we chose the more conservative 298 * approach of not setting it, so that there is no possibility of 299 * leaking space in the "partial temporary" failure case. 300 */ 301 int zfs_free_leak_on_eio = B_FALSE; 302 303 /* 304 * Expiration time in milliseconds. This value has two meanings. First it is 305 * used to determine when the spa_deadman() logic should fire. By default the 306 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds. 307 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 308 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 309 * in one of three behaviors controlled by zfs_deadman_failmode. 310 */ 311 uint64_t zfs_deadman_synctime_ms = 600000UL; /* 10 min. */ 312 313 /* 314 * This value controls the maximum amount of time zio_wait() will block for an 315 * outstanding IO. By default this is 300 seconds at which point the "hung" 316 * behavior will be applied as described for zfs_deadman_synctime_ms. 317 */ 318 uint64_t zfs_deadman_ziotime_ms = 300000UL; /* 5 min. */ 319 320 /* 321 * Check time in milliseconds. This defines the frequency at which we check 322 * for hung I/O. 323 */ 324 uint64_t zfs_deadman_checktime_ms = 60000UL; /* 1 min. */ 325 326 /* 327 * By default the deadman is enabled. 328 */ 329 int zfs_deadman_enabled = B_TRUE; 330 331 /* 332 * Controls the behavior of the deadman when it detects a "hung" I/O. 333 * Valid values are zfs_deadman_failmode=<wait|continue|panic>. 334 * 335 * wait - Wait for the "hung" I/O (default) 336 * continue - Attempt to recover from a "hung" I/O 337 * panic - Panic the system 338 */ 339 const char *zfs_deadman_failmode = "wait"; 340 341 /* 342 * The worst case is single-sector max-parity RAID-Z blocks, in which 343 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 344 * times the size; so just assume that. Add to this the fact that 345 * we can have up to 3 DVAs per bp, and one more factor of 2 because 346 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, 347 * the worst case is: 348 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 349 */ 350 uint_t spa_asize_inflation = 24; 351 352 /* 353 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 354 * the pool to be consumed (bounded by spa_max_slop). This ensures that we 355 * don't run the pool completely out of space, due to unaccounted changes (e.g. 356 * to the MOS). It also limits the worst-case time to allocate space. If we 357 * have less than this amount of free space, most ZPL operations (e.g. write, 358 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are 359 * also part of this 3.2% of space which can't be consumed by normal writes; 360 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded 361 * log space. 362 * 363 * Certain operations (e.g. file removal, most administrative actions) can 364 * use half the slop space. They will only return ENOSPC if less than half 365 * the slop space is free. Typically, once the pool has less than the slop 366 * space free, the user will use these operations to free up space in the pool. 367 * These are the operations that call dsl_pool_adjustedsize() with the netfree 368 * argument set to TRUE. 369 * 370 * Operations that are almost guaranteed to free up space in the absence of 371 * a pool checkpoint can use up to three quarters of the slop space 372 * (e.g zfs destroy). 373 * 374 * A very restricted set of operations are always permitted, regardless of 375 * the amount of free space. These are the operations that call 376 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net 377 * increase in the amount of space used, it is possible to run the pool 378 * completely out of space, causing it to be permanently read-only. 379 * 380 * Note that on very small pools, the slop space will be larger than 381 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 382 * but we never allow it to be more than half the pool size. 383 * 384 * Further, on very large pools, the slop space will be smaller than 385 * 3.2%, to avoid reserving much more space than we actually need; bounded 386 * by spa_max_slop (128GB). 387 * 388 * See also the comments in zfs_space_check_t. 389 */ 390 uint_t spa_slop_shift = 5; 391 static const uint64_t spa_min_slop = 128ULL * 1024 * 1024; 392 static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; 393 394 /* 395 * Number of allocators to use, per spa instance 396 */ 397 static int spa_num_allocators = 4; 398 static int spa_cpus_per_allocator = 4; 399 400 /* 401 * Spa active allocator. 402 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>. 403 */ 404 const char *zfs_active_allocator = "dynamic"; 405 406 void 407 spa_load_failed(spa_t *spa, const char *fmt, ...) 408 { 409 va_list adx; 410 char buf[256]; 411 412 va_start(adx, fmt); 413 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 414 va_end(adx); 415 416 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name, 417 spa->spa_trust_config ? "trusted" : "untrusted", buf); 418 } 419 420 void 421 spa_load_note(spa_t *spa, const char *fmt, ...) 422 { 423 va_list adx; 424 char buf[256]; 425 426 va_start(adx, fmt); 427 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 428 va_end(adx); 429 430 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name, 431 spa->spa_trust_config ? "trusted" : "untrusted", buf); 432 433 spa_import_progress_set_notes_nolog(spa, "%s", buf); 434 } 435 436 /* 437 * By default dedup and user data indirects land in the special class 438 */ 439 static int zfs_ddt_data_is_special = B_TRUE; 440 static int zfs_user_indirect_is_special = B_TRUE; 441 442 /* 443 * The percentage of special class final space reserved for metadata only. 444 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 445 * let metadata into the class. 446 */ 447 static uint_t zfs_special_class_metadata_reserve_pct = 25; 448 449 /* 450 * ========================================================================== 451 * SPA config locking 452 * ========================================================================== 453 */ 454 static void 455 spa_config_lock_init(spa_t *spa) 456 { 457 for (int i = 0; i < SCL_LOCKS; i++) { 458 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 459 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 460 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 461 scl->scl_writer = NULL; 462 scl->scl_write_wanted = 0; 463 scl->scl_count = 0; 464 } 465 } 466 467 static void 468 spa_config_lock_destroy(spa_t *spa) 469 { 470 for (int i = 0; i < SCL_LOCKS; i++) { 471 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 472 mutex_destroy(&scl->scl_lock); 473 cv_destroy(&scl->scl_cv); 474 ASSERT(scl->scl_writer == NULL); 475 ASSERT(scl->scl_write_wanted == 0); 476 ASSERT(scl->scl_count == 0); 477 } 478 } 479 480 int 481 spa_config_tryenter(spa_t *spa, int locks, const void *tag, krw_t rw) 482 { 483 for (int i = 0; i < SCL_LOCKS; i++) { 484 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 485 if (!(locks & (1 << i))) 486 continue; 487 mutex_enter(&scl->scl_lock); 488 if (rw == RW_READER) { 489 if (scl->scl_writer || scl->scl_write_wanted) { 490 mutex_exit(&scl->scl_lock); 491 spa_config_exit(spa, locks & ((1 << i) - 1), 492 tag); 493 return (0); 494 } 495 } else { 496 ASSERT(scl->scl_writer != curthread); 497 if (scl->scl_count != 0) { 498 mutex_exit(&scl->scl_lock); 499 spa_config_exit(spa, locks & ((1 << i) - 1), 500 tag); 501 return (0); 502 } 503 scl->scl_writer = curthread; 504 } 505 scl->scl_count++; 506 mutex_exit(&scl->scl_lock); 507 } 508 return (1); 509 } 510 511 static void 512 spa_config_enter_impl(spa_t *spa, int locks, const void *tag, krw_t rw, 513 int mmp_flag) 514 { 515 (void) tag; 516 int wlocks_held = 0; 517 518 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 519 520 for (int i = 0; i < SCL_LOCKS; i++) { 521 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 522 if (scl->scl_writer == curthread) 523 wlocks_held |= (1 << i); 524 if (!(locks & (1 << i))) 525 continue; 526 mutex_enter(&scl->scl_lock); 527 if (rw == RW_READER) { 528 while (scl->scl_writer || 529 (!mmp_flag && scl->scl_write_wanted)) { 530 cv_wait(&scl->scl_cv, &scl->scl_lock); 531 } 532 } else { 533 ASSERT(scl->scl_writer != curthread); 534 while (scl->scl_count != 0) { 535 scl->scl_write_wanted++; 536 cv_wait(&scl->scl_cv, &scl->scl_lock); 537 scl->scl_write_wanted--; 538 } 539 scl->scl_writer = curthread; 540 } 541 scl->scl_count++; 542 mutex_exit(&scl->scl_lock); 543 } 544 ASSERT3U(wlocks_held, <=, locks); 545 } 546 547 void 548 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) 549 { 550 spa_config_enter_impl(spa, locks, tag, rw, 0); 551 } 552 553 /* 554 * The spa_config_enter_mmp() allows the mmp thread to cut in front of 555 * outstanding write lock requests. This is needed since the mmp updates are 556 * time sensitive and failure to service them promptly will result in a 557 * suspended pool. This pool suspension has been seen in practice when there is 558 * a single disk in a pool that is responding slowly and presumably about to 559 * fail. 560 */ 561 562 void 563 spa_config_enter_mmp(spa_t *spa, int locks, const void *tag, krw_t rw) 564 { 565 spa_config_enter_impl(spa, locks, tag, rw, 1); 566 } 567 568 void 569 spa_config_exit(spa_t *spa, int locks, const void *tag) 570 { 571 (void) tag; 572 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 573 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 574 if (!(locks & (1 << i))) 575 continue; 576 mutex_enter(&scl->scl_lock); 577 ASSERT(scl->scl_count > 0); 578 if (--scl->scl_count == 0) { 579 ASSERT(scl->scl_writer == NULL || 580 scl->scl_writer == curthread); 581 scl->scl_writer = NULL; /* OK in either case */ 582 cv_broadcast(&scl->scl_cv); 583 } 584 mutex_exit(&scl->scl_lock); 585 } 586 } 587 588 int 589 spa_config_held(spa_t *spa, int locks, krw_t rw) 590 { 591 int locks_held = 0; 592 593 for (int i = 0; i < SCL_LOCKS; i++) { 594 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 595 if (!(locks & (1 << i))) 596 continue; 597 if ((rw == RW_READER && scl->scl_count != 0) || 598 (rw == RW_WRITER && scl->scl_writer == curthread)) 599 locks_held |= 1 << i; 600 } 601 602 return (locks_held); 603 } 604 605 /* 606 * ========================================================================== 607 * SPA namespace functions 608 * ========================================================================== 609 */ 610 611 /* 612 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 613 * Returns NULL if no matching spa_t is found. 614 */ 615 spa_t * 616 spa_lookup(const char *name) 617 { 618 static spa_t search; /* spa_t is large; don't allocate on stack */ 619 spa_t *spa; 620 avl_index_t where; 621 char *cp; 622 623 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 624 625 retry: 626 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 627 628 /* 629 * If it's a full dataset name, figure out the pool name and 630 * just use that. 631 */ 632 cp = strpbrk(search.spa_name, "/@#"); 633 if (cp != NULL) 634 *cp = '\0'; 635 636 spa = avl_find(&spa_namespace_avl, &search, &where); 637 if (spa == NULL) 638 return (NULL); 639 640 /* 641 * Avoid racing with import/export, which don't hold the namespace 642 * lock for their entire duration. 643 */ 644 if ((spa->spa_load_thread != NULL && 645 spa->spa_load_thread != curthread) || 646 (spa->spa_export_thread != NULL && 647 spa->spa_export_thread != curthread)) { 648 cv_wait(&spa_namespace_cv, &spa_namespace_lock); 649 goto retry; 650 } 651 652 return (spa); 653 } 654 655 /* 656 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 657 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 658 * looking for potentially hung I/Os. 659 */ 660 void 661 spa_deadman(void *arg) 662 { 663 spa_t *spa = arg; 664 665 /* Disable the deadman if the pool is suspended. */ 666 if (spa_suspended(spa)) 667 return; 668 669 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 670 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 671 (u_longlong_t)++spa->spa_deadman_calls); 672 if (zfs_deadman_enabled) 673 vdev_deadman(spa->spa_root_vdev, FTAG); 674 675 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 676 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 677 MSEC_TO_TICK(zfs_deadman_checktime_ms)); 678 } 679 680 static int 681 spa_log_sm_sort_by_txg(const void *va, const void *vb) 682 { 683 const spa_log_sm_t *a = va; 684 const spa_log_sm_t *b = vb; 685 686 return (TREE_CMP(a->sls_txg, b->sls_txg)); 687 } 688 689 /* 690 * Create an uninitialized spa_t with the given name. Requires 691 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 692 * exist by calling spa_lookup() first. 693 */ 694 spa_t * 695 spa_add(const char *name, nvlist_t *config, const char *altroot) 696 { 697 spa_t *spa; 698 spa_config_dirent_t *dp; 699 700 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 701 702 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 703 704 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 705 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 706 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 707 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 708 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 709 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 710 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 711 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 712 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 713 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 714 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 715 mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL); 716 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL); 717 mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL); 718 719 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 720 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 721 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 722 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 723 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 724 cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL); 725 cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL); 726 727 for (int t = 0; t < TXG_SIZE; t++) 728 bplist_create(&spa->spa_free_bplist[t]); 729 730 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 731 spa->spa_state = POOL_STATE_UNINITIALIZED; 732 spa->spa_freeze_txg = UINT64_MAX; 733 spa->spa_final_txg = UINT64_MAX; 734 spa->spa_load_max_txg = UINT64_MAX; 735 spa->spa_proc = &p0; 736 spa->spa_proc_state = SPA_PROC_NONE; 737 spa->spa_trust_config = B_TRUE; 738 spa->spa_hostid = zone_get_hostid(NULL); 739 740 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 741 spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms); 742 spa_set_deadman_failmode(spa, zfs_deadman_failmode); 743 spa_set_allocator(spa, zfs_active_allocator); 744 745 zfs_refcount_create(&spa->spa_refcount); 746 spa_config_lock_init(spa); 747 spa_stats_init(spa); 748 749 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 750 avl_add(&spa_namespace_avl, spa); 751 752 /* 753 * Set the alternate root, if there is one. 754 */ 755 if (altroot) 756 spa->spa_root = spa_strdup(altroot); 757 758 /* Do not allow more allocators than fraction of CPUs. */ 759 spa->spa_alloc_count = MAX(MIN(spa_num_allocators, 760 boot_ncpus / MAX(spa_cpus_per_allocator, 1)), 1); 761 762 if (spa->spa_alloc_count > 1) { 763 spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t, 764 sau_inuse[spa->spa_alloc_count]), KM_SLEEP); 765 mutex_init(&spa->spa_allocs_use->sau_lock, NULL, MUTEX_DEFAULT, 766 NULL); 767 } 768 769 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed, 770 sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node)); 771 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg, 772 sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node)); 773 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t), 774 offsetof(log_summary_entry_t, lse_node)); 775 776 /* 777 * Every pool starts with the default cachefile 778 */ 779 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 780 offsetof(spa_config_dirent_t, scd_link)); 781 782 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 783 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 784 list_insert_head(&spa->spa_config_list, dp); 785 786 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 787 KM_SLEEP) == 0); 788 789 if (config != NULL) { 790 nvlist_t *features; 791 792 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 793 &features) == 0) { 794 VERIFY(nvlist_dup(features, &spa->spa_label_features, 795 0) == 0); 796 } 797 798 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 799 } 800 801 if (spa->spa_label_features == NULL) { 802 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 803 KM_SLEEP) == 0); 804 } 805 806 spa->spa_min_ashift = INT_MAX; 807 spa->spa_max_ashift = 0; 808 spa->spa_min_alloc = INT_MAX; 809 spa->spa_gcd_alloc = INT_MAX; 810 811 /* Reset cached value */ 812 spa->spa_dedup_dspace = ~0ULL; 813 814 /* 815 * As a pool is being created, treat all features as disabled by 816 * setting SPA_FEATURE_DISABLED for all entries in the feature 817 * refcount cache. 818 */ 819 for (int i = 0; i < SPA_FEATURES; i++) { 820 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 821 } 822 823 list_create(&spa->spa_leaf_list, sizeof (vdev_t), 824 offsetof(vdev_t, vdev_leaf_node)); 825 826 return (spa); 827 } 828 829 /* 830 * Removes a spa_t from the namespace, freeing up any memory used. Requires 831 * spa_namespace_lock. This is called only after the spa_t has been closed and 832 * deactivated. 833 */ 834 void 835 spa_remove(spa_t *spa) 836 { 837 spa_config_dirent_t *dp; 838 839 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 840 ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED); 841 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0); 842 ASSERT0(spa->spa_waiters); 843 844 nvlist_free(spa->spa_config_splitting); 845 846 avl_remove(&spa_namespace_avl, spa); 847 848 if (spa->spa_root) 849 spa_strfree(spa->spa_root); 850 851 while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) { 852 if (dp->scd_path != NULL) 853 spa_strfree(dp->scd_path); 854 kmem_free(dp, sizeof (spa_config_dirent_t)); 855 } 856 857 if (spa->spa_alloc_count > 1) { 858 mutex_destroy(&spa->spa_allocs_use->sau_lock); 859 kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t, 860 sau_inuse[spa->spa_alloc_count])); 861 } 862 863 avl_destroy(&spa->spa_metaslabs_by_flushed); 864 avl_destroy(&spa->spa_sm_logs_by_txg); 865 list_destroy(&spa->spa_log_summary); 866 list_destroy(&spa->spa_config_list); 867 list_destroy(&spa->spa_leaf_list); 868 869 nvlist_free(spa->spa_label_features); 870 nvlist_free(spa->spa_load_info); 871 nvlist_free(spa->spa_feat_stats); 872 spa_config_set(spa, NULL); 873 874 zfs_refcount_destroy(&spa->spa_refcount); 875 876 spa_stats_destroy(spa); 877 spa_config_lock_destroy(spa); 878 879 for (int t = 0; t < TXG_SIZE; t++) 880 bplist_destroy(&spa->spa_free_bplist[t]); 881 882 zio_checksum_templates_free(spa); 883 884 cv_destroy(&spa->spa_async_cv); 885 cv_destroy(&spa->spa_evicting_os_cv); 886 cv_destroy(&spa->spa_proc_cv); 887 cv_destroy(&spa->spa_scrub_io_cv); 888 cv_destroy(&spa->spa_suspend_cv); 889 cv_destroy(&spa->spa_activities_cv); 890 cv_destroy(&spa->spa_waiters_cv); 891 892 mutex_destroy(&spa->spa_flushed_ms_lock); 893 mutex_destroy(&spa->spa_async_lock); 894 mutex_destroy(&spa->spa_errlist_lock); 895 mutex_destroy(&spa->spa_errlog_lock); 896 mutex_destroy(&spa->spa_evicting_os_lock); 897 mutex_destroy(&spa->spa_history_lock); 898 mutex_destroy(&spa->spa_proc_lock); 899 mutex_destroy(&spa->spa_props_lock); 900 mutex_destroy(&spa->spa_cksum_tmpls_lock); 901 mutex_destroy(&spa->spa_scrub_lock); 902 mutex_destroy(&spa->spa_suspend_lock); 903 mutex_destroy(&spa->spa_vdev_top_lock); 904 mutex_destroy(&spa->spa_feat_stats_lock); 905 mutex_destroy(&spa->spa_activities_lock); 906 907 kmem_free(spa, sizeof (spa_t)); 908 } 909 910 /* 911 * Given a pool, return the next pool in the namespace, or NULL if there is 912 * none. If 'prev' is NULL, return the first pool. 913 */ 914 spa_t * 915 spa_next(spa_t *prev) 916 { 917 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 918 919 if (prev) 920 return (AVL_NEXT(&spa_namespace_avl, prev)); 921 else 922 return (avl_first(&spa_namespace_avl)); 923 } 924 925 /* 926 * ========================================================================== 927 * SPA refcount functions 928 * ========================================================================== 929 */ 930 931 /* 932 * Add a reference to the given spa_t. Must have at least one reference, or 933 * have the namespace lock held. 934 */ 935 void 936 spa_open_ref(spa_t *spa, const void *tag) 937 { 938 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref || 939 MUTEX_HELD(&spa_namespace_lock) || 940 spa->spa_load_thread == curthread); 941 (void) zfs_refcount_add(&spa->spa_refcount, tag); 942 } 943 944 /* 945 * Remove a reference to the given spa_t. Must have at least one reference, or 946 * have the namespace lock held or be part of a pool import/export. 947 */ 948 void 949 spa_close(spa_t *spa, const void *tag) 950 { 951 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref || 952 MUTEX_HELD(&spa_namespace_lock) || 953 spa->spa_load_thread == curthread || 954 spa->spa_export_thread == curthread); 955 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 956 } 957 958 /* 959 * Remove a reference to the given spa_t held by a dsl dir that is 960 * being asynchronously released. Async releases occur from a taskq 961 * performing eviction of dsl datasets and dirs. The namespace lock 962 * isn't held and the hold by the object being evicted may contribute to 963 * spa_minref (e.g. dataset or directory released during pool export), 964 * so the asserts in spa_close() do not apply. 965 */ 966 void 967 spa_async_close(spa_t *spa, const void *tag) 968 { 969 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 970 } 971 972 /* 973 * Check to see if the spa refcount is zero. Must be called with 974 * spa_namespace_lock held or be the spa export thread. We really 975 * compare against spa_minref, which is the number of references 976 * acquired when opening a pool 977 */ 978 boolean_t 979 spa_refcount_zero(spa_t *spa) 980 { 981 ASSERT(MUTEX_HELD(&spa_namespace_lock) || 982 spa->spa_export_thread == curthread); 983 984 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref); 985 } 986 987 /* 988 * ========================================================================== 989 * SPA spare and l2cache tracking 990 * ========================================================================== 991 */ 992 993 /* 994 * Hot spares and cache devices are tracked using the same code below, 995 * for 'auxiliary' devices. 996 */ 997 998 typedef struct spa_aux { 999 uint64_t aux_guid; 1000 uint64_t aux_pool; 1001 avl_node_t aux_avl; 1002 int aux_count; 1003 } spa_aux_t; 1004 1005 static inline int 1006 spa_aux_compare(const void *a, const void *b) 1007 { 1008 const spa_aux_t *sa = (const spa_aux_t *)a; 1009 const spa_aux_t *sb = (const spa_aux_t *)b; 1010 1011 return (TREE_CMP(sa->aux_guid, sb->aux_guid)); 1012 } 1013 1014 static void 1015 spa_aux_add(vdev_t *vd, avl_tree_t *avl) 1016 { 1017 avl_index_t where; 1018 spa_aux_t search; 1019 spa_aux_t *aux; 1020 1021 search.aux_guid = vd->vdev_guid; 1022 if ((aux = avl_find(avl, &search, &where)) != NULL) { 1023 aux->aux_count++; 1024 } else { 1025 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 1026 aux->aux_guid = vd->vdev_guid; 1027 aux->aux_count = 1; 1028 avl_insert(avl, aux, where); 1029 } 1030 } 1031 1032 static void 1033 spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 1034 { 1035 spa_aux_t search; 1036 spa_aux_t *aux; 1037 avl_index_t where; 1038 1039 search.aux_guid = vd->vdev_guid; 1040 aux = avl_find(avl, &search, &where); 1041 1042 ASSERT(aux != NULL); 1043 1044 if (--aux->aux_count == 0) { 1045 avl_remove(avl, aux); 1046 kmem_free(aux, sizeof (spa_aux_t)); 1047 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 1048 aux->aux_pool = 0ULL; 1049 } 1050 } 1051 1052 static boolean_t 1053 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 1054 { 1055 spa_aux_t search, *found; 1056 1057 search.aux_guid = guid; 1058 found = avl_find(avl, &search, NULL); 1059 1060 if (pool) { 1061 if (found) 1062 *pool = found->aux_pool; 1063 else 1064 *pool = 0ULL; 1065 } 1066 1067 if (refcnt) { 1068 if (found) 1069 *refcnt = found->aux_count; 1070 else 1071 *refcnt = 0; 1072 } 1073 1074 return (found != NULL); 1075 } 1076 1077 static void 1078 spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1079 { 1080 spa_aux_t search, *found; 1081 avl_index_t where; 1082 1083 search.aux_guid = vd->vdev_guid; 1084 found = avl_find(avl, &search, &where); 1085 ASSERT(found != NULL); 1086 ASSERT(found->aux_pool == 0ULL); 1087 1088 found->aux_pool = spa_guid(vd->vdev_spa); 1089 } 1090 1091 /* 1092 * Spares are tracked globally due to the following constraints: 1093 * 1094 * - A spare may be part of multiple pools. 1095 * - A spare may be added to a pool even if it's actively in use within 1096 * another pool. 1097 * - A spare in use in any pool can only be the source of a replacement if 1098 * the target is a spare in the same pool. 1099 * 1100 * We keep track of all spares on the system through the use of a reference 1101 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 1102 * spare, then we bump the reference count in the AVL tree. In addition, we set 1103 * the 'vdev_isspare' member to indicate that the device is a spare (active or 1104 * inactive). When a spare is made active (used to replace a device in the 1105 * pool), we also keep track of which pool its been made a part of. 1106 * 1107 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 1108 * called under the spa_namespace lock as part of vdev reconfiguration. The 1109 * separate spare lock exists for the status query path, which does not need to 1110 * be completely consistent with respect to other vdev configuration changes. 1111 */ 1112 1113 static int 1114 spa_spare_compare(const void *a, const void *b) 1115 { 1116 return (spa_aux_compare(a, b)); 1117 } 1118 1119 void 1120 spa_spare_add(vdev_t *vd) 1121 { 1122 mutex_enter(&spa_spare_lock); 1123 ASSERT(!vd->vdev_isspare); 1124 spa_aux_add(vd, &spa_spare_avl); 1125 vd->vdev_isspare = B_TRUE; 1126 mutex_exit(&spa_spare_lock); 1127 } 1128 1129 void 1130 spa_spare_remove(vdev_t *vd) 1131 { 1132 mutex_enter(&spa_spare_lock); 1133 ASSERT(vd->vdev_isspare); 1134 spa_aux_remove(vd, &spa_spare_avl); 1135 vd->vdev_isspare = B_FALSE; 1136 mutex_exit(&spa_spare_lock); 1137 } 1138 1139 boolean_t 1140 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1141 { 1142 boolean_t found; 1143 1144 mutex_enter(&spa_spare_lock); 1145 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1146 mutex_exit(&spa_spare_lock); 1147 1148 return (found); 1149 } 1150 1151 void 1152 spa_spare_activate(vdev_t *vd) 1153 { 1154 mutex_enter(&spa_spare_lock); 1155 ASSERT(vd->vdev_isspare); 1156 spa_aux_activate(vd, &spa_spare_avl); 1157 mutex_exit(&spa_spare_lock); 1158 } 1159 1160 /* 1161 * Level 2 ARC devices are tracked globally for the same reasons as spares. 1162 * Cache devices currently only support one pool per cache device, and so 1163 * for these devices the aux reference count is currently unused beyond 1. 1164 */ 1165 1166 static int 1167 spa_l2cache_compare(const void *a, const void *b) 1168 { 1169 return (spa_aux_compare(a, b)); 1170 } 1171 1172 void 1173 spa_l2cache_add(vdev_t *vd) 1174 { 1175 mutex_enter(&spa_l2cache_lock); 1176 ASSERT(!vd->vdev_isl2cache); 1177 spa_aux_add(vd, &spa_l2cache_avl); 1178 vd->vdev_isl2cache = B_TRUE; 1179 mutex_exit(&spa_l2cache_lock); 1180 } 1181 1182 void 1183 spa_l2cache_remove(vdev_t *vd) 1184 { 1185 mutex_enter(&spa_l2cache_lock); 1186 ASSERT(vd->vdev_isl2cache); 1187 spa_aux_remove(vd, &spa_l2cache_avl); 1188 vd->vdev_isl2cache = B_FALSE; 1189 mutex_exit(&spa_l2cache_lock); 1190 } 1191 1192 boolean_t 1193 spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1194 { 1195 boolean_t found; 1196 1197 mutex_enter(&spa_l2cache_lock); 1198 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1199 mutex_exit(&spa_l2cache_lock); 1200 1201 return (found); 1202 } 1203 1204 void 1205 spa_l2cache_activate(vdev_t *vd) 1206 { 1207 mutex_enter(&spa_l2cache_lock); 1208 ASSERT(vd->vdev_isl2cache); 1209 spa_aux_activate(vd, &spa_l2cache_avl); 1210 mutex_exit(&spa_l2cache_lock); 1211 } 1212 1213 /* 1214 * ========================================================================== 1215 * SPA vdev locking 1216 * ========================================================================== 1217 */ 1218 1219 /* 1220 * Lock the given spa_t for the purpose of adding or removing a vdev. 1221 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1222 * It returns the next transaction group for the spa_t. 1223 */ 1224 uint64_t 1225 spa_vdev_enter(spa_t *spa) 1226 { 1227 mutex_enter(&spa->spa_vdev_top_lock); 1228 mutex_enter(&spa_namespace_lock); 1229 1230 ASSERT0(spa->spa_export_thread); 1231 1232 vdev_autotrim_stop_all(spa); 1233 1234 return (spa_vdev_config_enter(spa)); 1235 } 1236 1237 /* 1238 * The same as spa_vdev_enter() above but additionally takes the guid of 1239 * the vdev being detached. When there is a rebuild in process it will be 1240 * suspended while the vdev tree is modified then resumed by spa_vdev_exit(). 1241 * The rebuild is canceled if only a single child remains after the detach. 1242 */ 1243 uint64_t 1244 spa_vdev_detach_enter(spa_t *spa, uint64_t guid) 1245 { 1246 mutex_enter(&spa->spa_vdev_top_lock); 1247 mutex_enter(&spa_namespace_lock); 1248 1249 ASSERT0(spa->spa_export_thread); 1250 1251 vdev_autotrim_stop_all(spa); 1252 1253 if (guid != 0) { 1254 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 1255 if (vd) { 1256 vdev_rebuild_stop_wait(vd->vdev_top); 1257 } 1258 } 1259 1260 return (spa_vdev_config_enter(spa)); 1261 } 1262 1263 /* 1264 * Internal implementation for spa_vdev_enter(). Used when a vdev 1265 * operation requires multiple syncs (i.e. removing a device) while 1266 * keeping the spa_namespace_lock held. 1267 */ 1268 uint64_t 1269 spa_vdev_config_enter(spa_t *spa) 1270 { 1271 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1272 1273 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1274 1275 return (spa_last_synced_txg(spa) + 1); 1276 } 1277 1278 /* 1279 * Used in combination with spa_vdev_config_enter() to allow the syncing 1280 * of multiple transactions without releasing the spa_namespace_lock. 1281 */ 1282 void 1283 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, 1284 const char *tag) 1285 { 1286 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1287 1288 int config_changed = B_FALSE; 1289 1290 ASSERT(txg > spa_last_synced_txg(spa)); 1291 1292 spa->spa_pending_vdev = NULL; 1293 1294 /* 1295 * Reassess the DTLs. 1296 */ 1297 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE); 1298 1299 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1300 config_changed = B_TRUE; 1301 spa->spa_config_generation++; 1302 } 1303 1304 /* 1305 * Verify the metaslab classes. 1306 */ 1307 metaslab_class_validate(spa_normal_class(spa)); 1308 metaslab_class_validate(spa_log_class(spa)); 1309 metaslab_class_validate(spa_embedded_log_class(spa)); 1310 metaslab_class_validate(spa_special_class(spa)); 1311 metaslab_class_validate(spa_dedup_class(spa)); 1312 1313 spa_config_exit(spa, SCL_ALL, spa); 1314 1315 /* 1316 * Panic the system if the specified tag requires it. This 1317 * is useful for ensuring that configurations are updated 1318 * transactionally. 1319 */ 1320 if (zio_injection_enabled) 1321 zio_handle_panic_injection(spa, tag, 0); 1322 1323 /* 1324 * Note: this txg_wait_synced() is important because it ensures 1325 * that there won't be more than one config change per txg. 1326 * This allows us to use the txg as the generation number. 1327 */ 1328 if (error == 0) 1329 txg_wait_synced(spa->spa_dsl_pool, txg); 1330 1331 if (vd != NULL) { 1332 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1333 if (vd->vdev_ops->vdev_op_leaf) { 1334 mutex_enter(&vd->vdev_initialize_lock); 1335 vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, 1336 NULL); 1337 mutex_exit(&vd->vdev_initialize_lock); 1338 1339 mutex_enter(&vd->vdev_trim_lock); 1340 vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL); 1341 mutex_exit(&vd->vdev_trim_lock); 1342 } 1343 1344 /* 1345 * The vdev may be both a leaf and top-level device. 1346 */ 1347 vdev_autotrim_stop_wait(vd); 1348 1349 spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER); 1350 vdev_free(vd); 1351 spa_config_exit(spa, SCL_STATE_ALL, spa); 1352 } 1353 1354 /* 1355 * If the config changed, update the config cache. 1356 */ 1357 if (config_changed) 1358 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_TRUE); 1359 } 1360 1361 /* 1362 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1363 * locking of spa_vdev_enter(), we also want make sure the transactions have 1364 * synced to disk, and then update the global configuration cache with the new 1365 * information. 1366 */ 1367 int 1368 spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1369 { 1370 vdev_autotrim_restart(spa); 1371 vdev_rebuild_restart(spa); 1372 1373 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1374 mutex_exit(&spa_namespace_lock); 1375 mutex_exit(&spa->spa_vdev_top_lock); 1376 1377 return (error); 1378 } 1379 1380 /* 1381 * Lock the given spa_t for the purpose of changing vdev state. 1382 */ 1383 void 1384 spa_vdev_state_enter(spa_t *spa, int oplocks) 1385 { 1386 int locks = SCL_STATE_ALL | oplocks; 1387 1388 /* 1389 * Root pools may need to read of the underlying devfs filesystem 1390 * when opening up a vdev. Unfortunately if we're holding the 1391 * SCL_ZIO lock it will result in a deadlock when we try to issue 1392 * the read from the root filesystem. Instead we "prefetch" 1393 * the associated vnodes that we need prior to opening the 1394 * underlying devices and cache them so that we can prevent 1395 * any I/O when we are doing the actual open. 1396 */ 1397 if (spa_is_root(spa)) { 1398 int low = locks & ~(SCL_ZIO - 1); 1399 int high = locks & ~low; 1400 1401 spa_config_enter(spa, high, spa, RW_WRITER); 1402 vdev_hold(spa->spa_root_vdev); 1403 spa_config_enter(spa, low, spa, RW_WRITER); 1404 } else { 1405 spa_config_enter(spa, locks, spa, RW_WRITER); 1406 } 1407 spa->spa_vdev_locks = locks; 1408 } 1409 1410 int 1411 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1412 { 1413 boolean_t config_changed = B_FALSE; 1414 vdev_t *vdev_top; 1415 1416 if (vd == NULL || vd == spa->spa_root_vdev) { 1417 vdev_top = spa->spa_root_vdev; 1418 } else { 1419 vdev_top = vd->vdev_top; 1420 } 1421 1422 if (vd != NULL || error == 0) 1423 vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE); 1424 1425 if (vd != NULL) { 1426 if (vd != spa->spa_root_vdev) 1427 vdev_state_dirty(vdev_top); 1428 1429 config_changed = B_TRUE; 1430 spa->spa_config_generation++; 1431 } 1432 1433 if (spa_is_root(spa)) 1434 vdev_rele(spa->spa_root_vdev); 1435 1436 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1437 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1438 1439 /* 1440 * If anything changed, wait for it to sync. This ensures that, 1441 * from the system administrator's perspective, zpool(8) commands 1442 * are synchronous. This is important for things like zpool offline: 1443 * when the command completes, you expect no further I/O from ZFS. 1444 */ 1445 if (vd != NULL) 1446 txg_wait_synced(spa->spa_dsl_pool, 0); 1447 1448 /* 1449 * If the config changed, update the config cache. 1450 */ 1451 if (config_changed) { 1452 mutex_enter(&spa_namespace_lock); 1453 spa_write_cachefile(spa, B_FALSE, B_TRUE, B_FALSE); 1454 mutex_exit(&spa_namespace_lock); 1455 } 1456 1457 return (error); 1458 } 1459 1460 /* 1461 * ========================================================================== 1462 * Miscellaneous functions 1463 * ========================================================================== 1464 */ 1465 1466 void 1467 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1468 { 1469 if (!nvlist_exists(spa->spa_label_features, feature)) { 1470 fnvlist_add_boolean(spa->spa_label_features, feature); 1471 /* 1472 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1473 * dirty the vdev config because lock SCL_CONFIG is not held. 1474 * Thankfully, in this case we don't need to dirty the config 1475 * because it will be written out anyway when we finish 1476 * creating the pool. 1477 */ 1478 if (tx->tx_txg != TXG_INITIAL) 1479 vdev_config_dirty(spa->spa_root_vdev); 1480 } 1481 } 1482 1483 void 1484 spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1485 { 1486 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1487 vdev_config_dirty(spa->spa_root_vdev); 1488 } 1489 1490 /* 1491 * Return the spa_t associated with given pool_guid, if it exists. If 1492 * device_guid is non-zero, determine whether the pool exists *and* contains 1493 * a device with the specified device_guid. 1494 */ 1495 spa_t * 1496 spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1497 { 1498 spa_t *spa; 1499 avl_tree_t *t = &spa_namespace_avl; 1500 1501 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1502 1503 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1504 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1505 continue; 1506 if (spa->spa_root_vdev == NULL) 1507 continue; 1508 if (spa_guid(spa) == pool_guid) { 1509 if (device_guid == 0) 1510 break; 1511 1512 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1513 device_guid) != NULL) 1514 break; 1515 1516 /* 1517 * Check any devices we may be in the process of adding. 1518 */ 1519 if (spa->spa_pending_vdev) { 1520 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1521 device_guid) != NULL) 1522 break; 1523 } 1524 } 1525 } 1526 1527 return (spa); 1528 } 1529 1530 /* 1531 * Determine whether a pool with the given pool_guid exists. 1532 */ 1533 boolean_t 1534 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1535 { 1536 return (spa_by_guid(pool_guid, device_guid) != NULL); 1537 } 1538 1539 char * 1540 spa_strdup(const char *s) 1541 { 1542 size_t len; 1543 char *new; 1544 1545 len = strlen(s); 1546 new = kmem_alloc(len + 1, KM_SLEEP); 1547 memcpy(new, s, len + 1); 1548 1549 return (new); 1550 } 1551 1552 void 1553 spa_strfree(char *s) 1554 { 1555 kmem_free(s, strlen(s) + 1); 1556 } 1557 1558 uint64_t 1559 spa_generate_guid(spa_t *spa) 1560 { 1561 uint64_t guid; 1562 1563 if (spa != NULL) { 1564 do { 1565 (void) random_get_pseudo_bytes((void *)&guid, 1566 sizeof (guid)); 1567 } while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)); 1568 } else { 1569 do { 1570 (void) random_get_pseudo_bytes((void *)&guid, 1571 sizeof (guid)); 1572 } while (guid == 0 || spa_guid_exists(guid, 0)); 1573 } 1574 1575 return (guid); 1576 } 1577 1578 static boolean_t 1579 spa_load_guid_exists(uint64_t guid) 1580 { 1581 avl_tree_t *t = &spa_namespace_avl; 1582 1583 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1584 1585 for (spa_t *spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1586 if (spa_load_guid(spa) == guid) 1587 return (B_TRUE); 1588 } 1589 1590 return (arc_async_flush_guid_inuse(guid)); 1591 } 1592 1593 uint64_t 1594 spa_generate_load_guid(void) 1595 { 1596 uint64_t guid; 1597 1598 do { 1599 (void) random_get_pseudo_bytes((void *)&guid, 1600 sizeof (guid)); 1601 } while (guid == 0 || spa_load_guid_exists(guid)); 1602 1603 return (guid); 1604 } 1605 1606 void 1607 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1608 { 1609 char type[256]; 1610 const char *checksum = NULL; 1611 const char *compress = NULL; 1612 1613 if (bp != NULL) { 1614 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1615 dmu_object_byteswap_t bswap = 1616 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1617 (void) snprintf(type, sizeof (type), "bswap %s %s", 1618 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1619 "metadata" : "data", 1620 dmu_ot_byteswap[bswap].ob_name); 1621 } else { 1622 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1623 sizeof (type)); 1624 } 1625 if (!BP_IS_EMBEDDED(bp)) { 1626 checksum = 1627 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1628 } 1629 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1630 } 1631 1632 SNPRINTF_BLKPTR(kmem_scnprintf, ' ', buf, buflen, bp, type, checksum, 1633 compress); 1634 } 1635 1636 void 1637 spa_freeze(spa_t *spa) 1638 { 1639 uint64_t freeze_txg = 0; 1640 1641 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1642 if (spa->spa_freeze_txg == UINT64_MAX) { 1643 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1644 spa->spa_freeze_txg = freeze_txg; 1645 } 1646 spa_config_exit(spa, SCL_ALL, FTAG); 1647 if (freeze_txg != 0) 1648 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1649 } 1650 1651 void 1652 zfs_panic_recover(const char *fmt, ...) 1653 { 1654 va_list adx; 1655 1656 va_start(adx, fmt); 1657 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1658 va_end(adx); 1659 } 1660 1661 /* 1662 * This is a stripped-down version of strtoull, suitable only for converting 1663 * lowercase hexadecimal numbers that don't overflow. 1664 */ 1665 uint64_t 1666 zfs_strtonum(const char *str, char **nptr) 1667 { 1668 uint64_t val = 0; 1669 char c; 1670 int digit; 1671 1672 while ((c = *str) != '\0') { 1673 if (c >= '0' && c <= '9') 1674 digit = c - '0'; 1675 else if (c >= 'a' && c <= 'f') 1676 digit = 10 + c - 'a'; 1677 else 1678 break; 1679 1680 val *= 16; 1681 val += digit; 1682 1683 str++; 1684 } 1685 1686 if (nptr) 1687 *nptr = (char *)str; 1688 1689 return (val); 1690 } 1691 1692 void 1693 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1694 { 1695 /* 1696 * We bump the feature refcount for each special vdev added to the pool 1697 */ 1698 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1699 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1700 } 1701 1702 /* 1703 * ========================================================================== 1704 * Accessor functions 1705 * ========================================================================== 1706 */ 1707 1708 boolean_t 1709 spa_shutting_down(spa_t *spa) 1710 { 1711 return (spa->spa_async_suspended); 1712 } 1713 1714 dsl_pool_t * 1715 spa_get_dsl(spa_t *spa) 1716 { 1717 return (spa->spa_dsl_pool); 1718 } 1719 1720 boolean_t 1721 spa_is_initializing(spa_t *spa) 1722 { 1723 return (spa->spa_is_initializing); 1724 } 1725 1726 boolean_t 1727 spa_indirect_vdevs_loaded(spa_t *spa) 1728 { 1729 return (spa->spa_indirect_vdevs_loaded); 1730 } 1731 1732 blkptr_t * 1733 spa_get_rootblkptr(spa_t *spa) 1734 { 1735 return (&spa->spa_ubsync.ub_rootbp); 1736 } 1737 1738 void 1739 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1740 { 1741 spa->spa_uberblock.ub_rootbp = *bp; 1742 } 1743 1744 void 1745 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1746 { 1747 if (spa->spa_root == NULL) 1748 buf[0] = '\0'; 1749 else 1750 (void) strlcpy(buf, spa->spa_root, buflen); 1751 } 1752 1753 uint32_t 1754 spa_sync_pass(spa_t *spa) 1755 { 1756 return (spa->spa_sync_pass); 1757 } 1758 1759 char * 1760 spa_name(spa_t *spa) 1761 { 1762 return (spa->spa_name); 1763 } 1764 1765 uint64_t 1766 spa_guid(spa_t *spa) 1767 { 1768 dsl_pool_t *dp = spa_get_dsl(spa); 1769 uint64_t guid; 1770 1771 /* 1772 * If we fail to parse the config during spa_load(), we can go through 1773 * the error path (which posts an ereport) and end up here with no root 1774 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1775 * this case. 1776 */ 1777 if (spa->spa_root_vdev == NULL) 1778 return (spa->spa_config_guid); 1779 1780 guid = spa->spa_last_synced_guid != 0 ? 1781 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1782 1783 /* 1784 * Return the most recently synced out guid unless we're 1785 * in syncing context. 1786 */ 1787 if (dp && dsl_pool_sync_context(dp)) 1788 return (spa->spa_root_vdev->vdev_guid); 1789 else 1790 return (guid); 1791 } 1792 1793 uint64_t 1794 spa_load_guid(spa_t *spa) 1795 { 1796 /* 1797 * This is a GUID that exists solely as a reference for the 1798 * purposes of the arc. It is generated at load time, and 1799 * is never written to persistent storage. 1800 */ 1801 return (spa->spa_load_guid); 1802 } 1803 1804 uint64_t 1805 spa_last_synced_txg(spa_t *spa) 1806 { 1807 return (spa->spa_ubsync.ub_txg); 1808 } 1809 1810 uint64_t 1811 spa_first_txg(spa_t *spa) 1812 { 1813 return (spa->spa_first_txg); 1814 } 1815 1816 uint64_t 1817 spa_syncing_txg(spa_t *spa) 1818 { 1819 return (spa->spa_syncing_txg); 1820 } 1821 1822 /* 1823 * Return the last txg where data can be dirtied. The final txgs 1824 * will be used to just clear out any deferred frees that remain. 1825 */ 1826 uint64_t 1827 spa_final_dirty_txg(spa_t *spa) 1828 { 1829 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1830 } 1831 1832 pool_state_t 1833 spa_state(spa_t *spa) 1834 { 1835 return (spa->spa_state); 1836 } 1837 1838 spa_load_state_t 1839 spa_load_state(spa_t *spa) 1840 { 1841 return (spa->spa_load_state); 1842 } 1843 1844 uint64_t 1845 spa_freeze_txg(spa_t *spa) 1846 { 1847 return (spa->spa_freeze_txg); 1848 } 1849 1850 /* 1851 * Return the inflated asize for a logical write in bytes. This is used by the 1852 * DMU to calculate the space a logical write will require on disk. 1853 * If lsize is smaller than the largest physical block size allocatable on this 1854 * pool we use its value instead, since the write will end up using the whole 1855 * block anyway. 1856 */ 1857 uint64_t 1858 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1859 { 1860 if (lsize == 0) 1861 return (0); /* No inflation needed */ 1862 return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation); 1863 } 1864 1865 /* 1866 * Return the amount of slop space in bytes. It is typically 1/32 of the pool 1867 * (3.2%), minus the embedded log space. On very small pools, it may be 1868 * slightly larger than this. On very large pools, it will be capped to 1869 * the value of spa_max_slop. The embedded log space is not included in 1870 * spa_dspace. By subtracting it, the usable space (per "zfs list") is a 1871 * constant 97% of the total space, regardless of metaslab size (assuming the 1872 * default spa_slop_shift=5 and a non-tiny pool). 1873 * 1874 * See the comment above spa_slop_shift for more details. 1875 */ 1876 uint64_t 1877 spa_get_slop_space(spa_t *spa) 1878 { 1879 uint64_t space = 0; 1880 uint64_t slop = 0; 1881 1882 /* 1883 * Make sure spa_dedup_dspace has been set. 1884 */ 1885 if (spa->spa_dedup_dspace == ~0ULL) 1886 spa_update_dspace(spa); 1887 1888 space = spa->spa_rdspace; 1889 slop = MIN(space >> spa_slop_shift, spa_max_slop); 1890 1891 /* 1892 * Subtract the embedded log space, but no more than half the (3.2%) 1893 * unusable space. Note, the "no more than half" is only relevant if 1894 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by 1895 * default. 1896 */ 1897 uint64_t embedded_log = 1898 metaslab_class_get_dspace(spa_embedded_log_class(spa)); 1899 slop -= MIN(embedded_log, slop >> 1); 1900 1901 /* 1902 * Slop space should be at least spa_min_slop, but no more than half 1903 * the entire pool. 1904 */ 1905 slop = MAX(slop, MIN(space >> 1, spa_min_slop)); 1906 return (slop); 1907 } 1908 1909 uint64_t 1910 spa_get_dspace(spa_t *spa) 1911 { 1912 return (spa->spa_dspace); 1913 } 1914 1915 uint64_t 1916 spa_get_checkpoint_space(spa_t *spa) 1917 { 1918 return (spa->spa_checkpoint_info.sci_dspace); 1919 } 1920 1921 void 1922 spa_update_dspace(spa_t *spa) 1923 { 1924 spa->spa_rdspace = metaslab_class_get_dspace(spa_normal_class(spa)); 1925 if (spa->spa_nonallocating_dspace > 0) { 1926 /* 1927 * Subtract the space provided by all non-allocating vdevs that 1928 * contribute to dspace. If a file is overwritten, its old 1929 * blocks are freed and new blocks are allocated. If there are 1930 * no snapshots of the file, the available space should remain 1931 * the same. The old blocks could be freed from the 1932 * non-allocating vdev, but the new blocks must be allocated on 1933 * other (allocating) vdevs. By reserving the entire size of 1934 * the non-allocating vdevs (including allocated space), we 1935 * ensure that there will be enough space on the allocating 1936 * vdevs for this file overwrite to succeed. 1937 * 1938 * Note that the DMU/DSL doesn't actually know or care 1939 * how much space is allocated (it does its own tracking 1940 * of how much space has been logically used). So it 1941 * doesn't matter that the data we are moving may be 1942 * allocated twice (on the old device and the new device). 1943 */ 1944 ASSERT3U(spa->spa_rdspace, >=, spa->spa_nonallocating_dspace); 1945 spa->spa_rdspace -= spa->spa_nonallocating_dspace; 1946 } 1947 spa->spa_dspace = spa->spa_rdspace + ddt_get_dedup_dspace(spa) + 1948 brt_get_dspace(spa); 1949 } 1950 1951 /* 1952 * Return the failure mode that has been set to this pool. The default 1953 * behavior will be to block all I/Os when a complete failure occurs. 1954 */ 1955 uint64_t 1956 spa_get_failmode(spa_t *spa) 1957 { 1958 return (spa->spa_failmode); 1959 } 1960 1961 boolean_t 1962 spa_suspended(spa_t *spa) 1963 { 1964 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1965 } 1966 1967 uint64_t 1968 spa_version(spa_t *spa) 1969 { 1970 return (spa->spa_ubsync.ub_version); 1971 } 1972 1973 boolean_t 1974 spa_deflate(spa_t *spa) 1975 { 1976 return (spa->spa_deflate); 1977 } 1978 1979 metaslab_class_t * 1980 spa_normal_class(spa_t *spa) 1981 { 1982 return (spa->spa_normal_class); 1983 } 1984 1985 metaslab_class_t * 1986 spa_log_class(spa_t *spa) 1987 { 1988 return (spa->spa_log_class); 1989 } 1990 1991 metaslab_class_t * 1992 spa_embedded_log_class(spa_t *spa) 1993 { 1994 return (spa->spa_embedded_log_class); 1995 } 1996 1997 metaslab_class_t * 1998 spa_special_class(spa_t *spa) 1999 { 2000 return (spa->spa_special_class); 2001 } 2002 2003 metaslab_class_t * 2004 spa_dedup_class(spa_t *spa) 2005 { 2006 return (spa->spa_dedup_class); 2007 } 2008 2009 boolean_t 2010 spa_special_has_ddt(spa_t *spa) 2011 { 2012 return (zfs_ddt_data_is_special && 2013 spa->spa_special_class->mc_groups != 0); 2014 } 2015 2016 /* 2017 * Locate an appropriate allocation class 2018 */ 2019 metaslab_class_t * 2020 spa_preferred_class(spa_t *spa, const zio_t *zio) 2021 { 2022 const zio_prop_t *zp = &zio->io_prop; 2023 2024 /* 2025 * Override object type for the purposes of selecting a storage class. 2026 * Primarily for DMU_OTN_ types where we can't explicitly control their 2027 * storage class; instead, choose a static type most closely matches 2028 * what we want. 2029 */ 2030 dmu_object_type_t objtype = 2031 zp->zp_storage_type == DMU_OT_NONE ? 2032 zp->zp_type : zp->zp_storage_type; 2033 2034 /* 2035 * ZIL allocations determine their class in zio_alloc_zil(). 2036 */ 2037 ASSERT(objtype != DMU_OT_INTENT_LOG); 2038 2039 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 2040 2041 if (DMU_OT_IS_DDT(objtype)) { 2042 if (spa->spa_dedup_class->mc_groups != 0) 2043 return (spa_dedup_class(spa)); 2044 else if (has_special_class && zfs_ddt_data_is_special) 2045 return (spa_special_class(spa)); 2046 else 2047 return (spa_normal_class(spa)); 2048 } 2049 2050 /* Indirect blocks for user data can land in special if allowed */ 2051 if (zp->zp_level > 0 && 2052 (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 2053 if (has_special_class && zfs_user_indirect_is_special) 2054 return (spa_special_class(spa)); 2055 else 2056 return (spa_normal_class(spa)); 2057 } 2058 2059 if (DMU_OT_IS_METADATA(objtype) || zp->zp_level > 0) { 2060 if (has_special_class) 2061 return (spa_special_class(spa)); 2062 else 2063 return (spa_normal_class(spa)); 2064 } 2065 2066 /* 2067 * Allow small file blocks in special class in some cases (like 2068 * for the dRAID vdev feature). But always leave a reserve of 2069 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 2070 */ 2071 if (DMU_OT_IS_FILE(objtype) && 2072 has_special_class && zio->io_size <= zp->zp_zpl_smallblk) { 2073 metaslab_class_t *special = spa_special_class(spa); 2074 uint64_t alloc = metaslab_class_get_alloc(special); 2075 uint64_t space = metaslab_class_get_space(special); 2076 uint64_t limit = 2077 (space * (100 - zfs_special_class_metadata_reserve_pct)) 2078 / 100; 2079 2080 if (alloc < limit) 2081 return (special); 2082 } 2083 2084 return (spa_normal_class(spa)); 2085 } 2086 2087 void 2088 spa_evicting_os_register(spa_t *spa, objset_t *os) 2089 { 2090 mutex_enter(&spa->spa_evicting_os_lock); 2091 list_insert_head(&spa->spa_evicting_os_list, os); 2092 mutex_exit(&spa->spa_evicting_os_lock); 2093 } 2094 2095 void 2096 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 2097 { 2098 mutex_enter(&spa->spa_evicting_os_lock); 2099 list_remove(&spa->spa_evicting_os_list, os); 2100 cv_broadcast(&spa->spa_evicting_os_cv); 2101 mutex_exit(&spa->spa_evicting_os_lock); 2102 } 2103 2104 void 2105 spa_evicting_os_wait(spa_t *spa) 2106 { 2107 mutex_enter(&spa->spa_evicting_os_lock); 2108 while (!list_is_empty(&spa->spa_evicting_os_list)) 2109 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2110 mutex_exit(&spa->spa_evicting_os_lock); 2111 2112 dmu_buf_user_evict_wait(); 2113 } 2114 2115 int 2116 spa_max_replication(spa_t *spa) 2117 { 2118 /* 2119 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2120 * handle BPs with more than one DVA allocated. Set our max 2121 * replication level accordingly. 2122 */ 2123 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2124 return (1); 2125 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2126 } 2127 2128 int 2129 spa_prev_software_version(spa_t *spa) 2130 { 2131 return (spa->spa_prev_software_version); 2132 } 2133 2134 uint64_t 2135 spa_deadman_synctime(spa_t *spa) 2136 { 2137 return (spa->spa_deadman_synctime); 2138 } 2139 2140 spa_autotrim_t 2141 spa_get_autotrim(spa_t *spa) 2142 { 2143 return (spa->spa_autotrim); 2144 } 2145 2146 uint64_t 2147 spa_deadman_ziotime(spa_t *spa) 2148 { 2149 return (spa->spa_deadman_ziotime); 2150 } 2151 2152 uint64_t 2153 spa_get_deadman_failmode(spa_t *spa) 2154 { 2155 return (spa->spa_deadman_failmode); 2156 } 2157 2158 void 2159 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2160 { 2161 if (strcmp(failmode, "wait") == 0) 2162 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2163 else if (strcmp(failmode, "continue") == 0) 2164 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2165 else if (strcmp(failmode, "panic") == 0) 2166 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2167 else 2168 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2169 } 2170 2171 void 2172 spa_set_deadman_ziotime(hrtime_t ns) 2173 { 2174 spa_t *spa = NULL; 2175 2176 if (spa_mode_global != SPA_MODE_UNINIT) { 2177 mutex_enter(&spa_namespace_lock); 2178 while ((spa = spa_next(spa)) != NULL) 2179 spa->spa_deadman_ziotime = ns; 2180 mutex_exit(&spa_namespace_lock); 2181 } 2182 } 2183 2184 void 2185 spa_set_deadman_synctime(hrtime_t ns) 2186 { 2187 spa_t *spa = NULL; 2188 2189 if (spa_mode_global != SPA_MODE_UNINIT) { 2190 mutex_enter(&spa_namespace_lock); 2191 while ((spa = spa_next(spa)) != NULL) 2192 spa->spa_deadman_synctime = ns; 2193 mutex_exit(&spa_namespace_lock); 2194 } 2195 } 2196 2197 uint64_t 2198 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2199 { 2200 uint64_t asize = DVA_GET_ASIZE(dva); 2201 uint64_t dsize = asize; 2202 2203 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 2204 2205 if (asize != 0 && spa->spa_deflate) { 2206 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2207 if (vd != NULL) 2208 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2209 vd->vdev_deflate_ratio; 2210 } 2211 2212 return (dsize); 2213 } 2214 2215 uint64_t 2216 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2217 { 2218 uint64_t dsize = 0; 2219 2220 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2221 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2222 2223 return (dsize); 2224 } 2225 2226 uint64_t 2227 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2228 { 2229 uint64_t dsize = 0; 2230 2231 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2232 2233 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2234 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2235 2236 spa_config_exit(spa, SCL_VDEV, FTAG); 2237 2238 return (dsize); 2239 } 2240 2241 uint64_t 2242 spa_dirty_data(spa_t *spa) 2243 { 2244 return (spa->spa_dsl_pool->dp_dirty_total); 2245 } 2246 2247 /* 2248 * ========================================================================== 2249 * SPA Import Progress Routines 2250 * ========================================================================== 2251 */ 2252 2253 typedef struct spa_import_progress { 2254 uint64_t pool_guid; /* unique id for updates */ 2255 char *pool_name; 2256 spa_load_state_t spa_load_state; 2257 char *spa_load_notes; 2258 uint64_t mmp_sec_remaining; /* MMP activity check */ 2259 uint64_t spa_load_max_txg; /* rewind txg */ 2260 procfs_list_node_t smh_node; 2261 } spa_import_progress_t; 2262 2263 spa_history_list_t *spa_import_progress_list = NULL; 2264 2265 static int 2266 spa_import_progress_show_header(struct seq_file *f) 2267 { 2268 seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid", 2269 "load_state", "multihost_secs", "max_txg", 2270 "pool_name", "notes"); 2271 return (0); 2272 } 2273 2274 static int 2275 spa_import_progress_show(struct seq_file *f, void *data) 2276 { 2277 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2278 2279 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n", 2280 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2281 (u_longlong_t)sip->mmp_sec_remaining, 2282 (u_longlong_t)sip->spa_load_max_txg, 2283 (sip->pool_name ? sip->pool_name : "-"), 2284 (sip->spa_load_notes ? sip->spa_load_notes : "-")); 2285 2286 return (0); 2287 } 2288 2289 /* Remove oldest elements from list until there are no more than 'size' left */ 2290 static void 2291 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2292 { 2293 spa_import_progress_t *sip; 2294 while (shl->size > size) { 2295 sip = list_remove_head(&shl->procfs_list.pl_list); 2296 if (sip->pool_name) 2297 spa_strfree(sip->pool_name); 2298 if (sip->spa_load_notes) 2299 kmem_strfree(sip->spa_load_notes); 2300 kmem_free(sip, sizeof (spa_import_progress_t)); 2301 shl->size--; 2302 } 2303 2304 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2305 } 2306 2307 static void 2308 spa_import_progress_init(void) 2309 { 2310 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2311 KM_SLEEP); 2312 2313 spa_import_progress_list->size = 0; 2314 2315 spa_import_progress_list->procfs_list.pl_private = 2316 spa_import_progress_list; 2317 2318 procfs_list_install("zfs", 2319 NULL, 2320 "import_progress", 2321 0644, 2322 &spa_import_progress_list->procfs_list, 2323 spa_import_progress_show, 2324 spa_import_progress_show_header, 2325 NULL, 2326 offsetof(spa_import_progress_t, smh_node)); 2327 } 2328 2329 static void 2330 spa_import_progress_destroy(void) 2331 { 2332 spa_history_list_t *shl = spa_import_progress_list; 2333 procfs_list_uninstall(&shl->procfs_list); 2334 spa_import_progress_truncate(shl, 0); 2335 procfs_list_destroy(&shl->procfs_list); 2336 kmem_free(shl, sizeof (spa_history_list_t)); 2337 } 2338 2339 int 2340 spa_import_progress_set_state(uint64_t pool_guid, 2341 spa_load_state_t load_state) 2342 { 2343 spa_history_list_t *shl = spa_import_progress_list; 2344 spa_import_progress_t *sip; 2345 int error = ENOENT; 2346 2347 if (shl->size == 0) 2348 return (0); 2349 2350 mutex_enter(&shl->procfs_list.pl_lock); 2351 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2352 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2353 if (sip->pool_guid == pool_guid) { 2354 sip->spa_load_state = load_state; 2355 if (sip->spa_load_notes != NULL) { 2356 kmem_strfree(sip->spa_load_notes); 2357 sip->spa_load_notes = NULL; 2358 } 2359 error = 0; 2360 break; 2361 } 2362 } 2363 mutex_exit(&shl->procfs_list.pl_lock); 2364 2365 return (error); 2366 } 2367 2368 static void 2369 spa_import_progress_set_notes_impl(spa_t *spa, boolean_t log_dbgmsg, 2370 const char *fmt, va_list adx) 2371 { 2372 spa_history_list_t *shl = spa_import_progress_list; 2373 spa_import_progress_t *sip; 2374 uint64_t pool_guid = spa_guid(spa); 2375 2376 if (shl->size == 0) 2377 return; 2378 2379 char *notes = kmem_vasprintf(fmt, adx); 2380 2381 mutex_enter(&shl->procfs_list.pl_lock); 2382 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2383 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2384 if (sip->pool_guid == pool_guid) { 2385 if (sip->spa_load_notes != NULL) { 2386 kmem_strfree(sip->spa_load_notes); 2387 sip->spa_load_notes = NULL; 2388 } 2389 sip->spa_load_notes = notes; 2390 if (log_dbgmsg) 2391 zfs_dbgmsg("'%s' %s", sip->pool_name, notes); 2392 notes = NULL; 2393 break; 2394 } 2395 } 2396 mutex_exit(&shl->procfs_list.pl_lock); 2397 if (notes != NULL) 2398 kmem_strfree(notes); 2399 } 2400 2401 void 2402 spa_import_progress_set_notes(spa_t *spa, const char *fmt, ...) 2403 { 2404 va_list adx; 2405 2406 va_start(adx, fmt); 2407 spa_import_progress_set_notes_impl(spa, B_TRUE, fmt, adx); 2408 va_end(adx); 2409 } 2410 2411 void 2412 spa_import_progress_set_notes_nolog(spa_t *spa, const char *fmt, ...) 2413 { 2414 va_list adx; 2415 2416 va_start(adx, fmt); 2417 spa_import_progress_set_notes_impl(spa, B_FALSE, fmt, adx); 2418 va_end(adx); 2419 } 2420 2421 int 2422 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2423 { 2424 spa_history_list_t *shl = spa_import_progress_list; 2425 spa_import_progress_t *sip; 2426 int error = ENOENT; 2427 2428 if (shl->size == 0) 2429 return (0); 2430 2431 mutex_enter(&shl->procfs_list.pl_lock); 2432 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2433 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2434 if (sip->pool_guid == pool_guid) { 2435 sip->spa_load_max_txg = load_max_txg; 2436 error = 0; 2437 break; 2438 } 2439 } 2440 mutex_exit(&shl->procfs_list.pl_lock); 2441 2442 return (error); 2443 } 2444 2445 int 2446 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2447 uint64_t mmp_sec_remaining) 2448 { 2449 spa_history_list_t *shl = spa_import_progress_list; 2450 spa_import_progress_t *sip; 2451 int error = ENOENT; 2452 2453 if (shl->size == 0) 2454 return (0); 2455 2456 mutex_enter(&shl->procfs_list.pl_lock); 2457 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2458 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2459 if (sip->pool_guid == pool_guid) { 2460 sip->mmp_sec_remaining = mmp_sec_remaining; 2461 error = 0; 2462 break; 2463 } 2464 } 2465 mutex_exit(&shl->procfs_list.pl_lock); 2466 2467 return (error); 2468 } 2469 2470 /* 2471 * A new import is in progress, add an entry. 2472 */ 2473 void 2474 spa_import_progress_add(spa_t *spa) 2475 { 2476 spa_history_list_t *shl = spa_import_progress_list; 2477 spa_import_progress_t *sip; 2478 const char *poolname = NULL; 2479 2480 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2481 sip->pool_guid = spa_guid(spa); 2482 2483 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2484 &poolname); 2485 if (poolname == NULL) 2486 poolname = spa_name(spa); 2487 sip->pool_name = spa_strdup(poolname); 2488 sip->spa_load_state = spa_load_state(spa); 2489 sip->spa_load_notes = NULL; 2490 2491 mutex_enter(&shl->procfs_list.pl_lock); 2492 procfs_list_add(&shl->procfs_list, sip); 2493 shl->size++; 2494 mutex_exit(&shl->procfs_list.pl_lock); 2495 } 2496 2497 void 2498 spa_import_progress_remove(uint64_t pool_guid) 2499 { 2500 spa_history_list_t *shl = spa_import_progress_list; 2501 spa_import_progress_t *sip; 2502 2503 mutex_enter(&shl->procfs_list.pl_lock); 2504 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2505 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2506 if (sip->pool_guid == pool_guid) { 2507 if (sip->pool_name) 2508 spa_strfree(sip->pool_name); 2509 if (sip->spa_load_notes) 2510 spa_strfree(sip->spa_load_notes); 2511 list_remove(&shl->procfs_list.pl_list, sip); 2512 shl->size--; 2513 kmem_free(sip, sizeof (spa_import_progress_t)); 2514 break; 2515 } 2516 } 2517 mutex_exit(&shl->procfs_list.pl_lock); 2518 } 2519 2520 /* 2521 * ========================================================================== 2522 * Initialization and Termination 2523 * ========================================================================== 2524 */ 2525 2526 static int 2527 spa_name_compare(const void *a1, const void *a2) 2528 { 2529 const spa_t *s1 = a1; 2530 const spa_t *s2 = a2; 2531 int s; 2532 2533 s = strcmp(s1->spa_name, s2->spa_name); 2534 2535 return (TREE_ISIGN(s)); 2536 } 2537 2538 void 2539 spa_boot_init(void *unused) 2540 { 2541 (void) unused; 2542 spa_config_load(); 2543 } 2544 2545 void 2546 spa_init(spa_mode_t mode) 2547 { 2548 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2549 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2550 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2551 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2552 2553 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2554 offsetof(spa_t, spa_avl)); 2555 2556 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2557 offsetof(spa_aux_t, aux_avl)); 2558 2559 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2560 offsetof(spa_aux_t, aux_avl)); 2561 2562 spa_mode_global = mode; 2563 2564 #ifndef _KERNEL 2565 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2566 struct sigaction sa; 2567 2568 sa.sa_flags = SA_SIGINFO; 2569 sigemptyset(&sa.sa_mask); 2570 sa.sa_sigaction = arc_buf_sigsegv; 2571 2572 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2573 perror("could not enable watchpoints: " 2574 "sigaction(SIGSEGV, ...) = "); 2575 } else { 2576 arc_watch = B_TRUE; 2577 } 2578 } 2579 #endif 2580 2581 fm_init(); 2582 zfs_refcount_init(); 2583 unique_init(); 2584 zfs_btree_init(); 2585 metaslab_stat_init(); 2586 brt_init(); 2587 ddt_init(); 2588 zio_init(); 2589 dmu_init(); 2590 zil_init(); 2591 vdev_mirror_stat_init(); 2592 vdev_raidz_math_init(); 2593 vdev_file_init(); 2594 zfs_prop_init(); 2595 chksum_init(); 2596 zpool_prop_init(); 2597 zpool_feature_init(); 2598 spa_config_load(); 2599 vdev_prop_init(); 2600 l2arc_start(); 2601 scan_init(); 2602 qat_init(); 2603 spa_import_progress_init(); 2604 zap_init(); 2605 } 2606 2607 void 2608 spa_fini(void) 2609 { 2610 l2arc_stop(); 2611 2612 spa_evict_all(); 2613 2614 vdev_file_fini(); 2615 vdev_mirror_stat_fini(); 2616 vdev_raidz_math_fini(); 2617 chksum_fini(); 2618 zil_fini(); 2619 dmu_fini(); 2620 zio_fini(); 2621 ddt_fini(); 2622 brt_fini(); 2623 metaslab_stat_fini(); 2624 zfs_btree_fini(); 2625 unique_fini(); 2626 zfs_refcount_fini(); 2627 fm_fini(); 2628 scan_fini(); 2629 qat_fini(); 2630 spa_import_progress_destroy(); 2631 zap_fini(); 2632 2633 avl_destroy(&spa_namespace_avl); 2634 avl_destroy(&spa_spare_avl); 2635 avl_destroy(&spa_l2cache_avl); 2636 2637 cv_destroy(&spa_namespace_cv); 2638 mutex_destroy(&spa_namespace_lock); 2639 mutex_destroy(&spa_spare_lock); 2640 mutex_destroy(&spa_l2cache_lock); 2641 } 2642 2643 /* 2644 * Return whether this pool has a dedicated slog device. No locking needed. 2645 * It's not a problem if the wrong answer is returned as it's only for 2646 * performance and not correctness. 2647 */ 2648 boolean_t 2649 spa_has_slogs(spa_t *spa) 2650 { 2651 return (spa->spa_log_class->mc_groups != 0); 2652 } 2653 2654 spa_log_state_t 2655 spa_get_log_state(spa_t *spa) 2656 { 2657 return (spa->spa_log_state); 2658 } 2659 2660 void 2661 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2662 { 2663 spa->spa_log_state = state; 2664 } 2665 2666 boolean_t 2667 spa_is_root(spa_t *spa) 2668 { 2669 return (spa->spa_is_root); 2670 } 2671 2672 boolean_t 2673 spa_writeable(spa_t *spa) 2674 { 2675 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2676 } 2677 2678 /* 2679 * Returns true if there is a pending sync task in any of the current 2680 * syncing txg, the current quiescing txg, or the current open txg. 2681 */ 2682 boolean_t 2683 spa_has_pending_synctask(spa_t *spa) 2684 { 2685 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2686 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2687 } 2688 2689 spa_mode_t 2690 spa_mode(spa_t *spa) 2691 { 2692 return (spa->spa_mode); 2693 } 2694 2695 uint64_t 2696 spa_get_last_scrubbed_txg(spa_t *spa) 2697 { 2698 return (spa->spa_scrubbed_last_txg); 2699 } 2700 2701 uint64_t 2702 spa_bootfs(spa_t *spa) 2703 { 2704 return (spa->spa_bootfs); 2705 } 2706 2707 uint64_t 2708 spa_delegation(spa_t *spa) 2709 { 2710 return (spa->spa_delegation); 2711 } 2712 2713 objset_t * 2714 spa_meta_objset(spa_t *spa) 2715 { 2716 return (spa->spa_meta_objset); 2717 } 2718 2719 enum zio_checksum 2720 spa_dedup_checksum(spa_t *spa) 2721 { 2722 return (spa->spa_dedup_checksum); 2723 } 2724 2725 /* 2726 * Reset pool scan stat per scan pass (or reboot). 2727 */ 2728 void 2729 spa_scan_stat_init(spa_t *spa) 2730 { 2731 /* data not stored on disk */ 2732 spa->spa_scan_pass_start = gethrestime_sec(); 2733 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2734 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2735 else 2736 spa->spa_scan_pass_scrub_pause = 0; 2737 2738 if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan)) 2739 spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start; 2740 else 2741 spa->spa_scan_pass_errorscrub_pause = 0; 2742 2743 spa->spa_scan_pass_scrub_spent_paused = 0; 2744 spa->spa_scan_pass_exam = 0; 2745 spa->spa_scan_pass_issued = 0; 2746 2747 // error scrub stats 2748 spa->spa_scan_pass_errorscrub_spent_paused = 0; 2749 } 2750 2751 /* 2752 * Get scan stats for zpool status reports 2753 */ 2754 int 2755 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2756 { 2757 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2758 2759 if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE && 2760 scn->errorscrub_phys.dep_func == POOL_SCAN_NONE)) 2761 return (SET_ERROR(ENOENT)); 2762 2763 memset(ps, 0, sizeof (pool_scan_stat_t)); 2764 2765 /* data stored on disk */ 2766 ps->pss_func = scn->scn_phys.scn_func; 2767 ps->pss_state = scn->scn_phys.scn_state; 2768 ps->pss_start_time = scn->scn_phys.scn_start_time; 2769 ps->pss_end_time = scn->scn_phys.scn_end_time; 2770 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2771 ps->pss_examined = scn->scn_phys.scn_examined; 2772 ps->pss_skipped = scn->scn_phys.scn_skipped; 2773 ps->pss_processed = scn->scn_phys.scn_processed; 2774 ps->pss_errors = scn->scn_phys.scn_errors; 2775 2776 /* data not stored on disk */ 2777 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2778 ps->pss_pass_start = spa->spa_scan_pass_start; 2779 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2780 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2781 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2782 ps->pss_issued = 2783 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2784 2785 /* error scrub data stored on disk */ 2786 ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func; 2787 ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state; 2788 ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time; 2789 ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time; 2790 ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined; 2791 ps->pss_error_scrub_to_be_examined = 2792 scn->errorscrub_phys.dep_to_examine; 2793 2794 /* error scrub data not stored on disk */ 2795 ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause; 2796 2797 return (0); 2798 } 2799 2800 int 2801 spa_maxblocksize(spa_t *spa) 2802 { 2803 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2804 return (SPA_MAXBLOCKSIZE); 2805 else 2806 return (SPA_OLD_MAXBLOCKSIZE); 2807 } 2808 2809 2810 /* 2811 * Returns the txg that the last device removal completed. No indirect mappings 2812 * have been added since this txg. 2813 */ 2814 uint64_t 2815 spa_get_last_removal_txg(spa_t *spa) 2816 { 2817 uint64_t vdevid; 2818 uint64_t ret = -1ULL; 2819 2820 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2821 /* 2822 * sr_prev_indirect_vdev is only modified while holding all the 2823 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2824 * examining it. 2825 */ 2826 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2827 2828 while (vdevid != -1ULL) { 2829 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2830 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2831 2832 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2833 2834 /* 2835 * If the removal did not remap any data, we don't care. 2836 */ 2837 if (vdev_indirect_births_count(vib) != 0) { 2838 ret = vdev_indirect_births_last_entry_txg(vib); 2839 break; 2840 } 2841 2842 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2843 } 2844 spa_config_exit(spa, SCL_VDEV, FTAG); 2845 2846 IMPLY(ret != -1ULL, 2847 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2848 2849 return (ret); 2850 } 2851 2852 int 2853 spa_maxdnodesize(spa_t *spa) 2854 { 2855 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2856 return (DNODE_MAX_SIZE); 2857 else 2858 return (DNODE_MIN_SIZE); 2859 } 2860 2861 boolean_t 2862 spa_multihost(spa_t *spa) 2863 { 2864 return (spa->spa_multihost ? B_TRUE : B_FALSE); 2865 } 2866 2867 uint32_t 2868 spa_get_hostid(spa_t *spa) 2869 { 2870 return (spa->spa_hostid); 2871 } 2872 2873 boolean_t 2874 spa_trust_config(spa_t *spa) 2875 { 2876 return (spa->spa_trust_config); 2877 } 2878 2879 uint64_t 2880 spa_missing_tvds_allowed(spa_t *spa) 2881 { 2882 return (spa->spa_missing_tvds_allowed); 2883 } 2884 2885 space_map_t * 2886 spa_syncing_log_sm(spa_t *spa) 2887 { 2888 return (spa->spa_syncing_log_sm); 2889 } 2890 2891 void 2892 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2893 { 2894 spa->spa_missing_tvds = missing; 2895 } 2896 2897 /* 2898 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2899 */ 2900 const char * 2901 spa_state_to_name(spa_t *spa) 2902 { 2903 ASSERT3P(spa, !=, NULL); 2904 2905 /* 2906 * it is possible for the spa to exist, without root vdev 2907 * as the spa transitions during import/export 2908 */ 2909 vdev_t *rvd = spa->spa_root_vdev; 2910 if (rvd == NULL) { 2911 return ("TRANSITIONING"); 2912 } 2913 vdev_state_t state = rvd->vdev_state; 2914 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2915 2916 if (spa_suspended(spa)) 2917 return ("SUSPENDED"); 2918 2919 switch (state) { 2920 case VDEV_STATE_CLOSED: 2921 case VDEV_STATE_OFFLINE: 2922 return ("OFFLINE"); 2923 case VDEV_STATE_REMOVED: 2924 return ("REMOVED"); 2925 case VDEV_STATE_CANT_OPEN: 2926 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2927 return ("FAULTED"); 2928 else if (aux == VDEV_AUX_SPLIT_POOL) 2929 return ("SPLIT"); 2930 else 2931 return ("UNAVAIL"); 2932 case VDEV_STATE_FAULTED: 2933 return ("FAULTED"); 2934 case VDEV_STATE_DEGRADED: 2935 return ("DEGRADED"); 2936 case VDEV_STATE_HEALTHY: 2937 return ("ONLINE"); 2938 default: 2939 break; 2940 } 2941 2942 return ("UNKNOWN"); 2943 } 2944 2945 boolean_t 2946 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2947 { 2948 vdev_t *rvd = spa->spa_root_vdev; 2949 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2950 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2951 return (B_FALSE); 2952 } 2953 return (B_TRUE); 2954 } 2955 2956 boolean_t 2957 spa_has_checkpoint(spa_t *spa) 2958 { 2959 return (spa->spa_checkpoint_txg != 0); 2960 } 2961 2962 boolean_t 2963 spa_importing_readonly_checkpoint(spa_t *spa) 2964 { 2965 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2966 spa->spa_mode == SPA_MODE_READ); 2967 } 2968 2969 uint64_t 2970 spa_min_claim_txg(spa_t *spa) 2971 { 2972 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2973 2974 if (checkpoint_txg != 0) 2975 return (checkpoint_txg + 1); 2976 2977 return (spa->spa_first_txg); 2978 } 2979 2980 /* 2981 * If there is a checkpoint, async destroys may consume more space from 2982 * the pool instead of freeing it. In an attempt to save the pool from 2983 * getting suspended when it is about to run out of space, we stop 2984 * processing async destroys. 2985 */ 2986 boolean_t 2987 spa_suspend_async_destroy(spa_t *spa) 2988 { 2989 dsl_pool_t *dp = spa_get_dsl(spa); 2990 2991 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2992 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2993 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2994 uint64_t avail = (unreserved > used) ? (unreserved - used) : 0; 2995 2996 if (spa_has_checkpoint(spa) && avail == 0) 2997 return (B_TRUE); 2998 2999 return (B_FALSE); 3000 } 3001 3002 #if defined(_KERNEL) 3003 3004 int 3005 param_set_deadman_failmode_common(const char *val) 3006 { 3007 spa_t *spa = NULL; 3008 char *p; 3009 3010 if (val == NULL) 3011 return (SET_ERROR(EINVAL)); 3012 3013 if ((p = strchr(val, '\n')) != NULL) 3014 *p = '\0'; 3015 3016 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 3017 strcmp(val, "panic")) 3018 return (SET_ERROR(EINVAL)); 3019 3020 if (spa_mode_global != SPA_MODE_UNINIT) { 3021 mutex_enter(&spa_namespace_lock); 3022 while ((spa = spa_next(spa)) != NULL) 3023 spa_set_deadman_failmode(spa, val); 3024 mutex_exit(&spa_namespace_lock); 3025 } 3026 3027 return (0); 3028 } 3029 #endif 3030 3031 /* Namespace manipulation */ 3032 EXPORT_SYMBOL(spa_lookup); 3033 EXPORT_SYMBOL(spa_add); 3034 EXPORT_SYMBOL(spa_remove); 3035 EXPORT_SYMBOL(spa_next); 3036 3037 /* Refcount functions */ 3038 EXPORT_SYMBOL(spa_open_ref); 3039 EXPORT_SYMBOL(spa_close); 3040 EXPORT_SYMBOL(spa_refcount_zero); 3041 3042 /* Pool configuration lock */ 3043 EXPORT_SYMBOL(spa_config_tryenter); 3044 EXPORT_SYMBOL(spa_config_enter); 3045 EXPORT_SYMBOL(spa_config_exit); 3046 EXPORT_SYMBOL(spa_config_held); 3047 3048 /* Pool vdev add/remove lock */ 3049 EXPORT_SYMBOL(spa_vdev_enter); 3050 EXPORT_SYMBOL(spa_vdev_exit); 3051 3052 /* Pool vdev state change lock */ 3053 EXPORT_SYMBOL(spa_vdev_state_enter); 3054 EXPORT_SYMBOL(spa_vdev_state_exit); 3055 3056 /* Accessor functions */ 3057 EXPORT_SYMBOL(spa_shutting_down); 3058 EXPORT_SYMBOL(spa_get_dsl); 3059 EXPORT_SYMBOL(spa_get_rootblkptr); 3060 EXPORT_SYMBOL(spa_set_rootblkptr); 3061 EXPORT_SYMBOL(spa_altroot); 3062 EXPORT_SYMBOL(spa_sync_pass); 3063 EXPORT_SYMBOL(spa_name); 3064 EXPORT_SYMBOL(spa_guid); 3065 EXPORT_SYMBOL(spa_last_synced_txg); 3066 EXPORT_SYMBOL(spa_first_txg); 3067 EXPORT_SYMBOL(spa_syncing_txg); 3068 EXPORT_SYMBOL(spa_version); 3069 EXPORT_SYMBOL(spa_state); 3070 EXPORT_SYMBOL(spa_load_state); 3071 EXPORT_SYMBOL(spa_freeze_txg); 3072 EXPORT_SYMBOL(spa_get_dspace); 3073 EXPORT_SYMBOL(spa_update_dspace); 3074 EXPORT_SYMBOL(spa_deflate); 3075 EXPORT_SYMBOL(spa_normal_class); 3076 EXPORT_SYMBOL(spa_log_class); 3077 EXPORT_SYMBOL(spa_special_class); 3078 EXPORT_SYMBOL(spa_preferred_class); 3079 EXPORT_SYMBOL(spa_max_replication); 3080 EXPORT_SYMBOL(spa_prev_software_version); 3081 EXPORT_SYMBOL(spa_get_failmode); 3082 EXPORT_SYMBOL(spa_suspended); 3083 EXPORT_SYMBOL(spa_bootfs); 3084 EXPORT_SYMBOL(spa_delegation); 3085 EXPORT_SYMBOL(spa_meta_objset); 3086 EXPORT_SYMBOL(spa_maxblocksize); 3087 EXPORT_SYMBOL(spa_maxdnodesize); 3088 3089 /* Miscellaneous support routines */ 3090 EXPORT_SYMBOL(spa_guid_exists); 3091 EXPORT_SYMBOL(spa_strdup); 3092 EXPORT_SYMBOL(spa_strfree); 3093 EXPORT_SYMBOL(spa_generate_guid); 3094 EXPORT_SYMBOL(snprintf_blkptr); 3095 EXPORT_SYMBOL(spa_freeze); 3096 EXPORT_SYMBOL(spa_upgrade); 3097 EXPORT_SYMBOL(spa_evict_all); 3098 EXPORT_SYMBOL(spa_lookup_by_guid); 3099 EXPORT_SYMBOL(spa_has_spare); 3100 EXPORT_SYMBOL(dva_get_dsize_sync); 3101 EXPORT_SYMBOL(bp_get_dsize_sync); 3102 EXPORT_SYMBOL(bp_get_dsize); 3103 EXPORT_SYMBOL(spa_has_slogs); 3104 EXPORT_SYMBOL(spa_is_root); 3105 EXPORT_SYMBOL(spa_writeable); 3106 EXPORT_SYMBOL(spa_mode); 3107 EXPORT_SYMBOL(spa_namespace_lock); 3108 EXPORT_SYMBOL(spa_trust_config); 3109 EXPORT_SYMBOL(spa_missing_tvds_allowed); 3110 EXPORT_SYMBOL(spa_set_missing_tvds); 3111 EXPORT_SYMBOL(spa_state_to_name); 3112 EXPORT_SYMBOL(spa_importing_readonly_checkpoint); 3113 EXPORT_SYMBOL(spa_min_claim_txg); 3114 EXPORT_SYMBOL(spa_suspend_async_destroy); 3115 EXPORT_SYMBOL(spa_has_checkpoint); 3116 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); 3117 3118 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, 3119 "Set additional debugging flags"); 3120 3121 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, 3122 "Set to attempt to recover from fatal errors"); 3123 3124 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, 3125 "Set to ignore IO errors during free and permanently leak the space"); 3126 3127 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, U64, ZMOD_RW, 3128 "Dead I/O check interval in milliseconds"); 3129 3130 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, 3131 "Enable deadman timer"); 3132 3133 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, UINT, ZMOD_RW, 3134 "SPA size estimate multiplication factor"); 3135 3136 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, 3137 "Place DDT data into the special class"); 3138 3139 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW, 3140 "Place user data indirect blocks into the special class"); 3141 3142 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode, 3143 param_set_deadman_failmode, param_get_charp, ZMOD_RW, 3144 "Failmode for deadman timer"); 3145 3146 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms, 3147 param_set_deadman_synctime, spl_param_get_u64, ZMOD_RW, 3148 "Pool sync expiration time in milliseconds"); 3149 3150 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, 3151 param_set_deadman_ziotime, spl_param_get_u64, ZMOD_RW, 3152 "IO expiration time in milliseconds"); 3153 3154 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, UINT, ZMOD_RW, 3155 "Small file blocks in special vdevs depends on this much " 3156 "free space available"); 3157 3158 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, 3159 param_get_uint, ZMOD_RW, "Reserved free space in pool"); 3160 3161 ZFS_MODULE_PARAM(zfs, spa_, num_allocators, INT, ZMOD_RW, 3162 "Number of allocators per spa"); 3163 3164 ZFS_MODULE_PARAM(zfs, spa_, cpus_per_allocator, INT, ZMOD_RW, 3165 "Minimum number of CPUs per allocators"); 3166