/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 * This lock must be acquired to do any of the following:
 *
 * - Lookup a spa_t by name
 * - Add or remove a spa_t from the namespace
 * - Increase spa_refcount from non-zero
 * - Check if spa_refcount is zero
 * - Rename a spa_t
 * - add/remove/attach/detach devices
 * - Held for the duration of create/destroy/import/export
 *
 * It does not need to handle recursion. A create or destroy may
 * reference objects (files or zvols) in other pools, but by
 * definition they must have an existing reference, and will never need
 * to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 * This reference count keeps track of any active users of the spa_t. The
 * spa_t cannot be destroyed or freed while this is non-zero. Internally,
 * the refcount is never really 'zero' - opening a pool implicitly keeps
 * some references in the DMU. Internally we check against spa_minref, but
 * present the image of a zero/non-zero value to consumers.
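 *
 * For illustration only (a sketch; error handling omitted), a consumer that
 * wants to pin a pool by name follows the rules above:
 *
 *        mutex_enter(&spa_namespace_lock);
 *        spa = spa_lookup(name);
 *        if (spa != NULL)
 *                spa_open_ref(spa, FTAG);
 *        mutex_exit(&spa_namespace_lock);
 *
 *        ... use the spa_t, taking spa_config_lock[] (described below)
 *        ... as needed, then drop the reference:
 *
 *        spa_close(spa, FTAG);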
96 * 97 * spa_config_lock[] (per-spa array of rwlocks) 98 * 99 * This protects the spa_t from config changes, and must be held in 100 * the following circumstances: 101 * 102 * - RW_READER to perform I/O to the spa 103 * - RW_WRITER to change the vdev config 104 * 105 * The locking order is fairly straightforward: 106 * 107 * spa_namespace_lock -> spa_refcount 108 * 109 * The namespace lock must be acquired to increase the refcount from 0 110 * or to check if it is zero. 111 * 112 * spa_refcount -> spa_config_lock[] 113 * 114 * There must be at least one valid reference on the spa_t to acquire 115 * the config lock. 116 * 117 * spa_namespace_lock -> spa_config_lock[] 118 * 119 * The namespace lock must always be taken before the config lock. 120 * 121 * 122 * The spa_namespace_lock can be acquired directly and is globally visible. 123 * 124 * The namespace is manipulated using the following functions, all of which 125 * require the spa_namespace_lock to be held. 126 * 127 * spa_lookup() Lookup a spa_t by name. 128 * 129 * spa_add() Create a new spa_t in the namespace. 130 * 131 * spa_remove() Remove a spa_t from the namespace. This also 132 * frees up any memory associated with the spa_t. 133 * 134 * spa_next() Returns the next spa_t in the system, or the 135 * first if NULL is passed. 136 * 137 * spa_evict_all() Shutdown and remove all spa_t structures in 138 * the system. 139 * 140 * spa_guid_exists() Determine whether a pool/device guid exists. 141 * 142 * The spa_refcount is manipulated using the following functions: 143 * 144 * spa_open_ref() Adds a reference to the given spa_t. Must be 145 * called with spa_namespace_lock held if the 146 * refcount is currently zero. 147 * 148 * spa_close() Remove a reference from the spa_t. This will 149 * not free the spa_t or remove it from the 150 * namespace. No locking is required. 151 * 152 * spa_refcount_zero() Returns true if the refcount is currently 153 * zero. Must be called with spa_namespace_lock 154 * held. 155 * 156 * The spa_config_lock[] is an array of rwlocks, ordered as follows: 157 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. 158 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). 159 * 160 * To read the configuration, it suffices to hold one of these locks as reader. 161 * To modify the configuration, you must hold all locks as writer. To modify 162 * vdev state without altering the vdev tree's topology (e.g. online/offline), 163 * you must hold SCL_STATE and SCL_ZIO as writer. 164 * 165 * We use these distinct config locks to avoid recursive lock entry. 166 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces 167 * block allocations (SCL_ALLOC), which may require reading space maps 168 * from disk (dmu_read() -> zio_read() -> SCL_ZIO). 169 * 170 * The spa config locks cannot be normal rwlocks because we need the 171 * ability to hand off ownership. For example, SCL_ZIO is acquired 172 * by the issuing thread and later released by an interrupt thread. 173 * They do, however, obey the usual write-wanted semantics to prevent 174 * writer (i.e. system administrator) starvation. 175 * 176 * The lock acquisition rules are as follows: 177 * 178 * SCL_CONFIG 179 * Protects changes to the vdev tree topology, such as vdev 180 * add/remove/attach/detach. Protects the dirty config list 181 * (spa_config_dirty_list) and the set of spares and l2arc devices. 
 *
 * SCL_STATE
 * Protects changes to pool state and vdev state, such as vdev
 * online/offline/fault/degrade/clear. Protects the dirty state list
 * (spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 * Protects changes to metaslab groups and classes.
 * Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 * Held by bp-level zios (those which have no io_vd upon entry)
 * to prevent changes to the vdev tree. The bp-level zio implicitly
 * protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 * Protects changes to metaslab groups and classes.
 * Held as reader by metaslab_free(). SCL_FREE is distinct from
 * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 * blocks in zio_done() while another i/o that holds either
 * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 * Held as reader to prevent changes to the vdev tree during trivial
 * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 * other locks, and lower than all of them, to ensure that it's safe
 * to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
 *     The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b) I/O operations on leaf vdevs. For any zio operation that takes
 *     an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *     or zio_write_phys() -- the caller must ensure that the config cannot
 *     change in the interim, and that the vdev cannot be reopened.
 *     SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 * spa_vdev_enter()         Acquire the namespace lock and the config lock
 *                          for writing.
 *
 * spa_vdev_exit()          Release the config lock, wait for all I/O
 *                          to complete, sync the updated configs to the
 *                          cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
    ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g.
indirect 270 * blocks), space referenced by the missing metadata can not be freed. 271 * Normally this causes the background destroy to become "stalled", as 272 * it is unable to make forward progress. While in this stalled state, 273 * all remaining space to free from the error-encountering filesystem is 274 * "temporarily leaked". Set this flag to cause it to ignore the EIO, 275 * permanently leak the space from indirect blocks that can not be read, 276 * and continue to free everything else that it can. 277 * 278 * The default, "stalling" behavior is useful if the storage partially 279 * fails (i.e. some but not all i/os fail), and then later recovers. In 280 * this case, we will be able to continue pool operations while it is 281 * partially failed, and when it recovers, we can continue to free the 282 * space, with no leaks. However, note that this case is actually 283 * fairly rare. 284 * 285 * Typically pools either (a) fail completely (but perhaps temporarily, 286 * e.g. a top-level vdev going offline), or (b) have localized, 287 * permanent errors (e.g. disk returns the wrong data due to bit flip or 288 * firmware bug). In case (a), this setting does not matter because the 289 * pool will be suspended and the sync thread will not be able to make 290 * forward progress regardless. In case (b), because the error is 291 * permanent, the best we can do is leak the minimum amount of space, 292 * which is what setting this flag will do. Therefore, it is reasonable 293 * for this flag to normally be set, but we chose the more conservative 294 * approach of not setting it, so that there is no possibility of 295 * leaking space in the "partial temporary" failure case. 296 */ 297 int zfs_free_leak_on_eio = B_FALSE; 298 299 /* 300 * Expiration time in milliseconds. This value has two meanings. First it is 301 * used to determine when the spa_deadman() logic should fire. By default the 302 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds. 303 * Secondly, the value determines if an I/O is considered "hung". Any I/O that 304 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting 305 * in one of three behaviors controlled by zfs_deadman_failmode. 306 */ 307 unsigned long zfs_deadman_synctime_ms = 600000UL; 308 309 /* 310 * This value controls the maximum amount of time zio_wait() will block for an 311 * outstanding IO. By default this is 300 seconds at which point the "hung" 312 * behavior will be applied as described for zfs_deadman_synctime_ms. 313 */ 314 unsigned long zfs_deadman_ziotime_ms = 300000UL; 315 316 /* 317 * Check time in milliseconds. This defines the frequency at which we check 318 * for hung I/O. 319 */ 320 unsigned long zfs_deadman_checktime_ms = 60000UL; 321 322 /* 323 * By default the deadman is enabled. 324 */ 325 int zfs_deadman_enabled = 1; 326 327 /* 328 * Controls the behavior of the deadman when it detects a "hung" I/O. 329 * Valid values are zfs_deadman_failmode=<wait|continue|panic>. 330 * 331 * wait - Wait for the "hung" I/O (default) 332 * continue - Attempt to recover from a "hung" I/O 333 * panic - Panic the system 334 */ 335 char *zfs_deadman_failmode = "wait"; 336 337 /* 338 * The worst case is single-sector max-parity RAID-Z blocks, in which 339 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) 340 * times the size; so just assume that. Add to this the fact that 341 * we can have up to 3 DVAs per bp, and one more factor of 2 because 342 * the block may be dittoed with up to 3 DVAs by ddt_sync(). 
All together, 343 * the worst case is: 344 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 345 */ 346 int spa_asize_inflation = 24; 347 348 /* 349 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 350 * the pool to be consumed (bounded by spa_max_slop). This ensures that we 351 * don't run the pool completely out of space, due to unaccounted changes (e.g. 352 * to the MOS). It also limits the worst-case time to allocate space. If we 353 * have less than this amount of free space, most ZPL operations (e.g. write, 354 * create) will return ENOSPC. The ZIL metaslabs (spa_embedded_log_class) are 355 * also part of this 3.2% of space which can't be consumed by normal writes; 356 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded 357 * log space. 358 * 359 * Certain operations (e.g. file removal, most administrative actions) can 360 * use half the slop space. They will only return ENOSPC if less than half 361 * the slop space is free. Typically, once the pool has less than the slop 362 * space free, the user will use these operations to free up space in the pool. 363 * These are the operations that call dsl_pool_adjustedsize() with the netfree 364 * argument set to TRUE. 365 * 366 * Operations that are almost guaranteed to free up space in the absence of 367 * a pool checkpoint can use up to three quarters of the slop space 368 * (e.g zfs destroy). 369 * 370 * A very restricted set of operations are always permitted, regardless of 371 * the amount of free space. These are the operations that call 372 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net 373 * increase in the amount of space used, it is possible to run the pool 374 * completely out of space, causing it to be permanently read-only. 375 * 376 * Note that on very small pools, the slop space will be larger than 377 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 378 * but we never allow it to be more than half the pool size. 379 * 380 * Further, on very large pools, the slop space will be smaller than 381 * 3.2%, to avoid reserving much more space than we actually need; bounded 382 * by spa_max_slop (128GB). 383 * 384 * See also the comments in zfs_space_check_t. 385 */ 386 int spa_slop_shift = 5; 387 uint64_t spa_min_slop = 128ULL * 1024 * 1024; 388 uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024; 389 int spa_allocators = 4; 390 391 392 /*PRINTFLIKE2*/ 393 void 394 spa_load_failed(spa_t *spa, const char *fmt, ...) 395 { 396 va_list adx; 397 char buf[256]; 398 399 va_start(adx, fmt); 400 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 401 va_end(adx); 402 403 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name, 404 spa->spa_trust_config ? "trusted" : "untrusted", buf); 405 } 406 407 /*PRINTFLIKE2*/ 408 void 409 spa_load_note(spa_t *spa, const char *fmt, ...) 410 { 411 va_list adx; 412 char buf[256]; 413 414 va_start(adx, fmt); 415 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 416 va_end(adx); 417 418 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name, 419 spa->spa_trust_config ? "trusted" : "untrusted", buf); 420 } 421 422 /* 423 * By default dedup and user data indirects land in the special class 424 */ 425 int zfs_ddt_data_is_special = B_TRUE; 426 int zfs_user_indirect_is_special = B_TRUE; 427 428 /* 429 * The percentage of special class final space reserved for metadata only. 430 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 431 * let metadata into the class. 
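 *
 * For example, with the default value of 25, small file blocks stop being
 * admitted to the special class once more than 75% of its space has been
 * allocated; the remaining 25% is then reserved for metadata (see the check
 * in spa_preferred_class() below).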
432 */ 433 int zfs_special_class_metadata_reserve_pct = 25; 434 435 /* 436 * ========================================================================== 437 * SPA config locking 438 * ========================================================================== 439 */ 440 static void 441 spa_config_lock_init(spa_t *spa) 442 { 443 for (int i = 0; i < SCL_LOCKS; i++) { 444 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 445 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 446 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 447 zfs_refcount_create_untracked(&scl->scl_count); 448 scl->scl_writer = NULL; 449 scl->scl_write_wanted = 0; 450 } 451 } 452 453 static void 454 spa_config_lock_destroy(spa_t *spa) 455 { 456 for (int i = 0; i < SCL_LOCKS; i++) { 457 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 458 mutex_destroy(&scl->scl_lock); 459 cv_destroy(&scl->scl_cv); 460 zfs_refcount_destroy(&scl->scl_count); 461 ASSERT(scl->scl_writer == NULL); 462 ASSERT(scl->scl_write_wanted == 0); 463 } 464 } 465 466 int 467 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 468 { 469 for (int i = 0; i < SCL_LOCKS; i++) { 470 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 471 if (!(locks & (1 << i))) 472 continue; 473 mutex_enter(&scl->scl_lock); 474 if (rw == RW_READER) { 475 if (scl->scl_writer || scl->scl_write_wanted) { 476 mutex_exit(&scl->scl_lock); 477 spa_config_exit(spa, locks & ((1 << i) - 1), 478 tag); 479 return (0); 480 } 481 } else { 482 ASSERT(scl->scl_writer != curthread); 483 if (!zfs_refcount_is_zero(&scl->scl_count)) { 484 mutex_exit(&scl->scl_lock); 485 spa_config_exit(spa, locks & ((1 << i) - 1), 486 tag); 487 return (0); 488 } 489 scl->scl_writer = curthread; 490 } 491 (void) zfs_refcount_add(&scl->scl_count, tag); 492 mutex_exit(&scl->scl_lock); 493 } 494 return (1); 495 } 496 497 void 498 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) 499 { 500 int wlocks_held = 0; 501 502 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 503 504 for (int i = 0; i < SCL_LOCKS; i++) { 505 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 506 if (scl->scl_writer == curthread) 507 wlocks_held |= (1 << i); 508 if (!(locks & (1 << i))) 509 continue; 510 mutex_enter(&scl->scl_lock); 511 if (rw == RW_READER) { 512 while (scl->scl_writer || scl->scl_write_wanted) { 513 cv_wait(&scl->scl_cv, &scl->scl_lock); 514 } 515 } else { 516 ASSERT(scl->scl_writer != curthread); 517 while (!zfs_refcount_is_zero(&scl->scl_count)) { 518 scl->scl_write_wanted++; 519 cv_wait(&scl->scl_cv, &scl->scl_lock); 520 scl->scl_write_wanted--; 521 } 522 scl->scl_writer = curthread; 523 } 524 (void) zfs_refcount_add(&scl->scl_count, tag); 525 mutex_exit(&scl->scl_lock); 526 } 527 ASSERT3U(wlocks_held, <=, locks); 528 } 529 530 void 531 spa_config_exit(spa_t *spa, int locks, const void *tag) 532 { 533 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 534 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 535 if (!(locks & (1 << i))) 536 continue; 537 mutex_enter(&scl->scl_lock); 538 ASSERT(!zfs_refcount_is_zero(&scl->scl_count)); 539 if (zfs_refcount_remove(&scl->scl_count, tag) == 0) { 540 ASSERT(scl->scl_writer == NULL || 541 scl->scl_writer == curthread); 542 scl->scl_writer = NULL; /* OK in either case */ 543 cv_broadcast(&scl->scl_cv); 544 } 545 mutex_exit(&scl->scl_lock); 546 } 547 } 548 549 int 550 spa_config_held(spa_t *spa, int locks, krw_t rw) 551 { 552 int locks_held = 0; 553 554 for (int i = 0; i < SCL_LOCKS; i++) { 555 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 556 if 
(!(locks & (1 << i))) 557 continue; 558 if ((rw == RW_READER && 559 !zfs_refcount_is_zero(&scl->scl_count)) || 560 (rw == RW_WRITER && scl->scl_writer == curthread)) 561 locks_held |= 1 << i; 562 } 563 564 return (locks_held); 565 } 566 567 /* 568 * ========================================================================== 569 * SPA namespace functions 570 * ========================================================================== 571 */ 572 573 /* 574 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 575 * Returns NULL if no matching spa_t is found. 576 */ 577 spa_t * 578 spa_lookup(const char *name) 579 { 580 static spa_t search; /* spa_t is large; don't allocate on stack */ 581 spa_t *spa; 582 avl_index_t where; 583 char *cp; 584 585 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 586 587 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 588 589 /* 590 * If it's a full dataset name, figure out the pool name and 591 * just use that. 592 */ 593 cp = strpbrk(search.spa_name, "/@#"); 594 if (cp != NULL) 595 *cp = '\0'; 596 597 spa = avl_find(&spa_namespace_avl, &search, &where); 598 599 return (spa); 600 } 601 602 /* 603 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 604 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 605 * looking for potentially hung I/Os. 606 */ 607 void 608 spa_deadman(void *arg) 609 { 610 spa_t *spa = arg; 611 612 /* Disable the deadman if the pool is suspended. */ 613 if (spa_suspended(spa)) 614 return; 615 616 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 617 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 618 ++spa->spa_deadman_calls); 619 if (zfs_deadman_enabled) 620 vdev_deadman(spa->spa_root_vdev, FTAG); 621 622 spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, 623 spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + 624 MSEC_TO_TICK(zfs_deadman_checktime_ms)); 625 } 626 627 static int 628 spa_log_sm_sort_by_txg(const void *va, const void *vb) 629 { 630 const spa_log_sm_t *a = va; 631 const spa_log_sm_t *b = vb; 632 633 return (TREE_CMP(a->sls_txg, b->sls_txg)); 634 } 635 636 /* 637 * Create an uninitialized spa_t with the given name. Requires 638 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 639 * exist by calling spa_lookup() first. 
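 *
 * An illustrative creation sequence (a sketch; error handling omitted):
 *
 *        mutex_enter(&spa_namespace_lock);
 *        if (spa_lookup(name) == NULL)
 *                spa = spa_add(name, config, altroot);
 *        mutex_exit(&spa_namespace_lock);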
640 */ 641 spa_t * 642 spa_add(const char *name, nvlist_t *config, const char *altroot) 643 { 644 spa_t *spa; 645 spa_config_dirent_t *dp; 646 647 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 648 649 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 650 651 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 652 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 653 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 654 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 655 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 656 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 657 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 658 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 659 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 660 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 661 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 662 mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL); 663 mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL); 664 mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL); 665 666 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 667 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 668 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 669 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 670 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 671 cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL); 672 cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL); 673 674 for (int t = 0; t < TXG_SIZE; t++) 675 bplist_create(&spa->spa_free_bplist[t]); 676 677 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 678 spa->spa_state = POOL_STATE_UNINITIALIZED; 679 spa->spa_freeze_txg = UINT64_MAX; 680 spa->spa_final_txg = UINT64_MAX; 681 spa->spa_load_max_txg = UINT64_MAX; 682 spa->spa_proc = &p0; 683 spa->spa_proc_state = SPA_PROC_NONE; 684 spa->spa_trust_config = B_TRUE; 685 spa->spa_hostid = zone_get_hostid(NULL); 686 687 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 688 spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms); 689 spa_set_deadman_failmode(spa, zfs_deadman_failmode); 690 691 zfs_refcount_create(&spa->spa_refcount); 692 spa_config_lock_init(spa); 693 spa_stats_init(spa); 694 695 avl_add(&spa_namespace_avl, spa); 696 697 /* 698 * Set the alternate root, if there is one. 
699 */ 700 if (altroot) 701 spa->spa_root = spa_strdup(altroot); 702 703 spa->spa_alloc_count = spa_allocators; 704 spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count * 705 sizeof (kmutex_t), KM_SLEEP); 706 spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count * 707 sizeof (avl_tree_t), KM_SLEEP); 708 for (int i = 0; i < spa->spa_alloc_count; i++) { 709 mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL); 710 avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare, 711 sizeof (zio_t), offsetof(zio_t, io_alloc_node)); 712 } 713 avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed, 714 sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node)); 715 avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg, 716 sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node)); 717 list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t), 718 offsetof(log_summary_entry_t, lse_node)); 719 720 /* 721 * Every pool starts with the default cachefile 722 */ 723 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 724 offsetof(spa_config_dirent_t, scd_link)); 725 726 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 727 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 728 list_insert_head(&spa->spa_config_list, dp); 729 730 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 731 KM_SLEEP) == 0); 732 733 if (config != NULL) { 734 nvlist_t *features; 735 736 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 737 &features) == 0) { 738 VERIFY(nvlist_dup(features, &spa->spa_label_features, 739 0) == 0); 740 } 741 742 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 743 } 744 745 if (spa->spa_label_features == NULL) { 746 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 747 KM_SLEEP) == 0); 748 } 749 750 spa->spa_min_ashift = INT_MAX; 751 spa->spa_max_ashift = 0; 752 spa->spa_min_alloc = INT_MAX; 753 754 /* Reset cached value */ 755 spa->spa_dedup_dspace = ~0ULL; 756 757 /* 758 * As a pool is being created, treat all features as disabled by 759 * setting SPA_FEATURE_DISABLED for all entries in the feature 760 * refcount cache. 761 */ 762 for (int i = 0; i < SPA_FEATURES; i++) { 763 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 764 } 765 766 list_create(&spa->spa_leaf_list, sizeof (vdev_t), 767 offsetof(vdev_t, vdev_leaf_node)); 768 769 return (spa); 770 } 771 772 /* 773 * Removes a spa_t from the namespace, freeing up any memory used. Requires 774 * spa_namespace_lock. This is called only after the spa_t has been closed and 775 * deactivated. 
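 *
 * For illustration, eviction-style teardown (see the asserts below) amounts
 * to roughly:
 *
 *        mutex_enter(&spa_namespace_lock);
 *        if (spa_refcount_zero(spa) &&
 *            spa_state(spa) == POOL_STATE_UNINITIALIZED)
 *                spa_remove(spa);
 *        mutex_exit(&spa_namespace_lock);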
776 */ 777 void 778 spa_remove(spa_t *spa) 779 { 780 spa_config_dirent_t *dp; 781 782 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 783 ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED); 784 ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0); 785 ASSERT0(spa->spa_waiters); 786 787 nvlist_free(spa->spa_config_splitting); 788 789 avl_remove(&spa_namespace_avl, spa); 790 cv_broadcast(&spa_namespace_cv); 791 792 if (spa->spa_root) 793 spa_strfree(spa->spa_root); 794 795 while ((dp = list_head(&spa->spa_config_list)) != NULL) { 796 list_remove(&spa->spa_config_list, dp); 797 if (dp->scd_path != NULL) 798 spa_strfree(dp->scd_path); 799 kmem_free(dp, sizeof (spa_config_dirent_t)); 800 } 801 802 for (int i = 0; i < spa->spa_alloc_count; i++) { 803 avl_destroy(&spa->spa_alloc_trees[i]); 804 mutex_destroy(&spa->spa_alloc_locks[i]); 805 } 806 kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count * 807 sizeof (kmutex_t)); 808 kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count * 809 sizeof (avl_tree_t)); 810 811 avl_destroy(&spa->spa_metaslabs_by_flushed); 812 avl_destroy(&spa->spa_sm_logs_by_txg); 813 list_destroy(&spa->spa_log_summary); 814 list_destroy(&spa->spa_config_list); 815 list_destroy(&spa->spa_leaf_list); 816 817 nvlist_free(spa->spa_label_features); 818 nvlist_free(spa->spa_load_info); 819 nvlist_free(spa->spa_feat_stats); 820 spa_config_set(spa, NULL); 821 822 zfs_refcount_destroy(&spa->spa_refcount); 823 824 spa_stats_destroy(spa); 825 spa_config_lock_destroy(spa); 826 827 for (int t = 0; t < TXG_SIZE; t++) 828 bplist_destroy(&spa->spa_free_bplist[t]); 829 830 zio_checksum_templates_free(spa); 831 832 cv_destroy(&spa->spa_async_cv); 833 cv_destroy(&spa->spa_evicting_os_cv); 834 cv_destroy(&spa->spa_proc_cv); 835 cv_destroy(&spa->spa_scrub_io_cv); 836 cv_destroy(&spa->spa_suspend_cv); 837 cv_destroy(&spa->spa_activities_cv); 838 cv_destroy(&spa->spa_waiters_cv); 839 840 mutex_destroy(&spa->spa_flushed_ms_lock); 841 mutex_destroy(&spa->spa_async_lock); 842 mutex_destroy(&spa->spa_errlist_lock); 843 mutex_destroy(&spa->spa_errlog_lock); 844 mutex_destroy(&spa->spa_evicting_os_lock); 845 mutex_destroy(&spa->spa_history_lock); 846 mutex_destroy(&spa->spa_proc_lock); 847 mutex_destroy(&spa->spa_props_lock); 848 mutex_destroy(&spa->spa_cksum_tmpls_lock); 849 mutex_destroy(&spa->spa_scrub_lock); 850 mutex_destroy(&spa->spa_suspend_lock); 851 mutex_destroy(&spa->spa_vdev_top_lock); 852 mutex_destroy(&spa->spa_feat_stats_lock); 853 mutex_destroy(&spa->spa_activities_lock); 854 855 kmem_free(spa, sizeof (spa_t)); 856 } 857 858 /* 859 * Given a pool, return the next pool in the namespace, or NULL if there is 860 * none. If 'prev' is NULL, return the first pool. 861 */ 862 spa_t * 863 spa_next(spa_t *prev) 864 { 865 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 866 867 if (prev) 868 return (AVL_NEXT(&spa_namespace_avl, prev)); 869 else 870 return (avl_first(&spa_namespace_avl)); 871 } 872 873 /* 874 * ========================================================================== 875 * SPA refcount functions 876 * ========================================================================== 877 */ 878 879 /* 880 * Add a reference to the given spa_t. Must have at least one reference, or 881 * have the namespace lock held. 882 */ 883 void 884 spa_open_ref(spa_t *spa, void *tag) 885 { 886 ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref || 887 MUTEX_HELD(&spa_namespace_lock)); 888 (void) zfs_refcount_add(&spa->spa_refcount, tag); 889 } 890 891 /* 892 * Remove a reference to the given spa_t. 
Must have at least one reference, or 893 * have the namespace lock held. 894 */ 895 void 896 spa_close(spa_t *spa, void *tag) 897 { 898 ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref || 899 MUTEX_HELD(&spa_namespace_lock)); 900 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 901 } 902 903 /* 904 * Remove a reference to the given spa_t held by a dsl dir that is 905 * being asynchronously released. Async releases occur from a taskq 906 * performing eviction of dsl datasets and dirs. The namespace lock 907 * isn't held and the hold by the object being evicted may contribute to 908 * spa_minref (e.g. dataset or directory released during pool export), 909 * so the asserts in spa_close() do not apply. 910 */ 911 void 912 spa_async_close(spa_t *spa, void *tag) 913 { 914 (void) zfs_refcount_remove(&spa->spa_refcount, tag); 915 } 916 917 /* 918 * Check to see if the spa refcount is zero. Must be called with 919 * spa_namespace_lock held. We really compare against spa_minref, which is the 920 * number of references acquired when opening a pool 921 */ 922 boolean_t 923 spa_refcount_zero(spa_t *spa) 924 { 925 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 926 927 return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref); 928 } 929 930 /* 931 * ========================================================================== 932 * SPA spare and l2cache tracking 933 * ========================================================================== 934 */ 935 936 /* 937 * Hot spares and cache devices are tracked using the same code below, 938 * for 'auxiliary' devices. 939 */ 940 941 typedef struct spa_aux { 942 uint64_t aux_guid; 943 uint64_t aux_pool; 944 avl_node_t aux_avl; 945 int aux_count; 946 } spa_aux_t; 947 948 static inline int 949 spa_aux_compare(const void *a, const void *b) 950 { 951 const spa_aux_t *sa = (const spa_aux_t *)a; 952 const spa_aux_t *sb = (const spa_aux_t *)b; 953 954 return (TREE_CMP(sa->aux_guid, sb->aux_guid)); 955 } 956 957 static void 958 spa_aux_add(vdev_t *vd, avl_tree_t *avl) 959 { 960 avl_index_t where; 961 spa_aux_t search; 962 spa_aux_t *aux; 963 964 search.aux_guid = vd->vdev_guid; 965 if ((aux = avl_find(avl, &search, &where)) != NULL) { 966 aux->aux_count++; 967 } else { 968 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 969 aux->aux_guid = vd->vdev_guid; 970 aux->aux_count = 1; 971 avl_insert(avl, aux, where); 972 } 973 } 974 975 static void 976 spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 977 { 978 spa_aux_t search; 979 spa_aux_t *aux; 980 avl_index_t where; 981 982 search.aux_guid = vd->vdev_guid; 983 aux = avl_find(avl, &search, &where); 984 985 ASSERT(aux != NULL); 986 987 if (--aux->aux_count == 0) { 988 avl_remove(avl, aux); 989 kmem_free(aux, sizeof (spa_aux_t)); 990 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 991 aux->aux_pool = 0ULL; 992 } 993 } 994 995 static boolean_t 996 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 997 { 998 spa_aux_t search, *found; 999 1000 search.aux_guid = guid; 1001 found = avl_find(avl, &search, NULL); 1002 1003 if (pool) { 1004 if (found) 1005 *pool = found->aux_pool; 1006 else 1007 *pool = 0ULL; 1008 } 1009 1010 if (refcnt) { 1011 if (found) 1012 *refcnt = found->aux_count; 1013 else 1014 *refcnt = 0; 1015 } 1016 1017 return (found != NULL); 1018 } 1019 1020 static void 1021 spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1022 { 1023 spa_aux_t search, *found; 1024 avl_index_t where; 1025 1026 search.aux_guid = vd->vdev_guid; 1027 found = avl_find(avl, &search, &where); 1028 
        ASSERT(found != NULL);
        ASSERT(found->aux_pool == 0ULL);

        found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * - A spare may be part of multiple pools.
 * - A spare may be added to a pool even if it's actively in use within
 *   another pool.
 * - A spare in use in any pool can only be the source of a replacement if
 *   the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(!vd->vdev_isspare);
        spa_aux_add(vd, &spa_spare_avl);
        vd->vdev_isspare = B_TRUE;
        mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_remove(vd, &spa_spare_avl);
        vd->vdev_isspare = B_FALSE;
        mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
        boolean_t found;

        mutex_enter(&spa_spare_lock);
        found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);

        return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
        mutex_enter(&spa_spare_lock);
        ASSERT(vd->vdev_isspare);
        spa_aux_activate(vd, &spa_spare_avl);
        mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
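 *
 * For illustration, a status query can ask whether a device is already known
 * as a cache device, and in which pool it is active, without taking the
 * namespace lock:
 *
 *        uint64_t pool;
 *
 *        if (spa_l2cache_exists(vd->vdev_guid, &pool) && pool != 0)
 *                ... the device is in active use by the pool with that guid ...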
1107 */ 1108 1109 static int 1110 spa_l2cache_compare(const void *a, const void *b) 1111 { 1112 return (spa_aux_compare(a, b)); 1113 } 1114 1115 void 1116 spa_l2cache_add(vdev_t *vd) 1117 { 1118 mutex_enter(&spa_l2cache_lock); 1119 ASSERT(!vd->vdev_isl2cache); 1120 spa_aux_add(vd, &spa_l2cache_avl); 1121 vd->vdev_isl2cache = B_TRUE; 1122 mutex_exit(&spa_l2cache_lock); 1123 } 1124 1125 void 1126 spa_l2cache_remove(vdev_t *vd) 1127 { 1128 mutex_enter(&spa_l2cache_lock); 1129 ASSERT(vd->vdev_isl2cache); 1130 spa_aux_remove(vd, &spa_l2cache_avl); 1131 vd->vdev_isl2cache = B_FALSE; 1132 mutex_exit(&spa_l2cache_lock); 1133 } 1134 1135 boolean_t 1136 spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1137 { 1138 boolean_t found; 1139 1140 mutex_enter(&spa_l2cache_lock); 1141 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1142 mutex_exit(&spa_l2cache_lock); 1143 1144 return (found); 1145 } 1146 1147 void 1148 spa_l2cache_activate(vdev_t *vd) 1149 { 1150 mutex_enter(&spa_l2cache_lock); 1151 ASSERT(vd->vdev_isl2cache); 1152 spa_aux_activate(vd, &spa_l2cache_avl); 1153 mutex_exit(&spa_l2cache_lock); 1154 } 1155 1156 /* 1157 * ========================================================================== 1158 * SPA vdev locking 1159 * ========================================================================== 1160 */ 1161 1162 /* 1163 * Lock the given spa_t for the purpose of adding or removing a vdev. 1164 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1165 * It returns the next transaction group for the spa_t. 1166 */ 1167 uint64_t 1168 spa_vdev_enter(spa_t *spa) 1169 { 1170 mutex_enter(&spa->spa_vdev_top_lock); 1171 mutex_enter(&spa_namespace_lock); 1172 1173 vdev_autotrim_stop_all(spa); 1174 1175 return (spa_vdev_config_enter(spa)); 1176 } 1177 1178 /* 1179 * The same as spa_vdev_enter() above but additionally takes the guid of 1180 * the vdev being detached. When there is a rebuild in process it will be 1181 * suspended while the vdev tree is modified then resumed by spa_vdev_exit(). 1182 * The rebuild is canceled if only a single child remains after the detach. 1183 */ 1184 uint64_t 1185 spa_vdev_detach_enter(spa_t *spa, uint64_t guid) 1186 { 1187 mutex_enter(&spa->spa_vdev_top_lock); 1188 mutex_enter(&spa_namespace_lock); 1189 1190 vdev_autotrim_stop_all(spa); 1191 1192 if (guid != 0) { 1193 vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE); 1194 if (vd) { 1195 vdev_rebuild_stop_wait(vd->vdev_top); 1196 } 1197 } 1198 1199 return (spa_vdev_config_enter(spa)); 1200 } 1201 1202 /* 1203 * Internal implementation for spa_vdev_enter(). Used when a vdev 1204 * operation requires multiple syncs (i.e. removing a device) while 1205 * keeping the spa_namespace_lock held. 1206 */ 1207 uint64_t 1208 spa_vdev_config_enter(spa_t *spa) 1209 { 1210 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1211 1212 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1213 1214 return (spa_last_synced_txg(spa) + 1); 1215 } 1216 1217 /* 1218 * Used in combination with spa_vdev_config_enter() to allow the syncing 1219 * of multiple transactions without releasing the spa_namespace_lock. 1220 */ 1221 void 1222 spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) 1223 { 1224 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1225 1226 int config_changed = B_FALSE; 1227 1228 ASSERT(txg > spa_last_synced_txg(spa)); 1229 1230 spa->spa_pending_vdev = NULL; 1231 1232 /* 1233 * Reassess the DTLs. 
         */
        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /*
         * Verify the metaslab classes.
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

        spa_config_exit(spa, SCL_ALL, spa);

        /*
         * Panic the system if the specified tag requires it. This
         * is useful for ensuring that configurations are updated
         * transactionally.
         */
        if (zio_injection_enabled)
                zio_handle_panic_injection(spa, tag, 0);

        /*
         * Note: this txg_wait_synced() is important because it ensures
         * that there won't be more than one config change per txg.
         * This allows us to use the txg as the generation number.
         */
        if (error == 0)
                txg_wait_synced(spa->spa_dsl_pool, txg);

        if (vd != NULL) {
                ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
                if (vd->vdev_ops->vdev_op_leaf) {
                        mutex_enter(&vd->vdev_initialize_lock);
                        vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
                            NULL);
                        mutex_exit(&vd->vdev_initialize_lock);

                        mutex_enter(&vd->vdev_trim_lock);
                        vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
                        mutex_exit(&vd->vdev_trim_lock);
                }

                /*
                 * The vdev may be both a leaf and top-level device.
                 */
                vdev_autotrim_stop_wait(vd);

                spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
                vdev_free(vd);
                spa_config_exit(spa, SCL_STATE_ALL, spa);
        }

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed)
                spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
        vdev_autotrim_restart(spa);
        vdev_rebuild_restart(spa);

        spa_vdev_config_exit(spa, vd, txg, error, FTAG);
        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
        int locks = SCL_STATE_ALL | oplocks;

        /*
         * Root pools may need to read from the underlying devfs filesystem
         * when opening up a vdev. Unfortunately if we're holding the
         * SCL_ZIO lock it will result in a deadlock when we try to issue
         * the read from the root filesystem. Instead we "prefetch"
         * the associated vnodes that we need prior to opening the
         * underlying devices and cache them so that we can prevent
         * any I/O when we are doing the actual open.
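         * (For illustration: the vnodes are pinned via the vdev_hold() call
         * below and released again by vdev_rele() in spa_vdev_state_exit().)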
1334 */ 1335 if (spa_is_root(spa)) { 1336 int low = locks & ~(SCL_ZIO - 1); 1337 int high = locks & ~low; 1338 1339 spa_config_enter(spa, high, spa, RW_WRITER); 1340 vdev_hold(spa->spa_root_vdev); 1341 spa_config_enter(spa, low, spa, RW_WRITER); 1342 } else { 1343 spa_config_enter(spa, locks, spa, RW_WRITER); 1344 } 1345 spa->spa_vdev_locks = locks; 1346 } 1347 1348 int 1349 spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1350 { 1351 boolean_t config_changed = B_FALSE; 1352 vdev_t *vdev_top; 1353 1354 if (vd == NULL || vd == spa->spa_root_vdev) { 1355 vdev_top = spa->spa_root_vdev; 1356 } else { 1357 vdev_top = vd->vdev_top; 1358 } 1359 1360 if (vd != NULL || error == 0) 1361 vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE); 1362 1363 if (vd != NULL) { 1364 if (vd != spa->spa_root_vdev) 1365 vdev_state_dirty(vdev_top); 1366 1367 config_changed = B_TRUE; 1368 spa->spa_config_generation++; 1369 } 1370 1371 if (spa_is_root(spa)) 1372 vdev_rele(spa->spa_root_vdev); 1373 1374 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1375 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1376 1377 /* 1378 * If anything changed, wait for it to sync. This ensures that, 1379 * from the system administrator's perspective, zpool(8) commands 1380 * are synchronous. This is important for things like zpool offline: 1381 * when the command completes, you expect no further I/O from ZFS. 1382 */ 1383 if (vd != NULL) 1384 txg_wait_synced(spa->spa_dsl_pool, 0); 1385 1386 /* 1387 * If the config changed, update the config cache. 1388 */ 1389 if (config_changed) { 1390 mutex_enter(&spa_namespace_lock); 1391 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1392 mutex_exit(&spa_namespace_lock); 1393 } 1394 1395 return (error); 1396 } 1397 1398 /* 1399 * ========================================================================== 1400 * Miscellaneous functions 1401 * ========================================================================== 1402 */ 1403 1404 void 1405 spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1406 { 1407 if (!nvlist_exists(spa->spa_label_features, feature)) { 1408 fnvlist_add_boolean(spa->spa_label_features, feature); 1409 /* 1410 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1411 * dirty the vdev config because lock SCL_CONFIG is not held. 1412 * Thankfully, in this case we don't need to dirty the config 1413 * because it will be written out anyway when we finish 1414 * creating the pool. 1415 */ 1416 if (tx->tx_txg != TXG_INITIAL) 1417 vdev_config_dirty(spa->spa_root_vdev); 1418 } 1419 } 1420 1421 void 1422 spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1423 { 1424 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1425 vdev_config_dirty(spa->spa_root_vdev); 1426 } 1427 1428 /* 1429 * Return the spa_t associated with given pool_guid, if it exists. If 1430 * device_guid is non-zero, determine whether the pool exists *and* contains 1431 * a device with the specified device_guid. 
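 * Passing a device_guid of zero asks only whether the pool itself is known;
 * this is how spa_guid_exists(guid, 0) is used by spa_generate_guid() below.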
1432 */ 1433 spa_t * 1434 spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1435 { 1436 spa_t *spa; 1437 avl_tree_t *t = &spa_namespace_avl; 1438 1439 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1440 1441 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1442 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1443 continue; 1444 if (spa->spa_root_vdev == NULL) 1445 continue; 1446 if (spa_guid(spa) == pool_guid) { 1447 if (device_guid == 0) 1448 break; 1449 1450 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1451 device_guid) != NULL) 1452 break; 1453 1454 /* 1455 * Check any devices we may be in the process of adding. 1456 */ 1457 if (spa->spa_pending_vdev) { 1458 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1459 device_guid) != NULL) 1460 break; 1461 } 1462 } 1463 } 1464 1465 return (spa); 1466 } 1467 1468 /* 1469 * Determine whether a pool with the given pool_guid exists. 1470 */ 1471 boolean_t 1472 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1473 { 1474 return (spa_by_guid(pool_guid, device_guid) != NULL); 1475 } 1476 1477 char * 1478 spa_strdup(const char *s) 1479 { 1480 size_t len; 1481 char *new; 1482 1483 len = strlen(s); 1484 new = kmem_alloc(len + 1, KM_SLEEP); 1485 bcopy(s, new, len); 1486 new[len] = '\0'; 1487 1488 return (new); 1489 } 1490 1491 void 1492 spa_strfree(char *s) 1493 { 1494 kmem_free(s, strlen(s) + 1); 1495 } 1496 1497 uint64_t 1498 spa_get_random(uint64_t range) 1499 { 1500 uint64_t r; 1501 1502 ASSERT(range != 0); 1503 1504 if (range == 1) 1505 return (0); 1506 1507 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1508 1509 return (r % range); 1510 } 1511 1512 uint64_t 1513 spa_generate_guid(spa_t *spa) 1514 { 1515 uint64_t guid = spa_get_random(-1ULL); 1516 1517 if (spa != NULL) { 1518 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 1519 guid = spa_get_random(-1ULL); 1520 } else { 1521 while (guid == 0 || spa_guid_exists(guid, 0)) 1522 guid = spa_get_random(-1ULL); 1523 } 1524 1525 return (guid); 1526 } 1527 1528 void 1529 snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1530 { 1531 char type[256]; 1532 char *checksum = NULL; 1533 char *compress = NULL; 1534 1535 if (bp != NULL) { 1536 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1537 dmu_object_byteswap_t bswap = 1538 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1539 (void) snprintf(type, sizeof (type), "bswap %s %s", 1540 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1541 "metadata" : "data", 1542 dmu_ot_byteswap[bswap].ob_name); 1543 } else { 1544 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1545 sizeof (type)); 1546 } 1547 if (!BP_IS_EMBEDDED(bp)) { 1548 checksum = 1549 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1550 } 1551 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1552 } 1553 1554 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1555 compress); 1556 } 1557 1558 void 1559 spa_freeze(spa_t *spa) 1560 { 1561 uint64_t freeze_txg = 0; 1562 1563 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1564 if (spa->spa_freeze_txg == UINT64_MAX) { 1565 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1566 spa->spa_freeze_txg = freeze_txg; 1567 } 1568 spa_config_exit(spa, SCL_ALL, FTAG); 1569 if (freeze_txg != 0) 1570 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1571 } 1572 1573 void 1574 zfs_panic_recover(const char *fmt, ...) 1575 { 1576 va_list adx; 1577 1578 va_start(adx, fmt); 1579 vcmn_err(zfs_recover ? 
CE_WARN : CE_PANIC, fmt, adx); 1580 va_end(adx); 1581 } 1582 1583 /* 1584 * This is a stripped-down version of strtoull, suitable only for converting 1585 * lowercase hexadecimal numbers that don't overflow. 1586 */ 1587 uint64_t 1588 zfs_strtonum(const char *str, char **nptr) 1589 { 1590 uint64_t val = 0; 1591 char c; 1592 int digit; 1593 1594 while ((c = *str) != '\0') { 1595 if (c >= '0' && c <= '9') 1596 digit = c - '0'; 1597 else if (c >= 'a' && c <= 'f') 1598 digit = 10 + c - 'a'; 1599 else 1600 break; 1601 1602 val *= 16; 1603 val += digit; 1604 1605 str++; 1606 } 1607 1608 if (nptr) 1609 *nptr = (char *)str; 1610 1611 return (val); 1612 } 1613 1614 void 1615 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1616 { 1617 /* 1618 * We bump the feature refcount for each special vdev added to the pool 1619 */ 1620 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1621 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1622 } 1623 1624 /* 1625 * ========================================================================== 1626 * Accessor functions 1627 * ========================================================================== 1628 */ 1629 1630 boolean_t 1631 spa_shutting_down(spa_t *spa) 1632 { 1633 return (spa->spa_async_suspended); 1634 } 1635 1636 dsl_pool_t * 1637 spa_get_dsl(spa_t *spa) 1638 { 1639 return (spa->spa_dsl_pool); 1640 } 1641 1642 boolean_t 1643 spa_is_initializing(spa_t *spa) 1644 { 1645 return (spa->spa_is_initializing); 1646 } 1647 1648 boolean_t 1649 spa_indirect_vdevs_loaded(spa_t *spa) 1650 { 1651 return (spa->spa_indirect_vdevs_loaded); 1652 } 1653 1654 blkptr_t * 1655 spa_get_rootblkptr(spa_t *spa) 1656 { 1657 return (&spa->spa_ubsync.ub_rootbp); 1658 } 1659 1660 void 1661 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1662 { 1663 spa->spa_uberblock.ub_rootbp = *bp; 1664 } 1665 1666 void 1667 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1668 { 1669 if (spa->spa_root == NULL) 1670 buf[0] = '\0'; 1671 else 1672 (void) strncpy(buf, spa->spa_root, buflen); 1673 } 1674 1675 int 1676 spa_sync_pass(spa_t *spa) 1677 { 1678 return (spa->spa_sync_pass); 1679 } 1680 1681 char * 1682 spa_name(spa_t *spa) 1683 { 1684 return (spa->spa_name); 1685 } 1686 1687 uint64_t 1688 spa_guid(spa_t *spa) 1689 { 1690 dsl_pool_t *dp = spa_get_dsl(spa); 1691 uint64_t guid; 1692 1693 /* 1694 * If we fail to parse the config during spa_load(), we can go through 1695 * the error path (which posts an ereport) and end up here with no root 1696 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1697 * this case. 1698 */ 1699 if (spa->spa_root_vdev == NULL) 1700 return (spa->spa_config_guid); 1701 1702 guid = spa->spa_last_synced_guid != 0 ? 1703 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1704 1705 /* 1706 * Return the most recently synced out guid unless we're 1707 * in syncing context. 1708 */ 1709 if (dp && dsl_pool_sync_context(dp)) 1710 return (spa->spa_root_vdev->vdev_guid); 1711 else 1712 return (guid); 1713 } 1714 1715 uint64_t 1716 spa_load_guid(spa_t *spa) 1717 { 1718 /* 1719 * This is a GUID that exists solely as a reference for the 1720 * purposes of the arc. It is generated at load time, and 1721 * is never written to persistent storage. 
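 * (The ARC, for example, keys its cached buffers by this value rather than
 * by the on-disk pool guid.)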
1722 */ 1723 return (spa->spa_load_guid); 1724 } 1725 1726 uint64_t 1727 spa_last_synced_txg(spa_t *spa) 1728 { 1729 return (spa->spa_ubsync.ub_txg); 1730 } 1731 1732 uint64_t 1733 spa_first_txg(spa_t *spa) 1734 { 1735 return (spa->spa_first_txg); 1736 } 1737 1738 uint64_t 1739 spa_syncing_txg(spa_t *spa) 1740 { 1741 return (spa->spa_syncing_txg); 1742 } 1743 1744 /* 1745 * Return the last txg where data can be dirtied. The final txgs 1746 * will be used to just clear out any deferred frees that remain. 1747 */ 1748 uint64_t 1749 spa_final_dirty_txg(spa_t *spa) 1750 { 1751 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1752 } 1753 1754 pool_state_t 1755 spa_state(spa_t *spa) 1756 { 1757 return (spa->spa_state); 1758 } 1759 1760 spa_load_state_t 1761 spa_load_state(spa_t *spa) 1762 { 1763 return (spa->spa_load_state); 1764 } 1765 1766 uint64_t 1767 spa_freeze_txg(spa_t *spa) 1768 { 1769 return (spa->spa_freeze_txg); 1770 } 1771 1772 /* 1773 * Return the inflated asize for a logical write in bytes. This is used by the 1774 * DMU to calculate the space a logical write will require on disk. 1775 * If lsize is smaller than the largest physical block size allocatable on this 1776 * pool we use its value instead, since the write will end up using the whole 1777 * block anyway. 1778 */ 1779 uint64_t 1780 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1781 { 1782 if (lsize == 0) 1783 return (0); /* No inflation needed */ 1784 return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation); 1785 } 1786 1787 /* 1788 * Return the amount of slop space in bytes. It is typically 1/32 of the pool 1789 * (3.2%), minus the embedded log space. On very small pools, it may be 1790 * slightly larger than this. On very large pools, it will be capped to 1791 * the value of spa_max_slop. The embedded log space is not included in 1792 * spa_dspace. By subtracting it, the usable space (per "zfs list") is a 1793 * constant 97% of the total space, regardless of metaslab size (assuming the 1794 * default spa_slop_shift=5 and a non-tiny pool). 1795 * 1796 * See the comment above spa_slop_shift for more details. 1797 */ 1798 uint64_t 1799 spa_get_slop_space(spa_t *spa) 1800 { 1801 uint64_t space = spa_get_dspace(spa); 1802 uint64_t slop = MIN(space >> spa_slop_shift, spa_max_slop); 1803 1804 /* 1805 * Subtract the embedded log space, but no more than half the (3.2%) 1806 * unusable space. Note, the "no more than half" is only relevant if 1807 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by 1808 * default. 1809 */ 1810 uint64_t embedded_log = 1811 metaslab_class_get_dspace(spa_embedded_log_class(spa)); 1812 slop -= MIN(embedded_log, slop >> 1); 1813 1814 /* 1815 * Slop space should be at least spa_min_slop, but no more than half 1816 * the entire pool. 1817 */ 1818 slop = MAX(slop, MIN(space >> 1, spa_min_slop)); 1819 return (slop); 1820 } 1821 1822 uint64_t 1823 spa_get_dspace(spa_t *spa) 1824 { 1825 return (spa->spa_dspace); 1826 } 1827 1828 uint64_t 1829 spa_get_checkpoint_space(spa_t *spa) 1830 { 1831 return (spa->spa_checkpoint_info.sci_dspace); 1832 } 1833 1834 void 1835 spa_update_dspace(spa_t *spa) 1836 { 1837 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1838 ddt_get_dedup_dspace(spa); 1839 if (spa->spa_vdev_removal != NULL) { 1840 /* 1841 * We can't allocate from the removing device, so subtract 1842 * its size if it was included in dspace (i.e. if this is a 1843 * normal-class vdev, not special/dedup). 
This prevents the 1844 * DMU/DSL from filling up the (now smaller) pool while we 1845 * are in the middle of removing the device. 1846 * 1847 * Note that the DMU/DSL doesn't actually know or care 1848 * how much space is allocated (it does its own tracking 1849 * of how much space has been logically used). So it 1850 * doesn't matter that the data we are moving may be 1851 * allocated twice (on the old device and the new 1852 * device). 1853 */ 1854 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1855 vdev_t *vd = 1856 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1857 if (vd->vdev_mg->mg_class == spa_normal_class(spa)) { 1858 spa->spa_dspace -= spa_deflate(spa) ? 1859 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 1860 } 1861 spa_config_exit(spa, SCL_VDEV, FTAG); 1862 } 1863 } 1864 1865 /* 1866 * Return the failure mode that has been set to this pool. The default 1867 * behavior will be to block all I/Os when a complete failure occurs. 1868 */ 1869 uint64_t 1870 spa_get_failmode(spa_t *spa) 1871 { 1872 return (spa->spa_failmode); 1873 } 1874 1875 boolean_t 1876 spa_suspended(spa_t *spa) 1877 { 1878 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1879 } 1880 1881 uint64_t 1882 spa_version(spa_t *spa) 1883 { 1884 return (spa->spa_ubsync.ub_version); 1885 } 1886 1887 boolean_t 1888 spa_deflate(spa_t *spa) 1889 { 1890 return (spa->spa_deflate); 1891 } 1892 1893 metaslab_class_t * 1894 spa_normal_class(spa_t *spa) 1895 { 1896 return (spa->spa_normal_class); 1897 } 1898 1899 metaslab_class_t * 1900 spa_log_class(spa_t *spa) 1901 { 1902 return (spa->spa_log_class); 1903 } 1904 1905 metaslab_class_t * 1906 spa_embedded_log_class(spa_t *spa) 1907 { 1908 return (spa->spa_embedded_log_class); 1909 } 1910 1911 metaslab_class_t * 1912 spa_special_class(spa_t *spa) 1913 { 1914 return (spa->spa_special_class); 1915 } 1916 1917 metaslab_class_t * 1918 spa_dedup_class(spa_t *spa) 1919 { 1920 return (spa->spa_dedup_class); 1921 } 1922 1923 /* 1924 * Locate an appropriate allocation class 1925 */ 1926 metaslab_class_t * 1927 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1928 uint_t level, uint_t special_smallblk) 1929 { 1930 /* 1931 * ZIL allocations determine their class in zio_alloc_zil(). 1932 */ 1933 ASSERT(objtype != DMU_OT_INTENT_LOG); 1934 1935 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1936 1937 if (DMU_OT_IS_DDT(objtype)) { 1938 if (spa->spa_dedup_class->mc_groups != 0) 1939 return (spa_dedup_class(spa)); 1940 else if (has_special_class && zfs_ddt_data_is_special) 1941 return (spa_special_class(spa)); 1942 else 1943 return (spa_normal_class(spa)); 1944 } 1945 1946 /* Indirect blocks for user data can land in special if allowed */ 1947 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1948 if (has_special_class && zfs_user_indirect_is_special) 1949 return (spa_special_class(spa)); 1950 else 1951 return (spa_normal_class(spa)); 1952 } 1953 1954 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1955 if (has_special_class) 1956 return (spa_special_class(spa)); 1957 else 1958 return (spa_normal_class(spa)); 1959 } 1960 1961 /* 1962 * Allow small file blocks in special class in some cases (like 1963 * for the dRAID vdev feature). But always leave a reserve of 1964 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 
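 *
 * As a worked example (assuming the default
 * zfs_special_class_metadata_reserve_pct of 25): with 100 GiB of
 * special class space, limit = 100 GiB * (100 - 25) / 100 = 75 GiB,
 * so small file blocks are only steered to the special class while
 * its allocated space is below 75 GiB, keeping the last 25 GiB in
 * reserve for metadata.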
1965 */ 1966 if (DMU_OT_IS_FILE(objtype) && 1967 has_special_class && size <= special_smallblk) { 1968 metaslab_class_t *special = spa_special_class(spa); 1969 uint64_t alloc = metaslab_class_get_alloc(special); 1970 uint64_t space = metaslab_class_get_space(special); 1971 uint64_t limit = 1972 (space * (100 - zfs_special_class_metadata_reserve_pct)) 1973 / 100; 1974 1975 if (alloc < limit) 1976 return (special); 1977 } 1978 1979 return (spa_normal_class(spa)); 1980 } 1981 1982 void 1983 spa_evicting_os_register(spa_t *spa, objset_t *os) 1984 { 1985 mutex_enter(&spa->spa_evicting_os_lock); 1986 list_insert_head(&spa->spa_evicting_os_list, os); 1987 mutex_exit(&spa->spa_evicting_os_lock); 1988 } 1989 1990 void 1991 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1992 { 1993 mutex_enter(&spa->spa_evicting_os_lock); 1994 list_remove(&spa->spa_evicting_os_list, os); 1995 cv_broadcast(&spa->spa_evicting_os_cv); 1996 mutex_exit(&spa->spa_evicting_os_lock); 1997 } 1998 1999 void 2000 spa_evicting_os_wait(spa_t *spa) 2001 { 2002 mutex_enter(&spa->spa_evicting_os_lock); 2003 while (!list_is_empty(&spa->spa_evicting_os_list)) 2004 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2005 mutex_exit(&spa->spa_evicting_os_lock); 2006 2007 dmu_buf_user_evict_wait(); 2008 } 2009 2010 int 2011 spa_max_replication(spa_t *spa) 2012 { 2013 /* 2014 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2015 * handle BPs with more than one DVA allocated. Set our max 2016 * replication level accordingly. 2017 */ 2018 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2019 return (1); 2020 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2021 } 2022 2023 int 2024 spa_prev_software_version(spa_t *spa) 2025 { 2026 return (spa->spa_prev_software_version); 2027 } 2028 2029 uint64_t 2030 spa_deadman_synctime(spa_t *spa) 2031 { 2032 return (spa->spa_deadman_synctime); 2033 } 2034 2035 spa_autotrim_t 2036 spa_get_autotrim(spa_t *spa) 2037 { 2038 return (spa->spa_autotrim); 2039 } 2040 2041 uint64_t 2042 spa_deadman_ziotime(spa_t *spa) 2043 { 2044 return (spa->spa_deadman_ziotime); 2045 } 2046 2047 uint64_t 2048 spa_get_deadman_failmode(spa_t *spa) 2049 { 2050 return (spa->spa_deadman_failmode); 2051 } 2052 2053 void 2054 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2055 { 2056 if (strcmp(failmode, "wait") == 0) 2057 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2058 else if (strcmp(failmode, "continue") == 0) 2059 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2060 else if (strcmp(failmode, "panic") == 0) 2061 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2062 else 2063 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2064 } 2065 2066 void 2067 spa_set_deadman_ziotime(hrtime_t ns) 2068 { 2069 spa_t *spa = NULL; 2070 2071 if (spa_mode_global != SPA_MODE_UNINIT) { 2072 mutex_enter(&spa_namespace_lock); 2073 while ((spa = spa_next(spa)) != NULL) 2074 spa->spa_deadman_ziotime = ns; 2075 mutex_exit(&spa_namespace_lock); 2076 } 2077 } 2078 2079 void 2080 spa_set_deadman_synctime(hrtime_t ns) 2081 { 2082 spa_t *spa = NULL; 2083 2084 if (spa_mode_global != SPA_MODE_UNINIT) { 2085 mutex_enter(&spa_namespace_lock); 2086 while ((spa = spa_next(spa)) != NULL) 2087 spa->spa_deadman_synctime = ns; 2088 mutex_exit(&spa_namespace_lock); 2089 } 2090 } 2091 2092 uint64_t 2093 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2094 { 2095 uint64_t asize = DVA_GET_ASIZE(dva); 2096 uint64_t dsize = asize; 2097 2098 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 
2099 2100 if (asize != 0 && spa->spa_deflate) { 2101 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2102 if (vd != NULL) 2103 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2104 vd->vdev_deflate_ratio; 2105 } 2106 2107 return (dsize); 2108 } 2109 2110 uint64_t 2111 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2112 { 2113 uint64_t dsize = 0; 2114 2115 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2116 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2117 2118 return (dsize); 2119 } 2120 2121 uint64_t 2122 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2123 { 2124 uint64_t dsize = 0; 2125 2126 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2127 2128 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2129 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2130 2131 spa_config_exit(spa, SCL_VDEV, FTAG); 2132 2133 return (dsize); 2134 } 2135 2136 uint64_t 2137 spa_dirty_data(spa_t *spa) 2138 { 2139 return (spa->spa_dsl_pool->dp_dirty_total); 2140 } 2141 2142 /* 2143 * ========================================================================== 2144 * SPA Import Progress Routines 2145 * ========================================================================== 2146 */ 2147 2148 typedef struct spa_import_progress { 2149 uint64_t pool_guid; /* unique id for updates */ 2150 char *pool_name; 2151 spa_load_state_t spa_load_state; 2152 uint64_t mmp_sec_remaining; /* MMP activity check */ 2153 uint64_t spa_load_max_txg; /* rewind txg */ 2154 procfs_list_node_t smh_node; 2155 } spa_import_progress_t; 2156 2157 spa_history_list_t *spa_import_progress_list = NULL; 2158 2159 static int 2160 spa_import_progress_show_header(struct seq_file *f) 2161 { 2162 seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2163 "load_state", "multihost_secs", "max_txg", 2164 "pool_name"); 2165 return (0); 2166 } 2167 2168 static int 2169 spa_import_progress_show(struct seq_file *f, void *data) 2170 { 2171 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2172 2173 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2174 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2175 (u_longlong_t)sip->mmp_sec_remaining, 2176 (u_longlong_t)sip->spa_load_max_txg, 2177 (sip->pool_name ? 
sip->pool_name : "-")); 2178 2179 return (0); 2180 } 2181 2182 /* Remove oldest elements from list until there are no more than 'size' left */ 2183 static void 2184 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2185 { 2186 spa_import_progress_t *sip; 2187 while (shl->size > size) { 2188 sip = list_remove_head(&shl->procfs_list.pl_list); 2189 if (sip->pool_name) 2190 spa_strfree(sip->pool_name); 2191 kmem_free(sip, sizeof (spa_import_progress_t)); 2192 shl->size--; 2193 } 2194 2195 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2196 } 2197 2198 static void 2199 spa_import_progress_init(void) 2200 { 2201 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2202 KM_SLEEP); 2203 2204 spa_import_progress_list->size = 0; 2205 2206 spa_import_progress_list->procfs_list.pl_private = 2207 spa_import_progress_list; 2208 2209 procfs_list_install("zfs", 2210 NULL, 2211 "import_progress", 2212 0644, 2213 &spa_import_progress_list->procfs_list, 2214 spa_import_progress_show, 2215 spa_import_progress_show_header, 2216 NULL, 2217 offsetof(spa_import_progress_t, smh_node)); 2218 } 2219 2220 static void 2221 spa_import_progress_destroy(void) 2222 { 2223 spa_history_list_t *shl = spa_import_progress_list; 2224 procfs_list_uninstall(&shl->procfs_list); 2225 spa_import_progress_truncate(shl, 0); 2226 procfs_list_destroy(&shl->procfs_list); 2227 kmem_free(shl, sizeof (spa_history_list_t)); 2228 } 2229 2230 int 2231 spa_import_progress_set_state(uint64_t pool_guid, 2232 spa_load_state_t load_state) 2233 { 2234 spa_history_list_t *shl = spa_import_progress_list; 2235 spa_import_progress_t *sip; 2236 int error = ENOENT; 2237 2238 if (shl->size == 0) 2239 return (0); 2240 2241 mutex_enter(&shl->procfs_list.pl_lock); 2242 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2243 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2244 if (sip->pool_guid == pool_guid) { 2245 sip->spa_load_state = load_state; 2246 error = 0; 2247 break; 2248 } 2249 } 2250 mutex_exit(&shl->procfs_list.pl_lock); 2251 2252 return (error); 2253 } 2254 2255 int 2256 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2257 { 2258 spa_history_list_t *shl = spa_import_progress_list; 2259 spa_import_progress_t *sip; 2260 int error = ENOENT; 2261 2262 if (shl->size == 0) 2263 return (0); 2264 2265 mutex_enter(&shl->procfs_list.pl_lock); 2266 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2267 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2268 if (sip->pool_guid == pool_guid) { 2269 sip->spa_load_max_txg = load_max_txg; 2270 error = 0; 2271 break; 2272 } 2273 } 2274 mutex_exit(&shl->procfs_list.pl_lock); 2275 2276 return (error); 2277 } 2278 2279 int 2280 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2281 uint64_t mmp_sec_remaining) 2282 { 2283 spa_history_list_t *shl = spa_import_progress_list; 2284 spa_import_progress_t *sip; 2285 int error = ENOENT; 2286 2287 if (shl->size == 0) 2288 return (0); 2289 2290 mutex_enter(&shl->procfs_list.pl_lock); 2291 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2292 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2293 if (sip->pool_guid == pool_guid) { 2294 sip->mmp_sec_remaining = mmp_sec_remaining; 2295 error = 0; 2296 break; 2297 } 2298 } 2299 mutex_exit(&shl->procfs_list.pl_lock); 2300 2301 return (error); 2302 } 2303 2304 /* 2305 * A new import is in progress, add an entry. 
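 * The entry is rendered by spa_import_progress_show() as one row of the
 * procfs list installed by spa_import_progress_init() (columns:
 * pool_guid, load_state, multihost_secs, max_txg, pool_name) and is
 * dropped again by spa_import_progress_remove().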
2306 */ 2307 void 2308 spa_import_progress_add(spa_t *spa) 2309 { 2310 spa_history_list_t *shl = spa_import_progress_list; 2311 spa_import_progress_t *sip; 2312 char *poolname = NULL; 2313 2314 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2315 sip->pool_guid = spa_guid(spa); 2316 2317 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2318 &poolname); 2319 if (poolname == NULL) 2320 poolname = spa_name(spa); 2321 sip->pool_name = spa_strdup(poolname); 2322 sip->spa_load_state = spa_load_state(spa); 2323 2324 mutex_enter(&shl->procfs_list.pl_lock); 2325 procfs_list_add(&shl->procfs_list, sip); 2326 shl->size++; 2327 mutex_exit(&shl->procfs_list.pl_lock); 2328 } 2329 2330 void 2331 spa_import_progress_remove(uint64_t pool_guid) 2332 { 2333 spa_history_list_t *shl = spa_import_progress_list; 2334 spa_import_progress_t *sip; 2335 2336 mutex_enter(&shl->procfs_list.pl_lock); 2337 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2338 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2339 if (sip->pool_guid == pool_guid) { 2340 if (sip->pool_name) 2341 spa_strfree(sip->pool_name); 2342 list_remove(&shl->procfs_list.pl_list, sip); 2343 shl->size--; 2344 kmem_free(sip, sizeof (spa_import_progress_t)); 2345 break; 2346 } 2347 } 2348 mutex_exit(&shl->procfs_list.pl_lock); 2349 } 2350 2351 /* 2352 * ========================================================================== 2353 * Initialization and Termination 2354 * ========================================================================== 2355 */ 2356 2357 static int 2358 spa_name_compare(const void *a1, const void *a2) 2359 { 2360 const spa_t *s1 = a1; 2361 const spa_t *s2 = a2; 2362 int s; 2363 2364 s = strcmp(s1->spa_name, s2->spa_name); 2365 2366 return (TREE_ISIGN(s)); 2367 } 2368 2369 void 2370 spa_boot_init(void) 2371 { 2372 spa_config_load(); 2373 } 2374 2375 void 2376 spa_init(spa_mode_t mode) 2377 { 2378 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2379 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2380 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2381 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2382 2383 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2384 offsetof(spa_t, spa_avl)); 2385 2386 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2387 offsetof(spa_aux_t, aux_avl)); 2388 2389 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2390 offsetof(spa_aux_t, aux_avl)); 2391 2392 spa_mode_global = mode; 2393 2394 #ifndef _KERNEL 2395 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2396 struct sigaction sa; 2397 2398 sa.sa_flags = SA_SIGINFO; 2399 sigemptyset(&sa.sa_mask); 2400 sa.sa_sigaction = arc_buf_sigsegv; 2401 2402 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2403 perror("could not enable watchpoints: " 2404 "sigaction(SIGSEGV, ...) 
= "); 2405 } else { 2406 arc_watch = B_TRUE; 2407 } 2408 } 2409 #endif 2410 2411 fm_init(); 2412 zfs_refcount_init(); 2413 unique_init(); 2414 zfs_btree_init(); 2415 metaslab_stat_init(); 2416 ddt_init(); 2417 zio_init(); 2418 dmu_init(); 2419 zil_init(); 2420 vdev_cache_stat_init(); 2421 vdev_mirror_stat_init(); 2422 vdev_raidz_math_init(); 2423 vdev_file_init(); 2424 zfs_prop_init(); 2425 zpool_prop_init(); 2426 zpool_feature_init(); 2427 spa_config_load(); 2428 l2arc_start(); 2429 scan_init(); 2430 qat_init(); 2431 spa_import_progress_init(); 2432 } 2433 2434 void 2435 spa_fini(void) 2436 { 2437 l2arc_stop(); 2438 2439 spa_evict_all(); 2440 2441 vdev_file_fini(); 2442 vdev_cache_stat_fini(); 2443 vdev_mirror_stat_fini(); 2444 vdev_raidz_math_fini(); 2445 zil_fini(); 2446 dmu_fini(); 2447 zio_fini(); 2448 ddt_fini(); 2449 metaslab_stat_fini(); 2450 zfs_btree_fini(); 2451 unique_fini(); 2452 zfs_refcount_fini(); 2453 fm_fini(); 2454 scan_fini(); 2455 qat_fini(); 2456 spa_import_progress_destroy(); 2457 2458 avl_destroy(&spa_namespace_avl); 2459 avl_destroy(&spa_spare_avl); 2460 avl_destroy(&spa_l2cache_avl); 2461 2462 cv_destroy(&spa_namespace_cv); 2463 mutex_destroy(&spa_namespace_lock); 2464 mutex_destroy(&spa_spare_lock); 2465 mutex_destroy(&spa_l2cache_lock); 2466 } 2467 2468 /* 2469 * Return whether this pool has a dedicated slog device. No locking needed. 2470 * It's not a problem if the wrong answer is returned as it's only for 2471 * performance and not correctness. 2472 */ 2473 boolean_t 2474 spa_has_slogs(spa_t *spa) 2475 { 2476 return (spa->spa_log_class->mc_groups != 0); 2477 } 2478 2479 spa_log_state_t 2480 spa_get_log_state(spa_t *spa) 2481 { 2482 return (spa->spa_log_state); 2483 } 2484 2485 void 2486 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2487 { 2488 spa->spa_log_state = state; 2489 } 2490 2491 boolean_t 2492 spa_is_root(spa_t *spa) 2493 { 2494 return (spa->spa_is_root); 2495 } 2496 2497 boolean_t 2498 spa_writeable(spa_t *spa) 2499 { 2500 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2501 } 2502 2503 /* 2504 * Returns true if there is a pending sync task in any of the current 2505 * syncing txg, the current quiescing txg, or the current open txg. 2506 */ 2507 boolean_t 2508 spa_has_pending_synctask(spa_t *spa) 2509 { 2510 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2511 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2512 } 2513 2514 spa_mode_t 2515 spa_mode(spa_t *spa) 2516 { 2517 return (spa->spa_mode); 2518 } 2519 2520 uint64_t 2521 spa_bootfs(spa_t *spa) 2522 { 2523 return (spa->spa_bootfs); 2524 } 2525 2526 uint64_t 2527 spa_delegation(spa_t *spa) 2528 { 2529 return (spa->spa_delegation); 2530 } 2531 2532 objset_t * 2533 spa_meta_objset(spa_t *spa) 2534 { 2535 return (spa->spa_meta_objset); 2536 } 2537 2538 enum zio_checksum 2539 spa_dedup_checksum(spa_t *spa) 2540 { 2541 return (spa->spa_dedup_checksum); 2542 } 2543 2544 /* 2545 * Reset pool scan stat per scan pass (or reboot). 
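 * The per-pass counters initialized here are reported to userspace by
 * spa_scan_get_stats() below, as the "data not stored on disk" fields
 * of pool_scan_stat_t.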
2546 */ 2547 void 2548 spa_scan_stat_init(spa_t *spa) 2549 { 2550 /* data not stored on disk */ 2551 spa->spa_scan_pass_start = gethrestime_sec(); 2552 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2553 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2554 else 2555 spa->spa_scan_pass_scrub_pause = 0; 2556 spa->spa_scan_pass_scrub_spent_paused = 0; 2557 spa->spa_scan_pass_exam = 0; 2558 spa->spa_scan_pass_issued = 0; 2559 vdev_scan_stat_init(spa->spa_root_vdev); 2560 } 2561 2562 /* 2563 * Get scan stats for zpool status reports 2564 */ 2565 int 2566 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2567 { 2568 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2569 2570 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 2571 return (SET_ERROR(ENOENT)); 2572 bzero(ps, sizeof (pool_scan_stat_t)); 2573 2574 /* data stored on disk */ 2575 ps->pss_func = scn->scn_phys.scn_func; 2576 ps->pss_state = scn->scn_phys.scn_state; 2577 ps->pss_start_time = scn->scn_phys.scn_start_time; 2578 ps->pss_end_time = scn->scn_phys.scn_end_time; 2579 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2580 ps->pss_examined = scn->scn_phys.scn_examined; 2581 ps->pss_to_process = scn->scn_phys.scn_to_process; 2582 ps->pss_processed = scn->scn_phys.scn_processed; 2583 ps->pss_errors = scn->scn_phys.scn_errors; 2584 2585 /* data not stored on disk */ 2586 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2587 ps->pss_pass_start = spa->spa_scan_pass_start; 2588 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2589 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2590 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2591 ps->pss_issued = 2592 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2593 2594 return (0); 2595 } 2596 2597 int 2598 spa_maxblocksize(spa_t *spa) 2599 { 2600 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2601 return (SPA_MAXBLOCKSIZE); 2602 else 2603 return (SPA_OLD_MAXBLOCKSIZE); 2604 } 2605 2606 2607 /* 2608 * Returns the txg that the last device removal completed. No indirect mappings 2609 * have been added since this txg. 2610 */ 2611 uint64_t 2612 spa_get_last_removal_txg(spa_t *spa) 2613 { 2614 uint64_t vdevid; 2615 uint64_t ret = -1ULL; 2616 2617 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2618 /* 2619 * sr_prev_indirect_vdev is only modified while holding all the 2620 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2621 * examining it. 2622 */ 2623 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2624 2625 while (vdevid != -1ULL) { 2626 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2627 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2628 2629 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2630 2631 /* 2632 * If the removal did not remap any data, we don't care. 2633 */ 2634 if (vdev_indirect_births_count(vib) != 0) { 2635 ret = vdev_indirect_births_last_entry_txg(vib); 2636 break; 2637 } 2638 2639 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2640 } 2641 spa_config_exit(spa, SCL_VDEV, FTAG); 2642 2643 IMPLY(ret != -1ULL, 2644 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2645 2646 return (ret); 2647 } 2648 2649 int 2650 spa_maxdnodesize(spa_t *spa) 2651 { 2652 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2653 return (DNODE_MAX_SIZE); 2654 else 2655 return (DNODE_MIN_SIZE); 2656 } 2657 2658 boolean_t 2659 spa_multihost(spa_t *spa) 2660 { 2661 return (spa->spa_multihost ? 
B_TRUE : B_FALSE); 2662 } 2663 2664 uint32_t 2665 spa_get_hostid(spa_t *spa) 2666 { 2667 return (spa->spa_hostid); 2668 } 2669 2670 boolean_t 2671 spa_trust_config(spa_t *spa) 2672 { 2673 return (spa->spa_trust_config); 2674 } 2675 2676 uint64_t 2677 spa_missing_tvds_allowed(spa_t *spa) 2678 { 2679 return (spa->spa_missing_tvds_allowed); 2680 } 2681 2682 space_map_t * 2683 spa_syncing_log_sm(spa_t *spa) 2684 { 2685 return (spa->spa_syncing_log_sm); 2686 } 2687 2688 void 2689 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2690 { 2691 spa->spa_missing_tvds = missing; 2692 } 2693 2694 /* 2695 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2696 */ 2697 const char * 2698 spa_state_to_name(spa_t *spa) 2699 { 2700 ASSERT3P(spa, !=, NULL); 2701 2702 /* 2703 * it is possible for the spa to exist, without root vdev 2704 * as the spa transitions during import/export 2705 */ 2706 vdev_t *rvd = spa->spa_root_vdev; 2707 if (rvd == NULL) { 2708 return ("TRANSITIONING"); 2709 } 2710 vdev_state_t state = rvd->vdev_state; 2711 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2712 2713 if (spa_suspended(spa) && 2714 (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)) 2715 return ("SUSPENDED"); 2716 2717 switch (state) { 2718 case VDEV_STATE_CLOSED: 2719 case VDEV_STATE_OFFLINE: 2720 return ("OFFLINE"); 2721 case VDEV_STATE_REMOVED: 2722 return ("REMOVED"); 2723 case VDEV_STATE_CANT_OPEN: 2724 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2725 return ("FAULTED"); 2726 else if (aux == VDEV_AUX_SPLIT_POOL) 2727 return ("SPLIT"); 2728 else 2729 return ("UNAVAIL"); 2730 case VDEV_STATE_FAULTED: 2731 return ("FAULTED"); 2732 case VDEV_STATE_DEGRADED: 2733 return ("DEGRADED"); 2734 case VDEV_STATE_HEALTHY: 2735 return ("ONLINE"); 2736 default: 2737 break; 2738 } 2739 2740 return ("UNKNOWN"); 2741 } 2742 2743 boolean_t 2744 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2745 { 2746 vdev_t *rvd = spa->spa_root_vdev; 2747 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2748 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2749 return (B_FALSE); 2750 } 2751 return (B_TRUE); 2752 } 2753 2754 boolean_t 2755 spa_has_checkpoint(spa_t *spa) 2756 { 2757 return (spa->spa_checkpoint_txg != 0); 2758 } 2759 2760 boolean_t 2761 spa_importing_readonly_checkpoint(spa_t *spa) 2762 { 2763 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2764 spa->spa_mode == SPA_MODE_READ); 2765 } 2766 2767 uint64_t 2768 spa_min_claim_txg(spa_t *spa) 2769 { 2770 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2771 2772 if (checkpoint_txg != 0) 2773 return (checkpoint_txg + 1); 2774 2775 return (spa->spa_first_txg); 2776 } 2777 2778 /* 2779 * If there is a checkpoint, async destroys may consume more space from 2780 * the pool instead of freeing it. In an attempt to save the pool from 2781 * getting suspended when it is about to run out of space, we stop 2782 * processing async destroys. 2783 */ 2784 boolean_t 2785 spa_suspend_async_destroy(spa_t *spa) 2786 { 2787 dsl_pool_t *dp = spa_get_dsl(spa); 2788 2789 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2790 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2791 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2792 uint64_t avail = (unreserved > used) ? 
(unreserved - used) : 0; 2793 2794 if (spa_has_checkpoint(spa) && avail == 0) 2795 return (B_TRUE); 2796 2797 return (B_FALSE); 2798 } 2799 2800 #if defined(_KERNEL) 2801 2802 int 2803 param_set_deadman_failmode_common(const char *val) 2804 { 2805 spa_t *spa = NULL; 2806 char *p; 2807 2808 if (val == NULL) 2809 return (SET_ERROR(EINVAL)); 2810 2811 if ((p = strchr(val, '\n')) != NULL) 2812 *p = '\0'; 2813 2814 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2815 strcmp(val, "panic")) 2816 return (SET_ERROR(EINVAL)); 2817 2818 if (spa_mode_global != SPA_MODE_UNINIT) { 2819 mutex_enter(&spa_namespace_lock); 2820 while ((spa = spa_next(spa)) != NULL) 2821 spa_set_deadman_failmode(spa, val); 2822 mutex_exit(&spa_namespace_lock); 2823 } 2824 2825 return (0); 2826 } 2827 #endif 2828 2829 /* Namespace manipulation */ 2830 EXPORT_SYMBOL(spa_lookup); 2831 EXPORT_SYMBOL(spa_add); 2832 EXPORT_SYMBOL(spa_remove); 2833 EXPORT_SYMBOL(spa_next); 2834 2835 /* Refcount functions */ 2836 EXPORT_SYMBOL(spa_open_ref); 2837 EXPORT_SYMBOL(spa_close); 2838 EXPORT_SYMBOL(spa_refcount_zero); 2839 2840 /* Pool configuration lock */ 2841 EXPORT_SYMBOL(spa_config_tryenter); 2842 EXPORT_SYMBOL(spa_config_enter); 2843 EXPORT_SYMBOL(spa_config_exit); 2844 EXPORT_SYMBOL(spa_config_held); 2845 2846 /* Pool vdev add/remove lock */ 2847 EXPORT_SYMBOL(spa_vdev_enter); 2848 EXPORT_SYMBOL(spa_vdev_exit); 2849 2850 /* Pool vdev state change lock */ 2851 EXPORT_SYMBOL(spa_vdev_state_enter); 2852 EXPORT_SYMBOL(spa_vdev_state_exit); 2853 2854 /* Accessor functions */ 2855 EXPORT_SYMBOL(spa_shutting_down); 2856 EXPORT_SYMBOL(spa_get_dsl); 2857 EXPORT_SYMBOL(spa_get_rootblkptr); 2858 EXPORT_SYMBOL(spa_set_rootblkptr); 2859 EXPORT_SYMBOL(spa_altroot); 2860 EXPORT_SYMBOL(spa_sync_pass); 2861 EXPORT_SYMBOL(spa_name); 2862 EXPORT_SYMBOL(spa_guid); 2863 EXPORT_SYMBOL(spa_last_synced_txg); 2864 EXPORT_SYMBOL(spa_first_txg); 2865 EXPORT_SYMBOL(spa_syncing_txg); 2866 EXPORT_SYMBOL(spa_version); 2867 EXPORT_SYMBOL(spa_state); 2868 EXPORT_SYMBOL(spa_load_state); 2869 EXPORT_SYMBOL(spa_freeze_txg); 2870 EXPORT_SYMBOL(spa_get_dspace); 2871 EXPORT_SYMBOL(spa_update_dspace); 2872 EXPORT_SYMBOL(spa_deflate); 2873 EXPORT_SYMBOL(spa_normal_class); 2874 EXPORT_SYMBOL(spa_log_class); 2875 EXPORT_SYMBOL(spa_special_class); 2876 EXPORT_SYMBOL(spa_preferred_class); 2877 EXPORT_SYMBOL(spa_max_replication); 2878 EXPORT_SYMBOL(spa_prev_software_version); 2879 EXPORT_SYMBOL(spa_get_failmode); 2880 EXPORT_SYMBOL(spa_suspended); 2881 EXPORT_SYMBOL(spa_bootfs); 2882 EXPORT_SYMBOL(spa_delegation); 2883 EXPORT_SYMBOL(spa_meta_objset); 2884 EXPORT_SYMBOL(spa_maxblocksize); 2885 EXPORT_SYMBOL(spa_maxdnodesize); 2886 2887 /* Miscellaneous support routines */ 2888 EXPORT_SYMBOL(spa_guid_exists); 2889 EXPORT_SYMBOL(spa_strdup); 2890 EXPORT_SYMBOL(spa_strfree); 2891 EXPORT_SYMBOL(spa_get_random); 2892 EXPORT_SYMBOL(spa_generate_guid); 2893 EXPORT_SYMBOL(snprintf_blkptr); 2894 EXPORT_SYMBOL(spa_freeze); 2895 EXPORT_SYMBOL(spa_upgrade); 2896 EXPORT_SYMBOL(spa_evict_all); 2897 EXPORT_SYMBOL(spa_lookup_by_guid); 2898 EXPORT_SYMBOL(spa_has_spare); 2899 EXPORT_SYMBOL(dva_get_dsize_sync); 2900 EXPORT_SYMBOL(bp_get_dsize_sync); 2901 EXPORT_SYMBOL(bp_get_dsize); 2902 EXPORT_SYMBOL(spa_has_slogs); 2903 EXPORT_SYMBOL(spa_is_root); 2904 EXPORT_SYMBOL(spa_writeable); 2905 EXPORT_SYMBOL(spa_mode); 2906 EXPORT_SYMBOL(spa_namespace_lock); 2907 EXPORT_SYMBOL(spa_trust_config); 2908 EXPORT_SYMBOL(spa_missing_tvds_allowed); 2909 EXPORT_SYMBOL(spa_set_missing_tvds); 2910 
EXPORT_SYMBOL(spa_state_to_name); 2911 EXPORT_SYMBOL(spa_importing_readonly_checkpoint); 2912 EXPORT_SYMBOL(spa_min_claim_txg); 2913 EXPORT_SYMBOL(spa_suspend_async_destroy); 2914 EXPORT_SYMBOL(spa_has_checkpoint); 2915 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable); 2916
2917 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW, 2918 "Set additional debugging flags"); 2919
2920 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW, 2921 "Set to attempt to recover from fatal errors"); 2922
2923 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW, 2924 "Set to ignore I/O errors during free and permanently leak the space"); 2925
2926 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW, 2927 "Dead I/O check interval in milliseconds"); 2928
2929 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW, 2930 "Enable deadman timer"); 2931
2932 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW, 2933 "SPA size estimate multiplication factor"); 2934
2935 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW, 2936 "Place DDT data into the special class"); 2937
2938 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW, 2939 "Place user data indirect blocks into the special class"); 2940
2941 /* BEGIN CSTYLED */ 2942 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode, 2943 param_set_deadman_failmode, param_get_charp, ZMOD_RW, 2944 "Failmode for deadman timer"); 2945
2946 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms, 2947 param_set_deadman_synctime, param_get_ulong, ZMOD_RW, 2948 "Pool sync expiration time in milliseconds"); 2949
2950 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms, 2951 param_set_deadman_ziotime, param_get_ulong, ZMOD_RW, 2952 "I/O expiration time in milliseconds"); 2953
2954 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW, 2955 "Small file blocks in special vdevs depend on this much " 2956 "free space being available"); 2957 /* END CSTYLED */ 2958
2959 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift, 2960 param_get_int, ZMOD_RW, "Reserved free space in pool"); 2961
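/*
 * Illustrative note: on Linux builds the deadman failmode declared above
 * is exposed as the writable module parameter zfs_deadman_failmode, so
 * it can typically be retuned at runtime with something like
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *
 * param_set_deadman_failmode_common() strips the trailing newline and
 * accepts only "wait", "continue", or "panic"; the chosen mode is then
 * applied to every imported pool via spa_set_deadman_failmode().
 */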