/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>
#include <sys/qat.h>
#include <sys/zstd/zstd.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
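
/*
 * Illustrative sketch (added for exposition, not part of the original code):
 * a consumer honoring the locking order described above.  The pool name
 * "tank" and the helper name example_read_config() are hypothetical; the
 * calls themselves (spa_lookup(), spa_open_ref(), spa_config_enter/exit(),
 * spa_close()) are the functions defined later in this file.
 *
 *	static int
 *	example_read_config(void)
 *	{
 *		spa_t *spa;
 *
 *		mutex_enter(&spa_namespace_lock);	// namespace lock first
 *		if ((spa = spa_lookup("tank")) == NULL) {
 *			mutex_exit(&spa_namespace_lock);
 *			return (SET_ERROR(ENOENT));
 *		}
 *		spa_open_ref(spa, FTAG);		// then a refcount hold
 *		mutex_exit(&spa_namespace_lock);
 *
 *		// Finally the config lock, as reader, for a trivial inquiry.
 *		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *		// ... inspect the vdev tree ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *		spa_close(spa, FTAG);
 *		return (0);
 *	}
 */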

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static const int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

spa_mode_t spa_mode_global = SPA_MODE_UNINIT;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, set_error, spa, and indirect_remap is on
 * by default in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
	ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
int zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
int zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in one of three behaviors controlled by zfs_deadman_failmode.
 */
unsigned long zfs_deadman_synctime_ms = 600000UL;  /* 10 min. */

/*
 * This value controls the maximum amount of time zio_wait() will block for an
 * outstanding IO.  By default this is 300 seconds at which point the "hung"
 * behavior will be applied as described for zfs_deadman_synctime_ms.
 */
unsigned long zfs_deadman_ziotime_ms = 300000UL;  /* 5 min. */

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
unsigned long zfs_deadman_checktime_ms = 60000UL;  /* 1 min. */

/*
 * By default the deadman is enabled.
 */
int zfs_deadman_enabled = B_TRUE;

/*
 * Controls the behavior of the deadman when it detects a "hung" I/O.
 * Valid values are zfs_deadman_failmode=<wait|continue|panic>.
 *
 * wait     - Wait for the "hung" I/O (default)
 * continue - Attempt to recover from a "hung" I/O
 * panic    - Panic the system
 */
const char *zfs_deadman_failmode = "wait";

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
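
/*
 * Worked example (added for illustration, not in the original source): with
 * the default spa_asize_inflation of 24, a 16 KB logical write on a pool
 * whose largest ashift is 12 (4 KB sectors) is charged at most
 * MAX(16384, 1 << 12) * 24 = 393216 bytes (384 KB) by
 * spa_get_worst_case_asize(), defined later in this file.
 */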

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed (bounded by spa_max_slop).  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
static const uint64_t spa_min_slop = 128ULL * 1024 * 1024;
static const uint64_t spa_max_slop = 128ULL * 1024 * 1024 * 1024;
static const int spa_allocators = 4;
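
/*
 * Worked examples (added for illustration, not in the original source),
 * using the defaults above (spa_slop_shift = 5, i.e. 1/32 of the pool):
 *
 *	1 GB pool:	1 GB / 32 = 32 MB, raised to spa_min_slop (128 MB),
 *			but never more than half the pool.
 *	10 TB pool:	10 TB / 32 = 320 GB, capped at spa_max_slop (128 GB).
 *
 * The precise computation, including the embedded log adjustment, is in
 * spa_get_slop_space() below.
 */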
"trusted" : "untrusted", buf); 417 } 418 419 /* 420 * By default dedup and user data indirects land in the special class 421 */ 422 static int zfs_ddt_data_is_special = B_TRUE; 423 static int zfs_user_indirect_is_special = B_TRUE; 424 425 /* 426 * The percentage of special class final space reserved for metadata only. 427 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only 428 * let metadata into the class. 429 */ 430 static int zfs_special_class_metadata_reserve_pct = 25; 431 432 /* 433 * ========================================================================== 434 * SPA config locking 435 * ========================================================================== 436 */ 437 static void 438 spa_config_lock_init(spa_t *spa) 439 { 440 for (int i = 0; i < SCL_LOCKS; i++) { 441 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 442 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 443 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 444 scl->scl_writer = NULL; 445 scl->scl_write_wanted = 0; 446 scl->scl_count = 0; 447 } 448 } 449 450 static void 451 spa_config_lock_destroy(spa_t *spa) 452 { 453 for (int i = 0; i < SCL_LOCKS; i++) { 454 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 455 mutex_destroy(&scl->scl_lock); 456 cv_destroy(&scl->scl_cv); 457 ASSERT(scl->scl_writer == NULL); 458 ASSERT(scl->scl_write_wanted == 0); 459 ASSERT(scl->scl_count == 0); 460 } 461 } 462 463 int 464 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 465 { 466 for (int i = 0; i < SCL_LOCKS; i++) { 467 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 468 if (!(locks & (1 << i))) 469 continue; 470 mutex_enter(&scl->scl_lock); 471 if (rw == RW_READER) { 472 if (scl->scl_writer || scl->scl_write_wanted) { 473 mutex_exit(&scl->scl_lock); 474 spa_config_exit(spa, locks & ((1 << i) - 1), 475 tag); 476 return (0); 477 } 478 } else { 479 ASSERT(scl->scl_writer != curthread); 480 if (scl->scl_count != 0) { 481 mutex_exit(&scl->scl_lock); 482 spa_config_exit(spa, locks & ((1 << i) - 1), 483 tag); 484 return (0); 485 } 486 scl->scl_writer = curthread; 487 } 488 scl->scl_count++; 489 mutex_exit(&scl->scl_lock); 490 } 491 return (1); 492 } 493 494 void 495 spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw) 496 { 497 (void) tag; 498 int wlocks_held = 0; 499 500 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 501 502 for (int i = 0; i < SCL_LOCKS; i++) { 503 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 504 if (scl->scl_writer == curthread) 505 wlocks_held |= (1 << i); 506 if (!(locks & (1 << i))) 507 continue; 508 mutex_enter(&scl->scl_lock); 509 if (rw == RW_READER) { 510 while (scl->scl_writer || scl->scl_write_wanted) { 511 cv_wait(&scl->scl_cv, &scl->scl_lock); 512 } 513 } else { 514 ASSERT(scl->scl_writer != curthread); 515 while (scl->scl_count != 0) { 516 scl->scl_write_wanted++; 517 cv_wait(&scl->scl_cv, &scl->scl_lock); 518 scl->scl_write_wanted--; 519 } 520 scl->scl_writer = curthread; 521 } 522 scl->scl_count++; 523 mutex_exit(&scl->scl_lock); 524 } 525 ASSERT3U(wlocks_held, <=, locks); 526 } 527 528 void 529 spa_config_exit(spa_t *spa, int locks, const void *tag) 530 { 531 (void) tag; 532 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 533 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 534 if (!(locks & (1 << i))) 535 continue; 536 mutex_enter(&scl->scl_lock); 537 ASSERT(scl->scl_count > 0); 538 if (--scl->scl_count == 0) { 539 ASSERT(scl->scl_writer == NULL || 540 scl->scl_writer == curthread); 541 scl->scl_writer = NULL; /* OK in either case */ 

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && scl->scl_count != 0) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
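
/*
 * Illustrative sketch (added for exposition, not part of the original code):
 * spa_lookup() accepts full dataset names and truncates them at the first
 * '/', '@' or '#', so a caller holding the namespace lock can resolve a
 * dataset name directly to its pool.  The name "tank/home@snap" below is
 * hypothetical.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank/home@snap");	// same as "tank"
 *	mutex_exit(&spa_namespace_lock);
 */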

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/* Disable the deadman if the pool is suspended. */
	if (spa_suspended(spa))
		return;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    (u_longlong_t)++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev, FTAG);

	spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
	    spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
	    MSEC_TO_TICK(zfs_deadman_checktime_ms));
}

static int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;
	spa->spa_hostid = zone_get_hostid(NULL);

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
	spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
	spa_set_deadman_failmode(spa, zfs_deadman_failmode);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);
	spa_stats_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot)
		spa->spa_root = spa_strdup(altroot);

	spa->spa_alloc_count = spa_allocators;
	spa->spa_allocs = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (spa_alloc_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_allocs[i].spaa_lock, NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_allocs[i].spaa_tree, zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;
	spa->spa_min_alloc = INT_MAX;

	/* Reset cached value */
	spa->spa_dedup_dspace = ~0ULL;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
	ASSERT0(spa->spa_waiters);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root)
		spa_strfree(spa->spa_root);

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_allocs[i].spaa_tree);
		mutex_destroy(&spa->spa_allocs[i].spaa_lock);
	}
	kmem_free(spa->spa_allocs, spa->spa_alloc_count *
	    sizeof (spa_alloc_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	nvlist_free(spa->spa_feat_stats);
	spa_config_set(spa, NULL);

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_stats_destroy(spa);
	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);
	cv_destroy(&spa->spa_activities_cv);
	cv_destroy(&spa->spa_waiters_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_feat_stats_lock);
	mutex_destroy(&spa->spa_activities_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

static void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

static void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

static boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

static void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
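
/*
 * Illustrative sketch (added for exposition, not part of the original code):
 * because spares are tracked globally by guid, adding the same physical
 * device as a spare to a second pool (via that pool's own vdev_t) just bumps
 * the reference count in spa_spare_avl.  The vdev pointers below are
 * hypothetical.
 *
 *	spa_spare_add(vd_pool_a);	// guid enters the AVL tree, count 1
 *	spa_spare_add(vd_pool_b);	// same guid from pool B, count 2
 *
 *	uint64_t pool;
 *	int refs;
 *	(void) spa_spare_exists(vd_pool_a->vdev_guid, &pool, &refs);
 *	// refs == 2; pool is nonzero only while the spare is active
 */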

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * The same as spa_vdev_enter() above but additionally takes the guid of
 * the vdev being detached.  When there is a rebuild in process it will be
 * suspended while the vdev tree is modified then resumed by spa_vdev_exit().
 * The rebuild is canceled if only a single child remains after the detach.
 */
uint64_t
spa_vdev_detach_enter(spa_t *spa, uint64_t guid)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	if (guid != 0) {
		vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
		if (vd) {
			vdev_rebuild_stop_wait(vd->vdev_top);
		}
	}

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_embedded_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_STATE_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_STATE_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);
	vdev_rebuild_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
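
/*
 * Illustrative sketch (added for exposition, not part of the original code):
 * the canonical shape of a vdev configuration change using the wrappers
 * above.  example_add_vdev() and the "modify the vdev tree" step are
 * hypothetical placeholders.
 *
 *	static int
 *	example_add_vdev(spa_t *spa)
 *	{
 *		uint64_t txg = spa_vdev_enter(spa);	// namespace + SCL_ALL
 *		int error = 0;
 *
 *		// ... modify the vdev tree for transaction group 'txg' ...
 *
 *		// Waits for 'txg' to sync and updates the config cache.
 *		return (spa_vdev_exit(spa, NULL, txg, error));
 *	}
 */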

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;
	vdev_t *vdev_top;

	if (vd == NULL || vd == spa->spa_root_vdev) {
		vdev_top = spa->spa_root_vdev;
	} else {
		vdev_top = vd->vdev_top;
	}

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE, B_FALSE);

	if (vd != NULL) {
		if (vd != spa->spa_root_vdev)
			vdev_state_dirty(vdev_top);

		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(8) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	memcpy(new, s, len + 1);

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid;

	if (spa != NULL) {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(spa_guid(spa), guid));
	} else {
		do {
			(void) random_get_pseudo_bytes((void *)&guid,
			    sizeof (guid));
		} while (guid == 0 || spa_guid_exists(guid, 0));
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
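
/*
 * Illustrative example (added for exposition, not part of the original code):
 * zfs_strtonum() parses only lowercase hex digits and stops at the first
 * non-hex character.
 *
 *	char *end;
 *	uint64_t v = zfs_strtonum("1a2fq", &end);	// v == 0x1a2f, *end == 'q'
 */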

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the pool
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/*
 * Return the inflated asize for a logical write in bytes.  This is used by the
 * DMU to calculate the space a logical write will require on disk.
 * If lsize is smaller than the largest physical block size allocatable on this
 * pool we use its value instead, since the write will end up using the whole
 * block anyway.
 */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	if (lsize == 0)
		return (0);	/* No inflation needed */
	return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
 * (3.2%), minus the embedded log space.  On very small pools, it may be
 * slightly larger than this.  On very large pools, it will be capped to
 * the value of spa_max_slop.  The embedded log space is not included in
 * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming the
 * default spa_slop_shift=5 and a non-tiny pool).
 *
 * See the comment above spa_slop_shift for more details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = 0;
	uint64_t slop = 0;

	/*
	 * Make sure spa_dedup_dspace has been set.
	 */
	if (spa->spa_dedup_dspace == ~0ULL)
		spa_update_dspace(spa);

	/*
	 * spa_get_dspace() includes the space only logically "used" by
	 * deduplicated data, so since it's not useful to reserve more
	 * space with more deduplicated data, we subtract that out here.
	 */
	space = spa_get_dspace(spa) - spa->spa_dedup_dspace;
	slop = MIN(space >> spa_slop_shift, spa_max_slop);

	/*
	 * Subtract the embedded log space, but no more than half the (3.2%)
	 * unusable space.  Note, the "no more than half" is only relevant if
	 * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true by
	 * default.
	 */
	uint64_t embedded_log =
	    metaslab_class_get_dspace(spa_embedded_log_class(spa));
	slop -= MIN(embedded_log, slop >> 1);

	/*
	 * Slop space should be at least spa_min_slop, but no more than half
	 * the entire pool.
	 */
	slop = MAX(slop, MIN(space >> 1, spa_min_slop));
	return (slop);
}
If there are 1841 * no snapshots of the file, the available space should remain 1842 * the same. The old blocks could be freed from the 1843 * non-allocating vdev, but the new blocks must be allocated on 1844 * other (allocating) vdevs. By reserving the entire size of 1845 * the non-allocating vdevs (including allocated space), we 1846 * ensure that there will be enough space on the allocating 1847 * vdevs for this file overwrite to succeed. 1848 * 1849 * Note that the DMU/DSL doesn't actually know or care 1850 * how much space is allocated (it does its own tracking 1851 * of how much space has been logically used). So it 1852 * doesn't matter that the data we are moving may be 1853 * allocated twice (on the old device and the new device). 1854 */ 1855 ASSERT3U(spa->spa_dspace, >=, spa->spa_nonallocating_dspace); 1856 spa->spa_dspace -= spa->spa_nonallocating_dspace; 1857 } 1858 } 1859 1860 /* 1861 * Return the failure mode that has been set to this pool. The default 1862 * behavior will be to block all I/Os when a complete failure occurs. 1863 */ 1864 uint64_t 1865 spa_get_failmode(spa_t *spa) 1866 { 1867 return (spa->spa_failmode); 1868 } 1869 1870 boolean_t 1871 spa_suspended(spa_t *spa) 1872 { 1873 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1874 } 1875 1876 uint64_t 1877 spa_version(spa_t *spa) 1878 { 1879 return (spa->spa_ubsync.ub_version); 1880 } 1881 1882 boolean_t 1883 spa_deflate(spa_t *spa) 1884 { 1885 return (spa->spa_deflate); 1886 } 1887 1888 metaslab_class_t * 1889 spa_normal_class(spa_t *spa) 1890 { 1891 return (spa->spa_normal_class); 1892 } 1893 1894 metaslab_class_t * 1895 spa_log_class(spa_t *spa) 1896 { 1897 return (spa->spa_log_class); 1898 } 1899 1900 metaslab_class_t * 1901 spa_embedded_log_class(spa_t *spa) 1902 { 1903 return (spa->spa_embedded_log_class); 1904 } 1905 1906 metaslab_class_t * 1907 spa_special_class(spa_t *spa) 1908 { 1909 return (spa->spa_special_class); 1910 } 1911 1912 metaslab_class_t * 1913 spa_dedup_class(spa_t *spa) 1914 { 1915 return (spa->spa_dedup_class); 1916 } 1917 1918 /* 1919 * Locate an appropriate allocation class 1920 */ 1921 metaslab_class_t * 1922 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1923 uint_t level, uint_t special_smallblk) 1924 { 1925 /* 1926 * ZIL allocations determine their class in zio_alloc_zil(). 1927 */ 1928 ASSERT(objtype != DMU_OT_INTENT_LOG); 1929 1930 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1931 1932 if (DMU_OT_IS_DDT(objtype)) { 1933 if (spa->spa_dedup_class->mc_groups != 0) 1934 return (spa_dedup_class(spa)); 1935 else if (has_special_class && zfs_ddt_data_is_special) 1936 return (spa_special_class(spa)); 1937 else 1938 return (spa_normal_class(spa)); 1939 } 1940 1941 /* Indirect blocks for user data can land in special if allowed */ 1942 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1943 if (has_special_class && zfs_user_indirect_is_special) 1944 return (spa_special_class(spa)); 1945 else 1946 return (spa_normal_class(spa)); 1947 } 1948 1949 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1950 if (has_special_class) 1951 return (spa_special_class(spa)); 1952 else 1953 return (spa_normal_class(spa)); 1954 } 1955 1956 /* 1957 * Allow small file blocks in special class in some cases (like 1958 * for the dRAID vdev feature). But always leave a reserve of 1959 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 
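 *
 * For example (illustrative numbers, assuming the default
 * zfs_special_class_metadata_reserve_pct of 25): with 100 GiB of special
 * class space, small file blocks are only placed there while the class has
 * less than 75 GiB allocated; once that limit is reached they fall back to
 * the normal class, keeping the last 25% of the special class free for
 * metadata.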
1960 */ 1961 if (DMU_OT_IS_FILE(objtype) && 1962 has_special_class && size <= special_smallblk) { 1963 metaslab_class_t *special = spa_special_class(spa); 1964 uint64_t alloc = metaslab_class_get_alloc(special); 1965 uint64_t space = metaslab_class_get_space(special); 1966 uint64_t limit = 1967 (space * (100 - zfs_special_class_metadata_reserve_pct)) 1968 / 100; 1969 1970 if (alloc < limit) 1971 return (special); 1972 } 1973 1974 return (spa_normal_class(spa)); 1975 } 1976 1977 void 1978 spa_evicting_os_register(spa_t *spa, objset_t *os) 1979 { 1980 mutex_enter(&spa->spa_evicting_os_lock); 1981 list_insert_head(&spa->spa_evicting_os_list, os); 1982 mutex_exit(&spa->spa_evicting_os_lock); 1983 } 1984 1985 void 1986 spa_evicting_os_deregister(spa_t *spa, objset_t *os) 1987 { 1988 mutex_enter(&spa->spa_evicting_os_lock); 1989 list_remove(&spa->spa_evicting_os_list, os); 1990 cv_broadcast(&spa->spa_evicting_os_cv); 1991 mutex_exit(&spa->spa_evicting_os_lock); 1992 } 1993 1994 void 1995 spa_evicting_os_wait(spa_t *spa) 1996 { 1997 mutex_enter(&spa->spa_evicting_os_lock); 1998 while (!list_is_empty(&spa->spa_evicting_os_list)) 1999 cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); 2000 mutex_exit(&spa->spa_evicting_os_lock); 2001 2002 dmu_buf_user_evict_wait(); 2003 } 2004 2005 int 2006 spa_max_replication(spa_t *spa) 2007 { 2008 /* 2009 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to 2010 * handle BPs with more than one DVA allocated. Set our max 2011 * replication level accordingly. 2012 */ 2013 if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) 2014 return (1); 2015 return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); 2016 } 2017 2018 int 2019 spa_prev_software_version(spa_t *spa) 2020 { 2021 return (spa->spa_prev_software_version); 2022 } 2023 2024 uint64_t 2025 spa_deadman_synctime(spa_t *spa) 2026 { 2027 return (spa->spa_deadman_synctime); 2028 } 2029 2030 spa_autotrim_t 2031 spa_get_autotrim(spa_t *spa) 2032 { 2033 return (spa->spa_autotrim); 2034 } 2035 2036 uint64_t 2037 spa_deadman_ziotime(spa_t *spa) 2038 { 2039 return (spa->spa_deadman_ziotime); 2040 } 2041 2042 uint64_t 2043 spa_get_deadman_failmode(spa_t *spa) 2044 { 2045 return (spa->spa_deadman_failmode); 2046 } 2047 2048 void 2049 spa_set_deadman_failmode(spa_t *spa, const char *failmode) 2050 { 2051 if (strcmp(failmode, "wait") == 0) 2052 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2053 else if (strcmp(failmode, "continue") == 0) 2054 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE; 2055 else if (strcmp(failmode, "panic") == 0) 2056 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC; 2057 else 2058 spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT; 2059 } 2060 2061 void 2062 spa_set_deadman_ziotime(hrtime_t ns) 2063 { 2064 spa_t *spa = NULL; 2065 2066 if (spa_mode_global != SPA_MODE_UNINIT) { 2067 mutex_enter(&spa_namespace_lock); 2068 while ((spa = spa_next(spa)) != NULL) 2069 spa->spa_deadman_ziotime = ns; 2070 mutex_exit(&spa_namespace_lock); 2071 } 2072 } 2073 2074 void 2075 spa_set_deadman_synctime(hrtime_t ns) 2076 { 2077 spa_t *spa = NULL; 2078 2079 if (spa_mode_global != SPA_MODE_UNINIT) { 2080 mutex_enter(&spa_namespace_lock); 2081 while ((spa = spa_next(spa)) != NULL) 2082 spa->spa_deadman_synctime = ns; 2083 mutex_exit(&spa_namespace_lock); 2084 } 2085 } 2086 2087 uint64_t 2088 dva_get_dsize_sync(spa_t *spa, const dva_t *dva) 2089 { 2090 uint64_t asize = DVA_GET_ASIZE(dva); 2091 uint64_t dsize = asize; 2092 2093 ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); 
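	/*
	 * With deflation enabled, scale the allocated size by the top-level
	 * vdev's deflate ratio so the reported size discounts vdev-level
	 * redundancy overhead (e.g. raidz parity).
	 */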
2094 2095 if (asize != 0 && spa->spa_deflate) { 2096 vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); 2097 if (vd != NULL) 2098 dsize = (asize >> SPA_MINBLOCKSHIFT) * 2099 vd->vdev_deflate_ratio; 2100 } 2101 2102 return (dsize); 2103 } 2104 2105 uint64_t 2106 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) 2107 { 2108 uint64_t dsize = 0; 2109 2110 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2111 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2112 2113 return (dsize); 2114 } 2115 2116 uint64_t 2117 bp_get_dsize(spa_t *spa, const blkptr_t *bp) 2118 { 2119 uint64_t dsize = 0; 2120 2121 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2122 2123 for (int d = 0; d < BP_GET_NDVAS(bp); d++) 2124 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); 2125 2126 spa_config_exit(spa, SCL_VDEV, FTAG); 2127 2128 return (dsize); 2129 } 2130 2131 uint64_t 2132 spa_dirty_data(spa_t *spa) 2133 { 2134 return (spa->spa_dsl_pool->dp_dirty_total); 2135 } 2136 2137 /* 2138 * ========================================================================== 2139 * SPA Import Progress Routines 2140 * ========================================================================== 2141 */ 2142 2143 typedef struct spa_import_progress { 2144 uint64_t pool_guid; /* unique id for updates */ 2145 char *pool_name; 2146 spa_load_state_t spa_load_state; 2147 uint64_t mmp_sec_remaining; /* MMP activity check */ 2148 uint64_t spa_load_max_txg; /* rewind txg */ 2149 procfs_list_node_t smh_node; 2150 } spa_import_progress_t; 2151 2152 spa_history_list_t *spa_import_progress_list = NULL; 2153 2154 static int 2155 spa_import_progress_show_header(struct seq_file *f) 2156 { 2157 seq_printf(f, "%-20s %-14s %-14s %-12s %s\n", "pool_guid", 2158 "load_state", "multihost_secs", "max_txg", 2159 "pool_name"); 2160 return (0); 2161 } 2162 2163 static int 2164 spa_import_progress_show(struct seq_file *f, void *data) 2165 { 2166 spa_import_progress_t *sip = (spa_import_progress_t *)data; 2167 2168 seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %s\n", 2169 (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state, 2170 (u_longlong_t)sip->mmp_sec_remaining, 2171 (u_longlong_t)sip->spa_load_max_txg, 2172 (sip->pool_name ? 
sip->pool_name : "-")); 2173 2174 return (0); 2175 } 2176 2177 /* Remove oldest elements from list until there are no more than 'size' left */ 2178 static void 2179 spa_import_progress_truncate(spa_history_list_t *shl, unsigned int size) 2180 { 2181 spa_import_progress_t *sip; 2182 while (shl->size > size) { 2183 sip = list_remove_head(&shl->procfs_list.pl_list); 2184 if (sip->pool_name) 2185 spa_strfree(sip->pool_name); 2186 kmem_free(sip, sizeof (spa_import_progress_t)); 2187 shl->size--; 2188 } 2189 2190 IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list)); 2191 } 2192 2193 static void 2194 spa_import_progress_init(void) 2195 { 2196 spa_import_progress_list = kmem_zalloc(sizeof (spa_history_list_t), 2197 KM_SLEEP); 2198 2199 spa_import_progress_list->size = 0; 2200 2201 spa_import_progress_list->procfs_list.pl_private = 2202 spa_import_progress_list; 2203 2204 procfs_list_install("zfs", 2205 NULL, 2206 "import_progress", 2207 0644, 2208 &spa_import_progress_list->procfs_list, 2209 spa_import_progress_show, 2210 spa_import_progress_show_header, 2211 NULL, 2212 offsetof(spa_import_progress_t, smh_node)); 2213 } 2214 2215 static void 2216 spa_import_progress_destroy(void) 2217 { 2218 spa_history_list_t *shl = spa_import_progress_list; 2219 procfs_list_uninstall(&shl->procfs_list); 2220 spa_import_progress_truncate(shl, 0); 2221 procfs_list_destroy(&shl->procfs_list); 2222 kmem_free(shl, sizeof (spa_history_list_t)); 2223 } 2224 2225 int 2226 spa_import_progress_set_state(uint64_t pool_guid, 2227 spa_load_state_t load_state) 2228 { 2229 spa_history_list_t *shl = spa_import_progress_list; 2230 spa_import_progress_t *sip; 2231 int error = ENOENT; 2232 2233 if (shl->size == 0) 2234 return (0); 2235 2236 mutex_enter(&shl->procfs_list.pl_lock); 2237 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2238 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2239 if (sip->pool_guid == pool_guid) { 2240 sip->spa_load_state = load_state; 2241 error = 0; 2242 break; 2243 } 2244 } 2245 mutex_exit(&shl->procfs_list.pl_lock); 2246 2247 return (error); 2248 } 2249 2250 int 2251 spa_import_progress_set_max_txg(uint64_t pool_guid, uint64_t load_max_txg) 2252 { 2253 spa_history_list_t *shl = spa_import_progress_list; 2254 spa_import_progress_t *sip; 2255 int error = ENOENT; 2256 2257 if (shl->size == 0) 2258 return (0); 2259 2260 mutex_enter(&shl->procfs_list.pl_lock); 2261 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2262 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2263 if (sip->pool_guid == pool_guid) { 2264 sip->spa_load_max_txg = load_max_txg; 2265 error = 0; 2266 break; 2267 } 2268 } 2269 mutex_exit(&shl->procfs_list.pl_lock); 2270 2271 return (error); 2272 } 2273 2274 int 2275 spa_import_progress_set_mmp_check(uint64_t pool_guid, 2276 uint64_t mmp_sec_remaining) 2277 { 2278 spa_history_list_t *shl = spa_import_progress_list; 2279 spa_import_progress_t *sip; 2280 int error = ENOENT; 2281 2282 if (shl->size == 0) 2283 return (0); 2284 2285 mutex_enter(&shl->procfs_list.pl_lock); 2286 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2287 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2288 if (sip->pool_guid == pool_guid) { 2289 sip->mmp_sec_remaining = mmp_sec_remaining; 2290 error = 0; 2291 break; 2292 } 2293 } 2294 mutex_exit(&shl->procfs_list.pl_lock); 2295 2296 return (error); 2297 } 2298 2299 /* 2300 * A new import is in progress, add an entry. 
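 * The entry is updated during the import by
 * spa_import_progress_set_state(), spa_import_progress_set_max_txg() and
 * spa_import_progress_set_mmp_check(), and is removed by
 * spa_import_progress_remove() once the import completes.  The list itself
 * is exported through the "import_progress" procfs_list installed in
 * spa_import_progress_init().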
2301 */ 2302 void 2303 spa_import_progress_add(spa_t *spa) 2304 { 2305 spa_history_list_t *shl = spa_import_progress_list; 2306 spa_import_progress_t *sip; 2307 char *poolname = NULL; 2308 2309 sip = kmem_zalloc(sizeof (spa_import_progress_t), KM_SLEEP); 2310 sip->pool_guid = spa_guid(spa); 2311 2312 (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME, 2313 &poolname); 2314 if (poolname == NULL) 2315 poolname = spa_name(spa); 2316 sip->pool_name = spa_strdup(poolname); 2317 sip->spa_load_state = spa_load_state(spa); 2318 2319 mutex_enter(&shl->procfs_list.pl_lock); 2320 procfs_list_add(&shl->procfs_list, sip); 2321 shl->size++; 2322 mutex_exit(&shl->procfs_list.pl_lock); 2323 } 2324 2325 void 2326 spa_import_progress_remove(uint64_t pool_guid) 2327 { 2328 spa_history_list_t *shl = spa_import_progress_list; 2329 spa_import_progress_t *sip; 2330 2331 mutex_enter(&shl->procfs_list.pl_lock); 2332 for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL; 2333 sip = list_prev(&shl->procfs_list.pl_list, sip)) { 2334 if (sip->pool_guid == pool_guid) { 2335 if (sip->pool_name) 2336 spa_strfree(sip->pool_name); 2337 list_remove(&shl->procfs_list.pl_list, sip); 2338 shl->size--; 2339 kmem_free(sip, sizeof (spa_import_progress_t)); 2340 break; 2341 } 2342 } 2343 mutex_exit(&shl->procfs_list.pl_lock); 2344 } 2345 2346 /* 2347 * ========================================================================== 2348 * Initialization and Termination 2349 * ========================================================================== 2350 */ 2351 2352 static int 2353 spa_name_compare(const void *a1, const void *a2) 2354 { 2355 const spa_t *s1 = a1; 2356 const spa_t *s2 = a2; 2357 int s; 2358 2359 s = strcmp(s1->spa_name, s2->spa_name); 2360 2361 return (TREE_ISIGN(s)); 2362 } 2363 2364 void 2365 spa_boot_init(void) 2366 { 2367 spa_config_load(); 2368 } 2369 2370 void 2371 spa_init(spa_mode_t mode) 2372 { 2373 mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); 2374 mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); 2375 mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); 2376 cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); 2377 2378 avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), 2379 offsetof(spa_t, spa_avl)); 2380 2381 avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), 2382 offsetof(spa_aux_t, aux_avl)); 2383 2384 avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), 2385 offsetof(spa_aux_t, aux_avl)); 2386 2387 spa_mode_global = mode; 2388 2389 #ifndef _KERNEL 2390 if (spa_mode_global != SPA_MODE_READ && dprintf_find_string("watch")) { 2391 struct sigaction sa; 2392 2393 sa.sa_flags = SA_SIGINFO; 2394 sigemptyset(&sa.sa_mask); 2395 sa.sa_sigaction = arc_buf_sigsegv; 2396 2397 if (sigaction(SIGSEGV, &sa, NULL) == -1) { 2398 perror("could not enable watchpoints: " 2399 "sigaction(SIGSEGV, ...) 
= "); 2400 } else { 2401 arc_watch = B_TRUE; 2402 } 2403 } 2404 #endif 2405 2406 fm_init(); 2407 zfs_refcount_init(); 2408 unique_init(); 2409 zfs_btree_init(); 2410 metaslab_stat_init(); 2411 ddt_init(); 2412 zio_init(); 2413 dmu_init(); 2414 zil_init(); 2415 vdev_cache_stat_init(); 2416 vdev_mirror_stat_init(); 2417 vdev_raidz_math_init(); 2418 vdev_file_init(); 2419 zfs_prop_init(); 2420 zpool_prop_init(); 2421 zpool_feature_init(); 2422 spa_config_load(); 2423 vdev_prop_init(); 2424 l2arc_start(); 2425 scan_init(); 2426 qat_init(); 2427 spa_import_progress_init(); 2428 } 2429 2430 void 2431 spa_fini(void) 2432 { 2433 l2arc_stop(); 2434 2435 spa_evict_all(); 2436 2437 vdev_file_fini(); 2438 vdev_cache_stat_fini(); 2439 vdev_mirror_stat_fini(); 2440 vdev_raidz_math_fini(); 2441 zil_fini(); 2442 dmu_fini(); 2443 zio_fini(); 2444 ddt_fini(); 2445 metaslab_stat_fini(); 2446 zfs_btree_fini(); 2447 unique_fini(); 2448 zfs_refcount_fini(); 2449 fm_fini(); 2450 scan_fini(); 2451 qat_fini(); 2452 spa_import_progress_destroy(); 2453 2454 avl_destroy(&spa_namespace_avl); 2455 avl_destroy(&spa_spare_avl); 2456 avl_destroy(&spa_l2cache_avl); 2457 2458 cv_destroy(&spa_namespace_cv); 2459 mutex_destroy(&spa_namespace_lock); 2460 mutex_destroy(&spa_spare_lock); 2461 mutex_destroy(&spa_l2cache_lock); 2462 } 2463 2464 /* 2465 * Return whether this pool has a dedicated slog device. No locking needed. 2466 * It's not a problem if the wrong answer is returned as it's only for 2467 * performance and not correctness. 2468 */ 2469 boolean_t 2470 spa_has_slogs(spa_t *spa) 2471 { 2472 return (spa->spa_log_class->mc_groups != 0); 2473 } 2474 2475 spa_log_state_t 2476 spa_get_log_state(spa_t *spa) 2477 { 2478 return (spa->spa_log_state); 2479 } 2480 2481 void 2482 spa_set_log_state(spa_t *spa, spa_log_state_t state) 2483 { 2484 spa->spa_log_state = state; 2485 } 2486 2487 boolean_t 2488 spa_is_root(spa_t *spa) 2489 { 2490 return (spa->spa_is_root); 2491 } 2492 2493 boolean_t 2494 spa_writeable(spa_t *spa) 2495 { 2496 return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config); 2497 } 2498 2499 /* 2500 * Returns true if there is a pending sync task in any of the current 2501 * syncing txg, the current quiescing txg, or the current open txg. 2502 */ 2503 boolean_t 2504 spa_has_pending_synctask(spa_t *spa) 2505 { 2506 return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) || 2507 !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks)); 2508 } 2509 2510 spa_mode_t 2511 spa_mode(spa_t *spa) 2512 { 2513 return (spa->spa_mode); 2514 } 2515 2516 uint64_t 2517 spa_bootfs(spa_t *spa) 2518 { 2519 return (spa->spa_bootfs); 2520 } 2521 2522 uint64_t 2523 spa_delegation(spa_t *spa) 2524 { 2525 return (spa->spa_delegation); 2526 } 2527 2528 objset_t * 2529 spa_meta_objset(spa_t *spa) 2530 { 2531 return (spa->spa_meta_objset); 2532 } 2533 2534 enum zio_checksum 2535 spa_dedup_checksum(spa_t *spa) 2536 { 2537 return (spa->spa_dedup_checksum); 2538 } 2539 2540 /* 2541 * Reset pool scan stat per scan pass (or reboot). 
2542 */ 2543 void 2544 spa_scan_stat_init(spa_t *spa) 2545 { 2546 /* data not stored on disk */ 2547 spa->spa_scan_pass_start = gethrestime_sec(); 2548 if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan)) 2549 spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start; 2550 else 2551 spa->spa_scan_pass_scrub_pause = 0; 2552 spa->spa_scan_pass_scrub_spent_paused = 0; 2553 spa->spa_scan_pass_exam = 0; 2554 spa->spa_scan_pass_issued = 0; 2555 vdev_scan_stat_init(spa->spa_root_vdev); 2556 } 2557 2558 /* 2559 * Get scan stats for zpool status reports 2560 */ 2561 int 2562 spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) 2563 { 2564 dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; 2565 2566 if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) 2567 return (SET_ERROR(ENOENT)); 2568 memset(ps, 0, sizeof (pool_scan_stat_t)); 2569 2570 /* data stored on disk */ 2571 ps->pss_func = scn->scn_phys.scn_func; 2572 ps->pss_state = scn->scn_phys.scn_state; 2573 ps->pss_start_time = scn->scn_phys.scn_start_time; 2574 ps->pss_end_time = scn->scn_phys.scn_end_time; 2575 ps->pss_to_examine = scn->scn_phys.scn_to_examine; 2576 ps->pss_examined = scn->scn_phys.scn_examined; 2577 ps->pss_to_process = scn->scn_phys.scn_to_process; 2578 ps->pss_processed = scn->scn_phys.scn_processed; 2579 ps->pss_errors = scn->scn_phys.scn_errors; 2580 2581 /* data not stored on disk */ 2582 ps->pss_pass_exam = spa->spa_scan_pass_exam; 2583 ps->pss_pass_start = spa->spa_scan_pass_start; 2584 ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause; 2585 ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused; 2586 ps->pss_pass_issued = spa->spa_scan_pass_issued; 2587 ps->pss_issued = 2588 scn->scn_issued_before_pass + spa->spa_scan_pass_issued; 2589 2590 return (0); 2591 } 2592 2593 int 2594 spa_maxblocksize(spa_t *spa) 2595 { 2596 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) 2597 return (SPA_MAXBLOCKSIZE); 2598 else 2599 return (SPA_OLD_MAXBLOCKSIZE); 2600 } 2601 2602 2603 /* 2604 * Returns the txg that the last device removal completed. No indirect mappings 2605 * have been added since this txg. 2606 */ 2607 uint64_t 2608 spa_get_last_removal_txg(spa_t *spa) 2609 { 2610 uint64_t vdevid; 2611 uint64_t ret = -1ULL; 2612 2613 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 2614 /* 2615 * sr_prev_indirect_vdev is only modified while holding all the 2616 * config locks, so it is sufficient to hold SCL_VDEV as reader when 2617 * examining it. 2618 */ 2619 vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev; 2620 2621 while (vdevid != -1ULL) { 2622 vdev_t *vd = vdev_lookup_top(spa, vdevid); 2623 vdev_indirect_births_t *vib = vd->vdev_indirect_births; 2624 2625 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 2626 2627 /* 2628 * If the removal did not remap any data, we don't care. 2629 */ 2630 if (vdev_indirect_births_count(vib) != 0) { 2631 ret = vdev_indirect_births_last_entry_txg(vib); 2632 break; 2633 } 2634 2635 vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev; 2636 } 2637 spa_config_exit(spa, SCL_VDEV, FTAG); 2638 2639 IMPLY(ret != -1ULL, 2640 spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL)); 2641 2642 return (ret); 2643 } 2644 2645 int 2646 spa_maxdnodesize(spa_t *spa) 2647 { 2648 if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) 2649 return (DNODE_MAX_SIZE); 2650 else 2651 return (DNODE_MIN_SIZE); 2652 } 2653 2654 boolean_t 2655 spa_multihost(spa_t *spa) 2656 { 2657 return (spa->spa_multihost ? 
B_TRUE : B_FALSE); 2658 } 2659 2660 uint32_t 2661 spa_get_hostid(spa_t *spa) 2662 { 2663 return (spa->spa_hostid); 2664 } 2665 2666 boolean_t 2667 spa_trust_config(spa_t *spa) 2668 { 2669 return (spa->spa_trust_config); 2670 } 2671 2672 uint64_t 2673 spa_missing_tvds_allowed(spa_t *spa) 2674 { 2675 return (spa->spa_missing_tvds_allowed); 2676 } 2677 2678 space_map_t * 2679 spa_syncing_log_sm(spa_t *spa) 2680 { 2681 return (spa->spa_syncing_log_sm); 2682 } 2683 2684 void 2685 spa_set_missing_tvds(spa_t *spa, uint64_t missing) 2686 { 2687 spa->spa_missing_tvds = missing; 2688 } 2689 2690 /* 2691 * Return the pool state string ("ONLINE", "DEGRADED", "SUSPENDED", etc). 2692 */ 2693 const char * 2694 spa_state_to_name(spa_t *spa) 2695 { 2696 ASSERT3P(spa, !=, NULL); 2697 2698 /* 2699 * it is possible for the spa to exist, without root vdev 2700 * as the spa transitions during import/export 2701 */ 2702 vdev_t *rvd = spa->spa_root_vdev; 2703 if (rvd == NULL) { 2704 return ("TRANSITIONING"); 2705 } 2706 vdev_state_t state = rvd->vdev_state; 2707 vdev_aux_t aux = rvd->vdev_stat.vs_aux; 2708 2709 if (spa_suspended(spa) && 2710 (spa_get_failmode(spa) != ZIO_FAILURE_MODE_CONTINUE)) 2711 return ("SUSPENDED"); 2712 2713 switch (state) { 2714 case VDEV_STATE_CLOSED: 2715 case VDEV_STATE_OFFLINE: 2716 return ("OFFLINE"); 2717 case VDEV_STATE_REMOVED: 2718 return ("REMOVED"); 2719 case VDEV_STATE_CANT_OPEN: 2720 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 2721 return ("FAULTED"); 2722 else if (aux == VDEV_AUX_SPLIT_POOL) 2723 return ("SPLIT"); 2724 else 2725 return ("UNAVAIL"); 2726 case VDEV_STATE_FAULTED: 2727 return ("FAULTED"); 2728 case VDEV_STATE_DEGRADED: 2729 return ("DEGRADED"); 2730 case VDEV_STATE_HEALTHY: 2731 return ("ONLINE"); 2732 default: 2733 break; 2734 } 2735 2736 return ("UNKNOWN"); 2737 } 2738 2739 boolean_t 2740 spa_top_vdevs_spacemap_addressable(spa_t *spa) 2741 { 2742 vdev_t *rvd = spa->spa_root_vdev; 2743 for (uint64_t c = 0; c < rvd->vdev_children; c++) { 2744 if (!vdev_is_spacemap_addressable(rvd->vdev_child[c])) 2745 return (B_FALSE); 2746 } 2747 return (B_TRUE); 2748 } 2749 2750 boolean_t 2751 spa_has_checkpoint(spa_t *spa) 2752 { 2753 return (spa->spa_checkpoint_txg != 0); 2754 } 2755 2756 boolean_t 2757 spa_importing_readonly_checkpoint(spa_t *spa) 2758 { 2759 return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) && 2760 spa->spa_mode == SPA_MODE_READ); 2761 } 2762 2763 uint64_t 2764 spa_min_claim_txg(spa_t *spa) 2765 { 2766 uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg; 2767 2768 if (checkpoint_txg != 0) 2769 return (checkpoint_txg + 1); 2770 2771 return (spa->spa_first_txg); 2772 } 2773 2774 /* 2775 * If there is a checkpoint, async destroys may consume more space from 2776 * the pool instead of freeing it. In an attempt to save the pool from 2777 * getting suspended when it is about to run out of space, we stop 2778 * processing async destroys. 2779 */ 2780 boolean_t 2781 spa_suspend_async_destroy(spa_t *spa) 2782 { 2783 dsl_pool_t *dp = spa_get_dsl(spa); 2784 2785 uint64_t unreserved = dsl_pool_unreserved_space(dp, 2786 ZFS_SPACE_CHECK_EXTRA_RESERVED); 2787 uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes; 2788 uint64_t avail = (unreserved > used) ? 
(unreserved - used) : 0; 2789 2790 if (spa_has_checkpoint(spa) && avail == 0) 2791 return (B_TRUE); 2792 2793 return (B_FALSE); 2794 } 2795 2796 #if defined(_KERNEL) 2797 2798 int 2799 param_set_deadman_failmode_common(const char *val) 2800 { 2801 spa_t *spa = NULL; 2802 char *p; 2803 2804 if (val == NULL) 2805 return (SET_ERROR(EINVAL)); 2806 2807 if ((p = strchr(val, '\n')) != NULL) 2808 *p = '\0'; 2809 2810 if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 && 2811 strcmp(val, "panic")) 2812 return (SET_ERROR(EINVAL)); 2813 2814 if (spa_mode_global != SPA_MODE_UNINIT) { 2815 mutex_enter(&spa_namespace_lock); 2816 while ((spa = spa_next(spa)) != NULL) 2817 spa_set_deadman_failmode(spa, val); 2818 mutex_exit(&spa_namespace_lock); 2819 } 2820 2821 return (0); 2822 } 2823 #endif 2824 2825 /* Namespace manipulation */ 2826 EXPORT_SYMBOL(spa_lookup); 2827 EXPORT_SYMBOL(spa_add); 2828 EXPORT_SYMBOL(spa_remove); 2829 EXPORT_SYMBOL(spa_next); 2830 2831 /* Refcount functions */ 2832 EXPORT_SYMBOL(spa_open_ref); 2833 EXPORT_SYMBOL(spa_close); 2834 EXPORT_SYMBOL(spa_refcount_zero); 2835 2836 /* Pool configuration lock */ 2837 EXPORT_SYMBOL(spa_config_tryenter); 2838 EXPORT_SYMBOL(spa_config_enter); 2839 EXPORT_SYMBOL(spa_config_exit); 2840 EXPORT_SYMBOL(spa_config_held); 2841 2842 /* Pool vdev add/remove lock */ 2843 EXPORT_SYMBOL(spa_vdev_enter); 2844 EXPORT_SYMBOL(spa_vdev_exit); 2845 2846 /* Pool vdev state change lock */ 2847 EXPORT_SYMBOL(spa_vdev_state_enter); 2848 EXPORT_SYMBOL(spa_vdev_state_exit); 2849 2850 /* Accessor functions */ 2851 EXPORT_SYMBOL(spa_shutting_down); 2852 EXPORT_SYMBOL(spa_get_dsl); 2853 EXPORT_SYMBOL(spa_get_rootblkptr); 2854 EXPORT_SYMBOL(spa_set_rootblkptr); 2855 EXPORT_SYMBOL(spa_altroot); 2856 EXPORT_SYMBOL(spa_sync_pass); 2857 EXPORT_SYMBOL(spa_name); 2858 EXPORT_SYMBOL(spa_guid); 2859 EXPORT_SYMBOL(spa_last_synced_txg); 2860 EXPORT_SYMBOL(spa_first_txg); 2861 EXPORT_SYMBOL(spa_syncing_txg); 2862 EXPORT_SYMBOL(spa_version); 2863 EXPORT_SYMBOL(spa_state); 2864 EXPORT_SYMBOL(spa_load_state); 2865 EXPORT_SYMBOL(spa_freeze_txg); 2866 EXPORT_SYMBOL(spa_get_dspace); 2867 EXPORT_SYMBOL(spa_update_dspace); 2868 EXPORT_SYMBOL(spa_deflate); 2869 EXPORT_SYMBOL(spa_normal_class); 2870 EXPORT_SYMBOL(spa_log_class); 2871 EXPORT_SYMBOL(spa_special_class); 2872 EXPORT_SYMBOL(spa_preferred_class); 2873 EXPORT_SYMBOL(spa_max_replication); 2874 EXPORT_SYMBOL(spa_prev_software_version); 2875 EXPORT_SYMBOL(spa_get_failmode); 2876 EXPORT_SYMBOL(spa_suspended); 2877 EXPORT_SYMBOL(spa_bootfs); 2878 EXPORT_SYMBOL(spa_delegation); 2879 EXPORT_SYMBOL(spa_meta_objset); 2880 EXPORT_SYMBOL(spa_maxblocksize); 2881 EXPORT_SYMBOL(spa_maxdnodesize); 2882 2883 /* Miscellaneous support routines */ 2884 EXPORT_SYMBOL(spa_guid_exists); 2885 EXPORT_SYMBOL(spa_strdup); 2886 EXPORT_SYMBOL(spa_strfree); 2887 EXPORT_SYMBOL(spa_generate_guid); 2888 EXPORT_SYMBOL(snprintf_blkptr); 2889 EXPORT_SYMBOL(spa_freeze); 2890 EXPORT_SYMBOL(spa_upgrade); 2891 EXPORT_SYMBOL(spa_evict_all); 2892 EXPORT_SYMBOL(spa_lookup_by_guid); 2893 EXPORT_SYMBOL(spa_has_spare); 2894 EXPORT_SYMBOL(dva_get_dsize_sync); 2895 EXPORT_SYMBOL(bp_get_dsize_sync); 2896 EXPORT_SYMBOL(bp_get_dsize); 2897 EXPORT_SYMBOL(spa_has_slogs); 2898 EXPORT_SYMBOL(spa_is_root); 2899 EXPORT_SYMBOL(spa_writeable); 2900 EXPORT_SYMBOL(spa_mode); 2901 EXPORT_SYMBOL(spa_namespace_lock); 2902 EXPORT_SYMBOL(spa_trust_config); 2903 EXPORT_SYMBOL(spa_missing_tvds_allowed); 2904 EXPORT_SYMBOL(spa_set_missing_tvds); 2905 EXPORT_SYMBOL(spa_state_to_name); 
2906 EXPORT_SYMBOL(spa_importing_readonly_checkpoint);
2907 EXPORT_SYMBOL(spa_min_claim_txg);
2908 EXPORT_SYMBOL(spa_suspend_async_destroy);
2909 EXPORT_SYMBOL(spa_has_checkpoint);
2910 EXPORT_SYMBOL(spa_top_vdevs_spacemap_addressable);
2911 
2912 ZFS_MODULE_PARAM(zfs, zfs_, flags, UINT, ZMOD_RW,
2913 	"Set additional debugging flags");
2914 
2915 ZFS_MODULE_PARAM(zfs, zfs_, recover, INT, ZMOD_RW,
2916 	"Set to attempt to recover from fatal errors");
2917 
2918 ZFS_MODULE_PARAM(zfs, zfs_, free_leak_on_eio, INT, ZMOD_RW,
2919 	"Set to ignore I/O errors during free and permanently leak the space");
2920 
2921 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, checktime_ms, ULONG, ZMOD_RW,
2922 	"Dead I/O check interval in milliseconds");
2923 
2924 ZFS_MODULE_PARAM(zfs_deadman, zfs_deadman_, enabled, INT, ZMOD_RW,
2925 	"Enable deadman timer");
2926 
2927 ZFS_MODULE_PARAM(zfs_spa, spa_, asize_inflation, INT, ZMOD_RW,
2928 	"SPA size estimate multiplication factor");
2929 
2930 ZFS_MODULE_PARAM(zfs, zfs_, ddt_data_is_special, INT, ZMOD_RW,
2931 	"Place DDT data into the special class");
2932 
2933 ZFS_MODULE_PARAM(zfs, zfs_, user_indirect_is_special, INT, ZMOD_RW,
2934 	"Place user data indirect blocks into the special class");
2935 
2936 /* BEGIN CSTYLED */
2937 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, failmode,
2938 	param_set_deadman_failmode, param_get_charp, ZMOD_RW,
2939 	"Failmode for deadman timer");
2940 
2941 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, synctime_ms,
2942 	param_set_deadman_synctime, param_get_ulong, ZMOD_RW,
2943 	"Pool sync expiration time in milliseconds");
2944 
2945 ZFS_MODULE_PARAM_CALL(zfs_deadman, zfs_deadman_, ziotime_ms,
2946 	param_set_deadman_ziotime, param_get_ulong, ZMOD_RW,
2947 	"I/O expiration time in milliseconds");
2948 
2949 ZFS_MODULE_PARAM(zfs, zfs_, special_class_metadata_reserve_pct, INT, ZMOD_RW,
2950 	"Small file blocks in special vdevs depend on this much "
2951 	"free space available");
2952 /* END CSTYLED */
2953 
2954 ZFS_MODULE_PARAM_CALL(zfs_spa, spa_, slop_shift, param_set_slop_shift,
2955 	param_get_int, ZMOD_RW, "Reserved free space in pool");
2956 
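/*
 * Usage sketch (Linux, illustrative): the tunables declared above are
 * exposed as "zfs" module parameters, so the deadman behavior can typically
 * be adjusted at runtime, e.g.:
 *
 *	echo continue > /sys/module/zfs/parameters/zfs_deadman_failmode
 *	echo 600000   > /sys/module/zfs/parameters/zfs_deadman_ziotime_ms
 *
 * The failmode setter only accepts "wait", "continue" or "panic" (see
 * param_set_deadman_failmode_common() above); any other value is rejected
 * with EINVAL.
 */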