/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 * Copyright 2019 Joyent, Inc.
 * Copyright (c) 2017, Intel Corporation.
 * Copyright 2020 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/btree.h>
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref,
 *	but present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */
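
/*
 * An illustrative sketch (not called anywhere) of the documented pattern:
 * a reader holds one config lock around its work, while a writer must hold
 * all of them via SCL_ALL.  FTAG is the conventional caller tag.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	(void) bp_get_dsize_sync(spa, bp);	// trivial vdev-tree inquiry
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
 *	// ... change the vdev config ...
 *	spa_config_exit(spa, SCL_ALL, spa);
 */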

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;
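
/*
 * For example (illustrative; the values shown are hypothetical), the
 * deadman tunables above can be overridden at boot via /etc/system:
 *
 *	set zfs:zfs_deadman_enabled = 1
 *	set zfs:zfs_deadman_synctime_ms = 600000
 */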

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool.  These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
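
/*
 * A worked example of the calculation done by spa_get_slop_space() below:
 * with spa_slop_shift == 5, a 1TB pool reserves 1TB/32 == 32GB of slop.
 * A 1GB pool would compute 1GB/32 == 32MB, which is below spa_min_slop,
 * so it reserves MIN(512MB, 128MB) == 128MB instead.
 */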

int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * let metadata into the class.
 */
int zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}
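
/*
 * Illustrative sketch of spa_config_tryenter() semantics: it either
 * acquires every requested lock and returns nonzero, or acquires none
 * of them (any partially-acquired locks are dropped internally) and
 * returns zero, so the caller never has to unwind on failure:
 *
 *	if (spa_config_tryenter(spa, SCL_VDEV, FTAG, RW_READER)) {
 *		// ... fast path that must not block ...
 *		spa_config_exit(spa, SCL_VDEV, FTAG);
 *	}
 */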

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (TREE_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_imp_kstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT,
		    NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}
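
/*
 * Illustrative sketch of the documented namespace protocol (not actual
 * pool code): creation first checks for a name collision under the
 * namespace lock before adding the new spa_t.
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) == NULL)
 *		spa = spa_add(name, config, NULL);
 *	mutex_exit(&spa_namespace_lock);
 */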

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed
 * and deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);
	mutex_destroy(&spa->spa_imp_kstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference,
 * or have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
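
/*
 * Illustrative sketch of the refcount protocol: a consumer holds the
 * spa_t for the duration of its work and must drop the hold with the
 * same tag it used to acquire it.
 *
 *	spa_open_ref(spa, FTAG);
 *	// ... use the spa_t ...
 *	spa_close(spa, FTAG);
 */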

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (TREE_CMP(sa->aux_guid, sb->aux_guid));
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement
 *	  if the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a
 * replacement spare, then we bump the reference count in the AVL tree.  In
 * addition, we set the 'vdev_isspare' member to indicate that the device is
 * a spare (active or inactive).  When a spare is made active (used to
 * replace a device in the pool), we also keep track of which pool it's been
 * made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need
 * to be completely consistent with respect to other vdev configuration
 * changes.
 */
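
/*
 * For example (illustrative): a disk added as a spare to two pools is
 * represented by a single spa_aux_t whose aux_count is 2, since each
 * pool's vdev_t for the disk carries the same guid.  Guid-based queries
 * then report the shared state:
 *
 *	spa_spare_add(vd_in_pool_a);			// aux_count == 1
 *	spa_spare_add(vd_in_pool_b);			// aux_count == 2
 *	(void) spa_spare_exists(guid, &pool, &refcnt);	// refcnt == 2
 */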

/*
 * Poll the spare vdevs to make sure they are not faulty.
 *
 * The probe operation will raise an ENXIO error and create an FM ereport
 * if the probe fails.
 */
void
spa_spare_poll(spa_t *spa)
{
	boolean_t async_request = B_FALSE;
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	for (int i = 0; i < spa->spa_spares.sav_count; i++) {
		spa_aux_t search, *found;
		vdev_t *vd = spa->spa_spares.sav_vdevs[i];

		search.aux_guid = vd->vdev_guid;

		mutex_enter(&spa_spare_lock);
		found = avl_find(&spa_spare_avl, &search, NULL);
		/* This spare is in use by a pool. */
		if (found != NULL && found->aux_pool != 0) {
			mutex_exit(&spa_spare_lock);
			continue;
		}
		mutex_exit(&spa_spare_lock);

		vd->vdev_probe_wanted = B_TRUE;
		async_request = B_TRUE;
	}
	if (async_request)
		spa_async_request(spa, SPA_ASYNC_PROBE);

	spa_config_exit(spa, SCL_STATE, FTAG);
}

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error,
    char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
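
/*
 * Illustrative sketch of the enter/exit pairing used by vdev add/remove
 * paths ('vd' is a vdev to free on exit, or NULL):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	// ... modify the vdev tree ...
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */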

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is not
		 * held.  Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we
		 * finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	if (range == 1)
		return (0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
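
/*
 * For example (illustrative): zfs_strtonum("1a2f/foo", &end) returns
 * 0x1a2f and leaves 'end' pointing at the '/'.  Uppercase hex digits are
 * not accepted; parsing simply stops at the first non-digit character.
 */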

void
spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx)
{
	/*
	 * We bump the feature refcount for each special vdev added to the
	 * pool.
	 */
	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES));
	spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no
	 * root vdev.  We stash the original pool guid in 'spa_config_guid'
	 * to handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new device).
		 */
		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
		vdev_t *vd =
		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
		spa_config_exit(spa, SCL_VDEV, FTAG);
	}
}

/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

metaslab_class_t *
spa_special_class(spa_t *spa)
{
	return (spa->spa_special_class);
}

metaslab_class_t *
spa_dedup_class(spa_t *spa)
{
	return (spa->spa_dedup_class);
}

/*
 * Locate an appropriate allocation class
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
    uint_t level, uint_t special_smallblk)
{
	if (DMU_OT_IS_ZIL(objtype)) {
		if (spa->spa_log_class->mc_groups != 0)
			return (spa_log_class(spa));
		else
			return (spa_normal_class(spa));
	}

	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa->spa_dedup_class->mc_groups != 0)
			return (spa_dedup_class(spa));
		else if (has_special_class && zfs_ddt_data_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (has_special_class && zfs_user_indirect_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
		if (has_special_class)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file blocks in special class in some cases (like
	 * for the dRAID vdev feature).  But always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && size <= special_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}
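
/*
 * A worked example of the reserve check above: with the default
 * zfs_special_class_metadata_reserve_pct of 25, a 100GB special class
 * accepts small file blocks only while its allocations are under
 * 100GB * (100 - 25) / 100 == 75GB; the remaining 25GB is left
 * exclusively for metadata.
 */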
/*
 * Locate an appropriate allocation class
 */
metaslab_class_t *
spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype,
    uint_t level, uint_t special_smallblk)
{
	if (DMU_OT_IS_ZIL(objtype)) {
		if (spa->spa_log_class->mc_groups != 0)
			return (spa_log_class(spa));
		else
			return (spa_normal_class(spa));
	}

	boolean_t has_special_class = spa->spa_special_class->mc_groups != 0;

	if (DMU_OT_IS_DDT(objtype)) {
		if (spa->spa_dedup_class->mc_groups != 0)
			return (spa_dedup_class(spa));
		else if (has_special_class && zfs_ddt_data_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/* Indirect blocks for user data can land in special if allowed */
	if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) {
		if (has_special_class && zfs_user_indirect_is_special)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	if (DMU_OT_IS_METADATA(objtype) || level > 0) {
		if (has_special_class)
			return (spa_special_class(spa));
		else
			return (spa_normal_class(spa));
	}

	/*
	 * Allow small file blocks in special class in some cases (like
	 * for the dRAID vdev feature).  But always leave a reserve of
	 * zfs_special_class_metadata_reserve_pct exclusively for metadata.
	 */
	if (DMU_OT_IS_FILE(objtype) &&
	    has_special_class && size <= special_smallblk) {
		metaslab_class_t *special = spa_special_class(spa);
		uint64_t alloc = metaslab_class_get_alloc(special);
		uint64_t space = metaslab_class_get_space(special);
		uint64_t limit =
		    (space * (100 - zfs_special_class_metadata_reserve_pct))
		    / 100;

		if (alloc < limit)
			return (special);
	}

	return (spa_normal_class(spa));
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}
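/*
 * Note on the dsize helpers above: the only difference between
 * bp_get_dsize() and bp_get_dsize_sync() is which caller is responsible
 * for the config lock.  A minimal sketch of the calling convention
 * (illustration only, not part of the original source):
 */
#if 0	/* example only */
static uint64_t
dsize_example(spa_t *spa, const blkptr_t *bp)
{
	uint64_t d;

	/* Open context: bp_get_dsize() takes SCL_VDEV as reader itself. */
	d = bp_get_dsize(spa, bp);

	/* Already holding the config lock: use the _sync flavor. */
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	d = bp_get_dsize_sync(spa, bp);
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (d);
}
#endif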
/*
 * ==========================================================================
 * SPA Import Progress Routines
 * The illumos implementation of these is different from OpenZFS.  OpenZFS
 * uses the Linux /proc fs, whereas we use a kstat on the spa.
 * ==========================================================================
 */

typedef struct spa_import_progress {
	kstat_named_t sip_load_state;
	kstat_named_t sip_mmp_sec_remaining;	/* MMP activity check */
	kstat_named_t sip_load_max_txg;		/* rewind txg */
} spa_import_progress_t;

static void
spa_import_progress_init(void)
{
}

static void
spa_import_progress_destroy(void)
{
}

void spa_import_progress_add(spa_t *);

int
spa_import_progress_set_state(spa_t *spa, spa_load_state_t load_state)
{
	if (spa->spa_imp_kstat == NULL)
		spa_import_progress_add(spa);

	mutex_enter(&spa->spa_imp_kstat_lock);
	if (spa->spa_imp_kstat != NULL) {
		spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
		if (sip != NULL)
			sip->sip_load_state.value.ui64 = (uint64_t)load_state;
	}
	mutex_exit(&spa->spa_imp_kstat_lock);

	return (0);
}

int
spa_import_progress_set_max_txg(spa_t *spa, uint64_t load_max_txg)
{
	if (spa->spa_imp_kstat == NULL)
		spa_import_progress_add(spa);

	mutex_enter(&spa->spa_imp_kstat_lock);
	if (spa->spa_imp_kstat != NULL) {
		spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
		if (sip != NULL)
			sip->sip_load_max_txg.value.ui64 = load_max_txg;
	}
	mutex_exit(&spa->spa_imp_kstat_lock);

	return (0);
}

int
spa_import_progress_set_mmp_check(spa_t *spa, uint64_t mmp_sec_remaining)
{
	if (spa->spa_imp_kstat == NULL)
		spa_import_progress_add(spa);

	mutex_enter(&spa->spa_imp_kstat_lock);
	if (spa->spa_imp_kstat != NULL) {
		spa_import_progress_t *sip = spa->spa_imp_kstat->ks_data;
		if (sip != NULL)
			sip->sip_mmp_sec_remaining.value.ui64 =
			    mmp_sec_remaining;
	}
	mutex_exit(&spa->spa_imp_kstat_lock);

	return (0);
}
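/*
 * The kstat maintained by these setters can be observed from userland
 * while an import is in progress, e.g. (illustrative invocation and
 * output; the pool name "tank" is hypothetical):
 *
 *	# kstat -m zfs_import -n tank
 *	module: zfs_import	instance: 0	name: tank	class: zfs_misc
 *		mmp_sec_remaining	0
 *		spa_load_max_txg	0
 *		spa_load_state		3
 */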
/*
 * A new import is in progress.  Add an entry.
 */
void
spa_import_progress_add(spa_t *spa)
{
	char *poolname = NULL;
	spa_import_progress_t *sip;

	mutex_enter(&spa->spa_imp_kstat_lock);
	if (spa->spa_imp_kstat != NULL) {
		sip = spa->spa_imp_kstat->ks_data;
		sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);
		mutex_exit(&spa->spa_imp_kstat_lock);
		return;
	}

	(void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
	    &poolname);
	if (poolname == NULL)
		poolname = spa_name(spa);

	spa->spa_imp_kstat = kstat_create("zfs_import", 0, poolname,
	    "zfs_misc", KSTAT_TYPE_NAMED,
	    sizeof (spa_import_progress_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (spa->spa_imp_kstat != NULL) {
		sip = kmem_alloc(sizeof (spa_import_progress_t), KM_SLEEP);
		spa->spa_imp_kstat->ks_data = sip;

		sip->sip_load_state.value.ui64 = (uint64_t)spa_load_state(spa);

		kstat_named_init(&sip->sip_load_state,
		    "spa_load_state", KSTAT_DATA_UINT64);
		kstat_named_init(&sip->sip_mmp_sec_remaining,
		    "mmp_sec_remaining", KSTAT_DATA_UINT64);
		kstat_named_init(&sip->sip_load_max_txg,
		    "spa_load_max_txg", KSTAT_DATA_UINT64);
		spa->spa_imp_kstat->ks_lock = &spa->spa_imp_kstat_lock;
		kstat_install(spa->spa_imp_kstat);
	}
	mutex_exit(&spa->spa_imp_kstat_lock);
}

void
spa_import_progress_remove(spa_t *spa)
{
	if (spa->spa_imp_kstat != NULL) {
		void *data = spa->spa_imp_kstat->ks_data;

		kstat_delete(spa->spa_imp_kstat);
		spa->spa_imp_kstat = NULL;
		kmem_free(data, sizeof (spa_import_progress_t));
	}
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (TREE_ISIGN(s));
}
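/*
 * Sketch of the comparator contract (illustration only): avl/btree
 * comparators must return exactly -1, 0, or 1, while strcmp() only
 * guarantees a negative, zero, or positive value; TREE_ISIGN() performs
 * that normalization.  For example:
 *
 *	spa_name "rpool" vs "tank"	-> -1	(sorts first)
 *	spa_name "tank" vs "tank"	->  0
 *	spa_name "zones" vs "tank"	->  1
 */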
int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init(void)
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	zfs_refcount_init();
	unique_init();
	zfs_btree_init();
	metaslab_stat_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	vdev_mirror_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
	scan_init();
	spa_import_progress_init();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	vdev_mirror_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	metaslab_stat_fini();
	zfs_btree_fini();
	unique_fini();
	zfs_refcount_fini();
	scan_fini();
	spa_import_progress_destroy();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}
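/*
 * Illustration (a sketch, not part of the original source): status
 * reporting can derive the active (unpaused) time of the current pass
 * from the fields initialized above, approximately:
 *
 *	active = now - spa_scan_pass_start
 *	    - spa_scan_pass_scrub_spent_paused
 *	    - (spa_scan_pass_scrub_pause != 0 ?
 *	    now - spa_scan_pass_scrub_pause : 0);
 */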
/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ?
	    spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

unsigned long
spa_get_hostid(void)
{
	unsigned long myhostid;

#ifdef	_KERNEL
	myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so
	 * we can't use zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */

	return (myhostid);
}
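/*
 * Illustrative note: the hostid returned above feeds the multihost (MMP)
 * machinery (cf. the mmp_sec_remaining progress kstat earlier in this
 * file), which uses it to detect a pool that is actively imported by
 * another node.  A schematic check might look like (hypothetical helper):
 */
#if 0	/* example only */
static boolean_t
hostid_is_ours_example(uint64_t on_disk_hostid)
{
	return (on_disk_hostid == (uint64_t)spa_get_hostid());
}
#endif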
/*
 * Returns the txg that the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == FREAD);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}
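/*
 * Worked example for spa_suspend_async_destroy() (illustrative numbers):
 * with a checkpoint held, suppose dsl_pool_unreserved_space() reports
 * 100G and the root dsl_dir already has 100G (or more) charged to
 * dd_used_bytes; then avail clamps to 0 and the function returns B_TRUE,
 * pausing async destroys until enough frees complete for avail to rise
 * above zero.
 */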