/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 * Copyright (c) 2017, Intel Corporation.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- Add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion. A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t. The
 *	spa_t cannot be destroyed or freed while this is non-zero. Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU. Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace. This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t. Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t. This will
 *				not free the spa_t or remove it from the
 *				namespace. No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero. Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer. To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership. For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach. Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear. Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree. The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free(). SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs. For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption. When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress. While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked". Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers. In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks. However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug). In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless. In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do. Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system. By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that. Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

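/*
 * Illustrative sketch (not part of the original code, kept out of the build):
 * how the inflation factor above is applied. A logical write of lsize bytes
 * is budgeted at lsize * spa_asize_inflation bytes of allocated space, the
 * same multiplication performed by spa_get_worst_case_asize() later in this
 * file; e.g. a 128KB logical block is budgeted at 128KB * 24 = 3MB.
 */
#if 0
static uint64_t
example_worst_case_asize(uint64_t lsize)
{
	/* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 4 * 3 * 2 == 24 */
	return (lsize * spa_asize_inflation);
}
#endif
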
/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed. This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space. If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space. They will only return ENOSPC if less than half
 * the slop space is free. Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space. These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;

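/*
 * Worked example (illustrative only, using the defaults above): slop space
 * is MAX(poolsize >> spa_slop_shift, MIN(poolsize >> 1, spa_min_slop)), the
 * same expression evaluated by spa_get_slop_space() later in this file.
 * For a 10TB pool that is 10TB / 32 = 320GB; for a 1GB pool the 128MB floor
 * applies (12.5% of the pool); for a 100MB pool the result is capped at half
 * the pool, i.e. 50MB.
 */
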
int spa_allocators = 4;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
	    spa->spa_trust_config ? "trusted" : "untrusted", buf);
}

/*
 * By default dedup and user data indirects land in the special class
 */
int zfs_ddt_data_is_special = B_TRUE;
int zfs_user_indirect_is_special = B_TRUE;

/*
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * let metadata into the class.
 */
int zfs_special_class_metadata_reserve_pct = 25;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		zfs_refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		zfs_refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!zfs_refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!zfs_refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) zfs_refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

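/*
 * Usage sketch for the interfaces above (illustrative only, kept out of the
 * build): a reader of the vdev tree takes the relevant config lock(s) as
 * RW_READER around the traversal, matching the rules documented at the top
 * of this file. The example function name is hypothetical.
 */
#if 0
static uint64_t
example_count_top_level_vdevs(spa_t *spa)
{
	uint64_t children;

	/* SCL_VDEV as reader is enough for trivial vdev tree inquiries. */
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	children = spa->spa_root_vdev->vdev_children;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (children);
}
#endif
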
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER &&
		    !zfs_refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

int
spa_log_sm_sort_by_txg(const void *va, const void *vb)
{
	const spa_log_sm_t *a = va;
	const spa_log_sm_t *b = vb;

	return (AVL_CMP(a->sls_txg, b->sls_txg));
}

/*
 * Create an uninitialized spa_t with the given name. Requires
 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;
	spa->spa_trust_config = B_TRUE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	zfs_refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	spa->spa_alloc_count = spa_allocators;
	spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (kmutex_t), KM_SLEEP);
	spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count *
	    sizeof (avl_tree_t), KM_SLEEP);
	for (int i = 0; i < spa->spa_alloc_count; i++) {
		mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL);
		avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare,
		    sizeof (zio_t), offsetof(zio_t, io_alloc_node));
	}
	avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
	    sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
	avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
	    sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
	list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
	    offsetof(log_summary_entry_t, lse_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	list_create(&spa->spa_leaf_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_leaf_node));

	return (spa);
}

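/*
 * Illustrative sketch (kept out of the build): the lookup-then-add pattern
 * that the comment above spa_add() describes, performed under
 * spa_namespace_lock. The function name is hypothetical.
 */
#if 0
static spa_t *
example_find_or_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup(name)) == NULL)
		spa = spa_add(name, config, altroot);
	mutex_exit(&spa_namespace_lock);

	return (spa);
}
#endif
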
/*
 * Removes a spa_t from the namespace, freeing up any memory used. Requires
 * spa_namespace_lock. This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_state(spa) == POOL_STATE_UNINITIALIZED);
	ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	for (int i = 0; i < spa->spa_alloc_count; i++) {
		avl_destroy(&spa->spa_alloc_trees[i]);
		mutex_destroy(&spa->spa_alloc_locks[i]);
	}
	kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count *
	    sizeof (kmutex_t));
	kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count *
	    sizeof (avl_tree_t));

	avl_destroy(&spa->spa_metaslabs_by_flushed);
	avl_destroy(&spa->spa_sm_logs_by_txg);
	list_destroy(&spa->spa_log_summary);
	list_destroy(&spa->spa_config_list);
	list_destroy(&spa->spa_leaf_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	zfs_refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_flushed_ms_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none. If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

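/*
 * Illustrative sketch (kept out of the build): walking every pool in the
 * namespace with spa_next() while holding spa_namespace_lock, as required
 * by the locking rules at the top of this file.
 */
#if 0
static void
example_walk_namespace(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL)
		zfs_dbgmsg("pool: %s", spa_name(spa));
	mutex_exit(&spa_namespace_lock);
}
#endif
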
/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t. Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released. Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs. The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero. Must be called with
 * spa_namespace_lock held. We really compare against spa_minref, which is the
 * number of references acquired when opening a pool
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

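/*
 * Illustrative sketch (kept out of the build): the hold/release pattern for
 * the functions above. A caller that already holds spa_namespace_lock may
 * take the first reference; spa_close() never needs the namespace lock.
 */
#if 0
static void
example_hold_and_release(spa_t *spa)
{
	mutex_enter(&spa_namespace_lock);
	spa_open_ref(spa, FTAG);	/* may take the refcount from "zero" */
	mutex_exit(&spa_namespace_lock);

	/* ... use the spa_t ... */

	spa_close(spa, FTAG);		/* no locking required */
}
#endif
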
/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static inline int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = (const spa_aux_t *)a;
	const spa_aux_t *sb = (const spa_aux_t *)b;

	return (AVL_CMP(sa->aux_guid, sb->aux_guid));
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree. When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree. In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive). When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree. These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration. The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);

	vdev_autotrim_stop_all(spa);

	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter(). Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_special_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_dedup_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it. This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		if (vd->vdev_ops->vdev_op_leaf) {
			mutex_enter(&vd->vdev_initialize_lock);
			vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED,
			    NULL);
			mutex_exit(&vd->vdev_initialize_lock);

			mutex_enter(&vd->vdev_trim_lock);
			vdev_trim_stop(vd, VDEV_TRIM_CANCELED, NULL);
			mutex_exit(&vd->vdev_trim_lock);
		}

		/*
		 * The vdev may be both a leaf and top-level device.
		 */
		vdev_autotrim_stop_wait(vd);

		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev. Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	vdev_autotrim_restart(spa);

	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

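/*
 * Illustrative sketch (kept out of the build): the bracketing pattern for
 * vdev configuration changes described above. The body between enter and
 * exit is hypothetical; real callers are operations such as zpool add.
 */
#if 0
static int
example_vdev_config_change(spa_t *spa)
{
	uint64_t txg;
	int error = 0;

	txg = spa_vdev_enter(spa);

	/* ... modify the vdev tree while holding SCL_ALL as writer ... */

	return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif
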
/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev. Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem. Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync. This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous. This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because the SCL_CONFIG lock is not
		 * held. Thankfully, in this case we don't need to dirty the
		 * config because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists. If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	if (range == 1)
		return (0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

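/*
 * Illustrative sketch (kept out of the build): formatting a block pointer
 * with snprintf_blkptr() above. BP_SPRINTF_LEN is the buffer size used for
 * this purpose elsewhere in ZFS.
 */
#if 0
static void
example_log_blkptr(const blkptr_t *bp)
{
	char blkbuf[BP_SPRINTF_LEN];

	snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
	zfs_dbgmsg("bp: %s", blkbuf);
}
#endif
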
CE_WARN : CE_PANIC, fmt, adx); 1537 va_end(adx); 1538 } 1539 1540 /* 1541 * This is a stripped-down version of strtoull, suitable only for converting 1542 * lowercase hexadecimal numbers that don't overflow. 1543 */ 1544 uint64_t 1545 zfs_strtonum(const char *str, char **nptr) 1546 { 1547 uint64_t val = 0; 1548 char c; 1549 int digit; 1550 1551 while ((c = *str) != '\0') { 1552 if (c >= '0' && c <= '9') 1553 digit = c - '0'; 1554 else if (c >= 'a' && c <= 'f') 1555 digit = 10 + c - 'a'; 1556 else 1557 break; 1558 1559 val *= 16; 1560 val += digit; 1561 1562 str++; 1563 } 1564 1565 if (nptr) 1566 *nptr = (char *)str; 1567 1568 return (val); 1569 } 1570 1571 void 1572 spa_activate_allocation_classes(spa_t *spa, dmu_tx_t *tx) 1573 { 1574 /* 1575 * We bump the feature refcount for each special vdev added to the pool 1576 */ 1577 ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_ALLOCATION_CLASSES)); 1578 spa_feature_incr(spa, SPA_FEATURE_ALLOCATION_CLASSES, tx); 1579 } 1580 1581 /* 1582 * ========================================================================== 1583 * Accessor functions 1584 * ========================================================================== 1585 */ 1586 1587 boolean_t 1588 spa_shutting_down(spa_t *spa) 1589 { 1590 return (spa->spa_async_suspended); 1591 } 1592 1593 dsl_pool_t * 1594 spa_get_dsl(spa_t *spa) 1595 { 1596 return (spa->spa_dsl_pool); 1597 } 1598 1599 boolean_t 1600 spa_is_initializing(spa_t *spa) 1601 { 1602 return (spa->spa_is_initializing); 1603 } 1604 1605 boolean_t 1606 spa_indirect_vdevs_loaded(spa_t *spa) 1607 { 1608 return (spa->spa_indirect_vdevs_loaded); 1609 } 1610 1611 blkptr_t * 1612 spa_get_rootblkptr(spa_t *spa) 1613 { 1614 return (&spa->spa_ubsync.ub_rootbp); 1615 } 1616 1617 void 1618 spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1619 { 1620 spa->spa_uberblock.ub_rootbp = *bp; 1621 } 1622 1623 void 1624 spa_altroot(spa_t *spa, char *buf, size_t buflen) 1625 { 1626 if (spa->spa_root == NULL) 1627 buf[0] = '\0'; 1628 else 1629 (void) strncpy(buf, spa->spa_root, buflen); 1630 } 1631 1632 int 1633 spa_sync_pass(spa_t *spa) 1634 { 1635 return (spa->spa_sync_pass); 1636 } 1637 1638 char * 1639 spa_name(spa_t *spa) 1640 { 1641 return (spa->spa_name); 1642 } 1643 1644 uint64_t 1645 spa_guid(spa_t *spa) 1646 { 1647 dsl_pool_t *dp = spa_get_dsl(spa); 1648 uint64_t guid; 1649 1650 /* 1651 * If we fail to parse the config during spa_load(), we can go through 1652 * the error path (which posts an ereport) and end up here with no root 1653 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1654 * this case. 1655 */ 1656 if (spa->spa_root_vdev == NULL) 1657 return (spa->spa_config_guid); 1658 1659 guid = spa->spa_last_synced_guid != 0 ? 1660 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1661 1662 /* 1663 * Return the most recently synced out guid unless we're 1664 * in syncing context. 1665 */ 1666 if (dp && dsl_pool_sync_context(dp)) 1667 return (spa->spa_root_vdev->vdev_guid); 1668 else 1669 return (guid); 1670 } 1671 1672 uint64_t 1673 spa_load_guid(spa_t *spa) 1674 { 1675 /* 1676 * This is a GUID that exists solely as a reference for the 1677 * purposes of the arc. It is generated at load time, and 1678 * is never written to persistent storage. 
1679 */ 1680 return (spa->spa_load_guid); 1681 } 1682 1683 uint64_t 1684 spa_last_synced_txg(spa_t *spa) 1685 { 1686 return (spa->spa_ubsync.ub_txg); 1687 } 1688 1689 uint64_t 1690 spa_first_txg(spa_t *spa) 1691 { 1692 return (spa->spa_first_txg); 1693 } 1694 1695 uint64_t 1696 spa_syncing_txg(spa_t *spa) 1697 { 1698 return (spa->spa_syncing_txg); 1699 } 1700 1701 /* 1702 * Return the last txg where data can be dirtied. The final txgs 1703 * will be used to just clear out any deferred frees that remain. 1704 */ 1705 uint64_t 1706 spa_final_dirty_txg(spa_t *spa) 1707 { 1708 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1709 } 1710 1711 pool_state_t 1712 spa_state(spa_t *spa) 1713 { 1714 return (spa->spa_state); 1715 } 1716 1717 spa_load_state_t 1718 spa_load_state(spa_t *spa) 1719 { 1720 return (spa->spa_load_state); 1721 } 1722 1723 uint64_t 1724 spa_freeze_txg(spa_t *spa) 1725 { 1726 return (spa->spa_freeze_txg); 1727 } 1728 1729 /* ARGSUSED */ 1730 uint64_t 1731 spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1732 { 1733 return (lsize * spa_asize_inflation); 1734 } 1735 1736 /* 1737 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%), 1738 * or at least 128MB, unless that would cause it to be more than half the 1739 * pool size. 1740 * 1741 * See the comment above spa_slop_shift for details. 1742 */ 1743 uint64_t 1744 spa_get_slop_space(spa_t *spa) 1745 { 1746 uint64_t space = spa_get_dspace(spa); 1747 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); 1748 } 1749 1750 uint64_t 1751 spa_get_dspace(spa_t *spa) 1752 { 1753 return (spa->spa_dspace); 1754 } 1755 1756 uint64_t 1757 spa_get_checkpoint_space(spa_t *spa) 1758 { 1759 return (spa->spa_checkpoint_info.sci_dspace); 1760 } 1761 1762 void 1763 spa_update_dspace(spa_t *spa) 1764 { 1765 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1766 ddt_get_dedup_dspace(spa); 1767 if (spa->spa_vdev_removal != NULL) { 1768 /* 1769 * We can't allocate from the removing device, so 1770 * subtract its size. This prevents the DMU/DSL from 1771 * filling up the (now smaller) pool while we are in the 1772 * middle of removing the device. 1773 * 1774 * Note that the DMU/DSL doesn't actually know or care 1775 * how much space is allocated (it does its own tracking 1776 * of how much space has been logically used). So it 1777 * doesn't matter that the data we are moving may be 1778 * allocated twice (on the old device and the new 1779 * device). 1780 */ 1781 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1782 vdev_t *vd = 1783 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1784 spa->spa_dspace -= spa_deflate(spa) ? 1785 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 1786 spa_config_exit(spa, SCL_VDEV, FTAG); 1787 } 1788 } 1789 1790 /* 1791 * Return the failure mode that has been set to this pool. The default 1792 * behavior will be to block all I/Os when a complete failure occurs. 
1793 */ 1794 uint8_t 1795 spa_get_failmode(spa_t *spa) 1796 { 1797 return (spa->spa_failmode); 1798 } 1799 1800 boolean_t 1801 spa_suspended(spa_t *spa) 1802 { 1803 return (spa->spa_suspended != ZIO_SUSPEND_NONE); 1804 } 1805 1806 uint64_t 1807 spa_version(spa_t *spa) 1808 { 1809 return (spa->spa_ubsync.ub_version); 1810 } 1811 1812 boolean_t 1813 spa_deflate(spa_t *spa) 1814 { 1815 return (spa->spa_deflate); 1816 } 1817 1818 metaslab_class_t * 1819 spa_normal_class(spa_t *spa) 1820 { 1821 return (spa->spa_normal_class); 1822 } 1823 1824 metaslab_class_t * 1825 spa_log_class(spa_t *spa) 1826 { 1827 return (spa->spa_log_class); 1828 } 1829 1830 metaslab_class_t * 1831 spa_special_class(spa_t *spa) 1832 { 1833 return (spa->spa_special_class); 1834 } 1835 1836 metaslab_class_t * 1837 spa_dedup_class(spa_t *spa) 1838 { 1839 return (spa->spa_dedup_class); 1840 } 1841 1842 /* 1843 * Locate an appropriate allocation class 1844 */ 1845 metaslab_class_t * 1846 spa_preferred_class(spa_t *spa, uint64_t size, dmu_object_type_t objtype, 1847 uint_t level, uint_t special_smallblk) 1848 { 1849 if (DMU_OT_IS_ZIL(objtype)) { 1850 if (spa->spa_log_class->mc_groups != 0) 1851 return (spa_log_class(spa)); 1852 else 1853 return (spa_normal_class(spa)); 1854 } 1855 1856 boolean_t has_special_class = spa->spa_special_class->mc_groups != 0; 1857 1858 if (DMU_OT_IS_DDT(objtype)) { 1859 if (spa->spa_dedup_class->mc_groups != 0) 1860 return (spa_dedup_class(spa)); 1861 else if (has_special_class && zfs_ddt_data_is_special) 1862 return (spa_special_class(spa)); 1863 else 1864 return (spa_normal_class(spa)); 1865 } 1866 1867 /* Indirect blocks for user data can land in special if allowed */ 1868 if (level > 0 && (DMU_OT_IS_FILE(objtype) || objtype == DMU_OT_ZVOL)) { 1869 if (has_special_class && zfs_user_indirect_is_special) 1870 return (spa_special_class(spa)); 1871 else 1872 return (spa_normal_class(spa)); 1873 } 1874 1875 if (DMU_OT_IS_METADATA(objtype) || level > 0) { 1876 if (has_special_class) 1877 return (spa_special_class(spa)); 1878 else 1879 return (spa_normal_class(spa)); 1880 } 1881 1882 /* 1883 * Allow small file blocks in special class in some cases (like 1884 * for the dRAID vdev feature). But always leave a reserve of 1885 * zfs_special_class_metadata_reserve_pct exclusively for metadata. 

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated. Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

spa_autotrim_t
spa_get_autotrim(spa_t *spa)
{
	return (spa->spa_autotrim);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

uint64_t
spa_dirty_data(spa_t *spa)
{
	return (spa->spa_dsl_pool->dp_dirty_total);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);

	return (AVL_ISIGN(s));
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	zfs_refcount_init();
	unique_init();
	range_tree_init();
	metaslab_alloc_trace_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	vdev_mirror_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
	scan_init();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	vdev_mirror_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	metaslab_alloc_trace_fini();
	range_tree_fini();
	unique_fini();
	zfs_refcount_fini();
	scan_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
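
/*
 * A minimal usage sketch for the init/fini pair above, roughly as a
 * userland consumer such as a libzpool-based tool might drive it; the
 * mode flags and ordering shown are illustrative, not a fixed contract:
 *
 *	spa_init(FREAD | FWRITE);	(bring up the SPA subsystems)
 *	...import/open pools, do work...
 *	spa_fini();			(tear everything back down)
 *
 * spa_fini() undoes the work of spa_init() in roughly the reverse
 * order, after evicting any remaining spa_t structures via
 * spa_evict_all().
 */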

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}
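
/*
 * A small consumer-side sketch of the interface above; the progress
 * arithmetic shown is illustrative only, not existing code:
 *
 *	pool_scan_stat_t ps;
 *
 *	if (spa_scan_get_stats(spa, &ps) == 0 && ps.pss_to_examine != 0) {
 *		(void) printf("examined %llu of %llu bytes\n",
 *		    (u_longlong_t)ps.pss_examined,
 *		    (u_longlong_t)ps.pss_to_examine);
 *	}
 *
 * A return value of ENOENT means there is no scan (past or current) to
 * report on.
 */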

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

int
spa_maxdnodesize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
		return (DNODE_MAX_SIZE);
	else
		return (DNODE_MIN_SIZE);
}

boolean_t
spa_multihost(spa_t *spa)
{
	return (spa->spa_multihost ? B_TRUE : B_FALSE);
}

unsigned long
spa_get_hostid(void)
{
	unsigned long myhostid;

#ifdef _KERNEL
	myhostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so
	 * we can't use zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif	/* _KERNEL */

	return (myhostid);
}

/*
 * Returns the txg in which the last device removal completed. No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

space_map_t *
spa_syncing_log_sm(spa_t *spa)
{
	return (spa->spa_syncing_log_sm);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == FREAD);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it. In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}
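
/*
 * An illustrative reading of the check above, with assumed numbers: if
 * the pool has a checkpoint, 900GB of unreserved space, and the root
 * dsl_dir already charges 900GB (or more) of used bytes, then avail
 * computes to 0 and async destroy processing is suspended; it can
 * resume once avail becomes nonzero again or the checkpoint is
 * discarded.
 */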