/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *	spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *	spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *	spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
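
/*
 * Usage sketch (illustration only): per the rules above, a trivial
 * vdev-tree inquiry takes a single lock as reader, while a topology
 * change takes all locks as writer.  The helper names below are
 * hypothetical and exist only to show the bracketing pattern.
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	(void) example_inspect_vdev_tree(spa->spa_root_vdev);
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 *	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
 *	example_change_vdev_topology(spa);
 *	spa_config_exit(spa, SCL_ALL, FTAG);
 */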

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *    (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
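
/*
 * Worked example (illustration only): with VDEV_RAIDZ_MAXPARITY == 3 and
 * SPA_DVAS_PER_BP == 3 the factor above is (3 + 1) * 3 * 2 == 24, so a
 * 128K logical block is assumed to consume at most 128K * 24 == 3M of
 * allocated space in the worst case (see spa_get_worst_case_asize()
 * later in this file).
 */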

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;

/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s): FAILED: %s", spa->spa_name, buf);
}

/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	zfs_dbgmsg("spa_load(%s): %s", spa->spa_name, buf);
}

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}
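
/*
 * Usage sketch (illustration only; the fallback helper is hypothetical):
 * spa_config_tryenter() returns nonzero on success and 0 if the locks
 * could not be acquired without blocking, so a caller that must not
 * sleep can defer its work instead:
 *
 *	if (!spa_config_tryenter(spa, SCL_ZIO, FTAG, RW_READER)) {
 *		example_defer_work(spa);
 *		return;
 *	}
 *	... perform the I/O ...
 *	spa_config_exit(spa, SCL_ZIO, FTAG);
 */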

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT3U(wlocks_held, <=, locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
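
/*
 * Usage sketch (illustration only; the dataset name is hypothetical):
 * because spa_lookup() truncates a full dataset name at the first '/',
 * '@' or '#', a caller holding the namespace lock can resolve the owning
 * pool directly:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa_t *spa = spa_lookup("tank/home@snap");	// finds pool "tank"
 *	mutex_exit(&spa_namespace_lock);
 */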

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	avl_create(&spa->spa_alloc_tree, zio_bookmark_compare,
	    sizeof (zio_t), offsetof(zio_t, io_alloc_node));

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}
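
/*
 * Usage sketch (illustration only): spa_add() does not check for
 * duplicates itself, so creation paths are expected to look the name up
 * first while holding the namespace lock, mirroring the contract stated
 * in the comment above spa_add():
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) != NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (SET_ERROR(EEXIST));
 *	}
 *	spa = spa_add(name, config, altroot);
 *	mutex_exit(&spa_namespace_lock);
 */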

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	avl_destroy(&spa->spa_alloc_tree);
	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_alloc_lock);
	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}
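
/*
 * Usage sketch (illustration only): short-lived consumers bracket their
 * use of a spa_t with a reference so the pool cannot be freed out from
 * under them; FTAG is the conventional tag.  The namespace lock is only
 * required if the refcount could currently be at its minimum.
 *
 *	spa_open_ref(spa, FTAG);
 *	... use spa ...
 *	spa_close(spa, FTAG);
 */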

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}
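
/*
 * Usage sketch (illustration only): the reference-counted aux tree lets a
 * status-query path ask whether a guid is a known spare, and whether it
 * is currently active in some pool, without taking the namespace lock:
 *
 *	uint64_t pool;
 *	int refcnt;
 *	if (spa_spare_exists(guid, &pool, &refcnt) && pool != 0ULL)
 *		... the spare is currently active in pool 'pool' ...
 */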

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}
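
/*
 * Usage sketch (illustration only; the helper in the middle is
 * hypothetical): a vdev topology change brackets its work with
 * spa_vdev_enter()/spa_vdev_exit(), using the returned txg, and lets
 * spa_vdev_exit() wait for the sync and update the config cache:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	error = example_modify_vdev_tree(spa, txg);
 *	return (spa_vdev_exit(spa, NULL, txg, error));
 */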

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_write_cachefile(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because the SCL_CONFIG lock is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_write_cachefile(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

boolean_t
spa_indirect_vdevs_loaded(spa_t *spa)
{
	return (spa->spa_indirect_vdevs_loaded);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/*
 * Return the last txg where data can be dirtied.  The final txgs
 * will be used to just clear out any deferred frees that remain.
 */
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
	return (spa->spa_final_txg - TXG_DEFER_SIZE);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new
		 * device).
		 */
		vdev_t *vd = spa->spa_vdev_removal->svr_vdev;
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
	}
}
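
/*
 * Worked example for spa_get_slop_space() above (illustration only):
 * with spa_slop_shift == 5, a 1T pool reserves 1T/32 == 32G of slop; a
 * 2G pool would compute only 64M, so it is raised to spa_min_slop
 * (128M); and a 200M pool is capped at half its size, i.e. 100M.
 */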

/*
 * Return the failure mode that has been set to this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	metaslab_alloc_trace_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	metaslab_alloc_trace_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}

/*
 * Returns the txg that the last device removal completed.  No indirect mappings
 * have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}
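
/*
 * Usage sketch (illustration only): a status-reporting caller copies the
 * scan statistics out via spa_scan_get_stats() above, where an ENOENT
 * return means no scan has been performed on this pool:
 *
 *	pool_scan_stat_t ps;
 *	if (spa_scan_get_stats(spa, &ps) == 0)
 *		... report ps.pss_func, ps.pss_examined, ps.pss_errors ...
 */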