/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from zero to non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
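 *
 *	As a hedged illustration only (not an actual caller in this file),
 *	a consumer that needs a long-lived reference would typically follow
 *	this pattern, using only the functions documented below ("tank" is
 *	a hypothetical pool name):
 *
 *		mutex_enter(&spa_namespace_lock);
 *		spa_t *spa = spa_lookup("tank");
 *		if (spa != NULL)
 *			spa_open_ref(spa, FTAG);
 *		mutex_exit(&spa_namespace_lock);
 *		...
 *		spa_close(spa, FTAG);		(no lock required)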
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.
 *	Protects the dirty state list (spa_state_dirty_list) and global
 *	pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and
 *	spa_config_list.  The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config
 *	cannot change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Override the zfs deadman behavior via /etc/system.  By default the
 * deadman is enabled except on VMware and sparc deployments.
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 * These are the operations that call dsl_pool_adjustedsize() with the netfree
 * argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
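				/*
				 * A waiting writer blocks new readers: both
				 * spa_config_enter() and spa_config_tryenter()
				 * check scl_write_wanted before granting a
				 * read hold, which provides the writer
				 * anti-starvation behavior described in the
				 * block comment at the top of this file.
				 */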
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be
 * held.  Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
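 *
 * A hedged sketch of the expected calling pattern (hypothetical caller;
 * "tank" and the 'config'/'altroot' arguments are illustrative only):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup("tank") == NULL)
 *		spa = spa_add("tank", config, altroot);
 *	mutex_exit(&spa_namespace_lock);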
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
	cyc_handler_t hdlr;
	cyc_time_t when;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_iokstat_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);

	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_iokstat = kstat_create("zfs", 0, name,
	    "disk", KSTAT_TYPE_IO, 1, 0);
	if (spa->spa_iokstat) {
		spa->spa_iokstat->ks_lock = &spa->spa_iokstat_lock;
		kstat_install(spa->spa_iokstat);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	kstat_delete(spa->spa_iokstat);
	spa->spa_iokstat = NULL;

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);
	mutex_destroy(&spa->spa_iokstat_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
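 *
 * A hedged usage sketch for the status query path (hypothetical caller;
 * the variables shown are illustrative only):
 *
 *	uint64_t pool;
 *	int refs;
 *	if (spa_spare_exists(vd->vdev_guid, &pool, &refs))
 *		... the device is a known spare; 'pool' is non-zero when it
 *		    is active in some pool, and 'refs' counts the pools that
 *		    reference it ...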
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
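 *
 * A hedged sketch of the overall enter/exit pattern (hypothetical vdev
 * operation; 'newvd' and the error handling are illustrative only):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... add or remove vdevs and dirty the config ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));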
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
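	 *
	 * The code below therefore acquires the requested locks above
	 * SCL_ZIO ("high") first, holds the vdev tree via vdev_hold(),
	 * and only then acquires SCL_ZIO and the locks below it ("low").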
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we
		 * can't dirty the vdev config because lock SCL_CONFIG is
		 * not held.  Thankfully, in this case we don't need to
		 * dirty the config because it will be written out anyway
		 * when we finish creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.
	 * It will pick up the new pool name during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of
			 * adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		if (!BP_IS_EMBEDDED(bp)) {
			checksum =
			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		}
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
	    compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no
	 * root vdev.  We stash the original pool guid in 'spa_config_guid'
	 * to handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 128MB, unless that would cause it to be more than half the
 * pool size.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);
	return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif

	refcount_init();
	unique_init();
	range_tree_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}