// SPDX-License-Identifier: CDDL-1.0
 * or https://opensource.org/licenses/CDDL-1.0.
 * - Lookup a spa_t by name
 * - Add or remove a spa_t from the namespace
 * - Increase spa_refcount from non-zero
 * - Check if spa_refcount is zero
 * - Rename a spa_t
 * - add/remove/attach/detach devices
 * - Held for the duration of create/destroy
 * - Held at the start and end of import and export
 * spa_refcount (per-spa zfs_refcount_t protected by mutex)
 *
 *      spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *      the refcount is never really 'zero' - opening a pool implicitly keeps
 *      some references in the DMU.  Internally we check against spa_minref,
 *      but present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *      This protects the spa_t from config changes, and must be held in
 *      the following circumstances:
 *
 *      - RW_READER to perform I/O to the spa
 *      - RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *      spa_namespace_lock -> spa_refcount
 *
 *      spa_refcount -> spa_config_lock[]
 *
 *              There must be at least one valid reference on the spa_t to
 *              acquire the config lock.
 *
 *      spa_namespace_lock -> spa_config_lock[]
 *
 *              The namespace lock must always be taken before the config lock.
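
/*
 * A minimal usage sketch (not part of this file) of the ordering rules
 * above: the namespace lock is taken first, a reference is held across
 * the config-lock section, and the config lock is taken last.  "tank"
 * is a hypothetical pool name; FTAG is the usual caller tag.
 */
static void
example_lock_order(void)
{
        spa_t *spa;

        mutex_enter(&spa_namespace_lock);
        spa = spa_lookup("tank");               /* requires namespace lock */
        if (spa != NULL)
                spa_open_ref(spa, FTAG);        /* refcount before config lock */
        mutex_exit(&spa_namespace_lock);

        if (spa == NULL)
                return;

        spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
        /* ... the vdev configuration cannot change here ... */
        spa_config_exit(spa, SCL_CONFIG, FTAG);

        mutex_enter(&spa_namespace_lock);
        spa_close(spa, FTAG);
        mutex_exit(&spa_namespace_lock);
}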
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * SCL_CONFIG
 *      Protects changes to the vdev tree topology, such as vdev
 *      add/remove/attach/detach.  Protects the dirty config list
 *      (spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_ZIO
 *      Held by bp-level zios (those which have no io_vd upon entry)
 *      to prevent changes to the vdev tree.  The bp-level zio implicitly
 *      protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * For any zio operation that takes
 * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 * or zio_write_phys() -- the caller must ensure that the config cannot
 * change in the middle of the I/O.
 *
 *      spa_vdev_enter()        Acquire the namespace lock and the config lock
 *                              for writing.
 *
 *      spa_vdev_exit()         Release the config lock, wait for all I/O
 *                              to complete, sync the updated configs to the
 *                              cache, and release the namespace lock.
 *
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
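
/*
 * Hedged sketch of the spa_vdev_enter()/spa_vdev_exit() pattern described
 * above; the actual vdev manipulation is elided and "newvd" is illustrative.
 */
static int
example_vdev_change(spa_t *spa, vdev_t *newvd)
{
        uint64_t txg;

        txg = spa_vdev_enter(spa);      /* namespace lock + config lock, writer */

        /* ... add/remove/attach/detach work goes here ... */

        /* Drops the locks, waits for txg to sync, updates the config cache. */
        return (spa_vdev_exit(spa, newvd, txg, 0));
}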
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 *
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set zfs_free_leak_on_eio to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. a disk returning the wrong data due to a bit
 * flip or firmware bug).  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  We chose the more
 * conservative approach of not setting the flag, to avoid any possibility of
 * leaking space in the "partial temporary" failure case.
 *      wait     - Wait for the "hung" I/O (default)
 *      continue - Attempt to recover from a "hung" I/O
 *      panic    - Panic the system
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we
 * don't run the pool completely out of space, due to unaccounted changes (e.g.
 * to the MOS).  It also limits the worst-case time to allocate space.  If we
 * have less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.  The ZIL metaslabs (spa_embedded_log_class) are
 * also part of this 3.2% of space which can't be consumed by normal writes;
 * the slop space "proper" (spa_get_slop_space()) is decreased by the embedded
 * log space.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the pool.
 *
 * Operations that are almost guaranteed to free up space in the absence of
 * a pool checkpoint can use up to three quarters of the slop space
 * (e.g. zfs destroy).
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE).  If these operations result in a net
 * increase in the amount of space used, it is possible to run the pool
 * completely out of space, causing it to be permanently read-only.
 *
 * Note that on very small pools, the slop space will be larger than
 * 3.2%, in an effort to have it be at least spa_min_slop (128MB),
 * but we never allow it to be more than half the pool size.
 *
 * Further, on very large pools, the slop space will be smaller than
 * 3.2%, to avoid reserving much more space than we actually need; bounded
 * by spa_max_slop (128GB).
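 *
 * Worked illustration of the bounds above (assuming the shipped defaults
 * spa_slop_shift = 5, spa_min_slop = 128MB, spa_max_slop = 128GB): a 1GB
 * pool computes 1GB / 32 = 32MB of slop, which is raised to the 128MB
 * floor (but never above half the pool); a 100TB pool computes 3.125TB,
 * which is capped at 128GB.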
 * Valid values are zfs_active_allocator=<dynamic|cursor|new-dynamic>.
        zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name,
            spa->spa_trust_config ? "trusted" : "untrusted", buf);

        zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name,
            spa->spa_trust_config ? "trusted" : "untrusted", buf);
 * The percentage of special class final space reserved for metadata only.
 * Once we allocate 100 - zfs_special_class_metadata_reserve_pct we only
 * allow metadata into the class.
 *
 * SPA config locking
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
                cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
                scl->scl_writer = NULL;
                scl->scl_write_wanted = 0;
                scl->scl_count = 0;

                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                mutex_destroy(&scl->scl_lock);
                cv_destroy(&scl->scl_cv);
                ASSERT(scl->scl_writer == NULL);
                ASSERT(scl->scl_write_wanted == 0);
                ASSERT(scl->scl_count == 0);
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        if (scl->scl_writer || scl->scl_write_wanted) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks & ((1 << i) - 1),
                                    tag);
                                return (0);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        if (scl->scl_count != 0) {
                                mutex_exit(&scl->scl_lock);
                                spa_config_exit(spa, locks & ((1 << i) - 1),
                                    tag);
                                return (0);
                        }
                        scl->scl_writer = curthread;
                }
                scl->scl_count++;
                mutex_exit(&scl->scl_lock);
        }
        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (scl->scl_writer == curthread)
                        wlocks_held |= (1 << i);
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        while (scl->scl_writer ||
                            (!mmp_flag && scl->scl_write_wanted)) {
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        while (scl->scl_count != 0) {
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
                        }
                        scl->scl_writer = curthread;
                }
                scl->scl_count++;
                mutex_exit(&scl->scl_lock);
        }
        for (int i = SCL_LOCKS - 1; i >= 0; i--) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                ASSERT(scl->scl_count > 0);
                if (--scl->scl_count == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL; /* OK in either case */
                        cv_broadcast(&scl->scl_cv);
                }
                mutex_exit(&scl->scl_lock);
        }
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if ((rw == RW_READER && scl->scl_count != 0) ||
                    (rw == RW_WRITER && scl->scl_writer == curthread))
                        locks_held |= 1 << i;
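
/*
 * Hedged usage sketch for the primitives above: take every config lock
 * as reader, verify with spa_config_held(), then release.
 */
static void
example_config_reader(spa_t *spa)
{
        spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
        ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), ==, SCL_ALL);
        /* ... the vdev tree cannot change here ... */
        spa_config_exit(spa, SCL_ALL, FTAG);
}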
        if ((spa->spa_load_thread != NULL &&
            spa->spa_load_thread != curthread) ||
            (spa->spa_export_thread != NULL &&
            spa->spa_export_thread != curthread)) {

            (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
            (u_longlong_t)++spa->spa_deadman_calls);

        vdev_deadman(spa->spa_root_vdev, FTAG);

        spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,

        return (TREE_CMP(a->sls_txg, b->sls_txg));
spa_add(const char *name, nvlist_t *config, const char *altroot)

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_flushed_ms_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_activities_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_activities_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_waiters_cv, NULL, CV_DEFAULT, NULL);

        bplist_create(&spa->spa_free_bplist[t]);

        (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
        spa->spa_state = POOL_STATE_UNINITIALIZED;
        spa->spa_freeze_txg = UINT64_MAX;
        spa->spa_final_txg = UINT64_MAX;
        spa->spa_load_max_txg = UINT64_MAX;
        spa->spa_proc = &p0;
        spa->spa_proc_state = SPA_PROC_NONE;
        spa->spa_trust_config = B_TRUE;
        spa->spa_hostid = zone_get_hostid(NULL);

        spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
        spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);

        zfs_refcount_create(&spa->spa_refcount);

        spa->spa_root = spa_strdup(altroot);

        spa->spa_alloc_count = MAX(MIN(spa_num_allocators,
            boot_ncpus), 1);

        if (spa->spa_alloc_count > 1) {
                spa->spa_allocs_use = kmem_zalloc(offsetof(spa_allocs_use_t,
                    sau_inuse[spa->spa_alloc_count]), KM_SLEEP);
                mutex_init(&spa->spa_allocs_use->sau_lock, NULL, MUTEX_DEFAULT,
                    NULL);
        }

        avl_create(&spa->spa_metaslabs_by_flushed, metaslab_sort_by_flushed,
            sizeof (metaslab_t), offsetof(metaslab_t, ms_spa_txg_node));
        avl_create(&spa->spa_sm_logs_by_txg, spa_log_sm_sort_by_txg,
            sizeof (spa_log_sm_t), offsetof(spa_log_sm_t, sls_node));
        list_create(&spa->spa_log_summary, sizeof (log_summary_entry_t),
            offsetof(log_summary_entry_t, lse_node));

        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));

        dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);

        VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);

        if (config != NULL) {
                if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
                    &features) == 0) {
                        VERIFY(nvlist_dup(features, &spa->spa_label_features,
                            0) == 0);
                }

                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
        }

        if (spa->spa_label_features == NULL) {
                VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
                    KM_SLEEP) == 0);
        }

        spa->spa_min_ashift = INT_MAX;
        spa->spa_max_ashift = 0;
        spa->spa_min_alloc = INT_MAX;
        spa->spa_gcd_alloc = INT_MAX;

        spa->spa_dedup_dspace = ~0ULL;

        spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;

        list_create(&spa->spa_leaf_list, sizeof (vdev_t),
            offsetof(vdev_t, vdev_leaf_node));
        ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);
        ASSERT0(spa->spa_waiters);

        nvlist_free(spa->spa_config_splitting);

        if (spa->spa_root)
                spa_strfree(spa->spa_root);

        while ((dp = list_remove_head(&spa->spa_config_list)) != NULL) {
                if (dp->scd_path != NULL)
                        spa_strfree(dp->scd_path);
        }

        if (spa->spa_alloc_count > 1) {
                mutex_destroy(&spa->spa_allocs_use->sau_lock);
                kmem_free(spa->spa_allocs_use, offsetof(spa_allocs_use_t,
                    sau_inuse[spa->spa_alloc_count]));
        }

        avl_destroy(&spa->spa_metaslabs_by_flushed);
        avl_destroy(&spa->spa_sm_logs_by_txg);
        list_destroy(&spa->spa_log_summary);
        list_destroy(&spa->spa_config_list);
        list_destroy(&spa->spa_leaf_list);

        nvlist_free(spa->spa_label_features);
        nvlist_free(spa->spa_load_info);
        nvlist_free(spa->spa_feat_stats);

        zfs_refcount_destroy(&spa->spa_refcount);

        bplist_destroy(&spa->spa_free_bplist[t]);

        cv_destroy(&spa->spa_async_cv);
        cv_destroy(&spa->spa_evicting_os_cv);
        cv_destroy(&spa->spa_proc_cv);
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);
        cv_destroy(&spa->spa_activities_cv);
        cv_destroy(&spa->spa_waiters_cv);

        mutex_destroy(&spa->spa_flushed_ms_lock);
        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_errlog_lock);
        mutex_destroy(&spa->spa_evicting_os_lock);
        mutex_destroy(&spa->spa_history_lock);
        mutex_destroy(&spa->spa_proc_lock);
        mutex_destroy(&spa->spa_props_lock);
        mutex_destroy(&spa->spa_cksum_tmpls_lock);
        mutex_destroy(&spa->spa_scrub_lock);
        mutex_destroy(&spa->spa_suspend_lock);
        mutex_destroy(&spa->spa_vdev_top_lock);
        mutex_destroy(&spa->spa_feat_stats_lock);
        mutex_destroy(&spa->spa_activities_lock);
        ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock) ||
            spa->spa_load_thread == curthread);
        (void) zfs_refcount_add(&spa->spa_refcount, tag);

        ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock) ||
            spa->spa_load_thread == curthread ||
            spa->spa_export_thread == curthread);
        (void) zfs_refcount_remove(&spa->spa_refcount, tag);

        (void) zfs_refcount_remove(&spa->spa_refcount, tag);

        ASSERT(MUTEX_HELD(&spa_namespace_lock) ||
            spa->spa_export_thread == curthread);

        return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
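
/*
 * Hedged sketch of the reference-count contract enforced above: an open
 * pool always holds spa_minref implicit references, so spa_refcount_zero()
 * is true once only those implicit references remain.
 */
static boolean_t
example_can_export(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        return (spa_refcount_zero(spa));        /* count == spa->spa_minref */
}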
        return (TREE_CMP(sa->aux_guid, sb->aux_guid));

        search.aux_guid = vd->vdev_guid;

                aux->aux_count++;

                aux->aux_guid = vd->vdev_guid;
                aux->aux_count = 1;

        search.aux_guid = vd->vdev_guid;

        if (--aux->aux_count == 0) {

        } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
                aux->aux_pool = 0ULL;

                *pool = found->aux_pool;

                *refcnt = found->aux_count;

        search.aux_guid = vd->vdev_guid;

        ASSERT(found->aux_pool == 0ULL);

        found->aux_pool = spa_guid(vd->vdev_spa);
 * Spares are tracked globally due to the following constraints:
 *
 *      - A spare may be part of multiple pools.
 *      - A spare may be added to a pool even if it's actively in use within
 *        another pool.
 *      - A spare in use in any pool can only be the source of a replacement if
 *        the target is a spare in the same pool.
        ASSERT(!vd->vdev_isspare);
        vd->vdev_isspare = B_TRUE;

        ASSERT(vd->vdev_isspare);
        vd->vdev_isspare = B_FALSE;

        ASSERT(vd->vdev_isspare);

        ASSERT(!vd->vdev_isl2cache);
        vd->vdev_isl2cache = B_TRUE;

        ASSERT(vd->vdev_isl2cache);
        vd->vdev_isl2cache = B_FALSE;

        ASSERT(vd->vdev_isl2cache);
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.

        mutex_enter(&spa->spa_vdev_top_lock);

        ASSERT0(spa->spa_export_thread);

        mutex_enter(&spa->spa_vdev_top_lock);

        ASSERT0(spa->spa_export_thread);

        vdev_rebuild_stop_wait(vd->vdev_top);
        spa->spa_pending_vdev = NULL;

        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE, B_FALSE);

        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                spa->spa_config_generation++;

         * that there won't be more than one config change per txg.

        txg_wait_synced(spa->spa_dsl_pool, txg);

        ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
        if (vd->vdev_ops->vdev_op_leaf) {
                mutex_enter(&vd->vdev_initialize_lock);
                mutex_exit(&vd->vdev_initialize_lock);

                mutex_enter(&vd->vdev_trim_lock);
                mutex_exit(&vd->vdev_trim_lock);

         * The vdev may be both a leaf and top-level device.

         * If the config changed, update the config cache.

        mutex_exit(&spa->spa_vdev_top_lock);
        int low = locks & ~(SCL_ZIO - 1);

        vdev_hold(spa->spa_root_vdev);

        spa->spa_vdev_locks = locks;

        if (vd == NULL || vd == spa->spa_root_vdev) {
                vdev_top = spa->spa_root_vdev;
        } else {
                vdev_top = vd->vdev_top;
        }

        if (vd != spa->spa_root_vdev)

        spa->spa_config_generation++;

        vdev_rele(spa->spa_root_vdev);

        ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
        spa_config_exit(spa, spa->spa_vdev_locks, spa);

        txg_wait_synced(spa->spa_dsl_pool, 0);

         * If the config changed, update the config cache.
        if (!nvlist_exists(spa->spa_label_features, feature)) {
                fnvlist_add_boolean(spa->spa_label_features, feature);
                /*
                 * When we are creating the pool (tx_txg == TXG_INITIAL), we
                 * can't dirty the vdev config because lock SCL_CONFIG is not
                 * held.  Thankfully, in this case we don't need to dirty the
                 * config because it will be written out anyway when we finish
                 * creating the pool.
                 */
                if (tx->tx_txg != TXG_INITIAL)
                        vdev_config_dirty(spa->spa_root_vdev);
        }

        if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
                vdev_config_dirty(spa->spa_root_vdev);
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.

                if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                        continue;
                if (spa->spa_root_vdev == NULL)
                        continue;

                if (vdev_lookup_by_guid(spa->spa_root_vdev,
                    device_guid) != NULL)
                        break;

                if (spa->spa_pending_vdev) {
                        if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                            device_guid) != NULL)
                                break;
                }
        if (spa->spa_freeze_txg == UINT64_MAX) {
                spa->spa_freeze_txg = freeze_txg;
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.

                        digit = c - '0';
                else if (c >= 'a' && c <= 'f')
                        digit = 10 + c - 'a';
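
/*
 * Hedged usage sketch: the digit handling above parses lowercase
 * hexadecimal, so a GUID string converts like this (values illustrative).
 */
static void
example_strtonum(void)
{
        char *end;
        uint64_t guid = zfs_strtonum("1a2b3c", &end);

        ASSERT3U(guid, ==, 0x1a2b3cULL);
        ASSERT(*end == '\0');
}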
        return (spa->spa_async_suspended);

        return (spa->spa_dsl_pool);

        return (spa->spa_is_initializing);

        return (spa->spa_indirect_vdevs_loaded);

        return (&spa->spa_ubsync.ub_rootbp);

        spa->spa_uberblock.ub_rootbp = *bp;

        if (spa->spa_root == NULL)

        (void) strlcpy(buf, spa->spa_root, buflen);

        return (spa->spa_sync_pass);

        return (spa->spa_name);

         * If we fail to parse the config during spa_load(), we can go through

        if (spa->spa_root_vdev == NULL)
                return (spa->spa_config_guid);

        guid = spa->spa_last_synced_guid != 0 ?
            spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

        return (spa->spa_root_vdev->vdev_guid);

        return (spa->spa_load_guid);

        return (spa->spa_ubsync.ub_txg);

        return (spa->spa_first_txg);

        return (spa->spa_syncing_txg);

        return (spa->spa_final_txg - TXG_DEFER_SIZE);

        return (spa->spa_state);

        return (spa->spa_load_state);

        return (spa->spa_freeze_txg);
 * DMU to calculate the space a logical write will require on disk.

        return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
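
/*
 * Worked example (assuming the default spa_asize_inflation = 24 and a
 * pool whose largest ashift is 12): a 512-byte logical write is charged
 * MAX(512, 1 << 12) * 24 = 96KB of worst-case on-disk space.
 */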
 * Return the amount of slop space in bytes.  It is typically 1/32 of the pool
 * (3.2%), minus the embedded log space.  On very small pools, it may be
 * slightly larger than this.  On very large pools, it will be capped to
 * the value of spa_max_slop.  The embedded log space is not included in
 * spa_dspace.  By subtracting it, the usable space (per "zfs list") is a
 * constant 97% of the total space, regardless of metaslab size (assuming the
 * default spa_slop_shift=5 and a non-tiny pool).
        uint64_t space = 0;
        uint64_t slop = 0;

        if (spa->spa_dedup_dspace == ~0ULL)
                spa_update_dspace(spa);

        space = spa->spa_rdspace;
        slop = MIN(space >> spa_slop_shift, spa_max_slop);

        /*
         * Subtract the embedded log space, but no more than half the (3.2%)
         * unusable space.  Note, the "no more than half" is only relevant if
         * zfs_embedded_slog_min_ms >> spa_slop_shift < 2, which is not true
         * by default.
         */
        slop -= MIN(embedded_log, slop >> 1);

        /*
         * Slop space should be at least spa_min_slop, but no more than half
         * the entire capacity of the pool.
         */
        slop = MAX(slop, MIN(space >> 1, spa_min_slop));
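
/*
 * Worked example of the computation above (assuming spa_slop_shift = 5
 * and no embedded log space): with spa_rdspace = 1TB, slop is
 * MIN(1TB >> 5, spa_max_slop) = 32GB, leaving roughly 992GB visible
 * as usable space.
 */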
        return (spa->spa_dspace);

        return (spa->spa_checkpoint_info.sci_dspace);

        spa->spa_rdspace = metaslab_class_get_dspace(spa_normal_class(spa));
        if (spa->spa_nonallocating_dspace > 0) {
                /*
                 * Subtract the space provided by all non-allocating vdevs that
                 * contribute to dspace.  If a file is overwritten, its old
                 * blocks are freed and new blocks are allocated.  If there are
                 * no snapshots of the file, the available space should remain
                 * the same.  The old blocks could be freed from the
                 * non-allocating vdev, but the new blocks must be allocated on
                 * other (allocating) vdevs.  By reserving the entire size of
                 * the non-allocating vdevs (including allocated space), we
                 * ensure that there will be enough space on the allocating
                 * vdevs for this file overwrite to succeed.
                 *
                 * Note that the DMU/DSL doesn't actually know or care
                 * how much space is allocated (it does its own tracking
                 * of how much space has been logically used).  So it
                 * doesn't matter that the data we are moving may be
                 * allocated twice (on the old device and the new device).
                 */
                ASSERT3U(spa->spa_rdspace, >=, spa->spa_nonallocating_dspace);
                spa->spa_rdspace -= spa->spa_nonallocating_dspace;
        }
        spa->spa_dspace = spa->spa_rdspace + ddt_get_dedup_dspace(spa) +
        return (spa->spa_failmode);

        return (spa->spa_suspended != ZIO_SUSPEND_NONE);

        return (spa->spa_ubsync.ub_version);

        return (spa->spa_deflate);

        return (spa->spa_normal_class);

        return (spa->spa_log_class);

        return (spa->spa_embedded_log_class);

        return (spa->spa_special_class);

        return (spa->spa_dedup_class);
        metaslab_class_t *mc = zio->io_metaslab_class;

        const zio_prop_t *zp = &zio->io_prop;

        dmu_object_type_t objtype =
            zp->zp_storage_type == DMU_OT_NONE ?
            zp->zp_type : zp->zp_storage_type;

        if (zp->zp_level > 0 &&

        if (DMU_OT_IS_METADATA(objtype) || zp->zp_level > 0) {

            zio->io_size <= zp->zp_zpl_smallblk) {
                uint64_t space = metaslab_class_get_space(special);
                uint64_t limit =
                    (space * (100 - zfs_special_class_metadata_reserve_pct))
                    / 100;
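
/*
 * Worked example (assuming the default
 * zfs_special_class_metadata_reserve_pct = 25): with a 100GB special
 * class, small file blocks are steered there only while its allocations
 * stay under 100GB * (100 - 25) / 100 = 75GB; the final 25GB remain
 * reserved for metadata.
 */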
        mutex_enter(&spa->spa_evicting_os_lock);
        list_insert_head(&spa->spa_evicting_os_list, os);
        mutex_exit(&spa->spa_evicting_os_lock);

        mutex_enter(&spa->spa_evicting_os_lock);
        list_remove(&spa->spa_evicting_os_list, os);
        cv_broadcast(&spa->spa_evicting_os_cv);
        mutex_exit(&spa->spa_evicting_os_lock);

        mutex_enter(&spa->spa_evicting_os_lock);
        while (!list_is_empty(&spa->spa_evicting_os_list))
                cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
        mutex_exit(&spa->spa_evicting_os_lock);
        return (spa->spa_prev_software_version);

        return (spa->spa_deadman_synctime);

        return (spa->spa_autotrim);

        return (spa->spa_deadman_ziotime);

        return (spa->spa_deadman_failmode);

        if (strcmp(failmode, "wait") == 0)
                spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
        else if (strcmp(failmode, "continue") == 0)
                spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
        else if (strcmp(failmode, "panic") == 0)
                spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
        else
                spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
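
/*
 * Hedged usage sketch: the tunable string maps onto a failure mode, and
 * unrecognized strings fall back to "wait" as shown above.
 */
static void
example_set_failmode(spa_t *spa)
{
        spa_set_deadman_failmode(spa, "continue");
        ASSERT3U(spa_get_deadman_failmode(spa), ==, ZIO_FAILURE_MODE_CONTINUE);
}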
        spa->spa_deadman_ziotime = ns;

        spa->spa_deadman_synctime = ns;
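
/*
 * Worked note (assuming the shipped defaults zfs_deadman_synctime_ms =
 * 600000 and zfs_deadman_ziotime_ms = 300000): the deadman considers a
 * txg sync "hung" after 600 seconds and an individual zio after 300.
 */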
        if (asize != 0 && spa->spa_deflate) {
                dsize = (asize >> SPA_MINBLOCKSHIFT) *
                    vd->vdev_deflate_ratio;
        }

        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

        return (spa->spa_dsl_pool->dp_dirty_total);
        seq_printf(f, "%-20s %-14s %-14s %-12s %-16s %s\n", "pool_guid",

        seq_printf(f, "%-20llu %-14llu %-14llu %-12llu %-16s %s\n",
            (u_longlong_t)sip->pool_guid, (u_longlong_t)sip->spa_load_state,
            (u_longlong_t)sip->mmp_sec_remaining,
            (u_longlong_t)sip->spa_load_max_txg,
            (sip->pool_name ? sip->pool_name : "-"),
            (sip->spa_load_notes ? sip->spa_load_notes : "-"));
        while (shl->size > size) {
                sip = list_remove_head(&shl->procfs_list.pl_list);
                if (sip->pool_name)
                        spa_strfree(sip->pool_name);
                if (sip->spa_load_notes)
                        kmem_strfree(sip->spa_load_notes);
                shl->size--;
        }

        IMPLY(size == 0, list_is_empty(&shl->procfs_list.pl_list));

        spa_import_progress_list->size = 0;

        spa_import_progress_list->procfs_list.pl_private =
            spa_import_progress_list;

            &spa_import_progress_list->procfs_list,

        procfs_list_uninstall(&shl->procfs_list);

        procfs_list_destroy(&shl->procfs_list);
        if (shl->size == 0)
                return;

        mutex_enter(&shl->procfs_list.pl_lock);
        for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
            sip = list_prev(&shl->procfs_list.pl_list, sip)) {
                if (sip->pool_guid == pool_guid) {
                        sip->spa_load_state = load_state;
                        if (sip->spa_load_notes != NULL) {
                                kmem_strfree(sip->spa_load_notes);
                                sip->spa_load_notes = NULL;
                        }
                }
        }
        mutex_exit(&shl->procfs_list.pl_lock);

        if (shl->size == 0)
                return;

        mutex_enter(&shl->procfs_list.pl_lock);
        for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
            sip = list_prev(&shl->procfs_list.pl_list, sip)) {
                if (sip->pool_guid == pool_guid) {
                        if (sip->spa_load_notes != NULL) {
                                kmem_strfree(sip->spa_load_notes);
                                sip->spa_load_notes = NULL;
                        }
                        sip->spa_load_notes = notes;
                        zfs_dbgmsg("'%s' %s", sip->pool_name, notes);
                }
        }
        mutex_exit(&shl->procfs_list.pl_lock);

        if (shl->size == 0)
                return;

        mutex_enter(&shl->procfs_list.pl_lock);
        for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
            sip = list_prev(&shl->procfs_list.pl_list, sip)) {
                if (sip->pool_guid == pool_guid) {
                        sip->spa_load_max_txg = load_max_txg;
                        break;
                }
        }
        mutex_exit(&shl->procfs_list.pl_lock);

        if (shl->size == 0)
                return;

        mutex_enter(&shl->procfs_list.pl_lock);
        for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
            sip = list_prev(&shl->procfs_list.pl_list, sip)) {
                if (sip->pool_guid == pool_guid) {
                        sip->mmp_sec_remaining = mmp_sec_remaining;
                        break;
                }
        }
        mutex_exit(&shl->procfs_list.pl_lock);
        sip->pool_guid = spa_guid(spa);

        (void) nvlist_lookup_string(spa->spa_config, ZPOOL_CONFIG_POOL_NAME,
            &poolname);
        sip->pool_name = spa_strdup(poolname);
        sip->spa_load_state = spa_load_state(spa);
        sip->spa_load_notes = NULL;

        mutex_enter(&shl->procfs_list.pl_lock);
        procfs_list_add(&shl->procfs_list, sip);
        shl->size++;
        mutex_exit(&shl->procfs_list.pl_lock);
        mutex_enter(&shl->procfs_list.pl_lock);
        for (sip = list_tail(&shl->procfs_list.pl_list); sip != NULL;
            sip = list_prev(&shl->procfs_list.pl_list, sip)) {
                if (sip->pool_guid == pool_guid) {
                        if (sip->pool_name)
                                spa_strfree(sip->pool_name);
                        if (sip->spa_load_notes)
                                spa_strfree(sip->spa_load_notes);
                        list_remove(&shl->procfs_list.pl_list, sip);
                        shl->size--;
                        break;
                }
        }
        mutex_exit(&shl->procfs_list.pl_lock);
        s = strcmp(s1->spa_name, s2->spa_name);

        if (sigaction(SIGSEGV, &sa, NULL) == -1) {
        return (spa->spa_dedup_class->mc_groups != 0);

        return (spa->spa_log_class->mc_groups != 0);

        return (spa->spa_special_class->mc_groups != 0);

        return (spa->spa_log_state);

        spa->spa_log_state = state;

        return (spa->spa_is_root);

        return (!!(spa->spa_mode & SPA_MODE_WRITE) && spa->spa_trust_config);

        return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
            !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));

        return (spa->spa_mode);

        return (spa->spa_scrubbed_last_txg);

        return (spa->spa_bootfs);

        return (spa->spa_delegation);

        return (spa->spa_meta_objset);

        return (spa->spa_dedup_checksum);

        spa->spa_scan_pass_start = gethrestime_sec();
        if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
                spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
        else
                spa->spa_scan_pass_scrub_pause = 0;

        if (dsl_errorscrub_is_paused(spa->spa_dsl_pool->dp_scan))
                spa->spa_scan_pass_errorscrub_pause = spa->spa_scan_pass_start;
        else
                spa->spa_scan_pass_errorscrub_pause = 0;

        spa->spa_scan_pass_scrub_spent_paused = 0;
        spa->spa_scan_pass_exam = 0;
        spa->spa_scan_pass_issued = 0;

        spa->spa_scan_pass_errorscrub_spent_paused = 0;
        dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

        if (scn == NULL || (scn->scn_phys.scn_func == POOL_SCAN_NONE &&
            scn->errorscrub_phys.dep_func == POOL_SCAN_NONE))

        ps->pss_func = scn->scn_phys.scn_func;
        ps->pss_state = scn->scn_phys.scn_state;
        ps->pss_start_time = scn->scn_phys.scn_start_time;
        ps->pss_end_time = scn->scn_phys.scn_end_time;
        ps->pss_to_examine = scn->scn_phys.scn_to_examine;
        ps->pss_examined = scn->scn_phys.scn_examined;
        ps->pss_skipped = scn->scn_phys.scn_skipped;
        ps->pss_processed = scn->scn_phys.scn_processed;
        ps->pss_errors = scn->scn_phys.scn_errors;

        ps->pss_pass_exam = spa->spa_scan_pass_exam;
        ps->pss_pass_start = spa->spa_scan_pass_start;
        ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
        ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
        ps->pss_pass_issued = spa->spa_scan_pass_issued;
        ps->pss_issued =
            scn->scn_issued_before_pass + spa->spa_scan_pass_issued;

        ps->pss_error_scrub_func = scn->errorscrub_phys.dep_func;
        ps->pss_error_scrub_state = scn->errorscrub_phys.dep_state;
        ps->pss_error_scrub_start = scn->errorscrub_phys.dep_start_time;
        ps->pss_error_scrub_end = scn->errorscrub_phys.dep_end_time;
        ps->pss_error_scrub_examined = scn->errorscrub_phys.dep_examined;
        ps->pss_error_scrub_to_be_examined =
            scn->errorscrub_phys.dep_to_examine;

        ps->pss_pass_error_scrub_pause = spa->spa_scan_pass_errorscrub_pause;
        uint64_t ret = -1ULL;

         * config locks, so it is sufficient to hold SCL_VDEV as reader when

        vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

        while (vdevid != -1ULL) {
                vdev_indirect_births_t *vib = vd->vdev_indirect_births;

                ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

                vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;

        IMPLY(ret != -1ULL,
        return (spa->spa_multihost ? B_TRUE : B_FALSE);

        return (spa->spa_hostid);

        return (spa->spa_trust_config);

        return (spa->spa_missing_tvds_allowed);

        return (spa->spa_syncing_log_sm);

        spa->spa_missing_tvds = missing;

        vdev_t *rvd = spa->spa_root_vdev;

        vdev_state_t state = rvd->vdev_state;
        vdev_aux_t aux = rvd->vdev_stat.vs_aux;

        vdev_t *rvd = spa->spa_root_vdev;
        for (uint64_t c = 0; c < rvd->vdev_children; c++) {
                if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
                        return (B_FALSE);
        }

        return (spa->spa_checkpoint_txg != 0);

        return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
            spa->spa_mode == SPA_MODE_READ);

        uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

        return (spa->spa_first_txg);
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.

        uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
        uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;
        "Set to ignore IO errors during free and permanently leak the space");

        "free space available");

        param_get_uint, ZMOD_RW, "Reserved free space in pool");