1 // SPDX-License-Identifier: CDDL-1.0 2 /* 3 * CDDL HEADER START 4 * 5 * The contents of this file are subject to the terms of the 6 * Common Development and Distribution License (the "License"). 7 * You may not use this file except in compliance with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or https://opensource.org/licenses/CDDL-1.0. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2024 by Delphix. All rights reserved. 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. 27 * Copyright 2013 Saso Kiselkov. All rights reserved. 28 * Copyright (c) 2016 Actifio, Inc. All rights reserved. 29 * Copyright (c) 2017, Intel Corporation. 30 * Copyright (c) 2019 Datto Inc. 31 */ 32 33 #ifndef _SYS_SPA_IMPL_H 34 #define _SYS_SPA_IMPL_H 35 36 #include <sys/spa.h> 37 #include <sys/spa_checkpoint.h> 38 #include <sys/spa_log_spacemap.h> 39 #include <sys/vdev.h> 40 #include <sys/vdev_rebuild.h> 41 #include <sys/vdev_removal.h> 42 #include <sys/vdev_raidz.h> 43 #include <sys/metaslab.h> 44 #include <sys/dmu.h> 45 #include <sys/dsl_pool.h> 46 #include <sys/uberblock_impl.h> 47 #include <sys/zfs_context.h> 48 #include <sys/avl.h> 49 #include <sys/zfs_refcount.h> 50 #include <sys/bplist.h> 51 #include <sys/bpobj.h> 52 #include <sys/dsl_crypt.h> 53 #include <sys/zfeature.h> 54 #include <sys/zthr.h> 55 #include <sys/dsl_deadlist.h> 56 #include <zfeature_common.h> 57 58 #ifdef __cplusplus 59 extern "C" { 60 #endif 61 62 typedef struct spa_allocs_use { 63 kmutex_t sau_lock; 64 uint_t sau_rotor; 65 boolean_t sau_inuse[]; 66 } spa_allocs_use_t; 67 68 typedef struct spa_error_entry { 69 zbookmark_phys_t se_bookmark; 70 char *se_name; 71 avl_node_t se_avl; 72 zbookmark_err_phys_t se_zep; /* not accounted in avl_find */ 73 } spa_error_entry_t; 74 75 typedef struct spa_history_phys { 76 uint64_t sh_pool_create_len; /* ending offset of zpool create */ 77 uint64_t sh_phys_max_off; /* physical EOF */ 78 uint64_t sh_bof; /* logical BOF */ 79 uint64_t sh_eof; /* logical EOF */ 80 uint64_t sh_records_lost; /* num of records overwritten */ 81 } spa_history_phys_t; 82 83 /* 84 * All members must be uint64_t, for byteswap purposes. 85 */ 86 typedef struct spa_removing_phys { 87 uint64_t sr_state; /* dsl_scan_state_t */ 88 89 /* 90 * The vdev ID that we most recently attempted to remove, 91 * or -1 if no removal has been attempted. 92 */ 93 uint64_t sr_removing_vdev; 94 95 /* 96 * The vdev ID that we most recently successfully removed, 97 * or -1 if no devices have been removed. 98 */ 99 uint64_t sr_prev_indirect_vdev; 100 101 uint64_t sr_start_time; 102 uint64_t sr_end_time; 103 104 /* 105 * Note that we can not use the space map's or indirect mapping's 106 * accounting as a substitute for these values, because we need to 107 * count frees of not-yet-copied data as though it did the copy. 108 * Otherwise, we could get into a situation where copied > to_copy, 109 * or we complete before copied == to_copy. 110 */ 111 uint64_t sr_to_copy; /* bytes that need to be copied */ 112 uint64_t sr_copied; /* bytes that have been copied or freed */ 113 } spa_removing_phys_t; 114 115 /* 116 * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT 117 * (with key DMU_POOL_CONDENSING_INDIRECT). It is present if a condense 118 * of an indirect vdev's mapping object is in progress. 119 */ 120 typedef struct spa_condensing_indirect_phys { 121 /* 122 * The vdev ID of the indirect vdev whose indirect mapping is 123 * being condensed. 124 */ 125 uint64_t scip_vdev; 126 127 /* 128 * The vdev's old obsolete spacemap. This spacemap's contents are 129 * being integrated into the new mapping. 130 */ 131 uint64_t scip_prev_obsolete_sm_object; 132 133 /* 134 * The new mapping object that is being created. 135 */ 136 uint64_t scip_next_mapping_object; 137 } spa_condensing_indirect_phys_t; 138 139 struct spa_aux_vdev { 140 uint64_t sav_object; /* MOS object for device list */ 141 nvlist_t *sav_config; /* cached device config */ 142 vdev_t **sav_vdevs; /* devices */ 143 int sav_count; /* number devices */ 144 boolean_t sav_sync; /* sync the device list */ 145 boolean_t sav_label_sync; /* sync aux labels */ 146 nvlist_t **sav_pending; /* pending device additions */ 147 uint_t sav_npending; /* # pending devices */ 148 }; 149 150 typedef struct spa_config_lock { 151 kmutex_t scl_lock; 152 kthread_t *scl_writer; 153 int scl_write_wanted; 154 int scl_count; 155 kcondvar_t scl_cv; 156 } ____cacheline_aligned spa_config_lock_t; 157 158 typedef struct spa_config_dirent { 159 list_node_t scd_link; 160 char *scd_path; 161 } spa_config_dirent_t; 162 163 typedef enum zio_taskq_type { 164 ZIO_TASKQ_ISSUE = 0, 165 ZIO_TASKQ_ISSUE_HIGH, 166 ZIO_TASKQ_INTERRUPT, 167 ZIO_TASKQ_INTERRUPT_HIGH, 168 ZIO_TASKQ_TYPES 169 } zio_taskq_type_t; 170 171 /* 172 * State machine for the zpool-poolname process. The states transitions 173 * are done as follows: 174 * 175 * From To Routine 176 * PROC_NONE -> PROC_CREATED spa_activate() 177 * PROC_CREATED -> PROC_ACTIVE spa_thread() 178 * PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate() 179 * PROC_DEACTIVATE -> PROC_GONE spa_thread() 180 * PROC_GONE -> PROC_NONE spa_deactivate() 181 */ 182 typedef enum spa_proc_state { 183 SPA_PROC_NONE, /* spa_proc = &p0, no process created */ 184 SPA_PROC_CREATED, /* spa_activate() has proc, is waiting */ 185 SPA_PROC_ACTIVE, /* taskqs created, spa_proc set */ 186 SPA_PROC_DEACTIVATE, /* spa_deactivate() requests process exit */ 187 SPA_PROC_GONE /* spa_thread() is exiting, spa_proc = &p0 */ 188 } spa_proc_state_t; 189 190 typedef struct spa_taskqs { 191 uint_t stqs_count; 192 taskq_t **stqs_taskq; 193 } spa_taskqs_t; 194 195 /* one for each thread in the spa sync taskq */ 196 typedef struct spa_syncthread_info { 197 kthread_t *sti_thread; 198 uint_t sti_allocator; 199 } spa_syncthread_info_t; 200 201 typedef enum spa_all_vdev_zap_action { 202 AVZ_ACTION_NONE = 0, 203 AVZ_ACTION_DESTROY, /* Destroy all per-vdev ZAPs and the AVZ. */ 204 AVZ_ACTION_REBUILD, /* Populate the new AVZ, see spa_avz_rebuild */ 205 AVZ_ACTION_INITIALIZE 206 } spa_avz_action_t; 207 208 typedef enum spa_config_source { 209 SPA_CONFIG_SRC_NONE = 0, 210 SPA_CONFIG_SRC_SCAN, /* scan of path (default: /dev/dsk) */ 211 SPA_CONFIG_SRC_CACHEFILE, /* any cachefile */ 212 SPA_CONFIG_SRC_TRYIMPORT, /* returned from call to tryimport */ 213 SPA_CONFIG_SRC_SPLIT, /* new pool in a pool split */ 214 SPA_CONFIG_SRC_MOS /* MOS, but not always from right txg */ 215 } spa_config_source_t; 216 217 struct spa { 218 /* 219 * Fields protected by spa_namespace_lock. 220 */ 221 char spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */ 222 char *spa_comment; /* comment */ 223 avl_node_t spa_avl; /* node in spa_namespace_avl */ 224 nvlist_t *spa_config; /* last synced config */ 225 nvlist_t *spa_config_syncing; /* currently syncing config */ 226 nvlist_t *spa_config_splitting; /* config for splitting */ 227 nvlist_t *spa_load_info; /* info and errors from load */ 228 uint64_t spa_config_txg; /* txg of last config change */ 229 uint32_t spa_sync_pass; /* iterate-to-convergence */ 230 pool_state_t spa_state; /* pool state */ 231 int spa_inject_ref; /* injection references */ 232 uint8_t spa_sync_on; /* sync threads are running */ 233 spa_load_state_t spa_load_state; /* current load operation */ 234 boolean_t spa_indirect_vdevs_loaded; /* mappings loaded? */ 235 boolean_t spa_trust_config; /* do we trust vdev tree? */ 236 boolean_t spa_is_splitting; /* in the middle of a split? */ 237 spa_config_source_t spa_config_source; /* where config comes from? */ 238 uint64_t spa_import_flags; /* import specific flags */ 239 spa_taskqs_t spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES]; 240 dsl_pool_t *spa_dsl_pool; 241 boolean_t spa_is_initializing; /* true while opening pool */ 242 boolean_t spa_is_exporting; /* true while exporting pool */ 243 kthread_t *spa_export_thread; /* valid during pool export */ 244 kthread_t *spa_load_thread; /* loading, no namespace lock */ 245 metaslab_class_t *spa_normal_class; /* normal data class */ 246 metaslab_class_t *spa_log_class; /* intent log data class */ 247 metaslab_class_t *spa_embedded_log_class; /* log on normal vdevs */ 248 metaslab_class_t *spa_special_class; /* special allocation class */ 249 metaslab_class_t *spa_dedup_class; /* dedup allocation class */ 250 uint64_t spa_first_txg; /* first txg after spa_open() */ 251 uint64_t spa_final_txg; /* txg of export/destroy */ 252 uint64_t spa_freeze_txg; /* freeze pool at this txg */ 253 uint64_t spa_load_max_txg; /* best initial ub_txg */ 254 uint64_t spa_claim_max_txg; /* highest claimed birth txg */ 255 inode_timespec_t spa_loaded_ts; /* 1st successful open time */ 256 objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */ 257 kmutex_t spa_evicting_os_lock; /* Evicting objset list lock */ 258 list_t spa_evicting_os_list; /* Objsets being evicted. */ 259 kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */ 260 txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */ 261 vdev_t *spa_root_vdev; /* top-level vdev container */ 262 uint64_t spa_min_ashift; /* of vdevs in normal class */ 263 uint64_t spa_max_ashift; /* of vdevs in normal class */ 264 uint64_t spa_min_alloc; /* of vdevs in normal class */ 265 uint64_t spa_gcd_alloc; /* of vdevs in normal class */ 266 uint64_t spa_config_guid; /* config pool guid */ 267 uint64_t spa_load_guid; /* spa_load initialized guid */ 268 uint64_t spa_last_synced_guid; /* last synced guid */ 269 list_t spa_config_dirty_list; /* vdevs with dirty config */ 270 list_t spa_state_dirty_list; /* vdevs with dirty state */ 271 spa_allocs_use_t *spa_allocs_use; 272 int spa_alloc_count; 273 int spa_active_allocator; /* selectable allocator */ 274 275 /* per-allocator sync thread taskqs */ 276 taskq_t *spa_sync_tq; 277 spa_syncthread_info_t *spa_syncthreads; 278 279 spa_aux_vdev_t spa_spares; /* hot spares */ 280 spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */ 281 boolean_t spa_aux_sync_uber; /* need to sync aux uber */ 282 nvlist_t *spa_label_features; /* Features for reading MOS */ 283 uint64_t spa_config_object; /* MOS object for pool config */ 284 uint64_t spa_config_generation; /* config generation number */ 285 uint64_t spa_syncing_txg; /* txg currently syncing */ 286 bpobj_t spa_deferred_bpobj; /* deferred-free bplist */ 287 bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */ 288 zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */ 289 /* checksum context templates */ 290 kmutex_t spa_cksum_tmpls_lock; 291 void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS]; 292 uberblock_t spa_ubsync; /* last synced uberblock */ 293 uberblock_t spa_uberblock; /* current uberblock */ 294 boolean_t spa_extreme_rewind; /* rewind past deferred frees */ 295 kmutex_t spa_scrub_lock; /* resilver/scrub lock */ 296 uint64_t spa_scrub_inflight; /* in-flight scrub bytes */ 297 298 /* in-flight verification bytes */ 299 uint64_t spa_load_verify_bytes; 300 kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */ 301 uint8_t spa_scrub_active; /* active or suspended? */ 302 uint8_t spa_scrub_type; /* type of scrub we're doing */ 303 uint8_t spa_scrub_finished; /* indicator to rotate logs */ 304 uint8_t spa_scrub_started; /* started since last boot */ 305 uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */ 306 uint64_t spa_scan_pass_start; /* start time per pass/reboot */ 307 uint64_t spa_scan_pass_scrub_pause; /* scrub pause time */ 308 uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */ 309 uint64_t spa_scan_pass_exam; /* examined bytes per pass */ 310 uint64_t spa_scan_pass_issued; /* issued bytes per pass */ 311 uint64_t spa_scrubbed_last_txg; /* last txg scrubbed */ 312 313 /* error scrub pause time in milliseconds */ 314 uint64_t spa_scan_pass_errorscrub_pause; 315 /* total error scrub paused time in milliseconds */ 316 uint64_t spa_scan_pass_errorscrub_spent_paused; 317 /* 318 * We are in the middle of a resilver, and another resilver 319 * is needed once this one completes. This is set iff any 320 * vdev_resilver_deferred is set. 321 */ 322 boolean_t spa_resilver_deferred; 323 kmutex_t spa_async_lock; /* protect async state */ 324 kthread_t *spa_async_thread; /* thread doing async task */ 325 int spa_async_suspended; /* async tasks suspended */ 326 kcondvar_t spa_async_cv; /* wait for thread_exit() */ 327 uint16_t spa_async_tasks; /* async task mask */ 328 uint64_t spa_missing_tvds; /* unopenable tvds on load */ 329 uint64_t spa_missing_tvds_allowed; /* allow loading spa? */ 330 331 uint64_t spa_nonallocating_dspace; 332 spa_removing_phys_t spa_removing_phys; 333 spa_vdev_removal_t *spa_vdev_removal; 334 335 spa_condensing_indirect_phys_t spa_condensing_indirect_phys; 336 spa_condensing_indirect_t *spa_condensing_indirect; 337 zthr_t *spa_condense_zthr; /* zthr doing condense. */ 338 339 vdev_raidz_expand_t *spa_raidz_expand; 340 zthr_t *spa_raidz_expand_zthr; 341 342 uint64_t spa_checkpoint_txg; /* the txg of the checkpoint */ 343 spa_checkpoint_info_t spa_checkpoint_info; /* checkpoint accounting */ 344 zthr_t *spa_checkpoint_discard_zthr; 345 346 space_map_t *spa_syncing_log_sm; /* current log space map */ 347 avl_tree_t spa_sm_logs_by_txg; 348 kmutex_t spa_flushed_ms_lock; /* for metaslabs_by_flushed */ 349 avl_tree_t spa_metaslabs_by_flushed; 350 spa_unflushed_stats_t spa_unflushed_stats; 351 list_t spa_log_summary; 352 uint64_t spa_log_flushall_txg; 353 354 zthr_t *spa_livelist_delete_zthr; /* deleting livelists */ 355 zthr_t *spa_livelist_condense_zthr; /* condensing livelists */ 356 uint64_t spa_livelists_to_delete; /* set of livelists to free */ 357 livelist_condense_entry_t spa_to_condense; /* next to condense */ 358 359 char *spa_root; /* alternate root directory */ 360 uint64_t spa_ena; /* spa-wide ereport ENA */ 361 int spa_last_open_failed; /* error if last open failed */ 362 uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */ 363 uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */ 364 uint64_t spa_load_txg; /* ub txg that loaded */ 365 uint64_t spa_load_txg_ts; /* timestamp from that ub */ 366 uint64_t spa_load_meta_errors; /* verify metadata err count */ 367 uint64_t spa_load_data_errors; /* verify data err count */ 368 uint64_t spa_verify_min_txg; /* start txg of verify scrub */ 369 kmutex_t spa_errlog_lock; /* error log lock */ 370 uint64_t spa_errlog_last; /* last error log object */ 371 uint64_t spa_errlog_scrub; /* scrub error log object */ 372 kmutex_t spa_errlist_lock; /* error list/ereport lock */ 373 avl_tree_t spa_errlist_last; /* last error list */ 374 avl_tree_t spa_errlist_scrub; /* scrub error list */ 375 avl_tree_t spa_errlist_healed; /* list of healed blocks */ 376 uint64_t spa_deflate; /* should we deflate? */ 377 uint64_t spa_history; /* history object */ 378 kmutex_t spa_history_lock; /* history lock */ 379 vdev_t *spa_pending_vdev; /* pending vdev additions */ 380 kmutex_t spa_props_lock; /* property lock */ 381 uint64_t spa_pool_props_object; /* object for properties */ 382 uint64_t spa_bootfs; /* default boot filesystem */ 383 uint64_t spa_failmode; /* failure mode for the pool */ 384 uint64_t spa_deadman_failmode; /* failure mode for deadman */ 385 uint64_t spa_delegation; /* delegation on/off */ 386 list_t spa_config_list; /* previous cache file(s) */ 387 /* per-CPU array of root of async I/O: */ 388 zio_t **spa_async_zio_root; 389 zio_t *spa_suspend_zio_root; /* root of all suspended I/O */ 390 zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */ 391 kmutex_t spa_suspend_lock; /* protects suspend_zio_root */ 392 kcondvar_t spa_suspend_cv; /* notification of resume */ 393 zio_suspend_reason_t spa_suspended; /* pool is suspended */ 394 uint8_t spa_claiming; /* pool is doing zil_claim() */ 395 boolean_t spa_is_root; /* pool is root */ 396 int spa_minref; /* num refs when first opened */ 397 spa_mode_t spa_mode; /* SPA_MODE_{READ|WRITE} */ 398 boolean_t spa_read_spacemaps; /* spacemaps available if ro */ 399 spa_log_state_t spa_log_state; /* log state */ 400 uint64_t spa_autoexpand; /* lun expansion on/off */ 401 ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */ 402 uint64_t spa_ddt_stat_object; /* DDT statistics */ 403 uint64_t spa_dedup_dspace; /* Cache get_dedup_dspace() */ 404 uint64_t spa_dedup_checksum; /* default dedup checksum */ 405 uint64_t spa_dspace; /* dspace in normal class */ 406 uint64_t spa_rdspace; /* raw (non-dedup) --//-- */ 407 boolean_t spa_active_ddt_prune; /* ddt prune process active */ 408 brt_vdev_t **spa_brt_vdevs; /* array of per-vdev BRTs */ 409 uint64_t spa_brt_nvdevs; /* number of vdevs in BRT */ 410 uint64_t spa_brt_rangesize; /* pool's BRT range size */ 411 krwlock_t spa_brt_lock; /* Protects brt_vdevs/nvdevs */ 412 kmutex_t spa_vdev_top_lock; /* dueling offline/remove */ 413 kmutex_t spa_proc_lock; /* protects spa_proc* */ 414 kcondvar_t spa_proc_cv; /* spa_proc_state transitions */ 415 spa_proc_state_t spa_proc_state; /* see definition */ 416 proc_t *spa_proc; /* "zpool-poolname" process */ 417 uintptr_t spa_did; /* if procp != p0, did of t1 */ 418 boolean_t spa_autoreplace; /* autoreplace set in open */ 419 int spa_vdev_locks; /* locks grabbed */ 420 uint64_t spa_creation_version; /* version at pool creation */ 421 uint64_t spa_prev_software_version; /* See ub_software_version */ 422 uint64_t spa_feat_for_write_obj; /* required to write to pool */ 423 uint64_t spa_feat_for_read_obj; /* required to read from pool */ 424 uint64_t spa_feat_desc_obj; /* Feature descriptions */ 425 uint64_t spa_feat_enabled_txg_obj; /* Feature enabled txg */ 426 kmutex_t spa_feat_stats_lock; /* protects spa_feat_stats */ 427 nvlist_t *spa_feat_stats; /* Cache of enabled features */ 428 /* cache feature refcounts */ 429 uint64_t spa_feat_refcount_cache[SPA_FEATURES]; 430 taskqid_t spa_deadman_tqid; /* Task id */ 431 uint64_t spa_deadman_calls; /* number of deadman calls */ 432 hrtime_t spa_sync_starttime; /* starting time of spa_sync */ 433 uint64_t spa_deadman_synctime; /* deadman sync expiration */ 434 uint64_t spa_deadman_ziotime; /* deadman zio expiration */ 435 uint64_t spa_all_vdev_zaps; /* ZAP of per-vd ZAP obj #s */ 436 spa_avz_action_t spa_avz_action; /* destroy/rebuild AVZ? */ 437 uint64_t spa_autotrim; /* automatic background trim? */ 438 uint64_t spa_errata; /* errata issues detected */ 439 spa_stats_t spa_stats; /* assorted spa statistics */ 440 spa_keystore_t spa_keystore; /* loaded crypto keys */ 441 442 /* arc_memory_throttle() parameters during low memory condition */ 443 uint64_t spa_lowmem_page_load; /* memory load during txg */ 444 uint64_t spa_lowmem_last_txg; /* txg window start */ 445 446 hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */ 447 taskq_t *spa_zvol_taskq; /* Taskq for minor management */ 448 taskq_t *spa_metaslab_taskq; /* Taskq for metaslab preload */ 449 taskq_t *spa_prefetch_taskq; /* Taskq for prefetch threads */ 450 taskq_t *spa_upgrade_taskq; /* Taskq for upgrade jobs */ 451 uint64_t spa_multihost; /* multihost aware (mmp) */ 452 mmp_thread_t spa_mmp; /* multihost mmp thread */ 453 list_t spa_leaf_list; /* list of leaf vdevs */ 454 uint64_t spa_leaf_list_gen; /* track leaf_list changes */ 455 uint32_t spa_hostid; /* cached system hostid */ 456 457 /* synchronization for threads in spa_wait */ 458 kmutex_t spa_activities_lock; 459 kcondvar_t spa_activities_cv; 460 kcondvar_t spa_waiters_cv; 461 int spa_waiters; /* number of waiting threads */ 462 boolean_t spa_waiters_cancel; /* waiters should return */ 463 464 char *spa_compatibility; /* compatibility file(s) */ 465 uint64_t spa_dedup_table_quota; /* property DDT maximum size */ 466 uint64_t spa_dedup_dsize; /* cached on-disk size of DDT */ 467 uint64_t spa_dedup_class_full_txg; /* txg dedup class was full */ 468 469 /* 470 * spa_refcount & spa_config_lock must be the last elements 471 * because zfs_refcount_t changes size based on compilation options. 472 * In order for the MDB module to function correctly, the other 473 * fields must remain in the same location. 474 */ 475 spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */ 476 zfs_refcount_t spa_refcount; /* number of opens */ 477 }; 478 479 extern char *spa_config_path; 480 extern const char *zfs_deadman_failmode; 481 extern uint_t spa_slop_shift; 482 extern void spa_taskq_dispatch(spa_t *spa, zio_type_t t, zio_taskq_type_t q, 483 task_func_t *func, zio_t *zio, boolean_t cutinline); 484 extern void spa_load_spares(spa_t *spa); 485 extern void spa_load_l2cache(spa_t *spa); 486 extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, 487 const char *name); 488 extern void spa_event_post(sysevent_t *ev); 489 extern int param_set_deadman_failmode_common(const char *val); 490 extern void spa_set_deadman_synctime(hrtime_t ns); 491 extern void spa_set_deadman_ziotime(hrtime_t ns); 492 extern const char *spa_history_zone(void); 493 extern const char *zfs_active_allocator; 494 extern int param_set_active_allocator_common(const char *val); 495 496 #ifdef __cplusplus 497 } 498 #endif 499 500 #endif /* _SYS_SPA_IMPL_H */ 501