/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#include <sys/spa.h>
#include <sys/vdev.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_t	se_bookmark;
	char		*se_name;
	avl_node_t	se_avl;
} spa_error_entry_t;

typedef struct spa_history_phys {
	uint64_t	sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t	sh_phys_max_off;	/* physical EOF */
	uint64_t	sh_bof;			/* logical BOF */
	uint64_t	sh_eof;			/* logical EOF */
	uint64_t	sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	int		scl_write_wanted;
	kcondvar_t	scl_cv;
	refcount_t	scl_count;
} spa_config_lock_t;

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;

typedef enum spa_log_state {
	SPA_LOG_UNKNOWN = 0,	/* unknown log state */
	SPA_LOG_MISSING,	/* missing log(s) */
	SPA_LOG_CLEAR,		/* clear the log(s) */
	SPA_LOG_GOOD,		/* log(s) are good */
} spa_log_state_t;

enum zio_taskq_type {
	ZIO_TASKQ_ISSUE = 0,
	ZIO_TASKQ_INTERRUPT,
	ZIO_TASKQ_TYPES
};

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		spa_name[MAXNAMELEN];	/* pool name */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	uint64_t	spa_config_txg;		/* txg of last config change */
	int		spa_sync_pass;		/* iterate-to-convergence */
	pool_state_t	spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	taskq_t		*spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	uint64_t	spa_load_guid;		/* initial guid for spa_load */
	list_t		spa_config_dirty_list;	/* vdevs with dirty config */
	list_t		spa_state_dirty_list;	/* vdevs with dirty state */
	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	uint64_t	spa_sync_bplist_obj;	/* object for deferred frees */
	bplist_t	spa_sync_bplist;	/* deferred-free bplist */
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub I/Os */
	uint64_t	spa_scrub_maxinflight;	/* max in-flight scrub I/Os */
	uint64_t	spa_scrub_errors;	/* scrub I/O error count */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	kmutex_t	spa_async_root_lock;	/* protects async root count */
	uint64_t	spa_async_root_count;	/* number of async root zios */
	kcondvar_t	spa_async_root_cv;	/* notify when count == 0 */
	char		*spa_root;		/* alternate root directory */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	boolean_t	spa_last_open_failed;	/* true if last open failed */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	uint64_t	spa_failmode;		/* failure mode for the pool */
	uint64_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	zio_t		*spa_suspend_zio_root;	/* root of all suspended I/O */
	kmutex_t	spa_suspend_lock;	/* protects suspend_zio_root */
	kcondvar_t	spa_suspend_cv;		/* notification of resume */
	uint8_t		spa_suspended;		/* pool is suspended */
	boolean_t	spa_import_faulted;	/* allow faulted vdevs */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	int		spa_mode;		/* FREAD | FWRITE */
	spa_log_state_t spa_log_state;		/* log state */
	/*
	 * spa_refcnt & spa_config_lock must be the last elements
	 * because refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
	refcount_t	spa_refcount;		/* number of opens */
};

extern const char *spa_config_path;

#define	BOOTFS_COMPRESS_VALID(compress) \
	((compress) == ZIO_COMPRESS_LZJB || \
	((compress) == ZIO_COMPRESS_ON && \
	ZIO_COMPRESS_ON_VALUE == ZIO_COMPRESS_LZJB) || \
	(compress) == ZIO_COMPRESS_OFF)

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */
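/*
 * Illustrative sketch (not part of this interface): how a property-
 * validation path might use BOOTFS_COMPRESS_VALID() when the "bootfs"
 * property is being set.  The macro accepts only lzjb (named explicitly,
 * or via "on" when "on" resolves to lzjb) and "off"; other algorithms are
 * rejected so the boot filesystem stays readable at boot time.  The
 * function name and the ENOTSUP return value below are assumptions made
 * for this example only.
 *
 *	static int
 *	spa_validate_bootfs_compress(uint64_t compress)
 *	{
 *		if (!BOOTFS_COMPRESS_VALID(compress))
 *			return (ENOTSUP);
 *		return (0);
 *	}
 */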