/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_SPA_IMPL_H
#define	_SYS_SPA_IMPL_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/spa.h>
#include <sys/vdev.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef struct spa_error_entry {
	zbookmark_t	se_bookmark;
	char		*se_name;
	avl_node_t	se_avl;
} spa_error_entry_t;
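
/*
 * The in-core error lists (spa_errlist_last and spa_errlist_scrub below)
 * are AVL trees of spa_error_entry_t keyed on the bookmark.  A hypothetical
 * comparator sketch (illustration only, not the spa_errlog.c
 * implementation), assuming a byte-wise ordering of the bookmark suffices:
 *
 *	static int
 *	spa_error_entry_compare_sketch(const void *a, const void *b)
 *	{
 *		const spa_error_entry_t *sa = a;
 *		const spa_error_entry_t *sb = b;
 *		int ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
 *		    sizeof (zbookmark_t));
 *
 *		if (ret < 0)
 *			return (-1);
 *		if (ret > 0)
 *			return (1);
 *		return (0);
 *	}
 */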

typedef struct spa_history_phys {
	uint64_t sh_pool_create_len;	/* ending offset of zpool create */
	uint64_t sh_phys_max_off;	/* physical EOF */
	uint64_t sh_bof;		/* logical BOF */
	uint64_t sh_eof;		/* logical EOF */
	uint64_t sh_records_lost;	/* num of records overwritten */
} spa_history_phys_t;
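
/*
 * The on-disk history object is treated as a ring buffer: the original
 * "zpool create" records occupy [0, sh_pool_create_len) and are preserved,
 * while later records wrap within [sh_pool_create_len, sh_phys_max_off).
 * sh_bof and sh_eof are monotonically increasing logical offsets.  A
 * hypothetical sketch of the logical-to-physical mapping (illustration
 * only, not the spa_history.c implementation):
 *
 *	static uint64_t
 *	history_log_to_phys_sketch(uint64_t log_off, spa_history_phys_t *shpp)
 *	{
 *		uint64_t ring_len;
 *
 *		ring_len = shpp->sh_phys_max_off - shpp->sh_pool_create_len;
 *		return ((log_off - shpp->sh_pool_create_len) % ring_len +
 *		    shpp->sh_pool_create_len);
 *	}
 */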

struct spa_aux_vdev {
	uint64_t	sav_object;		/* MOS object for device list */
	nvlist_t	*sav_config;		/* cached device config */
	vdev_t		**sav_vdevs;		/* devices */
	int		sav_count;		/* number of devices */
	boolean_t	sav_sync;		/* sync the device list */
	nvlist_t	**sav_pending;		/* pending device additions */
	uint_t		sav_npending;		/* # pending devices */
};

typedef struct spa_config_lock {
	kmutex_t	scl_lock;
	kthread_t	*scl_writer;
	uint16_t	scl_write_wanted;
	kcondvar_t	scl_cv;
	refcount_t	scl_count;
} spa_config_lock_t;
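
/*
 * spa_config_lock_t builds a reader/writer lock out of a mutex, a condition
 * variable, and a refcount of readers (scl_count), with scl_writer
 * recording the thread that holds it as writer.  A hypothetical
 * reader-entry sketch (illustration only; the real logic is
 * spa_config_enter()/spa_config_exit()), assuming "tag" identifies the
 * holder to the refcount and readers defer to waiting writers:
 *
 *	static void
 *	scl_enter_reader_sketch(spa_config_lock_t *scl, void *tag)
 *	{
 *		mutex_enter(&scl->scl_lock);
 *		while (scl->scl_writer != NULL || scl->scl_write_wanted != 0)
 *			cv_wait(&scl->scl_cv, &scl->scl_lock);
 *		(void) refcount_add(&scl->scl_count, tag);
 *		mutex_exit(&scl->scl_lock);
 *	}
 */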

typedef struct spa_config_dirent {
	list_node_t	scd_link;
	char		*scd_path;
} spa_config_dirent_t;
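
/*
 * spa_config_list (in struct spa below) holds one spa_config_dirent_t per
 * cache file path the pool's configuration has been written to.  A
 * hypothetical walk of that list (illustration only):
 *
 *	spa_config_dirent_t *dp;
 *
 *	for (dp = list_head(&spa->spa_config_list); dp != NULL;
 *	    dp = list_next(&spa->spa_config_list, dp)) {
 *		if (dp->scd_path != NULL)
 *			cmn_err(CE_NOTE, "cached in %s", dp->scd_path);
 *	}
 */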

struct spa {
	/*
	 * Fields protected by spa_namespace_lock.
	 */
	char		*spa_name;		/* pool name */
	avl_node_t	spa_avl;		/* node in spa_namespace_avl */
	nvlist_t	*spa_config;		/* last synced config */
	nvlist_t	*spa_config_syncing;	/* currently syncing config */
	uint64_t	spa_config_txg;		/* txg of last config change */
	kmutex_t	spa_config_cache_lock;	/* for spa_config RW_READER */
	int		spa_sync_pass;		/* iterate-to-convergence */
	int		spa_state;		/* pool state */
	int		spa_inject_ref;		/* injection references */
	uint8_t		spa_traverse_wanted;	/* traverse lock wanted */
	uint8_t		spa_sync_on;		/* sync threads are running */
	spa_load_state_t spa_load_state;	/* current load operation */
	taskq_t		*spa_zio_issue_taskq[ZIO_TYPES];
	taskq_t		*spa_zio_intr_taskq[ZIO_TYPES];
	dsl_pool_t	*spa_dsl_pool;
	metaslab_class_t *spa_normal_class;	/* normal data class */
	metaslab_class_t *spa_log_class;	/* intent log data class */
	uint64_t	spa_first_txg;		/* first txg after spa_open() */
	uint64_t	spa_final_txg;		/* txg of export/destroy */
	uint64_t	spa_freeze_txg;		/* freeze pool at this txg */
	objset_t	*spa_meta_objset;	/* copy of dp->dp_meta_objset */
	txg_list_t	spa_vdev_txg_list;	/* per-txg dirty vdev list */
	vdev_t		*spa_root_vdev;		/* top-level vdev container */
	uint64_t	spa_load_guid;		/* initial guid for spa_load */
	list_t		spa_dirty_list;		/* vdevs with dirty labels */
	spa_aux_vdev_t	spa_spares;		/* hot spares */
	spa_aux_vdev_t	spa_l2cache;		/* L2ARC cache devices */
	uint64_t	spa_config_object;	/* MOS object for pool config */
	uint64_t	spa_syncing_txg;	/* txg currently syncing */
	uint64_t	spa_sync_bplist_obj;	/* object for deferred frees */
	bplist_t	spa_sync_bplist;	/* deferred-free bplist */
	krwlock_t	spa_traverse_lock;	/* traverse vs. spa_sync() */
	uberblock_t	spa_ubsync;		/* last synced uberblock */
	uberblock_t	spa_uberblock;		/* current uberblock */
	kmutex_t	spa_scrub_lock;		/* resilver/scrub lock */
	uint64_t	spa_scrub_inflight;	/* in-flight scrub I/Os */
	uint64_t	spa_scrub_maxinflight;	/* max in-flight scrub I/Os */
	uint64_t	spa_scrub_errors;	/* scrub I/O error count */
	kcondvar_t	spa_scrub_io_cv;	/* scrub I/O completion */
	uint8_t		spa_scrub_active;	/* active or suspended? */
	uint8_t		spa_scrub_type;		/* type of scrub we're doing */
	uint8_t		spa_scrub_finished;	/* indicator to rotate logs */
	uint8_t		spa_scrub_started;	/* started since last boot */
	uint8_t		spa_scrub_reopen;	/* scrub doing vdev_reopen */
	kmutex_t	spa_async_lock;		/* protect async state */
	kthread_t	*spa_async_thread;	/* thread doing async task */
	int		spa_async_suspended;	/* async tasks suspended */
	kcondvar_t	spa_async_cv;		/* wait for thread_exit() */
	uint16_t	spa_async_tasks;	/* async task mask */
	char		*spa_root;		/* alternate root directory */
	kmutex_t	spa_uberblock_lock;	/* vdev_uberblock_load_done() */
	uint64_t	spa_ena;		/* spa-wide ereport ENA */
	boolean_t	spa_last_open_failed;	/* true if last open failed */
	kmutex_t	spa_errlog_lock;	/* error log lock */
	uint64_t	spa_errlog_last;	/* last error log object */
	uint64_t	spa_errlog_scrub;	/* scrub error log object */
	kmutex_t	spa_errlist_lock;	/* error list/ereport lock */
	avl_tree_t	spa_errlist_last;	/* last error list */
	avl_tree_t	spa_errlist_scrub;	/* scrub error list */
	uint64_t	spa_deflate;		/* should we deflate? */
	uint64_t	spa_history;		/* history object */
	kmutex_t	spa_history_lock;	/* history lock */
	vdev_t		*spa_pending_vdev;	/* pending vdev additions */
	kmutex_t	spa_props_lock;		/* property lock */
	uint64_t	spa_pool_props_object;	/* object for properties */
	uint64_t	spa_bootfs;		/* default boot filesystem */
	boolean_t	spa_delegation;		/* delegation on/off */
	list_t		spa_config_list;	/* previous cache file(s) */
	list_t		spa_zio_list;		/* zio error list */
	kcondvar_t	spa_zio_cv;		/* resume I/O pipeline */
	kmutex_t	spa_zio_lock;		/* zio error lock */
	uint8_t		spa_failmode;		/* failure mode for the pool */
	boolean_t	spa_import_faulted;	/* allow faulted vdevs */
	boolean_t	spa_is_root;		/* pool is root */
	int		spa_minref;		/* num refs when first opened */
	/*
	 * spa_refcount & spa_config_lock must be the last elements
	 * because refcount_t changes size based on compilation options.
	 * In order for the MDB module to function correctly, the other
	 * fields must remain in the same location.
	 */
	spa_config_lock_t spa_config_lock;	/* configuration changes */
	refcount_t	spa_refcount;		/* number of opens */
};

extern const char *spa_config_path;

#define	BOOTFS_COMPRESS_VALID(compress) \
	((compress) == ZIO_COMPRESS_LZJB || \
	((compress) == ZIO_COMPRESS_ON && \
	ZIO_COMPRESS_ON_VALUE == ZIO_COMPRESS_LZJB) || \
	(compress) == ZIO_COMPRESS_OFF)
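
/*
 * BOOTFS_COMPRESS_VALID() accepts lzjb, "on" when "on" resolves to lzjb,
 * or no compression at all.  A hypothetical validation sketch for the
 * bootfs property (illustration only; "bootfs_name" and "error" are
 * placeholders):
 *
 *	uint64_t compress;
 *
 *	if (dsl_prop_get_integer(bootfs_name,
 *	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL) == 0 &&
 *	    !BOOTFS_COMPRESS_VALID(compress))
 *		error = ENOTSUP;
 */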

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SPA_IMPL_H */