/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_RAID_H_
#define	_G_RAID_H_

#include <sys/param.h>
#include <sys/kobj.h>
#include <sys/bio.h>
#include <sys/time.h>
#ifdef _KERNEL
#include <sys/sysctl.h>
#endif

#define	G_RAID_CLASS_NAME	"RAID"

#define	G_RAID_MAGIC		"GEOM::RAID"

#define	G_RAID_VERSION		0

struct g_raid_md_object;
struct g_raid_tr_object;

#define	G_RAID_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_RAID_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL
#define	G_RAID_DEVICE_FLAG_MASK	(G_RAID_DEVICE_FLAG_NOAUTOSYNC | \
					 G_RAID_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
extern u_int g_raid_aggressive_spare;
extern u_int g_raid_debug;
extern int g_raid_enable;
extern int g_raid_read_err_thresh;
extern u_int g_raid_start_timeout;
extern struct g_class g_raid_class;

#define	G_RAID_DEBUG(lvl, fmt, ...)	do {				\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: " fmt "\n",		\
			    lvl, ## __VA_ARGS__);			\
		} else {						\
			printf("GEOM_RAID: " fmt "\n",			\
			    ## __VA_ARGS__);				\
		}							\
	}								\
} while (0)
#define	G_RAID_DEBUG1(lvl, sc, fmt, ...)	do {			\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: %s: " fmt "\n",		\
			    lvl, (sc)->sc_name, ## __VA_ARGS__);	\
		} else {						\
			printf("GEOM_RAID: %s: " fmt "\n",		\
			    (sc)->sc_name, ## __VA_ARGS__);		\
		}							\
	}								\
} while (0)
#define	G_RAID_LOGREQ(lvl, bp, fmt, ...)	do {			\
	if (g_raid_debug >= (lvl)) {					\
		if (g_raid_debug > 0) {					\
			printf("GEOM_RAID[%u]: " fmt " ",		\
			    lvl, ## __VA_ARGS__);			\
		} else							\
			printf("GEOM_RAID: " fmt " ", ## __VA_ARGS__);	\
		g_print_bio(bp);					\
		printf("\n");						\
	}								\
} while (0)
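
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * use the logging macros above.  Messages are printed only when the
 * kern.geom.raid.debug level is at least the requested level.  The function
 * name and its arguments below are hypothetical.
 */
#if 0
static void
g_raid_example_log(struct g_raid_softc *sc, struct bio *bp)
{

	G_RAID_DEBUG(1, "Module loaded, debug level %u.", g_raid_debug);
	G_RAID_DEBUG1(2, sc, "Worker thread started.");
	G_RAID_LOGREQ(3, bp, "Deferring request:");
}
#endif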

/*
 * Flags we use to distinguish I/O initiated by the TR layer to maintain
 * the volume's characteristics: fixing subdisks, writing extra copies of
 * data, etc.
 *
 * G_RAID_BIO_FLAG_SYNC		I/O to update an extra copy of the data
 *				for RAID volumes that maintain extra data
 *				and need to rebuild that data.
 * G_RAID_BIO_FLAG_REMAP	I/O done to try to provoke a subdisk into
 *				doing some desirable action, such as bad
 *				block remapping after we detect a bad part
 *				of the disk.
 * G_RAID_BIO_FLAG_LOCKED	I/O holds a range lock that should be released.
 *
 * and the following meta flag:
 * G_RAID_BIO_FLAG_SPECIAL	Any of the I/O flags that need to make it
 *				through the range locking, which would
 *				otherwise defer the I/O until after that
 *				range is unlocked.
 */
#define	G_RAID_BIO_FLAG_SYNC		0x01
#define	G_RAID_BIO_FLAG_REMAP		0x02
#define	G_RAID_BIO_FLAG_SPECIAL \
		(G_RAID_BIO_FLAG_SYNC|G_RAID_BIO_FLAG_REMAP)
#define	G_RAID_BIO_FLAG_LOCKED		0x80
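
/*
 * Illustrative sketch (not part of the original header): marking a rebuild
 * write as synchronization I/O so it passes the range-locking check.  It
 * assumes the flags are carried in the bio's bio_pflags field; the function
 * name below is hypothetical.
 */
#if 0
static void
g_raid_example_mark_sync(struct bio *bp)
{

	/* Tag the request as synchronization I/O... */
	bp->bio_pflags |= G_RAID_BIO_FLAG_SYNC;
	/* ...and later test whether it may bypass a locked range. */
	if (bp->bio_pflags & G_RAID_BIO_FLAG_SPECIAL)
		printf("special request, not deferred by range locks\n");
}
#endif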

struct g_raid_lock {
	off_t			 l_offset;
	off_t			 l_length;
	void			*l_callback_arg;
	int			 l_pending;
	LIST_ENTRY(g_raid_lock)	 l_next;
};

#define	G_RAID_EVENT_WAIT	0x01
#define	G_RAID_EVENT_VOLUME	0x02
#define	G_RAID_EVENT_SUBDISK	0x04
#define	G_RAID_EVENT_DISK	0x08
#define	G_RAID_EVENT_DONE	0x10
struct g_raid_event {
	void			*e_tgt;
	int			 e_event;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_raid_event) e_next;
};
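
/*
 * Illustrative sketch (not part of the original header): posting an event to
 * the worker thread via g_raid_event_send() (declared further below) and
 * waiting for it to be processed.  The volume event code used here is also
 * defined further below; the function name is hypothetical.
 */
#if 0
static int
g_raid_example_start_volume(struct g_raid_volume *vol)
{

	/* Queue a "start" event for the volume and wait until processed. */
	return (g_raid_event_send(vol, G_RAID_VOLUME_E_START,
	    G_RAID_EVENT_VOLUME | G_RAID_EVENT_WAIT));
}
#endif
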
#define G_RAID_DISK_S_NONE		0x00	/* State is unknown. */
#define G_RAID_DISK_S_OFFLINE		0x01	/* Missing disk placeholder. */
#define G_RAID_DISK_S_DISABLED		0x02	/* Disabled. */
#define G_RAID_DISK_S_FAILED		0x03	/* Failed. */
#define G_RAID_DISK_S_STALE_FAILED	0x04	/* Old failed. */
#define G_RAID_DISK_S_SPARE		0x05	/* Hot-spare. */
#define G_RAID_DISK_S_STALE		0x06	/* Old disk, unused now. */
#define G_RAID_DISK_S_ACTIVE		0x07	/* Operational. */

#define G_RAID_DISK_E_DISCONNECTED	0x01

struct g_raid_disk {
	struct g_raid_softc	*d_softc;	/* Back-pointer to softc. */
	struct g_consumer	*d_consumer;	/* GEOM disk consumer. */
	void			*d_md_data;	/* Disk's metadata storage. */
	struct g_kerneldump	 d_kd;		/* Kernel dumping method/args. */
	int			 d_candelete;	/* BIO_DELETE supported. */
	uint64_t		 d_flags;	/* Additional flags. */
	u_int			 d_state;	/* Disk state. */
	u_int			 d_load;	/* Disk average load. */
	off_t			 d_last_offset;	/* Last head offset. */
	int			 d_read_errs;	/* Count of the read errors. */
	TAILQ_HEAD(, g_raid_subdisk)	 d_subdisks; /* List of subdisks. */
	TAILQ_ENTRY(g_raid_disk)	 d_next;	/* Next disk in the node. */
};

#define G_RAID_SUBDISK_S_NONE		0x00	/* Absent. */
#define G_RAID_SUBDISK_S_FAILED		0x01	/* Failed. */
#define G_RAID_SUBDISK_S_NEW		0x02	/* Blank. */
#define G_RAID_SUBDISK_S_REBUILD	0x03	/* Blank + rebuild. */
#define G_RAID_SUBDISK_S_UNINITIALIZED	0x04	/* Disk of the new volume. */
#define G_RAID_SUBDISK_S_STALE		0x05	/* Dirty. */
#define G_RAID_SUBDISK_S_RESYNC		0x06	/* Dirty + check/repair. */
#define G_RAID_SUBDISK_S_ACTIVE		0x07	/* Usable. */

#define G_RAID_SUBDISK_E_NEW		0x01	/* A new subdisk has arrived. */
#define G_RAID_SUBDISK_E_FAILED		0x02	/* A subdisk failed, but remains in volume. */
#define G_RAID_SUBDISK_E_DISCONNECTED	0x03	/* A subdisk removed from volume. */
#define G_RAID_SUBDISK_E_FIRST_TR_PRIVATE 0x80	/* Translation private events. */

#define G_RAID_SUBDISK_POS(sd)						\
    ((sd)->sd_disk ? ((sd)->sd_disk->d_last_offset - (sd)->sd_offset) : 0)
#define G_RAID_SUBDISK_TRACK_SIZE	(1 * 1024 * 1024)
#define G_RAID_SUBDISK_LOAD(sd)						\
    ((sd)->sd_disk ? ((sd)->sd_disk->d_load) : 0)
#define G_RAID_SUBDISK_LOAD_SCALE	256

struct g_raid_subdisk {
	struct g_raid_softc	*sd_softc;	/* Back-pointer to softc. */
	struct g_raid_disk	*sd_disk;	/* Where this subdisk lives. */
	struct g_raid_volume	*sd_volume;	/* Volume this subdisk is a part of. */
	off_t			 sd_offset;	/* Offset on the disk. */
	off_t			 sd_size;	/* Size on the disk. */
	u_int			 sd_pos;	/* Position in volume. */
	u_int			 sd_state;	/* Subdisk state. */
	off_t			 sd_rebuild_pos; /* Rebuild position. */
	int			 sd_recovery;	/* Count of recovery reqs. */
	TAILQ_ENTRY(g_raid_subdisk)	 sd_next; /* Next subdisk on disk. */
};
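
/*
 * Illustrative sketch (not part of the original header): one way a read
 * balancing policy could use the G_RAID_SUBDISK_POS(), G_RAID_SUBDISK_LOAD()
 * and G_RAID_SUBDISK_TRACK_SIZE macros above, preferring the head that is
 * already close to the request and otherwise the less loaded disk.  The
 * function and variable names are hypothetical.
 */
#if 0
static struct g_raid_subdisk *
g_raid_example_best_subdisk(struct g_raid_subdisk *sda,
    struct g_raid_subdisk *sdb, off_t offset)
{
	off_t posa, posb;

	/* Distance of each disk's last head position from the request. */
	posa = G_RAID_SUBDISK_POS(sda) - offset;
	posb = G_RAID_SUBDISK_POS(sdb) - offset;
	if (posa < 0)
		posa = -posa;
	if (posb < 0)
		posb = -posb;
	/* If either head is within a "track" of the request, take the nearer one. */
	if (posa < G_RAID_SUBDISK_TRACK_SIZE || posb < G_RAID_SUBDISK_TRACK_SIZE)
		return (posa <= posb ? sda : sdb);
	/* Otherwise prefer the less loaded disk. */
	return (G_RAID_SUBDISK_LOAD(sda) <= G_RAID_SUBDISK_LOAD(sdb) ?
	    sda : sdb);
}
#endif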

#define G_RAID_MAX_SUBDISKS	16
#define G_RAID_MAX_VOLUMENAME	32

#define G_RAID_VOLUME_S_STARTING	0x00
#define G_RAID_VOLUME_S_BROKEN		0x01
#define G_RAID_VOLUME_S_DEGRADED	0x02
#define G_RAID_VOLUME_S_SUBOPTIMAL	0x03
#define G_RAID_VOLUME_S_OPTIMAL		0x04
#define G_RAID_VOLUME_S_UNSUPPORTED	0x05
#define G_RAID_VOLUME_S_STOPPED		0x06

#define G_RAID_VOLUME_S_ALIVE(s)			\
    ((s) == G_RAID_VOLUME_S_DEGRADED ||			\
     (s) == G_RAID_VOLUME_S_SUBOPTIMAL ||		\
     (s) == G_RAID_VOLUME_S_OPTIMAL)

#define G_RAID_VOLUME_E_DOWN		0x00
#define G_RAID_VOLUME_E_UP		0x01
#define G_RAID_VOLUME_E_START		0x10
#define G_RAID_VOLUME_E_STARTMD		0x11

#define G_RAID_VOLUME_RL_RAID0		0x00
#define G_RAID_VOLUME_RL_RAID1		0x01
#define G_RAID_VOLUME_RL_RAID3		0x03
#define G_RAID_VOLUME_RL_RAID4		0x04
#define G_RAID_VOLUME_RL_RAID5		0x05
#define G_RAID_VOLUME_RL_RAID6		0x06
#define G_RAID_VOLUME_RL_RAIDMDF	0x07
#define G_RAID_VOLUME_RL_RAID1E		0x11
#define G_RAID_VOLUME_RL_SINGLE		0x0f
#define G_RAID_VOLUME_RL_CONCAT		0x1f
#define G_RAID_VOLUME_RL_RAID5E		0x15
#define G_RAID_VOLUME_RL_RAID5EE	0x25
#define G_RAID_VOLUME_RL_RAID5R		0x35
#define G_RAID_VOLUME_RL_UNKNOWN	0xff

#define G_RAID_VOLUME_RLQ_NONE		0x00
#define G_RAID_VOLUME_RLQ_R1SM		0x00
#define G_RAID_VOLUME_RLQ_R1MM		0x01
#define G_RAID_VOLUME_RLQ_R3P0		0x00
#define G_RAID_VOLUME_RLQ_R3PN		0x01
#define G_RAID_VOLUME_RLQ_R4P0		0x00
#define G_RAID_VOLUME_RLQ_R4PN		0x01
#define G_RAID_VOLUME_RLQ_R5RA		0x00
#define G_RAID_VOLUME_RLQ_R5RS		0x01
#define G_RAID_VOLUME_RLQ_R5LA		0x02
#define G_RAID_VOLUME_RLQ_R5LS		0x03
#define G_RAID_VOLUME_RLQ_R6RA		0x00
#define G_RAID_VOLUME_RLQ_R6RS		0x01
#define G_RAID_VOLUME_RLQ_R6LA		0x02
#define G_RAID_VOLUME_RLQ_R6LS		0x03
#define G_RAID_VOLUME_RLQ_RMDFRA	0x00
#define G_RAID_VOLUME_RLQ_RMDFRS	0x01
#define G_RAID_VOLUME_RLQ_RMDFLA	0x02
#define G_RAID_VOLUME_RLQ_RMDFLS	0x03
#define G_RAID_VOLUME_RLQ_R1EA		0x00
#define G_RAID_VOLUME_RLQ_R1EO		0x01
#define G_RAID_VOLUME_RLQ_R5ERA		0x00
#define G_RAID_VOLUME_RLQ_R5ERS		0x01
#define G_RAID_VOLUME_RLQ_R5ELA		0x02
#define G_RAID_VOLUME_RLQ_R5ELS		0x03
#define G_RAID_VOLUME_RLQ_R5EERA	0x00
#define G_RAID_VOLUME_RLQ_R5EERS	0x01
#define G_RAID_VOLUME_RLQ_R5EELA	0x02
#define G_RAID_VOLUME_RLQ_R5EELS	0x03
#define G_RAID_VOLUME_RLQ_R5RRA		0x00
#define G_RAID_VOLUME_RLQ_R5RRS		0x01
#define G_RAID_VOLUME_RLQ_R5RLA		0x02
#define G_RAID_VOLUME_RLQ_R5RLS		0x03
#define G_RAID_VOLUME_RLQ_UNKNOWN	0xff

struct g_raid_volume;

struct g_raid_volume {
	struct g_raid_softc	*v_softc;	/* Back-pointer to softc. */
	struct g_provider	*v_provider;	/* GEOM provider. */
	struct g_raid_subdisk	 v_subdisks[G_RAID_MAX_SUBDISKS];
						/* Subdisks of this volume. */
	void			*v_md_data;	/* Volume's metadata storage. */
	struct g_raid_tr_object	*v_tr;		/* Transformation object. */
	char			 v_name[G_RAID_MAX_VOLUMENAME];
						/* Volume name. */
	u_int			 v_state;	/* Volume state. */
	u_int			 v_raid_level;	/* Array RAID level. */
	u_int			 v_raid_level_qualifier; /* RAID level qualifier. */
	u_int			 v_disks_count;	/* Number of disks in array. */
	u_int			 v_mdf_pdisks;	/* Number of parity disks
						   in RAIDMDF array. */
	uint16_t		 v_mdf_polynomial; /* Polynomial for RAIDMDF. */
	uint8_t			 v_mdf_method;	/* Generation method for RAIDMDF. */
	u_int			 v_strip_size;	/* Array strip size. */
	u_int			 v_rotate_parity; /* Rotate RAID5R parity
						   after number of stripes. */
	u_int			 v_sectorsize;	/* Volume sector size. */
	off_t			 v_mediasize;	/* Volume media size. */
	struct bio_queue_head	 v_inflight;	/* In-flight write requests. */
	struct bio_queue_head	 v_locked;	/* Blocked I/O requests. */
	LIST_HEAD(, g_raid_lock) v_locks;	 /* List of locked regions. */
	int			 v_pending_lock; /* Writes to locked region. */
	int			 v_dirty;	/* Volume is DIRTY. */
	struct timeval		 v_last_done;	/* Time of the last I/O. */
	time_t			 v_last_write;	/* Time of the last write. */
	u_int			 v_writes;	/* Number of active writes. */
	struct root_hold_token	*v_rootmount;	/* Root mount delay token. */
	int			 v_starting;	/* Volume is starting. */
	int			 v_stopping;	/* Volume is stopping. */
	int			 v_provider_open; /* Number of opens. */
	int			 v_global_id;	/* Global volume ID (rX). */
	int			 v_read_only;	/* Volume is read-only. */
	TAILQ_ENTRY(g_raid_volume)	 v_next; /* List of volumes entry. */
	LIST_ENTRY(g_raid_volume)	 v_global_next; /* Global list entry. */
};

#define G_RAID_NODE_E_WAKE	0x00
#define G_RAID_NODE_E_START	0x01

struct g_raid_softc {
	struct g_raid_md_object	*sc_md;		/* Metadata object. */
	struct g_geom		*sc_geom;	/* GEOM class instance. */
	uint64_t		 sc_flags;	/* Additional flags. */
	TAILQ_HEAD(, g_raid_volume)	 sc_volumes;	/* List of volumes. */
	TAILQ_HEAD(, g_raid_disk)	 sc_disks;	/* List of disks. */
	struct sx		 sc_lock;	/* Main node lock. */
	struct proc		*sc_worker;	/* Worker process. */
	struct mtx		 sc_queue_mtx;	/* Worker queues lock. */
	TAILQ_HEAD(, g_raid_event) sc_events;	/* Worker events queue. */
	struct bio_queue_head	 sc_queue;	/* Worker I/O queue. */
	int			 sc_stopping;	/* Node is stopping. */
};
#define	sc_name	sc_geom->name

SYSCTL_DECL(_kern_geom_raid);

/*
 * KOBJ parent class of metadata processing modules.
 */
struct g_raid_md_class {
	KOBJ_CLASS_FIELDS;
	int		 mdc_enable;
	int		 mdc_priority;
	LIST_ENTRY(g_raid_md_class) mdc_list;
};

/*
 * KOBJ instance of metadata processing module.
 */
struct g_raid_md_object {
	KOBJ_FIELDS;
	struct g_raid_md_class	*mdo_class;
	struct g_raid_softc	*mdo_softc;	/* Back-pointer to softc. */
};

int g_raid_md_modevent(module_t, int, void *);

#define	G_RAID_MD_DECLARE(name, label)				\
    static moduledata_t g_raid_md_##name##_mod = {		\
	"g_raid_md_" __XSTRING(name),				\
	g_raid_md_modevent,					\
	&g_raid_md_##name##_class				\
    };								\
    DECLARE_MODULE(g_raid_md_##name, g_raid_md_##name##_mod,	\
	SI_SUB_DRIVERS, SI_ORDER_SECOND);			\
    MODULE_DEPEND(g_raid_md_##name, geom_raid, 0, 0, 0);	\
    SYSCTL_NODE(_kern_geom_raid, OID_AUTO, name, CTLFLAG_RD,	\
	NULL, label " metadata module");			\
    SYSCTL_INT(_kern_geom_raid_##name, OID_AUTO, enable,	\
	CTLFLAG_RWTUN, &g_raid_md_##name##_class.mdc_enable, 0,	\
	"Enable " label " metadata format taste")
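
/*
 * Illustrative sketch (not part of the original header): how a metadata
 * module would typically define its kobj class and declare itself with
 * G_RAID_MD_DECLARE().  The "example" module name and its method table
 * are hypothetical.
 */
#if 0
static struct g_raid_md_class g_raid_md_example_class = {
	"Example",				/* Class name. */
	g_raid_md_example_methods,		/* Hypothetical kobj method table. */
	sizeof(struct g_raid_md_object),	/* Instance size. */
	.mdc_enable = 1,
	.mdc_priority = 100
};
G_RAID_MD_DECLARE(example, "Example");
#endif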

/*
 * KOBJ parent class of data transformation modules.
 */
struct g_raid_tr_class {
	KOBJ_CLASS_FIELDS;
	int		 trc_enable;
	int		 trc_priority;
	int		 trc_accept_unmapped;
	LIST_ENTRY(g_raid_tr_class) trc_list;
};

/*
 * KOBJ instance of data transformation module.
 */
struct g_raid_tr_object {
	KOBJ_FIELDS;
	struct g_raid_tr_class	*tro_class;
	struct g_raid_volume 	*tro_volume;	/* Back-pointer to volume. */
};

int g_raid_tr_modevent(module_t, int, void *);

#define	G_RAID_TR_DECLARE(name, label)				\
    static moduledata_t g_raid_tr_##name##_mod = {		\
	"g_raid_tr_" __XSTRING(name),				\
	g_raid_tr_modevent,					\
	&g_raid_tr_##name##_class				\
    };								\
    DECLARE_MODULE(g_raid_tr_##name, g_raid_tr_##name##_mod,	\
	SI_SUB_DRIVERS, SI_ORDER_FIRST);			\
    MODULE_DEPEND(g_raid_tr_##name, geom_raid, 0, 0, 0);	\
    SYSCTL_NODE(_kern_geom_raid, OID_AUTO, name, CTLFLAG_RD,	\
	NULL, label " transformation module");			\
    SYSCTL_INT(_kern_geom_raid_##name, OID_AUTO, enable,	\
	CTLFLAG_RWTUN, &g_raid_tr_##name##_class.trc_enable, 0,	\
	"Enable " label " transformation module taste")
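
/*
 * Illustrative sketch (not part of the original header): how a
 * transformation module would typically define its kobj class and declare
 * itself with G_RAID_TR_DECLARE().  The "example" module name and its
 * method table are hypothetical.
 */
#if 0
static struct g_raid_tr_class g_raid_tr_example_class = {
	"EXAMPLE",				/* Class name. */
	g_raid_tr_example_methods,		/* Hypothetical kobj method table. */
	sizeof(struct g_raid_tr_object),	/* Instance size. */
	.trc_enable = 1,
	.trc_priority = 100,
	.trc_accept_unmapped = 1
};
G_RAID_TR_DECLARE(example, "EXAMPLE");
#endif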

const char * g_raid_volume_level2str(int level, int qual);
int g_raid_volume_str2level(const char *str, int *level, int *qual);
const char * g_raid_volume_state2str(int state);
const char * g_raid_subdisk_state2str(int state);
const char * g_raid_disk_state2str(int state);

struct g_raid_softc * g_raid_create_node(struct g_class *mp,
    const char *name, struct g_raid_md_object *md);
int g_raid_create_node_format(const char *format, struct gctl_req *req,
    struct g_geom **gp);
struct g_raid_volume * g_raid_create_volume(struct g_raid_softc *sc,
    const char *name, int id);
struct g_raid_disk * g_raid_create_disk(struct g_raid_softc *sc);
const char * g_raid_get_diskname(struct g_raid_disk *disk);
void g_raid_get_disk_info(struct g_raid_disk *disk);

int g_raid_start_volume(struct g_raid_volume *vol);

int g_raid_destroy_node(struct g_raid_softc *sc, int worker);
int g_raid_destroy_volume(struct g_raid_volume *vol);
int g_raid_destroy_disk(struct g_raid_disk *disk);

void g_raid_iodone(struct bio *bp, int error);
void g_raid_subdisk_iostart(struct g_raid_subdisk *sd, struct bio *bp);
int g_raid_subdisk_kerneldump(struct g_raid_subdisk *sd,
    void *virtual, vm_offset_t physical, off_t offset, size_t length);

struct g_consumer *g_raid_open_consumer(struct g_raid_softc *sc,
    const char *name);
void g_raid_kill_consumer(struct g_raid_softc *sc, struct g_consumer *cp);

void g_raid_report_disk_state(struct g_raid_disk *disk);
void g_raid_change_disk_state(struct g_raid_disk *disk, int state);
void g_raid_change_subdisk_state(struct g_raid_subdisk *sd, int state);
void g_raid_change_volume_state(struct g_raid_volume *vol, int state);

void g_raid_write_metadata(struct g_raid_softc *sc, struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk);
void g_raid_fail_disk(struct g_raid_softc *sc,
    struct g_raid_subdisk *sd, struct g_raid_disk *disk);

void g_raid_tr_flush_common(struct g_raid_tr_object *tr, struct bio *bp);
int g_raid_tr_kerneldump_common(struct g_raid_tr_object *tr,
    void *virtual, vm_offset_t physical, off_t offset, size_t length);

u_int g_raid_ndisks(struct g_raid_softc *sc, int state);
u_int g_raid_nsubdisks(struct g_raid_volume *vol, int state);
u_int g_raid_nopens(struct g_raid_softc *sc);
struct g_raid_subdisk * g_raid_get_subdisk(struct g_raid_volume *vol,
    int state);
#define	G_RAID_DESTROY_SOFT		0
#define	G_RAID_DESTROY_DELAYED		1
#define	G_RAID_DESTROY_HARD		2
int g_raid_destroy(struct g_raid_softc *sc, int how);
int g_raid_event_send(void *arg, int event, int flags);
int g_raid_lock_range(struct g_raid_volume *vol, off_t off, off_t len,
    struct bio *ignore, void *argp);
int g_raid_unlock_range(struct g_raid_volume *vol, off_t off, off_t len);
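
/*
 * Illustrative sketch (not part of the original header): how a
 * transformation module might lock the region it is about to rebuild and
 * release it when the copy completes.  The 64KB window size, the use of the
 * request bio as the callback argument, and the function names are
 * hypothetical.
 */
#if 0
static void
g_raid_example_lock_rebuild_window(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd, struct bio *bp)
{

	/* Defer regular writes to this range while it is being copied. */
	g_raid_lock_range(vol, sd->sd_rebuild_pos, 64 * 1024, NULL, bp);
}

static void
g_raid_example_unlock_rebuild_window(struct g_raid_volume *vol,
    struct g_raid_subdisk *sd)
{

	/* Allow deferred writes to the copied range to proceed. */
	g_raid_unlock_range(vol, sd->sd_rebuild_pos, 64 * 1024);
}
#endif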

g_ctl_req_t g_raid_ctl;
#endif	/* _KERNEL */

#endif	/* !_G_RAID_H_ */