/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_MD_RAID_H
#define	_SYS_MD_RAID_H

#include <sys/lvm/mdvar.h>
#include <sys/lvm/md_rename.h>

#ifdef	__cplusplus
extern "C" {
#endif


/*
 * magic numbers used to validate the raid unit, parent save, child
 * save, pre-write header and buffer structures
 */
#define	RAID_UNMAGIC		0xBADBABE0
#define	RAID_PSMAGIC		0xBADBABE1
#define	RAID_CSMAGIC		0xBADBABE2
#define	RAID_PWMAGIC		0xBADBABE3
#define	RAID_BUFMAGIC		0xBADBABE4
/*
 * These are the major constants for the definition of a raid device
 */
#define	PWCNT_MIN	10	/* minimum # prewrites */
#define	PWCNT_MAX	100	/* maximum # prewrites */
#define	RAID_MIN_INTERLACE	(DEV_BSIZE * 2)

#define	UNIT_STATE(un) ((un)->un_state)
#define	COLUMN_STATE(un, column) ((un)->un_column[(column)].un_devstate)

#define	COLUMN_STATE_ONLY(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_INIT) || \
	((un)->un_column[(column)].un_devstate == RCS_OKAY) || \
	((un)->un_column[(column)].un_devstate == RCS_ERRED) || \
	((un)->un_column[(column)].un_devstate == RCS_RESYNC) || \
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED) || \
	((un)->un_column[(column)].un_devstate == RCS_REGEN))

#define	COLUMN_ISUP(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_OKAY) || \
	((un)->un_column[(column)].un_devstate == RCS_RESYNC) || \
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED))

#define	COLUMN_ISOKAY(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_OKAY))

#define	COLUMN_ISLASTERR(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED))

#define	WRITE_ALT(un, column) ( \
	((un)->un_column[(column)].un_alt_dev != NODEV64) && \
	(((un)->un_column[(column)].un_devflags & MD_RAID_WRITE_ALT)))

#define	HOTSPARED(un, column) ( \
	((un)->un_column[(column)].un_hs_id != 0))

#define	OVERLAPED(blk1, lblk1, blk2, lblk2) (				\
	(((blk1 > lblk2) ? 1 : 0) ||					\
	((lblk1 < blk2) ? 1 : 0)))

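/*
 * Illustrative sketch (not part of this interface): the column state
 * macros above are applied per column of a unit.  The helper below,
 * which simply counts the columns that are currently up, is a
 * hypothetical example; its name and shape are assumptions, not code
 * from this driver.
 *
 *	static int
 *	example_columns_up(mr_unit_t *un)
 *	{
 *		int	col;
 *		int	up = 0;
 *
 *		for (col = 0; col < (int)un->un_totalcolumncnt; col++) {
 *			if (COLUMN_ISUP(un, col))
 *				up++;
 *		}
 *		return (up);
 *	}
 */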

/*
 * Note: magic is needed only to set rpw_magic, not rpw_magic_ext!
 */
#define	RAID_FILLIN_RPW(buf, un, sum, colnum,				\
			blkno, blkcnt, id,				\
			colcount, col, magic) {				\
	if ((un)->c.un_revision & MD_64BIT_META_DEV) {			\
		raid_pwhdr_t *rpw64	= (raid_pwhdr_t *)(void *)(buf);\
		rpw64->rpw_magic	= magic;			\
		rpw64->rpw_sum		= sum;				\
		rpw64->rpw_columnnum	= colnum;			\
		rpw64->rpw_blkno	= (diskaddr_t)blkno;		\
		rpw64->rpw_blkcnt	= blkcnt;			\
		rpw64->rpw_id		= id;				\
		rpw64->rpw_colcount	= colcount;			\
		rpw64->rpw_column	= col;				\
		rpw64->rpw_unit		= MD_SID(un);			\
		rpw64->rpw_magic_ext	= RAID_PWMAGIC;			\
		rpw64->rpw_origcolumncnt  = (un)->un_origcolumncnt;	\
		rpw64->rpw_totalcolumncnt  = (un)->un_totalcolumncnt;	\
		rpw64->rpw_segsize	= (un)->un_segsize;		\
		rpw64->rpw_segsincolumn	= (diskaddr_t)((un)->un_segsincolumn);\
		rpw64->rpw_pwcnt	= (un)->un_pwcnt;		\
		rpw64->rpw_pwsize	= (un)->un_pwsize;		\
		rpw64->rpw_devstart	=				\
			(diskaddr_t)((un)->un_column[col].un_orig_devstart);\
		rpw64->rpw_pwstart	=				\
			(diskaddr_t)((un)->un_column[col].un_orig_pwstart);\
	} else {							\
		raid_pwhdr32_od_t *rpw32 =				\
				(raid_pwhdr32_od_t *)(void *)(buf);	\
		rpw32->rpw_magic	= magic;			\
		rpw32->rpw_sum		= sum;				\
		rpw32->rpw_columnnum	= colnum;			\
		rpw32->rpw_blkno	= (daddr_t)blkno;		\
		rpw32->rpw_blkcnt	= blkcnt;			\
		rpw32->rpw_id		= id;				\
		rpw32->rpw_colcount	= colcount;			\
		rpw32->rpw_column	= col;				\
		rpw32->rpw_unit		= MD_SID(un);			\
		rpw32->rpw_magic_ext	= RAID_PWMAGIC;			\
		rpw32->rpw_origcolumncnt  = (un)->un_origcolumncnt;	\
		rpw32->rpw_totalcolumncnt = (un)->un_totalcolumncnt;	\
		rpw32->rpw_segsize	= (daddr_t)((un)->un_segsize);	\
		rpw32->rpw_segsincolumn	= (daddr_t)((un)->un_segsincolumn);\
		rpw32->rpw_pwcnt	= (un)->un_pwcnt;		\
		rpw32->rpw_pwsize	= (un)->un_pwsize;		\
		rpw32->rpw_devstart	=				\
			(daddr_t)((un)->un_column[col].un_orig_devstart);\
		rpw32->rpw_pwstart	=				\
			(daddr_t)((un)->un_column[col].un_orig_pwstart);\
	}								\
}
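/*
 * Illustrative sketch (not part of this interface): filling in a
 * pre-write header before writing it to a column's pre-write area.
 * The buffer, checksum and count values below are assumptions for the
 * example only; the macro itself selects the 64-bit or the old 32-bit
 * on-disk layout from (un)->c.un_revision.
 *
 *	char	pwbuf[DEV_BSIZE];
 *
 *	RAID_FILLIN_RPW(pwbuf, un, checksum, cs->cs_dcolumn,
 *	    cs->cs_blkno, cs->cs_blkcnt, cs->cs_pwid,
 *	    2, cs->cs_dcolumn, RAID_PWMAGIC);
 */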

#define	RAID_CONVERT_RPW(rpw32, rpw64) {				\
	(rpw64)->rpw_magic		= (rpw32)->rpw_magic;		\
	(rpw64)->rpw_sum		= (rpw32)->rpw_sum;		\
	(rpw64)->rpw_columnnum		= (rpw32)->rpw_columnnum;	\
	(rpw64)->rpw_blkno		= (rpw32)->rpw_blkno;		\
	(rpw64)->rpw_blkcnt		= (rpw32)->rpw_blkcnt;		\
	(rpw64)->rpw_id			= (rpw32)->rpw_id;		\
	(rpw64)->rpw_colcount		= (rpw32)->rpw_colcount;	\
	(rpw64)->rpw_column		= (rpw32)->rpw_column;		\
	(rpw64)->rpw_unit		= (rpw32)->rpw_unit;		\
	(rpw64)->rpw_magic_ext		= (rpw32)->rpw_magic_ext;	\
	(rpw64)->rpw_origcolumncnt	= (rpw32)->rpw_origcolumncnt;	\
	(rpw64)->rpw_totalcolumncnt	= (rpw32)->rpw_totalcolumncnt;	\
	(rpw64)->rpw_segsize		= (rpw32)->rpw_segsize;		\
	(rpw64)->rpw_segsincolumn	= (rpw32)->rpw_segsincolumn;	\
	(rpw64)->rpw_pwcnt		= (rpw32)->rpw_pwcnt;		\
	(rpw64)->rpw_pwsize		= (rpw32)->rpw_pwsize;		\
	(rpw64)->rpw_devstart		= (rpw32)->rpw_devstart;	\
	(rpw64)->rpw_pwstart		= (rpw32)->rpw_pwstart;		\
}
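/*
 * Illustrative sketch (not part of this interface): promoting an
 * old-format pre-write header that was read from disk into the common
 * 64-bit form used incore.  Where the 32-bit header comes from is an
 * assumption for the example only.
 *
 *	raid_pwhdr32_od_t	*rpw32 = (raid_pwhdr32_od_t *)(void *)buf;
 *	raid_pwhdr_t		rpw64;
 *
 *	RAID_CONVERT_RPW(rpw32, &rpw64);
 */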

typedef struct mr_scoreboard {
	int		sb_column;
	int		sb_flags;
	diskaddr_t	sb_start_blk;
	diskaddr_t	sb_last_blk;
	void		*sb_cs;
} mr_scoreboard_t;

#define	SB_AVAIL	(0x00000001)	/* usable and valid blocks */
#define	SB_INUSE	(0x00000002)	/* being used */
#define	SB_UNUSED	(0x00000004)	/* usable and no valid blocks */
#define	SB_INVAL_PEND	(0x00000008)	/* being invalidated */

typedef struct mr_pw_reserve {
	uint_t		pw_magic;
	int		pw_column;
	int		pw_free;
	mr_scoreboard_t	pw_sb[1];
} mr_pw_reserve_t;

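/*
 * Illustrative sketch (not part of this interface): pw_sb[1] is a
 * one-element placeholder for a variably sized scoreboard, so a
 * reservation covering un_pwcnt slots would be sized along the lines
 * below.  The allocation shown is an assumption for the example only.
 *
 *	size_t		size = sizeof (mr_pw_reserve_t) +
 *	    (sizeof (mr_scoreboard_t) * (un->un_pwcnt - 1));
 *	mr_pw_reserve_t	*pw = kmem_zalloc(size, KM_SLEEP);
 *
 *	pw->pw_magic = RAID_PWMAGIC;
 *	pw->pw_column = col;
 *	pw->pw_free = un->un_pwcnt;
 */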

#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif
typedef struct mr_column {
	rcs_state_t	un_devstate;
	rcs_flags_t	un_devflags;
	md_timeval32_t	un_devtimestamp; /* time of last state change, 32 bit */

	mddb_recid_t	un_hs_id;
	diskaddr_t	un_hs_pwstart;
	diskaddr_t	un_hs_devstart;
	mdkey_t		un_hs_key;


	md_dev64_t	un_orig_dev;		/* original device, 64 bit */
	mdkey_t		un_orig_key;
	diskaddr_t	un_orig_pwstart;
	diskaddr_t	un_orig_devstart;

	md_dev64_t	un_dev;			/* current read/write dev */
	diskaddr_t	un_pwstart;
	diskaddr_t	un_devstart;

	md_dev64_t	un_alt_dev;		/* write to if resync */
	diskaddr_t	un_alt_pwstart;
	diskaddr_t	un_alt_devstart;
} mr_column_t;

/*
 * mr_column32_od is for old 32 bit format only
 */
typedef struct mr_column32_od {
	rcs_state_t	un_devstate;
	rcs_flags_t	un_devflags;
	struct timeval32 un_devtimestamp;	/* time of last state change */
	caddr32_t	xx_un_pw_reserve;

	mddb_recid_t	un_hs_id;
	daddr32_t	un_hs_pwstart;
	daddr32_t	un_hs_devstart;
	mdkey_t		un_hs_key;

	dev32_t		un_orig_dev;	/* original device */
	mdkey_t		un_orig_key;
	daddr32_t	un_orig_pwstart;
	daddr32_t	un_orig_devstart;

	dev32_t		un_dev;		/* current read/write dev */
	daddr32_t	un_pwstart;
	daddr32_t	un_devstart;

	dev32_t		un_alt_dev;	/* write to if resync */
	daddr32_t	un_alt_pwstart;
	daddr32_t	un_alt_devstart;
} mr_column32_od_t;


/*
 * In-core only element structures
 */
typedef struct mr_column_ic {
	mr_pw_reserve_t *un_pw_reserve;
} mr_column_ic_t;

/*
 * Do not rearrange elements as mutexes must be aligned on
 * an 8 byte boundary. Element _t_un_linlck_mx corresponds to
 * _t_un_linlck_cv and element _t_un_mx corresponds to _t_un_cv
 */
typedef struct mr_unit_ic {
	caddr_t			_t_un_pbuffer;
	caddr_t			_t_un_dbuffer;
	struct md_raidcs	*_t_un_linlck_chn;
	kmutex_t		_t_un_linlck_mx;
	kmutex_t		_t_un_mx;
	kcondvar_t		_t_un_linlck_cv;
	kcondvar_t		_t_un_cv;
	mr_column_ic_t		*_t_un_column_ic;
} mr_unit_ic_t;

typedef struct mr_unit {
	mdc_unit_t	c;
	int		un_raid_res;
	uint_t		un_magic;
	rus_state_t	un_state;
	md_timeval32_t	un_timestamp;	/* 32 bit fixed size */
	uint_t		un_origcolumncnt;
	uint_t		un_totalcolumncnt;
	uint_t		un_rflags;
	uint_t		un_segsize;
	diskaddr_t	un_segsincolumn;
	uint_t		un_maxio;	/* in blks */
	uint_t		un_iosize;	/* in blks */
	uint_t		un_linlck_flg;
	uint_t		un_pwcnt;
	uint_t		un_pwsize;
	long long	un_pwid;
	uint_t		un_percent_done;
	uint_t		un_resync_copysize;	/* in blks */
	hsp_t		un_hsp_id;
	/*
	 * This union has to begin at an 8 byte aligned address.
	 * If not, this structure has different sizes in 32 and 64 bit
	 * environments, since in a 64 bit environment the compiler
	 * adds padding before a long long if it doesn't start at an
	 * 8 byte aligned address.
	 * Be careful if you add or remove structure elements before it!
	 */

	union	{
		struct	{
			diskaddr_t	_t_un_resync_line_index;
			uint_t		_t_un_resync_segment;
			int		_t_un_resync_index;
		} _resync;
		struct	{
			diskaddr_t	_t_un_grow_tb;
			uint_t		_t_un_init_colcnt;
			u_longlong_t	_t_un_init_iocnt;
		} _init;
	} _t_un;

	/*
	 * This union has to begin at an 8 byte aligned address.
	 * Be careful if you add or remove structure elements before it!
	 */
	union {
		mr_unit_ic_t	*_mr_ic;
		uint_t		_mr_ic_pad[2];
	} un_mr_ic;

	mr_column_t	un_column[1];
} mr_unit_t;

#define	mr_ic		un_mr_ic._mr_ic
#define	un_pbuffer	mr_ic->_t_un_pbuffer
#define	un_dbuffer	mr_ic->_t_un_dbuffer
#define	un_linlck_chn	mr_ic->_t_un_linlck_chn
#define	un_linlck_mx	mr_ic->_t_un_linlck_mx
#define	un_linlck_cv	mr_ic->_t_un_linlck_cv
#define	un_mx		mr_ic->_t_un_mx
#define	un_cv		mr_ic->_t_un_cv
#define	un_column_ic	mr_ic->_t_un_column_ic

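/*
 * Illustrative sketch (not part of this interface): the shorthands
 * above route through the incore pointer, so un->un_mx below really
 * resolves to un->un_mr_ic._mr_ic->_t_un_mx.  The wait condition is
 * an assumption for the example only.
 *
 *	mutex_enter(&un->un_mx);
 *	while (example_must_wait(un))
 *		cv_wait(&un->un_cv, &un->un_mx);
 *	mutex_exit(&un->un_mx);
 */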
/*
 * For old 32 bit format use only
 */
typedef struct mr_unit32_od {
	mdc_unit32_od_t		c;
	caddr32_t		xx_un_raid_res;
	uint_t			un_magic;
	rus_state_t		un_state;
	struct timeval32	un_timestamp;
	uint_t			un_origcolumncnt;
	uint_t			un_totalcolumncnt;
	uint_t			un_rflags;
	uint_t			un_segsize;
	uint_t			un_segsincolumn;
	uint_t			un_maxio;
	uint_t			un_iosize;
	caddr32_t		xx_un_pbuffer;
	caddr32_t		xx_un_dbuffer;
	uint_t			un_linlck_flg;
	caddr32_t		xx_un_linlck_chn;
	uint_t			un_pwcnt;
	uint_t			un_pwsize;
	long long		un_pwid;
	uint_t			un_rebuild_size;
	uint_t			un_percent_done;
	union   {
		struct  {
			uint_t	_t_un_resync_segment;
			int	_t_un_resync_index;
			uint_t	_t_un_resync_line_index;
		} _resync;
		struct  {
			daddr32_t _t_un_grow_tb;
			uint_t  _t_un_init_colcnt;
			uint_t  _t_un_init_iocnt;
		} _init;
	} _t_un;
	uint_t			un_resync_copysize;

	/*
	 * This spot is 8 byte aligned!!!
	 * Don't change this arrangement.
	 */
	union {
		struct {
			mr_unit_ic_t *_t_mr_ic;
		} _mric;
		struct {
			uint_t xx_un_linlck_mx[2];
		} _lckmx;
	} _unic;

	short			xx_un_linlck_cv;
	int			xx_un_mx[2];
	short			xx_un_cv;
	hsp_t			un_hsp_id;
	mr_column32_od_t	un_column[1];
} mr_unit32_od_t;

typedef struct raid_pwhdr {
	uint_t		rpw_magic;
	uint_t		rpw_sum;
	int		rpw_columnnum;
	diskaddr_t	rpw_blkno;
	uint_t		rpw_blkcnt;
	long long	rpw_id;
	uint_t		rpw_colcount;
	uint_t		rpw_column;
	uint_t		rpw_unit;
	uint_t		rpw_magic_ext;
	uint_t		rpw_origcolumncnt;
	uint_t		rpw_totalcolumncnt;
	uint_t		rpw_segsize;
	diskaddr_t	rpw_segsincolumn;
	uint_t		rpw_pwcnt;
	uint_t		rpw_pwsize;
	diskaddr_t	rpw_devstart;
	diskaddr_t	rpw_pwstart;
	char		rpw_filler[12];
} raid_pwhdr_t;

/*
 * For old 32 bit pre-write area
 */
typedef struct raid_pwhdr32_od {
	uint_t		rpw_magic;
	uint_t		rpw_sum;
	int		rpw_columnnum;
	daddr32_t	rpw_blkno;
	daddr32_t	rpw_blkcnt;
	long long	rpw_id;
	uint_t		rpw_colcount;
	uint_t		rpw_column;
	uint_t		rpw_unit;
	uint_t		rpw_magic_ext;
	uint_t		rpw_origcolumncnt;
	uint_t		rpw_totalcolumncnt;
	uint_t		rpw_segsize;
	uint_t		rpw_segsincolumn;
	uint_t		rpw_pwcnt;
	uint_t		rpw_pwsize;
	uint_t		rpw_devstart;
	uint_t		rpw_pwstart;
	rus_state_t	rpw_unit_state;
	rcs_state_t	rpw_next_column_state;
	rcs_state_t	rpw_prev_column_state;
} raid_pwhdr32_od_t;
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif

#ifdef	_KERNEL

/*
 * The buffer header is bp_mapin'ed only if it is needed.  It is needed on
 * all writes and on some reads.  ps_mapin is non-zero if the buffer is
 * mapped in.  ps_mapin_mx protects ps_mapin.  The protocol for usage is
 *
 * 1) check for non-zero and continue if non-zero
 * 2) acquire the ps_mapin_mx
 * 3) recheck for non-zero and continue if non-zero
 * 4) bp_mapin
 * 5) set ps_mapin to non-zero
 * 6) drop ps_mapin_mx
 *
 * The reason for this is to avoid the mutex when possible.
 */
typedef struct md_raidps {			/* raid parent save */
	DAEMON_QUEUE
	uint_t		ps_magic;
	mr_unit_t	*ps_un;
	mdi_unit_t	*ps_ui;
	buf_t		*ps_bp;
	caddr_t		ps_addr;
	int		ps_flags;
	int		ps_error;
	int		ps_frags;
	int		ps_pwfrags;
	int		ps_mapin;	/* buffer mapped in if non-zero */
	kmutex_t	ps_mx;
	kmutex_t	ps_mapin_mx;	/* protects ps_mapin */
} md_raidps_t;
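/*
 * Illustrative sketch (not part of this interface): a caller following
 * the mapin protocol described above, given an already initialized
 * parent save area.  Wrapping the protocol in a helper like this is an
 * assumption for the example only.
 *
 *	static void
 *	example_ensure_mapin(md_raidps_t *ps)
 *	{
 *		if (ps->ps_mapin != 0)
 *			return;
 *		mutex_enter(&ps->ps_mapin_mx);
 *		if (ps->ps_mapin == 0) {
 *			bp_mapin(ps->ps_bp);
 *			ps->ps_mapin = 1;
 *		}
 *		mutex_exit(&ps->ps_mapin_mx);
 *	}
 */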

/* flags for parent save area */

#define	MD_RPS_ERROR		0x0001
#define	MD_RPS_READ		0x0020
#define	MD_RPS_WRITE		0x0040
#define	MD_RPS_DONE		0x0080
#define	MD_RPS_INUSE		0x0100
#define	MD_RPS_IODONE		0x0200
#define	MD_RPS_HSREQ		0x0400

/*
 * used in cs_stage to describe the type of io operation in progress
 */
enum	raid_io_stage {
		RAID_NONE = 0x0,
		RAID_READ_DONE = 0x1,
		RAID_WRITE_DONE = 0x2,
		RAID_PREWRITE_DONE = 0x4,
		RAID_WRITE_PONLY_DONE = 0x8,
		RAID_WRITE_DONLY_DONE = 0x10,
		RAID_LINE_PWDONE = 0x20
};

typedef struct md_raidcbuf {
	DAEMON_QUEUE
	uint_t			cbuf_magic;
	struct md_raidcbuf	*cbuf_next;		/* 0x10 */
	mr_unit_t		*cbuf_un;
	md_raidps_t		*cbuf_ps;
	int			cbuf_column;
	size_t			cbuf_bcount;		/* 0x20 */
	caddr_t			cbuf_buffer;
	int			cbuf_sum;
	int			cbuf_pwslot;
	int			cbuf_pwcnt;		/* 0x30 */
	int			cbuf_flags;
	buf_t			cbuf_bp;
	uint_t			cbuf_pad[4];
} md_raidcbuf_t;
#define	CBUF_PW_INVALIDATE	(0x00000001)
#define	CBUF_WRITE		(0x00000002)

typedef struct md_raidcs {
	DAEMON_QUEUE
	uint_t			cs_magic;
	minor_t			cs_mdunit;
	mr_unit_t		*cs_un;
	int			cs_flags;
	md_raidps_t		*cs_ps;
	diskaddr_t		cs_line;
	void			(*cs_call)();
	void			(*cs_error_call)();
	void			(*cs_retry_call)();
	struct md_raidcs	*cs_linlck_next;
	struct md_raidcs	*cs_linlck_prev;
	long long		cs_pwid;
	int			cs_dcolumn;
	int			cs_dpwslot;
	uint_t			cs_dflags;
	int			cs_pcolumn;
	int			cs_ppwslot;
	uint_t			cs_pflags;
	size_t			cs_bcount;
	uint_t			cs_blkcnt;
	diskaddr_t		cs_blkno;
	diskaddr_t		cs_lastblk;
	int			cs_loop;
	caddr_t			cs_addr;	/* base address of io */
	off_t			cs_offset;	/* offset into the base */
	caddr_t			cs_dbuffer;
	caddr_t			cs_pbuffer;
	int			cs_frags;
	int			cs_strategy_flag;
	void			*cs_strategy_private;
	md_raidcbuf_t		*cs_buflist;
	int			cs_error;
	int			cs_resync_check;
	int			cs_rstate;
	enum raid_io_stage	cs_stage;		/* current io stage */
	md_raidcbuf_t		*cs_pw_inval_list;

	kmutex_t		cs_mx;

	buf_t			cs_pbuf;
	uint_t			cs_pad1;
	buf_t			cs_hbuf;
	uint_t			cs_pad2;
	/* Add new structure members HERE!! */
	buf_t			cs_dbuf;
	/* DO NOT add structure members here; cs_dbuf is dynamically sized */
} md_raidcs_t;

/* value definitions for cs_resync_check */
#define	RCL_OKAY		0x01	/* write to both orig and alt */
#define	RCL_ERRED		0x08	/* treat column as RCS_ERRED */

#define	RCL_DATA_MASK		0x000000ff
#define	RCL_PARITY_MASK		0x0000ff00
#define	RCL_PARITY_OFFSET	8	/* ensure masks match offset */

#define	RCL_PARITY(value)	(((value) & RCL_PARITY_MASK) >> \
				    RCL_PARITY_OFFSET)

#define	RCL_DATA(value)		((value) & RCL_DATA_MASK)

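/*
 * Illustrative sketch (not part of this interface): cs_resync_check
 * keeps the data-column check in the low byte and the parity-column
 * check in the byte above it, so the two values can be packed and
 * pulled apart as below.  The local variable is an assumption for the
 * example only.
 *
 *	int	check = RCL_OKAY | (RCL_ERRED << RCL_PARITY_OFFSET);
 *
 *	ASSERT(RCL_DATA(check) == RCL_OKAY);
 *	ASSERT(RCL_PARITY(check) == RCL_ERRED);
 */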
/* value definitions for cs_flags */
#define	MD_RCS_ISCALL		0x000001	/* call cs_call in interrupt */
#define	MD_RCS_UNDBUF		0x000002	/* holding unit data buffer */
#define	MD_RCS_UNPBUF		0x000004	/* holding unit parity buffer */
#define	MD_RCS_MPBUF		0x000008
#define	MD_RCS_HAVE_PW_SLOTS	0x000010	/* pw slots gotten */
#define	MD_RCS_PWDONE		0x000040	/* pwfrags are decremented */
#define	MD_RCS_READER		0x000100	/* reader line lock needed */
#define	MD_RCS_WRITER		0x000200	/* writer line lock needed */
#define	MD_RCS_LLOCKD		0x000400	/* line lock held */
#define	MD_RCS_WAITING		0x000800	/* line lock waiting */
#define	MD_RCS_LINE		0x001000	/* full line write */
#define	MD_RCS_ERROR		0x010000	/* I/O error on this child */
#define	MD_RCS_RECOVERY		0x020000

/* value definitions for cs_pflags or cs_dflags */
#define	MD_RCS_ISUP		0x0002

/* value definitions for gcs_flags */
#define	MD_RGCS_ALLOCBUF	0x0001
/* returned value from raid_replay() */
#define	RAID_RPLY_SUCCESS	0x0000
#define	RAID_RPLY_ALLOCFAIL	0x0001
#define	RAID_RPLY_COMPREPLAY	0x0002
#define	RAID_RPLY_READONLY	0x0004
#define	RAID_RPLY_EIO		0x0008

typedef struct raid_rplybuf {
	caddr_t			rpl_data;
	buf_t			*rpl_buf;
} raid_rplybuf_t;

typedef struct raid_rplylst {
	struct raid_rplylst	*rpl_next;
	uint_t			rpl_colcnt;
	long long		rpl_id;
	int			rpl_column1;
	uint_t			rpl_slot1;
	raid_pwhdr_t		rpl_pwhdr1;
	int			rpl_column2;
	uint_t			rpl_slot2;
	raid_pwhdr_t		rpl_pwhdr2;
} raid_rplylst_t;

/* Externals from raid.c */
extern int	raid_build_incore(void *, int);
extern void	reset_raid(mr_unit_t *, minor_t, int);

/* Externals from raid_ioctl.c */
extern int	md_raid_ioctl(dev_t dev, int cmd, void *data,
		    int mode, IOLOCK *lockp);

/* rename named service functions */
md_ren_svc_t		raid_rename_check;
md_ren_svc_t		raid_rename_lock;
md_ren_void_svc_t	raid_rename_unlock;


/* redefinitions of the union shared by resync and init */
#define		un_resync_segment	_t_un._resync._t_un_resync_segment
#define		un_resync_index		_t_un._resync._t_un_resync_index
#define		un_resync_line_index	_t_un._resync._t_un_resync_line_index

#define		un_grow_tb		_t_un._init._t_un_grow_tb
#define		un_init_colcnt		_t_un._init._t_un_init_colcnt
#define		un_init_iocnt		_t_un._init._t_un_init_iocnt

#define	MD_RFLAG_NEEDBUF	(0x0001)
#define	MD_RFLAG_CLEAR		(0x0002)
#define	MD_RFLAG_KEEP		(0x0004)
#define	MD_RFLAG_NEEDPW		(0x0008)


extern void		raid_set_state(mr_unit_t *un, int col,
			    rcs_state_t new_state, int force);
extern int		raid_replay(mr_unit_t *un);
extern void		raid_commit(mr_unit_t *un, mddb_recid_t *extras);
extern char		*raid_unit_state(rus_state_t state);
extern intptr_t		raid_hotspares();
extern void		raid_hs_release(hs_cmds_t cmd, mr_unit_t *un,
			    mddb_recid_t *recids, int hs_index);
extern int		raid_internal_open(minor_t mnum, int flag, int otyp,
			    int oflags);
extern int		raid_internal_close(minor_t mnum, int otyp,
			    int init_pw, int cflags);
extern int		raid_build_pwslot(mr_unit_t *unit, int column_index);
extern void		raid_free_pwslot(mr_unit_t *unit, int column_index);
extern void		release_resync_request(minor_t mnum);
extern int		resync_request(minor_t mnum, int column_index,
				size_t copysize, md_error_t *ep);
extern int		raid_resync_unit(minor_t mnum, md_error_t *ep);
extern void		raid_line_reader_lock(md_raidcs_t *cs,
			    int resync_thread);
extern void		raid_line_exit(md_raidcs_t *cs);
extern int		raid_state_cnt(mr_unit_t *un, rcs_state_t state);
extern int		raid_build_pw_reservation(mr_unit_t *un,
				int colindex);
extern int		init_pw_area(mr_unit_t *un, md_dev64_t dev_to_write,
			    diskaddr_t pwstart, uint_t col);
extern void		init_buf(buf_t *bp, int flags, size_t size);
extern void		destroy_buf(buf_t *bp);
extern void		reset_buf(buf_t *bp, int flags, size_t size);
extern void		md_raid_strategy(buf_t *pb, int flag, void *private);
extern void		raid_free_pw_reservation(mr_unit_t *un,
				int colindex);
extern void		raid_fillin_rpw(mr_unit_t *un,
				raid_pwhdr_t *pwhdrp, int col);
#endif  /* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MD_RAID_H */