/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_MD_RAID_H
#define	_SYS_MD_RAID_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/lvm/mdvar.h>
#include <sys/lvm/md_rename.h>

#ifdef	__cplusplus
extern "C" {
#endif


/*
 * The following bits are used in the status word in the common section
 * of the unit structure: un_status
 */
#define	RAID_UNMAGIC	0xBADBABE0
#define	RAID_PSMAGIC	0xBADBABE1
#define	RAID_CSMAGIC	0xBADBABE2
#define	RAID_PWMAGIC	0xBADBABE3
#define	RAID_BUFMAGIC	0xBADBABE4
/*
 * These are the major constants for the definition of a raid device
 */
#define	PWCNT_MIN	10	/* minimum # prewrites */
#define	PWCNT_MAX	100	/* maximum # prewrites */
#define	RAID_MIN_INTERLACE	(DEV_BSIZE * 2)

#define	UNIT_STATE(un)		((un)->un_state)
#define	COLUMN_STATE(un, column)	((un)->un_column[(column)].un_devstate)

#define	COLUMN_STATE_ONLY(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_INIT) || \
	((un)->un_column[(column)].un_devstate == RCS_OKAY) || \
	((un)->un_column[(column)].un_devstate == RCS_ERRED) || \
	((un)->un_column[(column)].un_devstate == RCS_RESYNC) || \
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED) || \
	((un)->un_column[(column)].un_devstate == RCS_REGEN))

#define	COLUMN_ISUP(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_OKAY) || \
	((un)->un_column[(column)].un_devstate == RCS_RESYNC) || \
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED))

#define	COLUMN_ISOKAY(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_OKAY))

#define	COLUMN_ISLASTERR(un, column) (\
	((un)->un_column[(column)].un_devstate == RCS_LAST_ERRED))

#define	WRITE_ALT(un, column) ( \
	((un)->un_column[(column)].un_alt_dev != NODEV64) && \
	(((un)->un_column[(column)].un_devflags & MD_RAID_WRITE_ALT)))

#define	HOTSPARED(un, column) ( \
	((un)->un_column[(column)].un_hs_id != 0))

#define	OVERLAPED(blk1, lblk1, blk2, lblk2) ( \
	(((blk1 > lblk2) ? 1 : 0) || \
	((lblk1 < blk2) ? 1 : 0)))


/*
 * Note: magic is needed only to set rpw_magic, not rpw_magic_ext!
 */
#define	RAID_FILLIN_RPW(buf, un, sum, colnum, \
			blkno, blkcnt, id, \
			colcount, col, magic) { \
	if ((un)->c.un_revision & MD_64BIT_META_DEV) { \
		raid_pwhdr_t *rpw64 = (raid_pwhdr_t *)(void *)(buf); \
		rpw64->rpw_magic = magic; \
		rpw64->rpw_sum = sum; \
		rpw64->rpw_columnnum = colnum; \
		rpw64->rpw_blkno = (diskaddr_t)blkno; \
		rpw64->rpw_blkcnt = blkcnt; \
		rpw64->rpw_id = id; \
		rpw64->rpw_colcount = colcount; \
		rpw64->rpw_column = col; \
		rpw64->rpw_unit = MD_SID(un); \
		rpw64->rpw_magic_ext = RAID_PWMAGIC; \
		rpw64->rpw_origcolumncnt = (un)->un_origcolumncnt; \
		rpw64->rpw_totalcolumncnt = (un)->un_totalcolumncnt; \
		rpw64->rpw_segsize = (un)->un_segsize; \
		rpw64->rpw_segsincolumn = (diskaddr_t)((un)->un_segsincolumn); \
		rpw64->rpw_pwcnt = (un)->un_pwcnt; \
		rpw64->rpw_pwsize = (un)->un_pwsize; \
		rpw64->rpw_devstart = \
		    (diskaddr_t)((un)->un_column[col].un_orig_devstart); \
		rpw64->rpw_pwstart = \
		    (diskaddr_t)((un)->un_column[col].un_orig_pwstart); \
	} else { \
		raid_pwhdr32_od_t *rpw32 = \
		    (raid_pwhdr32_od_t *)(void *)(buf); \
		rpw32->rpw_magic = magic; \
		rpw32->rpw_sum = sum; \
		rpw32->rpw_columnnum = colnum; \
		rpw32->rpw_blkno = (daddr_t)blkno; \
		rpw32->rpw_blkcnt = blkcnt; \
		rpw32->rpw_id = id; \
		rpw32->rpw_colcount = colcount; \
		rpw32->rpw_column = col; \
		rpw32->rpw_unit = MD_SID(un); \
		rpw32->rpw_magic_ext = RAID_PWMAGIC; \
		rpw32->rpw_origcolumncnt = (un)->un_origcolumncnt; \
		rpw32->rpw_totalcolumncnt = (un)->un_totalcolumncnt; \
		rpw32->rpw_segsize = (daddr_t)((un)->un_segsize); \
		rpw32->rpw_segsincolumn = (daddr_t)((un)->un_segsincolumn); \
		rpw32->rpw_pwcnt = (un)->un_pwcnt; \
		rpw32->rpw_pwsize = (un)->un_pwsize; \
		rpw32->rpw_devstart = \
		    (daddr_t)((un)->un_column[col].un_orig_devstart); \
		rpw32->rpw_pwstart = \
		    (daddr_t)((un)->un_column[col].un_orig_pwstart); \
	} \
}

#define	RAID_CONVERT_RPW(rpw32, rpw64) { \
	(rpw64)->rpw_magic = (rpw32)->rpw_magic; \
	(rpw64)->rpw_sum = (rpw32)->rpw_sum; \
	(rpw64)->rpw_columnnum = (rpw32)->rpw_columnnum; \
	(rpw64)->rpw_blkno = (rpw32)->rpw_blkno; \
	(rpw64)->rpw_blkcnt = (rpw32)->rpw_blkcnt; \
	(rpw64)->rpw_id = (rpw32)->rpw_id; \
	(rpw64)->rpw_colcount = (rpw32)->rpw_colcount; \
	(rpw64)->rpw_column = (rpw32)->rpw_column; \
	(rpw64)->rpw_unit = (rpw32)->rpw_unit; \
	(rpw64)->rpw_magic_ext = (rpw32)->rpw_magic_ext; \
	(rpw64)->rpw_origcolumncnt = (rpw32)->rpw_origcolumncnt; \
	(rpw64)->rpw_totalcolumncnt = (rpw32)->rpw_totalcolumncnt; \
	(rpw64)->rpw_segsize = (rpw32)->rpw_segsize; \
	(rpw64)->rpw_segsincolumn = (rpw32)->rpw_segsincolumn; \
	(rpw64)->rpw_pwcnt = (rpw32)->rpw_pwcnt; \
	(rpw64)->rpw_pwsize = (rpw32)->rpw_pwsize; \
	(rpw64)->rpw_devstart = (rpw32)->rpw_devstart; \
	(rpw64)->rpw_pwstart = (rpw32)->rpw_pwstart; \
}

typedef struct mr_scoreboard {
	int		sb_column;
	int		sb_flags;
	diskaddr_t	sb_start_blk;
	diskaddr_t	sb_last_blk;
	void		*sb_cs;
} mr_scoreboard_t;

#define	SB_AVAIL	(0x00000001)	/* useable and valid blocks */
#define	SB_INUSE	(0x00000002)	/* being used */
#define	SB_UNUSED	(0x00000004)	/* useable and no valid blocks */
#define	SB_INVAL_PEND	(0x00000008)	/* being invalidated */

typedef struct mr_pw_reserve {
	uint_t		pw_magic;
	int		pw_column;
	int		pw_free;
	mr_scoreboard_t	pw_sb[1];
} mr_pw_reserve_t;


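/*
 * pw_sb[] is declared with a single element but is used as a variable-length
 * trailing array, presumably with one mr_scoreboard_t per prewrite slot
 * (un_pwcnt).  A minimal allocation sketch, assuming the usual trailing-array
 * idiom (illustrative only; "pwcnt" is a hypothetical slot count):
 *
 *	size_t sz = sizeof (mr_pw_reserve_t) +
 *	    (pwcnt - 1) * sizeof (mr_scoreboard_t);
 *	mr_pw_reserve_t *pw = kmem_zalloc(sz, KM_SLEEP);
 */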
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif
typedef struct mr_column {
	rcs_state_t	un_devstate;
	rcs_flags_t	un_devflags;
	md_timeval32_t	un_devtimestamp; /* time of last state change, 32 bit */

	mddb_recid_t	un_hs_id;
	diskaddr_t	un_hs_pwstart;
	diskaddr_t	un_hs_devstart;
	mdkey_t		un_hs_key;


	md_dev64_t	un_orig_dev;	/* original device, 64 bit */
	mdkey_t		un_orig_key;
	diskaddr_t	un_orig_pwstart;
	diskaddr_t	un_orig_devstart;

	md_dev64_t	un_dev;		/* current read/write dev */
	diskaddr_t	un_pwstart;
	diskaddr_t	un_devstart;

	md_dev64_t	un_alt_dev;	/* write to if resync */
	diskaddr_t	un_alt_pwstart;
	diskaddr_t	un_alt_devstart;
} mr_column_t;

/*
 * mr_column32_od is for the old 32 bit format only
 */
typedef struct mr_column32_od {
	rcs_state_t	un_devstate;
	rcs_flags_t	un_devflags;
	struct timeval32 un_devtimestamp; /* time of last state change */
	caddr32_t	xx_un_pw_reserve;

	mddb_recid_t	un_hs_id;
	daddr32_t	un_hs_pwstart;
	daddr32_t	un_hs_devstart;
	mdkey_t		un_hs_key;

	dev32_t		un_orig_dev;	/* original device */
	mdkey_t		un_orig_key;
	daddr32_t	un_orig_pwstart;
	daddr32_t	un_orig_devstart;

	dev32_t		un_dev;		/* current read/write dev */
	daddr32_t	un_pwstart;
	daddr32_t	un_devstart;

	dev32_t		un_alt_dev;	/* write to if resync */
	daddr32_t	un_alt_pwstart;
	daddr32_t	un_alt_devstart;
} mr_column32_od_t;


/*
 * Incore-only element structures
 */
typedef struct mr_column_ic {
	mr_pw_reserve_t	*un_pw_reserve;
} mr_column_ic_t;

/*
 * Do not rearrange elements, as mutexes must be aligned on
 * an 8 byte boundary.  Element _t_un_linlck_mx corresponds to
 * _t_un_linlck_cv and element _t_un_mx corresponds to _t_un_cv.
 */
typedef struct mr_unit_ic {
	caddr_t		_t_un_pbuffer;
	caddr_t		_t_un_dbuffer;
	struct md_raidcs *_t_un_linlck_chn;
	kmutex_t	_t_un_linlck_mx;
	kmutex_t	_t_un_mx;
	kcondvar_t	_t_un_linlck_cv;
	kcondvar_t	_t_un_cv;
	mr_column_ic_t	*_t_un_column_ic;
} mr_unit_ic_t;

typedef struct mr_unit {
	mdc_unit_t	c;
	int		un_raid_res;
	uint_t		un_magic;
	rus_state_t	un_state;
	md_timeval32_t	un_timestamp;	/* 32 bit fixed size */
	uint_t		un_origcolumncnt;
	uint_t		un_totalcolumncnt;
	uint_t		un_rflags;
	uint_t		un_segsize;
	diskaddr_t	un_segsincolumn;
	uint_t		un_maxio;	/* in blks */
	uint_t		un_iosize;	/* in blks */
	uint_t		un_linlck_flg;
	uint_t		un_pwcnt;
	uint_t		un_pwsize;
	long long	un_pwid;
	uint_t		un_percent_done;
	uint_t		un_resync_copysize;	/* in blks */
	hsp_t		un_hsp_id;
	/*
	 * This union has to begin at an 8 byte aligned address.
	 * If it does not, this structure has different sizes in 32 and 64 bit
	 * environments, since in a 64 bit environment the compiler adds
	 * padding before a long long if it does not start at an 8 byte
	 * aligned address.
	 * Be careful if you add or remove structure elements before it!
	 */

	union {
		struct {
			diskaddr_t	_t_un_resync_line_index;
			uint_t		_t_un_resync_segment;
			int		_t_un_resync_index;
		} _resync;
		struct {
			diskaddr_t	_t_un_grow_tb;
			uint_t		_t_un_init_colcnt;
			u_longlong_t	_t_un_init_iocnt;
		} _init;
	} _t_un;

	/*
	 * This union has to begin at an 8 byte aligned address.
	 * Be careful if you add or remove structure elements before it!
	 */
	union {
		mr_unit_ic_t	*_mr_ic;
		uint_t		_mr_ic_pad[2];
	} un_mr_ic;

	mr_column_t	un_column[1];
} mr_unit_t;

#define	mr_ic		un_mr_ic._mr_ic
#define	un_pbuffer	mr_ic->_t_un_pbuffer
#define	un_dbuffer	mr_ic->_t_un_dbuffer
#define	un_linlck_chn	mr_ic->_t_un_linlck_chn
#define	un_linlck_mx	mr_ic->_t_un_linlck_mx
#define	un_linlck_cv	mr_ic->_t_un_linlck_cv
#define	un_mx		mr_ic->_t_un_mx
#define	un_cv		mr_ic->_t_un_cv
#define	un_column_ic	mr_ic->_t_un_column_ic

/*
 * For old 32 bit format use only
 */
typedef struct mr_unit32_od {
	mdc_unit32_od_t	c;
	caddr32_t	xx_un_raid_res;
	uint_t		un_magic;
	rus_state_t	un_state;
	struct timeval32 un_timestamp;
	uint_t		un_origcolumncnt;
	uint_t		un_totalcolumncnt;
	uint_t		un_rflags;
	uint_t		un_segsize;
	uint_t		un_segsincolumn;
	uint_t		un_maxio;
	uint_t		un_iosize;
	caddr32_t	xx_un_pbuffer;
	caddr32_t	xx_un_dbuffer;
	uint_t		un_linlck_flg;
	caddr32_t	xx_un_linlck_chn;
	uint_t		un_pwcnt;
	uint_t		un_pwsize;
	long long	un_pwid;
	uint_t		un_rebuild_size;
	uint_t		un_percent_done;
	union {
		struct {
			uint_t	_t_un_resync_segment;
			int	_t_un_resync_index;
			uint_t	_t_un_resync_line_index;
		} _resync;
		struct {
			daddr32_t	_t_un_grow_tb;
			uint_t		_t_un_init_colcnt;
			uint_t		_t_un_init_iocnt;
		} _init;
	} _t_un;
	uint_t		un_resync_copysize;

	/*
	 * This spot is 8 byte aligned!!!
	 * Don't change this arrangement.
	 */
	union {
		struct {
			mr_unit_ic_t	*_t_mr_ic;
		} _mric;
		struct {
			uint_t	xx_un_linlck_mx[2];
		} _lckmx;
	} _unic;

	short		xx_un_linlck_cv;
	int		xx_un_mx[2];
	short		xx_un_cv;
	hsp_t		un_hsp_id;
	mr_column32_od_t un_column[1];
} mr_unit32_od_t;

typedef struct raid_pwhdr {
	uint_t		rpw_magic;
	uint_t		rpw_sum;
	int		rpw_columnnum;
	diskaddr_t	rpw_blkno;
	uint_t		rpw_blkcnt;
	long long	rpw_id;
	uint_t		rpw_colcount;
	uint_t		rpw_column;
	uint_t		rpw_unit;
	uint_t		rpw_magic_ext;
	uint_t		rpw_origcolumncnt;
	uint_t		rpw_totalcolumncnt;
	uint_t		rpw_segsize;
	diskaddr_t	rpw_segsincolumn;
	uint_t		rpw_pwcnt;
	uint_t		rpw_pwsize;
	diskaddr_t	rpw_devstart;
	diskaddr_t	rpw_pwstart;
	char		rpw_filler[12];
} raid_pwhdr_t;

/*
 * For old 32 bit pre-write area
 */
typedef struct raid_pwhdr32_od {
	uint_t		rpw_magic;
	uint_t		rpw_sum;
	int		rpw_columnnum;
	daddr32_t	rpw_blkno;
	daddr32_t	rpw_blkcnt;
	long long	rpw_id;
	uint_t		rpw_colcount;
	uint_t		rpw_column;
	uint_t		rpw_unit;
	uint_t		rpw_magic_ext;
	uint_t		rpw_origcolumncnt;
	uint_t		rpw_totalcolumncnt;
	uint_t		rpw_segsize;
	uint_t		rpw_segsincolumn;
	uint_t		rpw_pwcnt;
	uint_t		rpw_pwsize;
	uint_t		rpw_devstart;
	uint_t		rpw_pwstart;
	rus_state_t	rpw_unit_state;
	rcs_state_t	rpw_next_column_state;
	rcs_state_t	rpw_prev_column_state;
} raid_pwhdr32_od_t;
#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif

#ifdef	_KERNEL

/*
 * The buffer header is only bp_mapin'ed when it is needed.  It is needed
 * on all writes and on some reads.  ps_mapin is non-zero if the buffer is
 * mapped in, and ps_mapin_mx protects ps_mapin.  The protocol for usage is:
 *
 *	1) check for non-zero and continue if non-zero
 *	2) acquire the ps_mapin_mx
 *	3) recheck for non-zero and continue if non-zero
 *	4) bp_mapin
 *	5) set ps_mapin to non-zero
 *	6) drop ps_mapin_mx
 *
 * The reason for this is to avoid the mutex when possible.
 */
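/*
 * A minimal sketch of the double-checked mapin protocol described above
 * (illustrative only; "ps" is a hypothetical md_raidps_t pointer whose
 * buffer needs to be mapped in):
 *
 *	if (ps->ps_mapin == 0) {
 *		mutex_enter(&ps->ps_mapin_mx);
 *		if (ps->ps_mapin == 0) {
 *			bp_mapin(ps->ps_bp);
 *			ps->ps_mapin = 1;
 *		}
 *		mutex_exit(&ps->ps_mapin_mx);
 *	}
 */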
typedef struct md_raidps {			/* raid parent save */
	DAEMON_QUEUE
	uint_t		ps_magic;
	mr_unit_t	*ps_un;
	mdi_unit_t	*ps_ui;
	buf_t		*ps_bp;
	caddr_t		ps_addr;
	int		ps_flags;
	int		ps_error;
	int		ps_frags;
	int		ps_pwfrags;
	int		ps_mapin;	/* buffer mapped in if non-zero */
	kmutex_t	ps_mx;
	kmutex_t	ps_mapin_mx;	/* protects ps_mapin */
} md_raidps_t;

/* flags for parent save area */

#define	MD_RPS_ERROR	0x0001
#define	MD_RPS_READ	0x0020
#define	MD_RPS_WRITE	0x0040
#define	MD_RPS_DONE	0x0080
#define	MD_RPS_INUSE	0x0100
#define	MD_RPS_IODONE	0x0200
#define	MD_RPS_HSREQ	0x0400

/*
 * used in cs_stage to describe the type of io operation in progress
 */
enum raid_io_stage {
	RAID_NONE		= 0x0,
	RAID_READ_DONE		= 0x1,
	RAID_WRITE_DONE		= 0x2,
	RAID_PREWRITE_DONE	= 0x4,
	RAID_WRITE_PONLY_DONE	= 0x8,
	RAID_WRITE_DONLY_DONE	= 0x10,
	RAID_LINE_PWDONE	= 0x20
};

typedef struct md_raidcbuf {
	DAEMON_QUEUE
	uint_t			cbuf_magic;
	struct md_raidcbuf	*cbuf_next;	/* 0x10 */
	mr_unit_t		*cbuf_un;
	md_raidps_t		*cbuf_ps;
	int			cbuf_column;
	size_t			cbuf_bcount;	/* 0x20 */
	caddr_t			cbuf_buffer;
	int			cbuf_sum;
	int			cbuf_pwslot;
	int			cbuf_pwcnt;	/* 0x30 */
	int			cbuf_flags;
	buf_t			cbuf_bp;
	uint_t			cbuf_pad[4];
} md_raidcbuf_t;
#define	CBUF_PW_INVALIDATE	(0x00000001)
#define	CBUF_WRITE		(0x00000002)

typedef struct md_raidcs {
	DAEMON_QUEUE
	uint_t		cs_magic;
	minor_t		cs_mdunit;
	mr_unit_t	*cs_un;
	int		cs_flags;
	md_raidps_t	*cs_ps;
	diskaddr_t	cs_line;
	void		(*cs_call)();
	void		(*cs_error_call)();
	void		(*cs_retry_call)();
	struct md_raidcs *cs_linlck_next;
	struct md_raidcs *cs_linlck_prev;
	long long	cs_pwid;
	int		cs_dcolumn;
	int		cs_dpwslot;
	uint_t		cs_dflags;
	int		cs_pcolumn;
	int		cs_ppwslot;
	uint_t		cs_pflags;
	size_t		cs_bcount;
	uint_t		cs_blkcnt;
	diskaddr_t	cs_blkno;
	diskaddr_t	cs_lastblk;
	int		cs_loop;
	caddr_t		cs_addr;	/* base address of io */
	off_t		cs_offset;	/* offset into the base */
	caddr_t		cs_dbuffer;
	caddr_t		cs_pbuffer;
	int		cs_frags;
	int		cs_strategy_flag;
	void		*cs_strategy_private;
	md_raidcbuf_t	*cs_buflist;
	int		cs_error;
	int		cs_resync_check;
	int		cs_rstate;
	enum raid_io_stage cs_stage;	/* current io stage */
	md_raidcbuf_t	*cs_pw_inval_list;

	kmutex_t	cs_mx;

	buf_t		cs_pbuf;
	uint_t		cs_pad1;
	buf_t		cs_hbuf;
	uint_t		cs_pad2;
	/* Add new structure members HERE!! */
	buf_t		cs_dbuf;
	/* DO NOT add structure members here; cs_dbuf is dynamically sized */
} md_raidcs_t;

/* value definitions for cs_resync_check */
#define	RCL_OKAY	0x01	/* write to both orig and alt */
#define	RCL_ERRED	0x08	/* treat column as rcs_ERRED */

#define	RCL_DATA_MASK		0x000000ff
#define	RCL_PARITY_MASK		0x0000ff00
#define	RCL_PARITY_OFFSET	8	/* ensure masks match offset */

#define	RCL_PARITY(value)	(((value) & RCL_PARITY_MASK) >> \
					RCL_PARITY_OFFSET)

#define	RCL_DATA(value)		((value) & RCL_DATA_MASK)

/* value definitions for cs_flags */
#define	MD_RCS_ISCALL		0x000001	/* call cs_call in interrupt */
#define	MD_RCS_UNDBUF		0x000002	/* holding unit data buffer */
#define	MD_RCS_UNPBUF		0x000004	/* holding unit parity buffer */
#define	MD_RCS_MPBUF		0x000008
#define	MD_RCS_HAVE_PW_SLOTS	0x000010	/* pw slots gotten */
#define	MD_RCS_PWDONE		0x000040	/* pwfrags are decremented */
#define	MD_RCS_READER		0x000100	/* reader line lock needed */
#define	MD_RCS_WRITER		0x000200	/* writer line lock needed */
#define	MD_RCS_LLOCKD		0x000400	/* line lock held */
#define	MD_RCS_WAITING		0x000800	/* line lock waiting */
#define	MD_RCS_LINE		0x001000	/* full line write */
#define	MD_RCS_ERROR		0x010000	/* I/O error on this child */
#define	MD_RCS_RECOVERY		0x020000

/* value definitions for cs_pflags or cs_dflags */
#define	MD_RCS_ISUP		0x0002

/* value definitions for gcs_flags */
#define	MD_RGCS_ALLOCBUF	0x0001

/* returned value from raid_replay() */
#define	RAID_RPLY_SUCCESS	0x0000
#define	RAID_RPLY_ALLOCFAIL	0x0001
#define	RAID_RPLY_COMPREPLAY	0x0002
#define	RAID_RPLY_READONLY	0x0004
#define	RAID_RPLY_EIO		0x0008

typedef struct raid_rplybuf {
	caddr_t		rpl_data;
	buf_t		*rpl_buf;
} raid_rplybuf_t;

typedef struct raid_rplylst {
	struct raid_rplylst	*rpl_next;
	uint_t			rpl_colcnt;
	long long		rpl_id;
	int			rpl_column1;
	uint_t			rpl_slot1;
	raid_pwhdr_t		rpl_pwhdr1;
	int			rpl_column2;
	uint_t			rpl_slot2;
	raid_pwhdr_t		rpl_pwhdr2;
} raid_rplylst_t;

/* Externals from raid.c */
extern int	raid_build_incore(void *, int);
extern void	reset_raid(mr_unit_t *, minor_t, int);

/* Externals from raid_ioctl.c */
extern int	md_raid_ioctl(dev_t dev, int cmd, void *data,
		    int mode, IOLOCK *lockp);

/* rename named service functions */
md_ren_svc_t		raid_rename_check;
md_ren_svc_t		raid_rename_lock;
md_ren_void_svc_t	raid_rename_unlock;


/* redefinitions of the union shared by resync and init */
#define	un_resync_segment	_t_un._resync._t_un_resync_segment
#define	un_resync_index		_t_un._resync._t_un_resync_index
#define	un_resync_line_index	_t_un._resync._t_un_resync_line_index

#define	un_grow_tb	_t_un._init._t_un_grow_tb
#define	un_init_colcnt	_t_un._init._t_un_init_colcnt
#define	un_init_iocnt	_t_un._init._t_un_init_iocnt

#define	MD_RFLAG_NEEDBUF	(0x0001)
#define	MD_RFLAG_CLEAR		(0x0002)
#define	MD_RFLAG_KEEP		(0x0004)
#define	MD_RFLAG_NEEDPW		(0x0008)


extern void	raid_set_state(mr_unit_t *un, int col,
		    rcs_state_t new_state, int force);
extern int	raid_replay(mr_unit_t *un);
extern void	raid_commit(mr_unit_t *un, mddb_recid_t *extras);
extern char	*raid_unit_state(rus_state_t state);
extern intptr_t	raid_hotspares();
extern void	raid_hs_release(hs_cmds_t cmd, mr_unit_t *un,
		    mddb_recid_t *recids, int hs_index);
extern int	raid_internal_open(minor_t mnum, int flag, int otyp,
		    int oflags);
extern int	raid_internal_close(minor_t mnum, int otyp,
		    int init_pw, int cflags);
extern int	raid_build_pwslot(mr_unit_t *unit, int column_index);
extern void	raid_free_pwslot(mr_unit_t *unit, int column_index);
extern void	release_resync_request(minor_t mnum);
extern int	resync_request(minor_t mnum, int column_index,
		    size_t copysize, md_error_t *ep);
extern int	raid_resync_unit(minor_t mnum, md_error_t *ep);
extern void	raid_line_reader_lock(md_raidcs_t *cs,
		    int resync_thread);
extern void	raid_line_exit(md_raidcs_t *cs);
extern int	raid_state_cnt(mr_unit_t *un, rcs_state_t state);
extern int	raid_build_pw_reservation(mr_unit_t *un,
		    int colindex);
extern int	init_pw_area(mr_unit_t *un, md_dev64_t dev_to_write,
		    diskaddr_t pwstart, uint_t col);
extern void	init_buf(buf_t *bp, int flags, size_t size);
extern void	destroy_buf(buf_t *bp);
extern void	reset_buf(buf_t *bp, int flags, size_t size);
extern void	md_raid_strategy(buf_t *pb, int flag, void *private);
extern void	raid_free_pw_reservation(mr_unit_t *un,
		    int colindex);
extern void	raid_fillin_rpw(mr_unit_t *un,
		    raid_pwhdr_t *pwhdrp, int col);
#endif	/* _KERNEL */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_MD_RAID_H */