1 /*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #ifndef _G_MIRROR_H_ 30 #define _G_MIRROR_H_ 31 32 #include <sys/endian.h> 33 #include <sys/md5.h> 34 35 #define G_MIRROR_CLASS_NAME "MIRROR" 36 37 #define G_MIRROR_MAGIC "GEOM::MIRROR" 38 /* 39 * Version history: 40 * 0 - Initial version number. 41 * 1 - Added 'prefer' balance algorithm. 42 * 2 - Added md_genid field to metadata. 43 * 3 - Added md_provsize field to metadata. 44 * 4 - Added 'no failure synchronization' flag. 45 */ 46 #define G_MIRROR_VERSION 4 47 48 #define G_MIRROR_BALANCE_NONE 0 49 #define G_MIRROR_BALANCE_ROUND_ROBIN 1 50 #define G_MIRROR_BALANCE_LOAD 2 51 #define G_MIRROR_BALANCE_SPLIT 3 52 #define G_MIRROR_BALANCE_PREFER 4 53 #define G_MIRROR_BALANCE_MIN G_MIRROR_BALANCE_NONE 54 #define G_MIRROR_BALANCE_MAX G_MIRROR_BALANCE_PREFER 55 56 #define G_MIRROR_DISK_FLAG_DIRTY 0x0000000000000001ULL 57 #define G_MIRROR_DISK_FLAG_SYNCHRONIZING 0x0000000000000002ULL 58 #define G_MIRROR_DISK_FLAG_FORCE_SYNC 0x0000000000000004ULL 59 #define G_MIRROR_DISK_FLAG_INACTIVE 0x0000000000000008ULL 60 #define G_MIRROR_DISK_FLAG_HARDCODED 0x0000000000000010ULL 61 #define G_MIRROR_DISK_FLAG_BROKEN 0x0000000000000020ULL 62 #define G_MIRROR_DISK_FLAG_CANDELETE 0x0000000000000040ULL 63 #define G_MIRROR_DISK_FLAG_MASK (G_MIRROR_DISK_FLAG_DIRTY | \ 64 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \ 65 G_MIRROR_DISK_FLAG_FORCE_SYNC | \ 66 G_MIRROR_DISK_FLAG_INACTIVE | \ 67 G_MIRROR_DISK_FLAG_CANDELETE) 68 69 #define G_MIRROR_DEVICE_FLAG_NOAUTOSYNC 0x0000000000000001ULL 70 #define G_MIRROR_DEVICE_FLAG_NOFAILSYNC 0x0000000000000002ULL 71 #define G_MIRROR_DEVICE_FLAG_MASK (G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \ 72 G_MIRROR_DEVICE_FLAG_NOFAILSYNC) 73 74 #ifdef _KERNEL 75 extern u_int g_mirror_debug; 76 77 #define G_MIRROR_DEBUG(lvl, ...) do { \ 78 if (g_mirror_debug >= (lvl)) { \ 79 printf("GEOM_MIRROR"); \ 80 if (g_mirror_debug > 0) \ 81 printf("[%u]", lvl); \ 82 printf(": "); \ 83 printf(__VA_ARGS__); \ 84 printf("\n"); \ 85 } \ 86 } while (0) 87 #define G_MIRROR_LOGREQ(lvl, bp, ...) do { \ 88 if (g_mirror_debug >= (lvl)) { \ 89 printf("GEOM_MIRROR"); \ 90 if (g_mirror_debug > 0) \ 91 printf("[%u]", lvl); \ 92 printf(": "); \ 93 printf(__VA_ARGS__); \ 94 printf(" "); \ 95 g_print_bio(bp); \ 96 printf("\n"); \ 97 } \ 98 } while (0) 99 100 #define G_MIRROR_BIO_FLAG_REGULAR 0x01 101 #define G_MIRROR_BIO_FLAG_SYNC 0x02 102 103 /* 104 * Informations needed for synchronization. 105 */ 106 struct g_mirror_disk_sync { 107 struct g_consumer *ds_consumer; /* Consumer connected to our mirror. */ 108 off_t ds_offset; /* Offset of next request to send. */ 109 off_t ds_offset_done; /* Offset of already synchronized 110 region. */ 111 u_int ds_syncid; /* Disk's synchronization ID. */ 112 u_int ds_inflight; /* Number of in-flight sync requests. */ 113 struct bio **ds_bios; /* BIOs for synchronization I/O. */ 114 }; 115 116 /* 117 * Informations needed for synchronization. 118 */ 119 struct g_mirror_device_sync { 120 struct g_geom *ds_geom; /* Synchronization geom. */ 121 u_int ds_ndisks; /* Number of disks in SYNCHRONIZING 122 state. */ 123 }; 124 125 #define G_MIRROR_DISK_STATE_NONE 0 126 #define G_MIRROR_DISK_STATE_NEW 1 127 #define G_MIRROR_DISK_STATE_ACTIVE 2 128 #define G_MIRROR_DISK_STATE_STALE 3 129 #define G_MIRROR_DISK_STATE_SYNCHRONIZING 4 130 #define G_MIRROR_DISK_STATE_DISCONNECTED 5 131 #define G_MIRROR_DISK_STATE_DESTROY 6 132 struct g_mirror_disk { 133 uint32_t d_id; /* Disk ID. */ 134 struct g_consumer *d_consumer; /* Consumer. */ 135 struct g_mirror_softc *d_softc; /* Back-pointer to softc. */ 136 int d_state; /* Disk state. */ 137 u_int d_priority; /* Disk priority. */ 138 u_int load; /* Averaged queue length */ 139 off_t d_last_offset; /* Last read offset */ 140 uint64_t d_flags; /* Additional flags. */ 141 u_int d_genid; /* Disk's generation ID. */ 142 struct g_mirror_disk_sync d_sync;/* Sync information. */ 143 LIST_ENTRY(g_mirror_disk) d_next; 144 }; 145 #define d_name d_consumer->provider->name 146 147 #define G_MIRROR_EVENT_DONTWAIT 0x1 148 #define G_MIRROR_EVENT_WAIT 0x2 149 #define G_MIRROR_EVENT_DEVICE 0x4 150 #define G_MIRROR_EVENT_DONE 0x8 151 struct g_mirror_event { 152 struct g_mirror_disk *e_disk; 153 int e_state; 154 int e_flags; 155 int e_error; 156 TAILQ_ENTRY(g_mirror_event) e_next; 157 }; 158 159 #define G_MIRROR_DEVICE_FLAG_DESTROY 0x0100000000000000ULL 160 #define G_MIRROR_DEVICE_FLAG_DRAIN 0x0200000000000000ULL 161 #define G_MIRROR_DEVICE_FLAG_CLOSEWAIT 0x0400000000000000ULL 162 #define G_MIRROR_DEVICE_FLAG_TASTING 0x0800000000000000ULL 163 #define G_MIRROR_DEVICE_FLAG_WIPE 0x1000000000000000ULL 164 165 #define G_MIRROR_DEVICE_STATE_STARTING 0 166 #define G_MIRROR_DEVICE_STATE_RUNNING 1 167 168 #define G_MIRROR_TYPE_MANUAL 0 169 #define G_MIRROR_TYPE_AUTOMATIC 1 170 171 /* Bump syncid on first write. */ 172 #define G_MIRROR_BUMP_SYNCID 0x1 173 /* Bump genid immediately. */ 174 #define G_MIRROR_BUMP_GENID 0x2 175 /* Bump syncid immediately. */ 176 #define G_MIRROR_BUMP_SYNCID_NOW 0x4 177 struct g_mirror_softc { 178 u_int sc_type; /* Device type (manual/automatic). */ 179 u_int sc_state; /* Device state. */ 180 uint32_t sc_slice; /* Slice size. */ 181 uint8_t sc_balance; /* Balance algorithm. */ 182 uint64_t sc_mediasize; /* Device size. */ 183 uint32_t sc_sectorsize; /* Sector size. */ 184 uint64_t sc_flags; /* Additional flags. */ 185 186 struct g_geom *sc_geom; 187 struct g_provider *sc_provider; 188 int sc_provider_open; 189 190 uint32_t sc_id; /* Mirror unique ID. */ 191 192 struct sx sc_lock; 193 struct bio_queue_head sc_queue; 194 struct mtx sc_queue_mtx; 195 struct proc *sc_worker; 196 struct bio_queue_head sc_regular_delayed; /* Delayed I/O requests due 197 collision with sync 198 requests. */ 199 struct bio_queue_head sc_inflight; /* In-flight regular write 200 requests. */ 201 struct bio_queue_head sc_sync_delayed; /* Delayed sync requests due 202 collision with regular 203 requests. */ 204 205 LIST_HEAD(, g_mirror_disk) sc_disks; 206 u_int sc_ndisks; /* Number of disks. */ 207 struct g_mirror_disk *sc_hint; 208 209 u_int sc_genid; /* Generation ID. */ 210 u_int sc_syncid; /* Synchronization ID. */ 211 int sc_bump_id; 212 struct g_mirror_device_sync sc_sync; 213 int sc_idle; /* DIRTY flags removed. */ 214 time_t sc_last_write; 215 u_int sc_writes; 216 u_int sc_refcnt; /* Number of softc references */ 217 218 TAILQ_HEAD(, g_mirror_event) sc_events; 219 struct mtx sc_events_mtx; 220 221 struct callout sc_callout; 222 223 struct root_hold_token *sc_rootmount; 224 225 struct mtx sc_done_mtx; 226 }; 227 #define sc_name sc_geom->name 228 229 struct g_mirror_metadata; 230 231 u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state); 232 struct g_geom * g_mirror_create(struct g_class *mp, 233 const struct g_mirror_metadata *md, u_int type); 234 #define G_MIRROR_DESTROY_SOFT 0 235 #define G_MIRROR_DESTROY_DELAYED 1 236 #define G_MIRROR_DESTROY_HARD 2 237 int g_mirror_destroy(struct g_mirror_softc *sc, int how); 238 int g_mirror_event_send(void *arg, int state, int flags); 239 struct g_mirror_metadata; 240 int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 241 struct g_mirror_metadata *md); 242 int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md); 243 void g_mirror_fill_metadata(struct g_mirror_softc *sc, 244 struct g_mirror_disk *disk, struct g_mirror_metadata *md); 245 void g_mirror_update_metadata(struct g_mirror_disk *disk); 246 247 g_ctl_req_t g_mirror_config; 248 #endif /* _KERNEL */ 249 250 struct g_mirror_metadata { 251 char md_magic[16]; /* Magic value. */ 252 uint32_t md_version; /* Version number. */ 253 char md_name[16]; /* Mirror name. */ 254 uint32_t md_mid; /* Mirror unique ID. */ 255 uint32_t md_did; /* Disk unique ID. */ 256 uint8_t md_all; /* Number of disks in mirror. */ 257 uint32_t md_genid; /* Generation ID. */ 258 uint32_t md_syncid; /* Synchronization ID. */ 259 uint8_t md_priority; /* Disk priority. */ 260 uint32_t md_slice; /* Slice size. */ 261 uint8_t md_balance; /* Balance type. */ 262 uint64_t md_mediasize; /* Size of the smallest 263 disk in mirror. */ 264 uint32_t md_sectorsize; /* Sector size. */ 265 uint64_t md_sync_offset; /* Synchronized offset. */ 266 uint64_t md_mflags; /* Additional mirror flags. */ 267 uint64_t md_dflags; /* Additional disk flags. */ 268 char md_provider[16]; /* Hardcoded provider. */ 269 uint64_t md_provsize; /* Provider's size. */ 270 u_char md_hash[16]; /* MD5 hash. */ 271 }; 272 static __inline void 273 mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data) 274 { 275 MD5_CTX ctx; 276 277 bcopy(md->md_magic, data, 16); 278 le32enc(data + 16, md->md_version); 279 bcopy(md->md_name, data + 20, 16); 280 le32enc(data + 36, md->md_mid); 281 le32enc(data + 40, md->md_did); 282 *(data + 44) = md->md_all; 283 le32enc(data + 45, md->md_genid); 284 le32enc(data + 49, md->md_syncid); 285 *(data + 53) = md->md_priority; 286 le32enc(data + 54, md->md_slice); 287 *(data + 58) = md->md_balance; 288 le64enc(data + 59, md->md_mediasize); 289 le32enc(data + 67, md->md_sectorsize); 290 le64enc(data + 71, md->md_sync_offset); 291 le64enc(data + 79, md->md_mflags); 292 le64enc(data + 87, md->md_dflags); 293 bcopy(md->md_provider, data + 95, 16); 294 le64enc(data + 111, md->md_provsize); 295 MD5Init(&ctx); 296 MD5Update(&ctx, data, 119); 297 MD5Final(md->md_hash, &ctx); 298 bcopy(md->md_hash, data + 119, 16); 299 } 300 static __inline int 301 mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md) 302 { 303 MD5_CTX ctx; 304 305 bcopy(data + 20, md->md_name, 16); 306 md->md_mid = le32dec(data + 36); 307 md->md_did = le32dec(data + 40); 308 md->md_all = *(data + 44); 309 md->md_syncid = le32dec(data + 45); 310 md->md_priority = *(data + 49); 311 md->md_slice = le32dec(data + 50); 312 md->md_balance = *(data + 54); 313 md->md_mediasize = le64dec(data + 55); 314 md->md_sectorsize = le32dec(data + 63); 315 md->md_sync_offset = le64dec(data + 67); 316 md->md_mflags = le64dec(data + 75); 317 md->md_dflags = le64dec(data + 83); 318 bcopy(data + 91, md->md_provider, 16); 319 bcopy(data + 107, md->md_hash, 16); 320 MD5Init(&ctx); 321 MD5Update(&ctx, data, 107); 322 MD5Final(md->md_hash, &ctx); 323 if (bcmp(md->md_hash, data + 107, 16) != 0) 324 return (EINVAL); 325 326 /* New fields. */ 327 md->md_genid = 0; 328 md->md_provsize = 0; 329 330 return (0); 331 } 332 static __inline int 333 mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md) 334 { 335 MD5_CTX ctx; 336 337 bcopy(data + 20, md->md_name, 16); 338 md->md_mid = le32dec(data + 36); 339 md->md_did = le32dec(data + 40); 340 md->md_all = *(data + 44); 341 md->md_genid = le32dec(data + 45); 342 md->md_syncid = le32dec(data + 49); 343 md->md_priority = *(data + 53); 344 md->md_slice = le32dec(data + 54); 345 md->md_balance = *(data + 58); 346 md->md_mediasize = le64dec(data + 59); 347 md->md_sectorsize = le32dec(data + 67); 348 md->md_sync_offset = le64dec(data + 71); 349 md->md_mflags = le64dec(data + 79); 350 md->md_dflags = le64dec(data + 87); 351 bcopy(data + 95, md->md_provider, 16); 352 bcopy(data + 111, md->md_hash, 16); 353 MD5Init(&ctx); 354 MD5Update(&ctx, data, 111); 355 MD5Final(md->md_hash, &ctx); 356 if (bcmp(md->md_hash, data + 111, 16) != 0) 357 return (EINVAL); 358 359 /* New fields. */ 360 md->md_provsize = 0; 361 362 return (0); 363 } 364 static __inline int 365 mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md) 366 { 367 MD5_CTX ctx; 368 369 bcopy(data + 20, md->md_name, 16); 370 md->md_mid = le32dec(data + 36); 371 md->md_did = le32dec(data + 40); 372 md->md_all = *(data + 44); 373 md->md_genid = le32dec(data + 45); 374 md->md_syncid = le32dec(data + 49); 375 md->md_priority = *(data + 53); 376 md->md_slice = le32dec(data + 54); 377 md->md_balance = *(data + 58); 378 md->md_mediasize = le64dec(data + 59); 379 md->md_sectorsize = le32dec(data + 67); 380 md->md_sync_offset = le64dec(data + 71); 381 md->md_mflags = le64dec(data + 79); 382 md->md_dflags = le64dec(data + 87); 383 bcopy(data + 95, md->md_provider, 16); 384 md->md_provsize = le64dec(data + 111); 385 bcopy(data + 119, md->md_hash, 16); 386 MD5Init(&ctx); 387 MD5Update(&ctx, data, 119); 388 MD5Final(md->md_hash, &ctx); 389 if (bcmp(md->md_hash, data + 119, 16) != 0) 390 return (EINVAL); 391 return (0); 392 } 393 static __inline int 394 mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md) 395 { 396 int error; 397 398 bcopy(data, md->md_magic, 16); 399 md->md_version = le32dec(data + 16); 400 switch (md->md_version) { 401 case 0: 402 case 1: 403 error = mirror_metadata_decode_v0v1(data, md); 404 break; 405 case 2: 406 error = mirror_metadata_decode_v2(data, md); 407 break; 408 case 3: 409 case 4: 410 error = mirror_metadata_decode_v3v4(data, md); 411 break; 412 default: 413 error = EINVAL; 414 break; 415 } 416 return (error); 417 } 418 419 static __inline const char * 420 balance_name(u_int balance) 421 { 422 static const char *algorithms[] = { 423 [G_MIRROR_BALANCE_NONE] = "none", 424 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin", 425 [G_MIRROR_BALANCE_LOAD] = "load", 426 [G_MIRROR_BALANCE_SPLIT] = "split", 427 [G_MIRROR_BALANCE_PREFER] = "prefer", 428 [G_MIRROR_BALANCE_MAX + 1] = "unknown" 429 }; 430 431 if (balance > G_MIRROR_BALANCE_MAX) 432 balance = G_MIRROR_BALANCE_MAX + 1; 433 434 return (algorithms[balance]); 435 } 436 437 static __inline int 438 balance_id(const char *name) 439 { 440 static const char *algorithms[] = { 441 [G_MIRROR_BALANCE_NONE] = "none", 442 [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin", 443 [G_MIRROR_BALANCE_LOAD] = "load", 444 [G_MIRROR_BALANCE_SPLIT] = "split", 445 [G_MIRROR_BALANCE_PREFER] = "prefer" 446 }; 447 int n; 448 449 for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) { 450 if (strcmp(name, algorithms[n]) == 0) 451 return (n); 452 } 453 return (-1); 454 } 455 456 static __inline void 457 mirror_metadata_dump(const struct g_mirror_metadata *md) 458 { 459 static const char hex[] = "0123456789abcdef"; 460 char hash[16 * 2 + 1]; 461 u_int i; 462 463 printf(" magic: %s\n", md->md_magic); 464 printf(" version: %u\n", (u_int)md->md_version); 465 printf(" name: %s\n", md->md_name); 466 printf(" mid: %u\n", (u_int)md->md_mid); 467 printf(" did: %u\n", (u_int)md->md_did); 468 printf(" all: %u\n", (u_int)md->md_all); 469 printf(" genid: %u\n", (u_int)md->md_genid); 470 printf(" syncid: %u\n", (u_int)md->md_syncid); 471 printf(" priority: %u\n", (u_int)md->md_priority); 472 printf(" slice: %u\n", (u_int)md->md_slice); 473 printf(" balance: %s\n", balance_name((u_int)md->md_balance)); 474 printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize); 475 printf("sectorsize: %u\n", (u_int)md->md_sectorsize); 476 printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset); 477 printf(" mflags:"); 478 if (md->md_mflags == 0) 479 printf(" NONE"); 480 else { 481 if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 482 printf(" NOFAILSYNC"); 483 if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0) 484 printf(" NOAUTOSYNC"); 485 } 486 printf("\n"); 487 printf(" dflags:"); 488 if (md->md_dflags == 0) 489 printf(" NONE"); 490 else { 491 if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0) 492 printf(" DIRTY"); 493 if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) 494 printf(" SYNCHRONIZING"); 495 if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) 496 printf(" FORCE_SYNC"); 497 if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) 498 printf(" INACTIVE"); 499 } 500 printf("\n"); 501 printf("hcprovider: %s\n", md->md_provider); 502 printf(" provsize: %ju\n", (uintmax_t)md->md_provsize); 503 bzero(hash, sizeof(hash)); 504 for (i = 0; i < 16; i++) { 505 hash[i * 2] = hex[md->md_hash[i] >> 4]; 506 hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f]; 507 } 508 printf(" MD5 hash: %s\n", hash); 509 } 510 #endif /* !_G_MIRROR_H_ */ 511