/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_G_MIRROR_H_
#define	_G_MIRROR_H_

#include <sys/endian.h>
#include <sys/md5.h>

#define	G_MIRROR_CLASS_NAME	"MIRROR"

#define	G_MIRROR_MAGIC		"GEOM::MIRROR"
/*
 * Version history:
 * 0 - Initial version number.
 * 1 - Added 'prefer' balance algorithm.
 * 2 - Added md_genid field to metadata.
 * 3 - Added md_provsize field to metadata.
 * 4 - Added 'no failure synchronization' flag.
 */
#define	G_MIRROR_VERSION	4

#define	G_MIRROR_BALANCE_NONE		0
#define	G_MIRROR_BALANCE_ROUND_ROBIN	1
#define	G_MIRROR_BALANCE_LOAD		2
#define	G_MIRROR_BALANCE_SPLIT		3
#define	G_MIRROR_BALANCE_PREFER		4
#define	G_MIRROR_BALANCE_MIN		G_MIRROR_BALANCE_NONE
#define	G_MIRROR_BALANCE_MAX		G_MIRROR_BALANCE_PREFER

#define	G_MIRROR_DISK_FLAG_DIRTY		0x0000000000000001ULL
#define	G_MIRROR_DISK_FLAG_SYNCHRONIZING	0x0000000000000002ULL
#define	G_MIRROR_DISK_FLAG_FORCE_SYNC		0x0000000000000004ULL
#define	G_MIRROR_DISK_FLAG_INACTIVE		0x0000000000000008ULL
#define	G_MIRROR_DISK_FLAG_HARDCODED		0x0000000000000010ULL
#define	G_MIRROR_DISK_FLAG_BROKEN		0x0000000000000020ULL
#define	G_MIRROR_DISK_FLAG_CANDELETE		0x0000000000000040ULL

/* Per-disk flags which are recorded in on-disk metadata. */
#define	G_MIRROR_DISK_FLAG_MASK		(G_MIRROR_DISK_FLAG_DIRTY |	\
					 G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
					 G_MIRROR_DISK_FLAG_FORCE_SYNC | \
					 G_MIRROR_DISK_FLAG_INACTIVE | \
					 G_MIRROR_DISK_FLAG_CANDELETE)

#define	G_MIRROR_DEVICE_FLAG_NOAUTOSYNC	0x0000000000000001ULL
#define	G_MIRROR_DEVICE_FLAG_NOFAILSYNC	0x0000000000000002ULL

/* Mirror flags which are recorded in on-disk metadata. */
#define	G_MIRROR_DEVICE_FLAG_MASK	(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
					 G_MIRROR_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
extern int g_mirror_debug;
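
/*
 * Console logging helpers.  A message is printed only when the current
 * g_mirror_debug level (typically exposed as the kern.geom.mirror.debug
 * sysctl/tunable) is at least 'lvl'; G_MIRROR_LOGREQ() additionally appends
 * a description of the given bio via g_print_bio().
 */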
#define	G_MIRROR_DEBUG(lvl, ...)	do {				\
	if (g_mirror_debug >= (lvl)) {					\
		printf("GEOM_MIRROR");					\
		if (g_mirror_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf("\n");						\
	}								\
} while (0)
#define	G_MIRROR_LOGREQ(lvl, bp, ...)	do {				\
	if (g_mirror_debug >= (lvl)) {					\
		printf("GEOM_MIRROR");					\
		if (g_mirror_debug > 0)					\
			printf("[%u]", lvl);				\
		printf(": ");						\
		printf(__VA_ARGS__);					\
		printf(" ");						\
		g_print_bio(bp);					\
		printf("\n");						\
	}								\
} while (0)

#define	G_MIRROR_BIO_FLAG_REGULAR	0x01
#define	G_MIRROR_BIO_FLAG_SYNC		0x02

/*
 * Information needed for synchronization (per disk).
 */
struct g_mirror_disk_sync {
	struct g_consumer *ds_consumer;	/* Consumer connected to our mirror. */
	off_t		 ds_offset;	/* Offset of next request to send. */
	off_t		 ds_offset_done; /* Offset of already synchronized
					    region. */
	time_t		 ds_update_ts;	/* Time of last metadata update. */
	u_int		 ds_syncid;	/* Disk's synchronization ID. */
	u_int		 ds_inflight;	/* Number of in-flight sync requests. */
	struct bio	**ds_bios;	/* BIOs for synchronization I/O. */
};

/*
 * Information needed for synchronization (per mirror).
 */
struct g_mirror_device_sync {
	struct g_geom	*ds_geom;	/* Synchronization geom. */
	u_int		 ds_ndisks;	/* Number of disks in SYNCHRONIZING
					   state. */
};

#define	G_MIRROR_DISK_STATE_NONE		0
#define	G_MIRROR_DISK_STATE_NEW			1
#define	G_MIRROR_DISK_STATE_ACTIVE		2
#define	G_MIRROR_DISK_STATE_STALE		3
#define	G_MIRROR_DISK_STATE_SYNCHRONIZING	4
#define	G_MIRROR_DISK_STATE_DISCONNECTED	5
#define	G_MIRROR_DISK_STATE_DESTROY		6
struct g_mirror_disk {
	uint32_t	 d_id;		/* Disk ID. */
	struct g_consumer *d_consumer;	/* Consumer. */
	struct g_mirror_softc *d_softc;	/* Back-pointer to softc. */
	int		 d_state;	/* Disk state. */
	u_int		 d_priority;	/* Disk priority. */
	u_int		 load;		/* Averaged queue length. */
	off_t		 d_last_offset;	/* Last read offset. */
	uint64_t	 d_flags;	/* Additional flags. */
	u_int		 d_genid;	/* Disk's generation ID. */
	struct g_mirror_disk_sync d_sync; /* Sync information. */
	LIST_ENTRY(g_mirror_disk) d_next;
};
#define	d_name	d_consumer->provider->name

#define	G_MIRROR_EVENT_DONTWAIT	0x1
#define	G_MIRROR_EVENT_WAIT	0x2
#define	G_MIRROR_EVENT_DEVICE	0x4
#define	G_MIRROR_EVENT_DONE	0x8
struct g_mirror_event {
	struct g_mirror_disk	*e_disk;
	int			 e_state;
	int			 e_flags;
	int			 e_error;
	TAILQ_ENTRY(g_mirror_event) e_next;
};

#define	G_MIRROR_DEVICE_FLAG_DESTROY	0x0100000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_DRAIN	0x0200000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_CLOSEWAIT	0x0400000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_TASTING	0x0800000000000000ULL
#define	G_MIRROR_DEVICE_FLAG_WIPE	0x1000000000000000ULL

#define	G_MIRROR_DEVICE_STATE_STARTING	0
#define	G_MIRROR_DEVICE_STATE_RUNNING	1

#define	G_MIRROR_TYPE_MANUAL	0
#define	G_MIRROR_TYPE_AUTOMATIC	1

/* Bump syncid on first write. */
#define	G_MIRROR_BUMP_SYNCID		0x1
/* Bump genid immediately. */
#define	G_MIRROR_BUMP_GENID		0x2
/* Bump syncid immediately. */
#define	G_MIRROR_BUMP_SYNCID_NOW	0x4
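
/*
 * Per-mirror state ("softc").  One instance exists for each configured
 * mirror; sc_disks lists the member disks, while sc_flags and sc_state hold
 * the run-time G_MIRROR_DEVICE_* values defined above.
 */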
struct g_mirror_softc {
	u_int		sc_type;	/* Device type (manual/automatic). */
	u_int		sc_state;	/* Device state. */
	uint32_t	sc_slice;	/* Slice size. */
	uint8_t		sc_balance;	/* Balance algorithm. */
	uint64_t	sc_mediasize;	/* Device size. */
	uint32_t	sc_sectorsize;	/* Sector size. */
	uint64_t	sc_flags;	/* Additional flags. */

	struct g_geom	*sc_geom;
	struct g_provider *sc_provider;
	int		sc_provider_open;

	uint32_t	sc_id;		/* Mirror unique ID. */

	struct sx	 sc_lock;
	struct bio_queue sc_queue;
	struct mtx	 sc_queue_mtx;
	struct proc	*sc_worker;
	struct bio_queue sc_inflight;	/* In-flight regular write requests. */
	struct bio_queue sc_regular_delayed; /* Delayed I/O requests due to
						collision with sync requests. */
	struct bio_queue sc_sync_delayed; /* Delayed sync requests due to
					     collision with regular requests. */

	LIST_HEAD(, g_mirror_disk) sc_disks;
	u_int		sc_ndisks;	/* Number of disks. */
	struct g_mirror_disk *sc_hint;

	u_int		sc_genid;	/* Generation ID. */
	u_int		sc_syncid;	/* Synchronization ID. */
	int		sc_bump_id;
	struct g_mirror_device_sync sc_sync;
	int		sc_idle;	/* DIRTY flags removed. */
	time_t		sc_last_write;
	u_int		sc_writes;
	u_int		sc_refcnt;	/* Number of softc references */

	TAILQ_HEAD(, g_mirror_event) sc_events;
	struct mtx	sc_events_mtx;

	struct callout	sc_callout;

	struct root_hold_token *sc_rootmount;

	struct mtx	sc_done_mtx;
};
#define	sc_name	sc_geom->name

struct g_mirror_metadata;

u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
struct g_geom *g_mirror_create(struct g_class *mp,
    const struct g_mirror_metadata *md, u_int type);
#define	G_MIRROR_DESTROY_SOFT		0
#define	G_MIRROR_DESTROY_DELAYED	1
#define	G_MIRROR_DESTROY_HARD		2
int g_mirror_destroy(struct g_mirror_softc *sc, int how);
int g_mirror_event_send(void *arg, int state, int flags);
struct g_mirror_metadata;
int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md);
int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
void g_mirror_fill_metadata(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct g_mirror_metadata *md);
void g_mirror_update_metadata(struct g_mirror_disk *disk);

g_ctl_req_t g_mirror_config;
#endif	/* _KERNEL */
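
/*
 * On-disk metadata record.  As laid out by mirror_metadata_encode() below
 * (current format, G_MIRROR_VERSION 4), multi-byte fields are little-endian
 * and the MD5 hash covers the first 119 bytes:
 *
 *	offset	size	field
 *	     0	  16	md_magic
 *	    16	   4	md_version
 *	    20	  16	md_name
 *	    36	   4	md_mid
 *	    40	   4	md_did
 *	    44	   1	md_all
 *	    45	   4	md_genid
 *	    49	   4	md_syncid
 *	    53	   1	md_priority
 *	    54	   4	md_slice
 *	    58	   1	md_balance
 *	    59	   8	md_mediasize
 *	    67	   4	md_sectorsize
 *	    71	   8	md_sync_offset
 *	    79	   8	md_mflags
 *	    87	   8	md_dflags
 *	    95	  16	md_provider
 *	   111	   8	md_provsize
 *	   119	  16	md_hash
 *
 * The record is written to the last sector of each member provider.
 */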
struct g_mirror_metadata {
	char		md_magic[16];	/* Magic value. */
	uint32_t	md_version;	/* Version number. */
	char		md_name[16];	/* Mirror name. */
	uint32_t	md_mid;		/* Mirror unique ID. */
	uint32_t	md_did;		/* Disk unique ID. */
	uint8_t		md_all;		/* Number of disks in mirror. */
	uint32_t	md_genid;	/* Generation ID. */
	uint32_t	md_syncid;	/* Synchronization ID. */
	uint8_t		md_priority;	/* Disk priority. */
	uint32_t	md_slice;	/* Slice size. */
	uint8_t		md_balance;	/* Balance type. */
	uint64_t	md_mediasize;	/* Size of the smallest
					   disk in mirror. */
	uint32_t	md_sectorsize;	/* Sector size. */
	uint64_t	md_sync_offset;	/* Synchronized offset. */
	uint64_t	md_mflags;	/* Additional mirror flags. */
	uint64_t	md_dflags;	/* Additional disk flags. */
	char		md_provider[16]; /* Hardcoded provider. */
	uint64_t	md_provsize;	/* Provider's size. */
	u_char		md_hash[16];	/* MD5 hash. */
};
static __inline void
mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
{
	MD5_CTX ctx;

	bcopy(md->md_magic, data, 16);
	le32enc(data + 16, md->md_version);
	bcopy(md->md_name, data + 20, 16);
	le32enc(data + 36, md->md_mid);
	le32enc(data + 40, md->md_did);
	*(data + 44) = md->md_all;
	le32enc(data + 45, md->md_genid);
	le32enc(data + 49, md->md_syncid);
	*(data + 53) = md->md_priority;
	le32enc(data + 54, md->md_slice);
	*(data + 58) = md->md_balance;
	le64enc(data + 59, md->md_mediasize);
	le32enc(data + 67, md->md_sectorsize);
	le64enc(data + 71, md->md_sync_offset);
	le64enc(data + 79, md->md_mflags);
	le64enc(data + 87, md->md_dflags);
	bcopy(md->md_provider, data + 95, 16);
	le64enc(data + 111, md->md_provsize);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	bcopy(md->md_hash, data + 119, 16);
}
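
/*
 * The decoders below are normally fed a sector read from the tail of a
 * provider.  A rough, illustrative sketch of that pattern (compare
 * g_mirror_read_metadata() in g_mirror.c; 'cp' is an open consumer and the
 * local names are made up for the example):
 *
 *	u_char *buf;
 *	int error;
 *
 *	buf = g_read_data(cp, cp->provider->mediasize -
 *	    cp->provider->sectorsize, cp->provider->sectorsize, &error);
 *	if (buf == NULL)
 *		return (error);
 *	error = mirror_metadata_decode(buf, md);
 *	g_free(buf);
 *
 * mirror_metadata_decode() returns EINVAL for an unknown version or a bad
 * MD5 checksum; checking md_magic against G_MIRROR_MAGIC is up to the caller.
 */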
static __inline int
mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_syncid = le32dec(data + 45);
	md->md_priority = *(data + 49);
	md->md_slice = le32dec(data + 50);
	md->md_balance = *(data + 54);
	md->md_mediasize = le64dec(data + 55);
	md->md_sectorsize = le32dec(data + 63);
	md->md_sync_offset = le64dec(data + 67);
	md->md_mflags = le64dec(data + 75);
	md->md_dflags = le64dec(data + 83);
	bcopy(data + 91, md->md_provider, 16);
	bcopy(data + 107, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 107);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 107, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_genid = 0;
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	bcopy(data + 111, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 111);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 111, 16) != 0)
		return (EINVAL);

	/* New fields. */
	md->md_provsize = 0;

	return (0);
}
static __inline int
mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
{
	MD5_CTX ctx;

	bcopy(data + 20, md->md_name, 16);
	md->md_mid = le32dec(data + 36);
	md->md_did = le32dec(data + 40);
	md->md_all = *(data + 44);
	md->md_genid = le32dec(data + 45);
	md->md_syncid = le32dec(data + 49);
	md->md_priority = *(data + 53);
	md->md_slice = le32dec(data + 54);
	md->md_balance = *(data + 58);
	md->md_mediasize = le64dec(data + 59);
	md->md_sectorsize = le32dec(data + 67);
	md->md_sync_offset = le64dec(data + 71);
	md->md_mflags = le64dec(data + 79);
	md->md_dflags = le64dec(data + 87);
	bcopy(data + 95, md->md_provider, 16);
	md->md_provsize = le64dec(data + 111);
	bcopy(data + 119, md->md_hash, 16);
	MD5Init(&ctx);
	MD5Update(&ctx, data, 119);
	MD5Final(md->md_hash, &ctx);
	if (bcmp(md->md_hash, data + 119, 16) != 0)
		return (EINVAL);
	return (0);
}
static __inline int
mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
{
	int error;

	bcopy(data, md->md_magic, 16);
	md->md_version = le32dec(data + 16);
	switch (md->md_version) {
	case 0:
	case 1:
		error = mirror_metadata_decode_v0v1(data, md);
		break;
	case 2:
		error = mirror_metadata_decode_v2(data, md);
		break;
	case 3:
	case 4:
		error = mirror_metadata_decode_v3v4(data, md);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

static __inline const char *
balance_name(u_int balance)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer",
		[G_MIRROR_BALANCE_MAX + 1] = "unknown"
	};

	if (balance > G_MIRROR_BALANCE_MAX)
		balance = G_MIRROR_BALANCE_MAX + 1;

	return (algorithms[balance]);
}

static __inline int
balance_id(const char *name)
{
	static const char *algorithms[] = {
		[G_MIRROR_BALANCE_NONE] = "none",
		[G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
		[G_MIRROR_BALANCE_LOAD] = "load",
		[G_MIRROR_BALANCE_SPLIT] = "split",
		[G_MIRROR_BALANCE_PREFER] = "prefer"
	};
	int n;

	for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
		if (strcmp(name, algorithms[n]) == 0)
			return (n);
	}
	return (-1);
}
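
/*
 * Pretty-print a decoded metadata record to the console.  This lives outside
 * the _KERNEL block so that the userland gmirror(8) utility can use it as
 * well (e.g. for its 'dump' command).
 */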
static __inline void
mirror_metadata_dump(const struct g_mirror_metadata *md)
{
	static const char hex[] = "0123456789abcdef";
	char hash[16 * 2 + 1];
	u_int i;

	printf("     magic: %s\n", md->md_magic);
	printf("   version: %u\n", (u_int)md->md_version);
	printf("      name: %s\n", md->md_name);
	printf("       mid: %u\n", (u_int)md->md_mid);
	printf("       did: %u\n", (u_int)md->md_did);
	printf("       all: %u\n", (u_int)md->md_all);
	printf("     genid: %u\n", (u_int)md->md_genid);
	printf("    syncid: %u\n", (u_int)md->md_syncid);
	printf("  priority: %u\n", (u_int)md->md_priority);
	printf("     slice: %u\n", (u_int)md->md_slice);
	printf("   balance: %s\n", balance_name((u_int)md->md_balance));
	printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
	printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
	printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
	printf("    mflags:");
	if (md->md_mflags == 0)
		printf(" NONE");
	else {
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
			printf(" NOFAILSYNC");
		if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
			printf(" NOAUTOSYNC");
	}
	printf("\n");
	printf("    dflags:");
	if (md->md_dflags == 0)
		printf(" NONE");
	else {
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
			printf(" DIRTY");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
			printf(" SYNCHRONIZING");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
			printf(" FORCE_SYNC");
		if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
			printf(" INACTIVE");
	}
	printf("\n");
	printf("hcprovider: %s\n", md->md_provider);
	printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
	bzero(hash, sizeof(hash));
	for (i = 0; i < 16; i++) {
		hash[i * 2] = hex[md->md_hash[i] >> 4];
		hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
	}
	printf("  MD5 hash: %s\n", hash);
}
#endif	/* !_G_MIRROR_H_ */