/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _G_MIRROR_H_
#define _G_MIRROR_H_

#include <sys/endian.h>
#include <sys/md5.h>

#define G_MIRROR_CLASS_NAME     "MIRROR"

#define G_MIRROR_MAGIC          "GEOM::MIRROR"
/*
 * Version history:
 * 0 - Initial version number.
 * 1 - Added 'prefer' balance algorithm.
 * 2 - Added md_genid field to metadata.
 * 3 - Added md_provsize field to metadata.
 * 4 - Added 'no failure synchronization' flag.
 */
#define G_MIRROR_VERSION        4

#define G_MIRROR_BALANCE_NONE           0
#define G_MIRROR_BALANCE_ROUND_ROBIN    1
#define G_MIRROR_BALANCE_LOAD           2
#define G_MIRROR_BALANCE_SPLIT          3
#define G_MIRROR_BALANCE_PREFER         4
#define G_MIRROR_BALANCE_MIN            G_MIRROR_BALANCE_NONE
#define G_MIRROR_BALANCE_MAX            G_MIRROR_BALANCE_PREFER

#define G_MIRROR_DISK_FLAG_DIRTY                0x0000000000000001ULL
#define G_MIRROR_DISK_FLAG_SYNCHRONIZING        0x0000000000000002ULL
#define G_MIRROR_DISK_FLAG_FORCE_SYNC           0x0000000000000004ULL
#define G_MIRROR_DISK_FLAG_INACTIVE             0x0000000000000008ULL
#define G_MIRROR_DISK_FLAG_HARDCODED            0x0000000000000010ULL
#define G_MIRROR_DISK_FLAG_BROKEN               0x0000000000000020ULL
#define G_MIRROR_DISK_FLAG_CANDELETE            0x0000000000000040ULL

/* Per-disk flags which are recorded in on-disk metadata. */
#define G_MIRROR_DISK_FLAG_MASK         (G_MIRROR_DISK_FLAG_DIRTY |     \
                                         G_MIRROR_DISK_FLAG_SYNCHRONIZING | \
                                         G_MIRROR_DISK_FLAG_FORCE_SYNC | \
                                         G_MIRROR_DISK_FLAG_INACTIVE | \
                                         G_MIRROR_DISK_FLAG_CANDELETE)

#define G_MIRROR_DEVICE_FLAG_NOAUTOSYNC 0x0000000000000001ULL
#define G_MIRROR_DEVICE_FLAG_NOFAILSYNC 0x0000000000000002ULL
/* Mirror flags which are recorded in on-disk metadata. */
#define G_MIRROR_DEVICE_FLAG_MASK       (G_MIRROR_DEVICE_FLAG_NOAUTOSYNC | \
                                         G_MIRROR_DEVICE_FLAG_NOFAILSYNC)

#ifdef _KERNEL
#define G_MIRROR_DEVICE_FLAG_DESTROY    0x0100000000000000ULL
#define G_MIRROR_DEVICE_FLAG_DRAIN      0x0200000000000000ULL
#define G_MIRROR_DEVICE_FLAG_CLOSEWAIT  0x0400000000000000ULL
#define G_MIRROR_DEVICE_FLAG_TASTING    0x0800000000000000ULL
#define G_MIRROR_DEVICE_FLAG_WIPE       0x1000000000000000ULL

extern int g_mirror_debug;

#define G_MIRROR_DEBUG(lvl, ...) \
        _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), NULL, __VA_ARGS__)
#define G_MIRROR_LOGREQ(lvl, bp, ...) \
        _GEOM_DEBUG("GEOM_MIRROR", g_mirror_debug, (lvl), (bp), __VA_ARGS__)

#define G_MIRROR_BIO_FLAG_REGULAR       0x01
#define G_MIRROR_BIO_FLAG_SYNC          0x02

/*
 * Information needed for synchronization of a single disk.
 */
struct g_mirror_disk_sync {
        struct g_consumer *ds_consumer; /* Consumer connected to our mirror. */
        off_t    ds_offset;             /* Offset of next request to send. */
        off_t    ds_offset_done;        /* Offset of already synchronized
                                           region. */
        time_t   ds_update_ts;          /* Time of last metadata update. */
        u_int    ds_syncid;             /* Disk's synchronization ID. */
        u_int    ds_inflight;           /* Number of in-flight sync requests. */
        struct bio **ds_bios;           /* BIOs for synchronization I/O. */
};

/*
 * Per-device synchronization information.
 */
struct g_mirror_device_sync {
        struct g_geom *ds_geom;         /* Synchronization geom. */
        u_int    ds_ndisks;             /* Number of disks in SYNCHRONIZING
                                           state. */
};

#define G_MIRROR_DISK_STATE_NONE                0
#define G_MIRROR_DISK_STATE_NEW                 1
#define G_MIRROR_DISK_STATE_ACTIVE              2
#define G_MIRROR_DISK_STATE_STALE               3
#define G_MIRROR_DISK_STATE_SYNCHRONIZING       4
#define G_MIRROR_DISK_STATE_DISCONNECTED        5
#define G_MIRROR_DISK_STATE_DESTROY             6
struct g_mirror_disk {
        uint32_t d_id;                  /* Disk ID. */
        struct g_consumer *d_consumer;  /* Consumer. */
        struct g_mirror_softc *d_softc; /* Back-pointer to softc. */
        int      d_state;               /* Disk state. */
        u_int    d_priority;            /* Disk priority. */
        u_int    load;                  /* Averaged queue length. */
        off_t    d_last_offset;         /* Last read offset. */
        uint64_t d_flags;               /* Additional flags. */
        u_int    d_genid;               /* Disk's generation ID. */
        struct g_mirror_disk_sync d_sync; /* Sync information. */
        LIST_ENTRY(g_mirror_disk) d_next;
        u_int    d_init_ndisks;         /* Initial number of mirror components. */
        uint32_t d_init_slice;          /* Initial slice size. */
        uint8_t  d_init_balance;        /* Initial balance. */
        uint16_t d_rotation_rate;       /* Disk's rotation rate. */
        uint64_t d_init_mediasize;      /* Initial mediasize. */
};
#define d_name  d_consumer->provider->name

#define G_MIRROR_EVENT_DONTWAIT 0x1
#define G_MIRROR_EVENT_WAIT     0x2
#define G_MIRROR_EVENT_DEVICE   0x4
#define G_MIRROR_EVENT_DONE     0x8
struct g_mirror_event {
        struct g_mirror_disk *e_disk;
        int      e_state;
        int      e_flags;
        int      e_error;
        TAILQ_ENTRY(g_mirror_event) e_next;
};

#define G_MIRROR_DEVICE_STATE_STARTING  0
#define G_MIRROR_DEVICE_STATE_RUNNING   1

#define G_MIRROR_TYPE_MANUAL            0
#define G_MIRROR_TYPE_AUTOMATIC         1

/* Bump syncid on first write. */
#define G_MIRROR_BUMP_SYNCID            0x1
/* Bump genid immediately. */
#define G_MIRROR_BUMP_GENID             0x2
/* Bump syncid immediately. */
#define G_MIRROR_BUMP_SYNCID_NOW        0x4
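/*
 * The flags above request deferred ID updates and are collected in
 * sc_bump_id below.  Roughly (see g_mirror.c for the authoritative
 * logic): the synchronization ID is bumped when the mirror is first
 * written, so components that miss those writes can later be recognized
 * as stale and resynchronized; the generation ID is bumped when a
 * component fails, so broken components are not silently taken back
 * into the mirror.
 */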
struct g_mirror_softc {
        u_int           sc_type;        /* Device type (manual/automatic). */
        u_int           sc_state;       /* Device state. */
        uint32_t        sc_slice;       /* Slice size. */
        uint8_t         sc_balance;     /* Balance algorithm. */
        uint64_t        sc_mediasize;   /* Device size. */
        uint32_t        sc_sectorsize;  /* Sector size. */
        uint64_t        sc_flags;       /* Additional flags. */

        struct g_geom   *sc_geom;
        struct g_provider *sc_provider;
        int             sc_provider_open;

        uint32_t        sc_id;          /* Mirror unique ID. */

        struct sx        sc_lock;
        struct bio_queue sc_queue;
        struct mtx       sc_queue_mtx;
        struct proc     *sc_worker;
        struct bio_queue sc_inflight;   /* In-flight regular write requests. */
        struct bio_queue sc_regular_delayed; /* Delayed I/O requests due to
                                                collision with sync requests. */
        struct bio_queue sc_sync_delayed; /* Delayed sync requests due to
                                             collision with regular requests. */

        LIST_HEAD(, g_mirror_disk) sc_disks;
        u_int           sc_ndisks;      /* Number of disks. */
        struct g_mirror_disk *sc_hint;

        u_int           sc_genid;       /* Generation ID. */
        u_int           sc_syncid;      /* Synchronization ID. */
        int             sc_bump_id;
        struct g_mirror_device_sync sc_sync;
        int             sc_idle;        /* DIRTY flags removed. */
        time_t          sc_last_write;
        u_int           sc_writes;
        u_int           sc_refcnt;      /* Number of softc references. */

        TAILQ_HEAD(, g_mirror_event) sc_events;
        struct mtx      sc_events_mtx;
        struct g_mirror_event *sc_timeout_event;

        struct callout  sc_callout;

        struct root_hold_token *sc_rootmount;

        struct mtx      sc_done_mtx;
};
#define sc_name sc_geom->name

struct g_mirror_metadata;

u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state);
struct g_geom * g_mirror_create(struct g_class *mp,
    const struct g_mirror_metadata *md, u_int type);
#define G_MIRROR_DESTROY_SOFT           0
#define G_MIRROR_DESTROY_DELAYED        1
#define G_MIRROR_DESTROY_HARD           2
int g_mirror_destroy(struct g_mirror_softc *sc, int how);
int g_mirror_event_send(void *arg, int state, int flags);
struct g_mirror_metadata;
int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md);
int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md);
void g_mirror_fill_metadata(struct g_mirror_softc *sc,
    struct g_mirror_disk *disk, struct g_mirror_metadata *md);
void g_mirror_update_metadata(struct g_mirror_disk *disk);

g_ctl_req_t g_mirror_config;
#endif /* _KERNEL */

struct g_mirror_metadata {
        char            md_magic[16];   /* Magic value. */
        uint32_t        md_version;     /* Version number. */
        char            md_name[16];    /* Mirror name. */
        uint32_t        md_mid;         /* Mirror unique ID. */
        uint32_t        md_did;         /* Disk unique ID. */
        uint8_t         md_all;         /* Number of disks in mirror. */
        uint32_t        md_genid;       /* Generation ID. */
        uint32_t        md_syncid;      /* Synchronization ID. */
        uint8_t         md_priority;    /* Disk priority. */
        uint32_t        md_slice;       /* Slice size. */
        uint8_t         md_balance;     /* Balance type. */
        uint64_t        md_mediasize;   /* Size of the smallest
                                           disk in mirror. */
        uint32_t        md_sectorsize;  /* Sector size. */
        uint64_t        md_sync_offset; /* Synchronized offset. */
        uint64_t        md_mflags;      /* Additional mirror flags. */
        uint64_t        md_dflags;      /* Additional disk flags. */
        char            md_provider[16]; /* Hardcoded provider. */
        uint64_t        md_provsize;    /* Provider's size. */
        u_char          md_hash[16];    /* MD5 hash. */
};
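/*
 * On-disk layout of the version 4 metadata, as produced by
 * mirror_metadata_encode() below (byte offsets, little-endian fields):
 *
 *        0  md_magic[16]
 *       16  md_version (32 bit)
 *       20  md_name[16]
 *       36  md_mid (32 bit)
 *       40  md_did (32 bit)
 *       44  md_all (8 bit)
 *       45  md_genid (32 bit)
 *       49  md_syncid (32 bit)
 *       53  md_priority (8 bit)
 *       54  md_slice (32 bit)
 *       58  md_balance (8 bit)
 *       59  md_mediasize (64 bit)
 *       67  md_sectorsize (32 bit)
 *       71  md_sync_offset (64 bit)
 *       79  md_mflags (64 bit)
 *       87  md_dflags (64 bit)
 *       95  md_provider[16]
 *      111  md_provsize (64 bit)
 *      119  md_hash[16] (MD5 of bytes 0-118)
 *
 * Older metadata versions omit some of these fields and pack the rest at
 * different offsets; see the per-version decoders below.
 */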
static __inline void
mirror_metadata_encode(struct g_mirror_metadata *md, u_char *data)
{
        MD5_CTX ctx;

        bcopy(md->md_magic, data, 16);
        le32enc(data + 16, md->md_version);
        bcopy(md->md_name, data + 20, 16);
        le32enc(data + 36, md->md_mid);
        le32enc(data + 40, md->md_did);
        *(data + 44) = md->md_all;
        le32enc(data + 45, md->md_genid);
        le32enc(data + 49, md->md_syncid);
        *(data + 53) = md->md_priority;
        le32enc(data + 54, md->md_slice);
        *(data + 58) = md->md_balance;
        le64enc(data + 59, md->md_mediasize);
        le32enc(data + 67, md->md_sectorsize);
        le64enc(data + 71, md->md_sync_offset);
        le64enc(data + 79, md->md_mflags);
        le64enc(data + 87, md->md_dflags);
        bcopy(md->md_provider, data + 95, 16);
        le64enc(data + 111, md->md_provsize);
        MD5Init(&ctx);
        MD5Update(&ctx, data, 119);
        MD5Final(md->md_hash, &ctx);
        bcopy(md->md_hash, data + 119, 16);
}
static __inline int
mirror_metadata_decode_v0v1(const u_char *data, struct g_mirror_metadata *md)
{
        MD5_CTX ctx;

        bcopy(data + 20, md->md_name, 16);
        md->md_mid = le32dec(data + 36);
        md->md_did = le32dec(data + 40);
        md->md_all = *(data + 44);
        md->md_syncid = le32dec(data + 45);
        md->md_priority = *(data + 49);
        md->md_slice = le32dec(data + 50);
        md->md_balance = *(data + 54);
        md->md_mediasize = le64dec(data + 55);
        md->md_sectorsize = le32dec(data + 63);
        md->md_sync_offset = le64dec(data + 67);
        md->md_mflags = le64dec(data + 75);
        md->md_dflags = le64dec(data + 83);
        bcopy(data + 91, md->md_provider, 16);
        bcopy(data + 107, md->md_hash, 16);
        MD5Init(&ctx);
        MD5Update(&ctx, data, 107);
        MD5Final(md->md_hash, &ctx);
        if (bcmp(md->md_hash, data + 107, 16) != 0)
                return (EINVAL);

        /* New fields. */
        md->md_genid = 0;
        md->md_provsize = 0;

        return (0);
}
static __inline int
mirror_metadata_decode_v2(const u_char *data, struct g_mirror_metadata *md)
{
        MD5_CTX ctx;

        bcopy(data + 20, md->md_name, 16);
        md->md_mid = le32dec(data + 36);
        md->md_did = le32dec(data + 40);
        md->md_all = *(data + 44);
        md->md_genid = le32dec(data + 45);
        md->md_syncid = le32dec(data + 49);
        md->md_priority = *(data + 53);
        md->md_slice = le32dec(data + 54);
        md->md_balance = *(data + 58);
        md->md_mediasize = le64dec(data + 59);
        md->md_sectorsize = le32dec(data + 67);
        md->md_sync_offset = le64dec(data + 71);
        md->md_mflags = le64dec(data + 79);
        md->md_dflags = le64dec(data + 87);
        bcopy(data + 95, md->md_provider, 16);
        bcopy(data + 111, md->md_hash, 16);
        MD5Init(&ctx);
        MD5Update(&ctx, data, 111);
        MD5Final(md->md_hash, &ctx);
        if (bcmp(md->md_hash, data + 111, 16) != 0)
                return (EINVAL);

        /* New fields. */
        md->md_provsize = 0;

        return (0);
}
static __inline int
mirror_metadata_decode_v3v4(const u_char *data, struct g_mirror_metadata *md)
{
        MD5_CTX ctx;

        bcopy(data + 20, md->md_name, 16);
        md->md_mid = le32dec(data + 36);
        md->md_did = le32dec(data + 40);
        md->md_all = *(data + 44);
        md->md_genid = le32dec(data + 45);
        md->md_syncid = le32dec(data + 49);
        md->md_priority = *(data + 53);
        md->md_slice = le32dec(data + 54);
        md->md_balance = *(data + 58);
        md->md_mediasize = le64dec(data + 59);
        md->md_sectorsize = le32dec(data + 67);
        md->md_sync_offset = le64dec(data + 71);
        md->md_mflags = le64dec(data + 79);
        md->md_dflags = le64dec(data + 87);
        bcopy(data + 95, md->md_provider, 16);
        md->md_provsize = le64dec(data + 111);
        bcopy(data + 119, md->md_hash, 16);
        MD5Init(&ctx);
        MD5Update(&ctx, data, 119);
        MD5Final(md->md_hash, &ctx);
        if (bcmp(md->md_hash, data + 119, 16) != 0)
                return (EINVAL);
        return (0);
}
static __inline int
mirror_metadata_decode(const u_char *data, struct g_mirror_metadata *md)
{
        int error;

        bcopy(data, md->md_magic, 16);
        md->md_version = le32dec(data + 16);
        switch (md->md_version) {
        case 0:
        case 1:
                error = mirror_metadata_decode_v0v1(data, md);
                break;
        case 2:
                error = mirror_metadata_decode_v2(data, md);
                break;
        case 3:
        case 4:
                error = mirror_metadata_decode_v3v4(data, md);
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}
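/*
 * A minimal usage sketch (not taken from the driver itself): the encoder
 * and decoders above round-trip a metadata block through its raw on-disk
 * form, which occupies the first 135 bytes of the metadata sector.
 * Assuming a sector buffer of at least that size:
 *
 *      struct g_mirror_metadata md;
 *      u_char sector[512];
 *
 *      ...fill in md...
 *      md.md_version = G_MIRROR_VERSION;
 *      mirror_metadata_encode(&md, sector);
 *      ...write sector to the provider...
 *
 *      ...read sector from the provider...
 *      if (mirror_metadata_decode(sector, &md) != 0)
 *              ...invalid or corrupted metadata (unknown version or
 *                 MD5 mismatch)...
 *
 * In the kernel, the g_mirror_read_metadata() and
 * g_mirror_update_metadata() helpers declared above pair these routines
 * with the actual provider I/O.
 */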
static __inline const char *
balance_name(u_int balance)
{
        static const char *algorithms[] = {
                [G_MIRROR_BALANCE_NONE] = "none",
                [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
                [G_MIRROR_BALANCE_LOAD] = "load",
                [G_MIRROR_BALANCE_SPLIT] = "split",
                [G_MIRROR_BALANCE_PREFER] = "prefer",
                [G_MIRROR_BALANCE_MAX + 1] = "unknown"
        };

        if (balance > G_MIRROR_BALANCE_MAX)
                balance = G_MIRROR_BALANCE_MAX + 1;

        return (algorithms[balance]);
}

static __inline int
balance_id(const char *name)
{
        static const char *algorithms[] = {
                [G_MIRROR_BALANCE_NONE] = "none",
                [G_MIRROR_BALANCE_ROUND_ROBIN] = "round-robin",
                [G_MIRROR_BALANCE_LOAD] = "load",
                [G_MIRROR_BALANCE_SPLIT] = "split",
                [G_MIRROR_BALANCE_PREFER] = "prefer"
        };
        int n;

        for (n = G_MIRROR_BALANCE_MIN; n <= G_MIRROR_BALANCE_MAX; n++) {
                if (strcmp(name, algorithms[n]) == 0)
                        return (n);
        }
        return (-1);
}

static __inline void
mirror_metadata_dump(const struct g_mirror_metadata *md)
{
        static const char hex[] = "0123456789abcdef";
        char hash[16 * 2 + 1];
        u_int i;

        printf("     magic: %s\n", md->md_magic);
        printf("   version: %u\n", (u_int)md->md_version);
        printf("      name: %s\n", md->md_name);
        printf("       mid: %u\n", (u_int)md->md_mid);
        printf("       did: %u\n", (u_int)md->md_did);
        printf("       all: %u\n", (u_int)md->md_all);
        printf("     genid: %u\n", (u_int)md->md_genid);
        printf("    syncid: %u\n", (u_int)md->md_syncid);
        printf("  priority: %u\n", (u_int)md->md_priority);
        printf("     slice: %u\n", (u_int)md->md_slice);
        printf("   balance: %s\n", balance_name((u_int)md->md_balance));
        printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize);
        printf("sectorsize: %u\n", (u_int)md->md_sectorsize);
        printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset);
        printf("    mflags:");
        if (md->md_mflags == 0)
                printf(" NONE");
        else {
                if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
                        printf(" NOFAILSYNC");
                if ((md->md_mflags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0)
                        printf(" NOAUTOSYNC");
        }
        printf("\n");
        printf("    dflags:");
        if (md->md_dflags == 0)
                printf(" NONE");
        else {
                if ((md->md_dflags & G_MIRROR_DISK_FLAG_DIRTY) != 0)
                        printf(" DIRTY");
                if ((md->md_dflags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0)
                        printf(" SYNCHRONIZING");
                if ((md->md_dflags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0)
                        printf(" FORCE_SYNC");
                if ((md->md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0)
                        printf(" INACTIVE");
        }
        printf("\n");
        printf("hcprovider: %s\n", md->md_provider);
        printf("  provsize: %ju\n", (uintmax_t)md->md_provsize);
        bzero(hash, sizeof(hash));
        for (i = 0; i < 16; i++) {
                hash[i * 2] = hex[md->md_hash[i] >> 4];
                hash[i * 2 + 1] = hex[md->md_hash[i] & 0x0f];
        }
        printf("  MD5 hash: %s\n", hash);
}
#endif /* !_G_MIRROR_H_ */