1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bio.h> 35 #include <sys/eventhandler.h> 36 #include <sys/fail.h> 37 #include <sys/kernel.h> 38 #include <sys/kthread.h> 39 #include <sys/limits.h> 40 #include <sys/lock.h> 41 #include <sys/malloc.h> 42 #include <sys/mutex.h> 43 #include <sys/proc.h> 44 #include <sys/sbuf.h> 45 #include <sys/sched.h> 46 #include <sys/sx.h> 47 #include <sys/sysctl.h> 48 49 #include <geom/geom.h> 50 #include <geom/mirror/g_mirror.h> 51 52 FEATURE(geom_mirror, "GEOM mirroring support"); 53 54 static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data"); 55 56 SYSCTL_DECL(_kern_geom); 57 static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, 58 "GEOM_MIRROR stuff"); 59 int g_mirror_debug = 0; 60 SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0, 61 "Debug level"); 62 bool g_launch_mirror_before_timeout = true; 63 SYSCTL_BOOL(_kern_geom_mirror, OID_AUTO, launch_mirror_before_timeout, 64 CTLFLAG_RWTUN, &g_launch_mirror_before_timeout, 0, 65 "If false, force gmirror to wait out the full kern.geom.mirror.timeout " 66 "before launching mirrors"); 67 static u_int g_mirror_timeout = 4; 68 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout, 69 0, "Time to wait on all mirror components"); 70 static u_int g_mirror_idletime = 5; 71 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN, 72 &g_mirror_idletime, 0, "Mark components as clean when idling"); 73 static u_int g_mirror_disconnect_on_failure = 1; 74 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 75 &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 76 static u_int g_mirror_syncreqs = 2; 77 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 78 &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests."); 79 static u_int g_mirror_sync_period = 5; 80 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN, 81 
&g_mirror_sync_period, 0, 82 "Metadata update period during synchronization, in seconds"); 83 84 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 85 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 86 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 87 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 88 } while (0) 89 90 static eventhandler_tag g_mirror_post_sync = NULL; 91 static int g_mirror_shutdown = 0; 92 93 static g_ctl_destroy_geom_t g_mirror_destroy_geom; 94 static g_taste_t g_mirror_taste; 95 static g_init_t g_mirror_init; 96 static g_fini_t g_mirror_fini; 97 static g_provgone_t g_mirror_providergone; 98 static g_resize_t g_mirror_resize; 99 100 struct g_class g_mirror_class = { 101 .name = G_MIRROR_CLASS_NAME, 102 .version = G_VERSION, 103 .ctlreq = g_mirror_config, 104 .taste = g_mirror_taste, 105 .destroy_geom = g_mirror_destroy_geom, 106 .init = g_mirror_init, 107 .fini = g_mirror_fini, 108 .providergone = g_mirror_providergone, 109 .resize = g_mirror_resize 110 }; 111 112 113 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 114 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 115 static void g_mirror_update_device(struct g_mirror_softc *sc, bool force); 116 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 117 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 118 static int g_mirror_refresh_device(struct g_mirror_softc *sc, 119 const struct g_provider *pp, const struct g_mirror_metadata *md); 120 static void g_mirror_sync_reinit(const struct g_mirror_disk *disk, 121 struct bio *bp, off_t offset); 122 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 123 static void g_mirror_register_request(struct g_mirror_softc *sc, 124 struct bio *bp); 125 static void g_mirror_sync_release(struct g_mirror_softc *sc); 126 127 128 static const char * 129 g_mirror_disk_state2str(int state) 130 { 131 132 switch (state) { 133 case G_MIRROR_DISK_STATE_NONE: 134 return ("NONE"); 135 case G_MIRROR_DISK_STATE_NEW: 136 return ("NEW"); 137 case G_MIRROR_DISK_STATE_ACTIVE: 138 return ("ACTIVE"); 139 case G_MIRROR_DISK_STATE_STALE: 140 return ("STALE"); 141 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 142 return ("SYNCHRONIZING"); 143 case G_MIRROR_DISK_STATE_DISCONNECTED: 144 return ("DISCONNECTED"); 145 case G_MIRROR_DISK_STATE_DESTROY: 146 return ("DESTROY"); 147 default: 148 return ("INVALID"); 149 } 150 } 151 152 static const char * 153 g_mirror_device_state2str(int state) 154 { 155 156 switch (state) { 157 case G_MIRROR_DEVICE_STATE_STARTING: 158 return ("STARTING"); 159 case G_MIRROR_DEVICE_STATE_RUNNING: 160 return ("RUNNING"); 161 default: 162 return ("INVALID"); 163 } 164 } 165 166 static const char * 167 g_mirror_get_diskname(struct g_mirror_disk *disk) 168 { 169 170 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 171 return ("[unknown]"); 172 return (disk->d_name); 173 } 174 175 /* 176 * --- Events handling functions --- 177 * Events in geom_mirror are used to maintain disks and device status 178 * from one thread to simplify locking. 
179 */ 180 static void 181 g_mirror_event_free(struct g_mirror_event *ep) 182 { 183 184 free(ep, M_MIRROR); 185 } 186 187 int 188 g_mirror_event_send(void *arg, int state, int flags) 189 { 190 struct g_mirror_softc *sc; 191 struct g_mirror_disk *disk; 192 struct g_mirror_event *ep; 193 int error; 194 195 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 196 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 197 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 198 disk = NULL; 199 sc = arg; 200 } else { 201 disk = arg; 202 sc = disk->d_softc; 203 } 204 ep->e_disk = disk; 205 ep->e_state = state; 206 ep->e_flags = flags; 207 ep->e_error = 0; 208 mtx_lock(&sc->sc_events_mtx); 209 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 210 mtx_unlock(&sc->sc_events_mtx); 211 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 212 mtx_lock(&sc->sc_queue_mtx); 213 wakeup(sc); 214 mtx_unlock(&sc->sc_queue_mtx); 215 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 216 return (0); 217 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 218 sx_xunlock(&sc->sc_lock); 219 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 220 mtx_lock(&sc->sc_events_mtx); 221 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 222 hz * 5); 223 } 224 error = ep->e_error; 225 g_mirror_event_free(ep); 226 sx_xlock(&sc->sc_lock); 227 return (error); 228 } 229 230 static struct g_mirror_event * 231 g_mirror_event_first(struct g_mirror_softc *sc) 232 { 233 struct g_mirror_event *ep; 234 235 mtx_lock(&sc->sc_events_mtx); 236 ep = TAILQ_FIRST(&sc->sc_events); 237 mtx_unlock(&sc->sc_events_mtx); 238 return (ep); 239 } 240 241 static void 242 g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 243 { 244 245 mtx_lock(&sc->sc_events_mtx); 246 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 247 mtx_unlock(&sc->sc_events_mtx); 248 } 249 250 static void 251 g_mirror_event_cancel(struct g_mirror_disk *disk) 252 { 253 struct g_mirror_softc *sc; 254 struct g_mirror_event *ep, *tmpep; 255 256 sc = disk->d_softc; 257 sx_assert(&sc->sc_lock, SX_XLOCKED); 258 259 mtx_lock(&sc->sc_events_mtx); 260 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 261 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 262 continue; 263 if (ep->e_disk != disk) 264 continue; 265 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 266 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 267 g_mirror_event_free(ep); 268 else { 269 ep->e_error = ECANCELED; 270 wakeup(ep); 271 } 272 } 273 mtx_unlock(&sc->sc_events_mtx); 274 } 275 276 /* 277 * Return the number of disks in given state. 278 * If state is equal to -1, count all connected disks. 279 */ 280 u_int 281 g_mirror_ndisks(struct g_mirror_softc *sc, int state) 282 { 283 struct g_mirror_disk *disk; 284 u_int n = 0; 285 286 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 287 if (state == -1 || disk->d_state == state) 288 n++; 289 } 290 return (n); 291 } 292 293 /* 294 * Find a disk in mirror by its disk ID. 
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can post an event to detach and destroy the consumer.
		 * A class which still has a consumer attached to the given
		 * provider will not receive a retaste event for it.
		 * This is how retaste events are ignored when consumers
		 * opened for writing are closed: the consumer is detached
		 * and destroyed only after the retaste event has been sent.
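		 * The actual detach/destroy is deferred to the GEOM event
		 * thread via g_post_event() below.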
387 */ 388 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 389 return; 390 } 391 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 392 g_detach(cp); 393 g_destroy_consumer(cp); 394 } 395 396 static int 397 g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 398 { 399 struct g_consumer *cp; 400 int error; 401 402 g_topology_assert_not(); 403 KASSERT(disk->d_consumer == NULL, 404 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 405 406 g_topology_lock(); 407 cp = g_new_consumer(disk->d_softc->sc_geom); 408 cp->flags |= G_CF_DIRECT_RECEIVE; 409 error = g_attach(cp, pp); 410 if (error != 0) { 411 g_destroy_consumer(cp); 412 g_topology_unlock(); 413 return (error); 414 } 415 error = g_access(cp, 1, 1, 1); 416 if (error != 0) { 417 g_detach(cp); 418 g_destroy_consumer(cp); 419 g_topology_unlock(); 420 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 421 pp->name, error); 422 return (error); 423 } 424 g_topology_unlock(); 425 disk->d_consumer = cp; 426 disk->d_consumer->private = disk; 427 disk->d_consumer->index = 0; 428 429 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 430 return (0); 431 } 432 433 static void 434 g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 435 { 436 437 g_topology_assert(); 438 439 if (cp == NULL) 440 return; 441 if (cp->provider != NULL) 442 g_mirror_kill_consumer(sc, cp); 443 else 444 g_destroy_consumer(cp); 445 } 446 447 /* 448 * Initialize disk. This means allocate memory, create consumer, attach it 449 * to the provider and open access (r1w1e1) to it. 450 */ 451 static struct g_mirror_disk * 452 g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 453 struct g_mirror_metadata *md, int *errorp) 454 { 455 struct g_mirror_disk *disk; 456 int i, error; 457 458 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 459 if (disk == NULL) { 460 error = ENOMEM; 461 goto fail; 462 } 463 disk->d_softc = sc; 464 error = g_mirror_connect_disk(disk, pp); 465 if (error != 0) 466 goto fail; 467 disk->d_id = md->md_did; 468 disk->d_state = G_MIRROR_DISK_STATE_NONE; 469 disk->d_priority = md->md_priority; 470 disk->d_flags = md->md_dflags; 471 error = g_getattr("GEOM::candelete", disk->d_consumer, &i); 472 if (error == 0 && i != 0) 473 disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE; 474 if (md->md_provider[0] != '\0') 475 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 476 disk->d_sync.ds_consumer = NULL; 477 disk->d_sync.ds_offset = md->md_sync_offset; 478 disk->d_sync.ds_offset_done = md->md_sync_offset; 479 disk->d_sync.ds_update_ts = time_uptime; 480 disk->d_genid = md->md_genid; 481 disk->d_sync.ds_syncid = md->md_syncid; 482 disk->d_init_ndisks = md->md_all; 483 disk->d_init_slice = md->md_slice; 484 disk->d_init_balance = md->md_balance; 485 disk->d_init_mediasize = md->md_mediasize; 486 if (errorp != NULL) 487 *errorp = 0; 488 return (disk); 489 fail: 490 if (errorp != NULL) 491 *errorp = error; 492 if (disk != NULL) 493 free(disk, M_MIRROR); 494 return (NULL); 495 } 496 497 static void 498 g_mirror_destroy_disk(struct g_mirror_disk *disk) 499 { 500 struct g_mirror_softc *sc; 501 502 g_topology_assert_not(); 503 sc = disk->d_softc; 504 sx_assert(&sc->sc_lock, SX_XLOCKED); 505 506 g_topology_lock(); 507 LIST_REMOVE(disk, d_next); 508 g_topology_unlock(); 509 g_mirror_event_cancel(disk); 510 if (sc->sc_hint == disk) 511 sc->sc_hint = NULL; 512 switch (disk->d_state) { 513 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 514 g_mirror_sync_stop(disk, 1); 
515 /* FALLTHROUGH */ 516 case G_MIRROR_DISK_STATE_NEW: 517 case G_MIRROR_DISK_STATE_STALE: 518 case G_MIRROR_DISK_STATE_ACTIVE: 519 g_topology_lock(); 520 g_mirror_disconnect_consumer(sc, disk->d_consumer); 521 g_topology_unlock(); 522 free(disk, M_MIRROR); 523 break; 524 default: 525 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 526 g_mirror_get_diskname(disk), 527 g_mirror_disk_state2str(disk->d_state))); 528 } 529 } 530 531 static void 532 g_mirror_free_device(struct g_mirror_softc *sc) 533 { 534 535 g_topology_assert(); 536 537 mtx_destroy(&sc->sc_queue_mtx); 538 mtx_destroy(&sc->sc_events_mtx); 539 mtx_destroy(&sc->sc_done_mtx); 540 sx_destroy(&sc->sc_lock); 541 free(sc, M_MIRROR); 542 } 543 544 static void 545 g_mirror_providergone(struct g_provider *pp) 546 { 547 struct g_mirror_softc *sc = pp->private; 548 549 if ((--sc->sc_refcnt) == 0) 550 g_mirror_free_device(sc); 551 } 552 553 static void 554 g_mirror_destroy_device(struct g_mirror_softc *sc) 555 { 556 struct g_mirror_disk *disk; 557 struct g_mirror_event *ep; 558 struct g_geom *gp; 559 struct g_consumer *cp, *tmpcp; 560 561 g_topology_assert_not(); 562 sx_assert(&sc->sc_lock, SX_XLOCKED); 563 564 gp = sc->sc_geom; 565 if (sc->sc_provider != NULL) 566 g_mirror_destroy_provider(sc); 567 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 568 disk = LIST_FIRST(&sc->sc_disks)) { 569 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 570 g_mirror_update_metadata(disk); 571 g_mirror_destroy_disk(disk); 572 } 573 while ((ep = g_mirror_event_first(sc)) != NULL) { 574 g_mirror_event_remove(sc, ep); 575 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 576 g_mirror_event_free(ep); 577 else { 578 ep->e_error = ECANCELED; 579 ep->e_flags |= G_MIRROR_EVENT_DONE; 580 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 581 mtx_lock(&sc->sc_events_mtx); 582 wakeup(ep); 583 mtx_unlock(&sc->sc_events_mtx); 584 } 585 } 586 callout_drain(&sc->sc_callout); 587 588 g_topology_lock(); 589 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 590 g_mirror_disconnect_consumer(sc, cp); 591 } 592 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 593 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 594 g_wither_geom(gp, ENXIO); 595 sx_xunlock(&sc->sc_lock); 596 if ((--sc->sc_refcnt) == 0) 597 g_mirror_free_device(sc); 598 g_topology_unlock(); 599 } 600 601 static void 602 g_mirror_orphan(struct g_consumer *cp) 603 { 604 struct g_mirror_disk *disk; 605 606 g_topology_assert(); 607 608 disk = cp->private; 609 if (disk == NULL) 610 return; 611 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 612 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 613 G_MIRROR_EVENT_DONTWAIT); 614 } 615 616 /* 617 * Function should return the next active disk on the list. 618 * It is possible that it will be the same disk as given. 619 * If there are no active disks on list, NULL is returned. 
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case where the size of the parent provider
		 * has been reduced.
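		 * If the provider's last sector now falls inside the area
		 * recorded as the mirror's media size, there is no room left
		 * for the metadata block, so fail with ENOSPC below.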
685 */ 686 if (offset < md->md_mediasize) 687 error = ENOSPC; 688 else 689 mirror_metadata_encode(md, sector); 690 } 691 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error); 692 if (error == 0) 693 error = g_write_data(cp, offset, sector, length); 694 free(sector, M_MIRROR); 695 if (error != 0) { 696 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 697 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 698 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 699 "(device=%s, error=%d).", 700 g_mirror_get_diskname(disk), sc->sc_name, error); 701 } else { 702 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 703 "(device=%s, error=%d).", 704 g_mirror_get_diskname(disk), sc->sc_name, error); 705 } 706 if (g_mirror_disconnect_on_failure && 707 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 708 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 709 g_mirror_event_send(disk, 710 G_MIRROR_DISK_STATE_DISCONNECTED, 711 G_MIRROR_EVENT_DONTWAIT); 712 } 713 } 714 return (error); 715 } 716 717 static int 718 g_mirror_clear_metadata(struct g_mirror_disk *disk) 719 { 720 int error; 721 722 g_topology_assert_not(); 723 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 724 725 if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 726 return (0); 727 error = g_mirror_write_metadata(disk, NULL); 728 if (error == 0) { 729 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 730 g_mirror_get_diskname(disk)); 731 } else { 732 G_MIRROR_DEBUG(0, 733 "Cannot clear metadata on disk %s (error=%d).", 734 g_mirror_get_diskname(disk), error); 735 } 736 return (error); 737 } 738 739 void 740 g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 741 struct g_mirror_metadata *md) 742 { 743 744 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 745 md->md_version = G_MIRROR_VERSION; 746 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 747 md->md_mid = sc->sc_id; 748 md->md_all = sc->sc_ndisks; 749 md->md_slice = sc->sc_slice; 750 md->md_balance = sc->sc_balance; 751 md->md_genid = sc->sc_genid; 752 md->md_mediasize = sc->sc_mediasize; 753 md->md_sectorsize = sc->sc_sectorsize; 754 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 755 bzero(md->md_provider, sizeof(md->md_provider)); 756 if (disk == NULL) { 757 md->md_did = arc4random(); 758 md->md_priority = 0; 759 md->md_syncid = 0; 760 md->md_dflags = 0; 761 md->md_sync_offset = 0; 762 md->md_provsize = 0; 763 } else { 764 md->md_did = disk->d_id; 765 md->md_priority = disk->d_priority; 766 md->md_syncid = disk->d_sync.ds_syncid; 767 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 768 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 769 md->md_sync_offset = disk->d_sync.ds_offset_done; 770 else 771 md->md_sync_offset = 0; 772 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 773 strlcpy(md->md_provider, 774 disk->d_consumer->provider->name, 775 sizeof(md->md_provider)); 776 } 777 md->md_provsize = disk->d_consumer->provider->mediasize; 778 } 779 } 780 781 void 782 g_mirror_update_metadata(struct g_mirror_disk *disk) 783 { 784 struct g_mirror_softc *sc; 785 struct g_mirror_metadata md; 786 int error; 787 788 g_topology_assert_not(); 789 sc = disk->d_softc; 790 sx_assert(&sc->sc_lock, SX_LOCKED); 791 792 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 793 return; 794 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) 795 g_mirror_fill_metadata(sc, disk, &md); 796 error = g_mirror_write_metadata(disk, &md); 797 if (error == 0) { 798 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 799 g_mirror_get_diskname(disk)); 
800 } else { 801 G_MIRROR_DEBUG(0, 802 "Cannot update metadata on disk %s (error=%d).", 803 g_mirror_get_diskname(disk), error); 804 } 805 } 806 807 static void 808 g_mirror_bump_syncid(struct g_mirror_softc *sc) 809 { 810 struct g_mirror_disk *disk; 811 812 g_topology_assert_not(); 813 sx_assert(&sc->sc_lock, SX_XLOCKED); 814 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 815 ("%s called with no active disks (device=%s).", __func__, 816 sc->sc_name)); 817 818 sc->sc_syncid++; 819 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 820 sc->sc_syncid); 821 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 822 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 823 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 824 disk->d_sync.ds_syncid = sc->sc_syncid; 825 g_mirror_update_metadata(disk); 826 } 827 } 828 } 829 830 static void 831 g_mirror_bump_genid(struct g_mirror_softc *sc) 832 { 833 struct g_mirror_disk *disk; 834 835 g_topology_assert_not(); 836 sx_assert(&sc->sc_lock, SX_XLOCKED); 837 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 838 ("%s called with no active disks (device=%s).", __func__, 839 sc->sc_name)); 840 841 sc->sc_genid++; 842 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 843 sc->sc_genid); 844 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 845 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 846 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 847 disk->d_genid = sc->sc_genid; 848 g_mirror_update_metadata(disk); 849 } 850 } 851 } 852 853 static int 854 g_mirror_idle(struct g_mirror_softc *sc, int acw) 855 { 856 struct g_mirror_disk *disk; 857 int timeout; 858 859 g_topology_assert_not(); 860 sx_assert(&sc->sc_lock, SX_XLOCKED); 861 862 if (sc->sc_provider == NULL) 863 return (0); 864 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 865 return (0); 866 if (sc->sc_idle) 867 return (0); 868 if (sc->sc_writes > 0) 869 return (0); 870 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 871 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 872 if (!g_mirror_shutdown && timeout > 0) 873 return (timeout); 874 } 875 sc->sc_idle = 1; 876 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 877 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 878 continue; 879 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 880 g_mirror_get_diskname(disk), sc->sc_name); 881 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 882 g_mirror_update_metadata(disk); 883 } 884 return (0); 885 } 886 887 static void 888 g_mirror_unidle(struct g_mirror_softc *sc) 889 { 890 struct g_mirror_disk *disk; 891 892 g_topology_assert_not(); 893 sx_assert(&sc->sc_lock, SX_XLOCKED); 894 895 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 896 return; 897 sc->sc_idle = 0; 898 sc->sc_last_write = time_uptime; 899 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 900 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 901 continue; 902 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 903 g_mirror_get_diskname(disk), sc->sc_name); 904 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 905 g_mirror_update_metadata(disk); 906 } 907 } 908 909 static void 910 g_mirror_done(struct bio *bp) 911 { 912 struct g_mirror_softc *sc; 913 914 sc = bp->bio_from->geom->softc; 915 bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR; 916 mtx_lock(&sc->sc_queue_mtx); 917 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 918 mtx_unlock(&sc->sc_queue_mtx); 919 wakeup(sc); 920 } 921 922 static void 923 g_mirror_regular_request_error(struct g_mirror_softc *sc, 924 
struct g_mirror_disk *disk, struct bio *bp) 925 { 926 927 if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == EOPNOTSUPP) 928 return; 929 930 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 931 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 932 G_MIRROR_LOGREQ(0, bp, "Request failed (error=%d).", 933 bp->bio_error); 934 } else { 935 G_MIRROR_LOGREQ(1, bp, "Request failed (error=%d).", 936 bp->bio_error); 937 } 938 if (g_mirror_disconnect_on_failure && 939 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 940 if (bp->bio_error == ENXIO && 941 bp->bio_cmd == BIO_READ) 942 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 943 else if (bp->bio_error == ENXIO) 944 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW; 945 else 946 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 947 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 948 G_MIRROR_EVENT_DONTWAIT); 949 } 950 } 951 952 static void 953 g_mirror_regular_request(struct g_mirror_softc *sc, struct bio *bp) 954 { 955 struct g_mirror_disk *disk; 956 struct bio *pbp; 957 958 g_topology_assert_not(); 959 KASSERT(sc->sc_provider == bp->bio_parent->bio_to, 960 ("regular request %p with unexpected origin", bp)); 961 962 pbp = bp->bio_parent; 963 bp->bio_from->index--; 964 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) 965 sc->sc_writes--; 966 disk = bp->bio_from->private; 967 if (disk == NULL) { 968 g_topology_lock(); 969 g_mirror_kill_consumer(sc, bp->bio_from); 970 g_topology_unlock(); 971 } 972 973 switch (bp->bio_cmd) { 974 case BIO_READ: 975 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read, 976 bp->bio_error); 977 break; 978 case BIO_WRITE: 979 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write, 980 bp->bio_error); 981 break; 982 case BIO_DELETE: 983 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_delete, 984 bp->bio_error); 985 break; 986 case BIO_FLUSH: 987 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_flush, 988 bp->bio_error); 989 break; 990 } 991 992 pbp->bio_inbed++; 993 KASSERT(pbp->bio_inbed <= pbp->bio_children, 994 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 995 pbp->bio_children)); 996 if (bp->bio_error == 0 && pbp->bio_error == 0) { 997 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 998 g_destroy_bio(bp); 999 if (pbp->bio_children == pbp->bio_inbed) { 1000 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 1001 pbp->bio_completed = pbp->bio_length; 1002 if (pbp->bio_cmd == BIO_WRITE || 1003 pbp->bio_cmd == BIO_DELETE) { 1004 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 1005 /* Release delayed sync requests if possible. 
*/ 1006 g_mirror_sync_release(sc); 1007 } 1008 g_io_deliver(pbp, pbp->bio_error); 1009 } 1010 return; 1011 } else if (bp->bio_error != 0) { 1012 if (pbp->bio_error == 0) 1013 pbp->bio_error = bp->bio_error; 1014 if (disk != NULL) 1015 g_mirror_regular_request_error(sc, disk, bp); 1016 switch (pbp->bio_cmd) { 1017 case BIO_DELETE: 1018 case BIO_WRITE: 1019 case BIO_FLUSH: 1020 pbp->bio_inbed--; 1021 pbp->bio_children--; 1022 break; 1023 } 1024 } 1025 g_destroy_bio(bp); 1026 1027 switch (pbp->bio_cmd) { 1028 case BIO_READ: 1029 if (pbp->bio_inbed < pbp->bio_children) 1030 break; 1031 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 1032 g_io_deliver(pbp, pbp->bio_error); 1033 else { 1034 pbp->bio_error = 0; 1035 mtx_lock(&sc->sc_queue_mtx); 1036 TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue); 1037 mtx_unlock(&sc->sc_queue_mtx); 1038 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1039 wakeup(sc); 1040 } 1041 break; 1042 case BIO_DELETE: 1043 case BIO_WRITE: 1044 case BIO_FLUSH: 1045 if (pbp->bio_children == 0) { 1046 /* 1047 * All requests failed. 1048 */ 1049 } else if (pbp->bio_inbed < pbp->bio_children) { 1050 /* Do nothing. */ 1051 break; 1052 } else if (pbp->bio_children == pbp->bio_inbed) { 1053 /* Some requests succeeded. */ 1054 pbp->bio_error = 0; 1055 pbp->bio_completed = pbp->bio_length; 1056 } 1057 if (pbp->bio_cmd == BIO_WRITE || pbp->bio_cmd == BIO_DELETE) { 1058 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 1059 /* Release delayed sync requests if possible. */ 1060 g_mirror_sync_release(sc); 1061 } 1062 g_io_deliver(pbp, pbp->bio_error); 1063 break; 1064 default: 1065 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 1066 break; 1067 } 1068 } 1069 1070 static void 1071 g_mirror_sync_done(struct bio *bp) 1072 { 1073 struct g_mirror_softc *sc; 1074 1075 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1076 sc = bp->bio_from->geom->softc; 1077 bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC; 1078 mtx_lock(&sc->sc_queue_mtx); 1079 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 1080 mtx_unlock(&sc->sc_queue_mtx); 1081 wakeup(sc); 1082 } 1083 1084 static void 1085 g_mirror_candelete(struct bio *bp) 1086 { 1087 struct g_mirror_softc *sc; 1088 struct g_mirror_disk *disk; 1089 int *val; 1090 1091 sc = bp->bio_to->private; 1092 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1093 if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) 1094 break; 1095 } 1096 val = (int *)bp->bio_data; 1097 *val = (disk != NULL); 1098 g_io_deliver(bp, 0); 1099 } 1100 1101 static void 1102 g_mirror_kernel_dump(struct bio *bp) 1103 { 1104 struct g_mirror_softc *sc; 1105 struct g_mirror_disk *disk; 1106 struct bio *cbp; 1107 struct g_kerneldump *gkd; 1108 1109 /* 1110 * We configure dumping to the first component, because this component 1111 * will be used for reading with 'prefer' balance algorithm. 1112 * If the component with the highest priority is currently disconnected 1113 * we will not be able to read the dump after the reboot if it will be 1114 * connected and synchronized later. Can we do something better? 
 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
	case BIO_FLUSH:
		break;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	if (bp->bio_to->error != 0) {
		mtx_unlock(&sc->sc_queue_mtx);
		g_io_deliver(bp, bp->bio_to->error);
		return;
	}
	TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static bool
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (true);
		}
	}
	return (false);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static bool
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (false);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (true);
	}
	return (false);
}

/*
 * Put a regular request onto the delayed queue.
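 * Delayed requests are re-queued by g_mirror_regular_release() once they no
 * longer collide with in-flight synchronization requests.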
1236 */ 1237 static void 1238 g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1239 { 1240 1241 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1242 TAILQ_INSERT_TAIL(&sc->sc_regular_delayed, bp, bio_queue); 1243 } 1244 1245 /* 1246 * Puts synchronization request onto delayed queue. 1247 */ 1248 static void 1249 g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1250 { 1251 1252 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1253 TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue); 1254 } 1255 1256 /* 1257 * Requeue delayed regular requests. 1258 */ 1259 static void 1260 g_mirror_regular_release(struct g_mirror_softc *sc) 1261 { 1262 struct bio *bp; 1263 1264 if ((bp = TAILQ_FIRST(&sc->sc_regular_delayed)) == NULL) 1265 return; 1266 if (g_mirror_sync_collision(sc, bp)) 1267 return; 1268 1269 G_MIRROR_DEBUG(2, "Requeuing regular requests after collision."); 1270 mtx_lock(&sc->sc_queue_mtx); 1271 TAILQ_CONCAT(&sc->sc_regular_delayed, &sc->sc_queue, bio_queue); 1272 TAILQ_SWAP(&sc->sc_regular_delayed, &sc->sc_queue, bio, bio_queue); 1273 mtx_unlock(&sc->sc_queue_mtx); 1274 } 1275 1276 /* 1277 * Releases delayed sync requests which don't collide anymore with regular 1278 * requests. 1279 */ 1280 static void 1281 g_mirror_sync_release(struct g_mirror_softc *sc) 1282 { 1283 struct bio *bp, *bp2; 1284 1285 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) { 1286 if (g_mirror_regular_collision(sc, bp)) 1287 continue; 1288 TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue); 1289 G_MIRROR_LOGREQ(2, bp, 1290 "Releasing delayed synchronization request."); 1291 g_io_request(bp, bp->bio_from); 1292 } 1293 } 1294 1295 /* 1296 * Free a synchronization request and clear its slot in the array. 1297 */ 1298 static void 1299 g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp) 1300 { 1301 int idx; 1302 1303 if (disk != NULL && disk->d_sync.ds_bios != NULL) { 1304 idx = (int)(uintptr_t)bp->bio_caller1; 1305 KASSERT(disk->d_sync.ds_bios[idx] == bp, 1306 ("unexpected sync BIO at %p:%d", disk, idx)); 1307 disk->d_sync.ds_bios[idx] = NULL; 1308 } 1309 free(bp->bio_data, M_MIRROR); 1310 g_destroy_bio(bp); 1311 } 1312 1313 /* 1314 * Handle synchronization requests. 1315 * Every synchronization request is a two-step process: first, a read request is 1316 * sent to the mirror provider via the sync consumer. If that request completes 1317 * successfully, it is converted to a write and sent to the disk being 1318 * synchronized. If the write also completes successfully, the synchronization 1319 * offset is advanced and a new read request is submitted. 1320 */ 1321 static void 1322 g_mirror_sync_request(struct g_mirror_softc *sc, struct bio *bp) 1323 { 1324 struct g_mirror_disk *disk; 1325 struct g_mirror_disk_sync *sync; 1326 1327 KASSERT((bp->bio_cmd == BIO_READ && 1328 bp->bio_from->geom == sc->sc_sync.ds_geom) || 1329 (bp->bio_cmd == BIO_WRITE && bp->bio_from->geom == sc->sc_geom), 1330 ("Sync BIO %p with unexpected origin", bp)); 1331 1332 bp->bio_from->index--; 1333 disk = bp->bio_from->private; 1334 if (disk == NULL) { 1335 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1336 g_topology_lock(); 1337 g_mirror_kill_consumer(sc, bp->bio_from); 1338 g_topology_unlock(); 1339 g_mirror_sync_request_free(NULL, bp); 1340 sx_xlock(&sc->sc_lock); 1341 return; 1342 } 1343 1344 sync = &disk->d_sync; 1345 1346 /* 1347 * Synchronization request. 
1348 */ 1349 switch (bp->bio_cmd) { 1350 case BIO_READ: { 1351 struct g_consumer *cp; 1352 1353 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read, 1354 bp->bio_error); 1355 1356 if (bp->bio_error != 0) { 1357 G_MIRROR_LOGREQ(0, bp, 1358 "Synchronization request failed (error=%d).", 1359 bp->bio_error); 1360 1361 /* 1362 * The read error will trigger a syncid bump, so there's 1363 * no need to do that here. 1364 * 1365 * The read error handling for regular requests will 1366 * retry the read from all active mirrors before passing 1367 * the error back up, so there's no need to retry here. 1368 */ 1369 g_mirror_sync_request_free(disk, bp); 1370 g_mirror_event_send(disk, 1371 G_MIRROR_DISK_STATE_DISCONNECTED, 1372 G_MIRROR_EVENT_DONTWAIT); 1373 return; 1374 } 1375 G_MIRROR_LOGREQ(3, bp, 1376 "Synchronization request half-finished."); 1377 bp->bio_cmd = BIO_WRITE; 1378 bp->bio_cflags = 0; 1379 cp = disk->d_consumer; 1380 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1381 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1382 cp->acr, cp->acw, cp->ace)); 1383 cp->index++; 1384 g_io_request(bp, cp); 1385 return; 1386 } 1387 case BIO_WRITE: { 1388 off_t offset; 1389 int i; 1390 1391 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write, 1392 bp->bio_error); 1393 1394 if (bp->bio_error != 0) { 1395 G_MIRROR_LOGREQ(0, bp, 1396 "Synchronization request failed (error=%d).", 1397 bp->bio_error); 1398 g_mirror_sync_request_free(disk, bp); 1399 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1400 g_mirror_event_send(disk, 1401 G_MIRROR_DISK_STATE_DISCONNECTED, 1402 G_MIRROR_EVENT_DONTWAIT); 1403 return; 1404 } 1405 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1406 if (sync->ds_offset >= sc->sc_mediasize || 1407 sync->ds_consumer == NULL || 1408 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1409 /* Don't send more synchronization requests. */ 1410 sync->ds_inflight--; 1411 g_mirror_sync_request_free(disk, bp); 1412 if (sync->ds_inflight > 0) 1413 return; 1414 if (sync->ds_consumer == NULL || 1415 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1416 return; 1417 } 1418 /* Disk up-to-date, activate it. */ 1419 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1420 G_MIRROR_EVENT_DONTWAIT); 1421 return; 1422 } 1423 1424 /* Send next synchronization request. */ 1425 g_mirror_sync_reinit(disk, bp, sync->ds_offset); 1426 sync->ds_offset += bp->bio_length; 1427 1428 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1429 sync->ds_consumer->index++; 1430 1431 /* 1432 * Delay the request if it is colliding with a regular request. 1433 */ 1434 if (g_mirror_regular_collision(sc, bp)) 1435 g_mirror_sync_delay(sc, bp); 1436 else 1437 g_io_request(bp, sync->ds_consumer); 1438 1439 /* Requeue delayed requests if possible. 
*/ 1440 g_mirror_regular_release(sc); 1441 1442 /* Find the smallest offset */ 1443 offset = sc->sc_mediasize; 1444 for (i = 0; i < g_mirror_syncreqs; i++) { 1445 bp = sync->ds_bios[i]; 1446 if (bp != NULL && bp->bio_offset < offset) 1447 offset = bp->bio_offset; 1448 } 1449 if (g_mirror_sync_period > 0 && 1450 time_uptime - sync->ds_update_ts > g_mirror_sync_period) { 1451 sync->ds_offset_done = offset; 1452 g_mirror_update_metadata(disk); 1453 sync->ds_update_ts = time_uptime; 1454 } 1455 return; 1456 } 1457 default: 1458 panic("Invalid I/O request %p", bp); 1459 } 1460 } 1461 1462 static void 1463 g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1464 { 1465 struct g_mirror_disk *disk; 1466 struct g_consumer *cp; 1467 struct bio *cbp; 1468 1469 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1470 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1471 break; 1472 } 1473 if (disk == NULL) { 1474 if (bp->bio_error == 0) 1475 bp->bio_error = ENXIO; 1476 g_io_deliver(bp, bp->bio_error); 1477 return; 1478 } 1479 cbp = g_clone_bio(bp); 1480 if (cbp == NULL) { 1481 if (bp->bio_error == 0) 1482 bp->bio_error = ENOMEM; 1483 g_io_deliver(bp, bp->bio_error); 1484 return; 1485 } 1486 /* 1487 * Fill in the component buf structure. 1488 */ 1489 cp = disk->d_consumer; 1490 cbp->bio_done = g_mirror_done; 1491 cbp->bio_to = cp->provider; 1492 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1493 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1494 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1495 cp->acw, cp->ace)); 1496 cp->index++; 1497 g_io_request(cbp, cp); 1498 } 1499 1500 static void 1501 g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1502 { 1503 struct g_mirror_disk *disk; 1504 struct g_consumer *cp; 1505 struct bio *cbp; 1506 1507 disk = g_mirror_get_disk(sc); 1508 if (disk == NULL) { 1509 if (bp->bio_error == 0) 1510 bp->bio_error = ENXIO; 1511 g_io_deliver(bp, bp->bio_error); 1512 return; 1513 } 1514 cbp = g_clone_bio(bp); 1515 if (cbp == NULL) { 1516 if (bp->bio_error == 0) 1517 bp->bio_error = ENOMEM; 1518 g_io_deliver(bp, bp->bio_error); 1519 return; 1520 } 1521 /* 1522 * Fill in the component buf structure. 1523 */ 1524 cp = disk->d_consumer; 1525 cbp->bio_done = g_mirror_done; 1526 cbp->bio_to = cp->provider; 1527 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1528 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1529 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1530 cp->acw, cp->ace)); 1531 cp->index++; 1532 g_io_request(cbp, cp); 1533 } 1534 1535 #define TRACK_SIZE (1 * 1024 * 1024) 1536 #define LOAD_SCALE 256 1537 #define ABS(x) (((x) >= 0) ? (x) : (-(x))) 1538 1539 static void 1540 g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1541 { 1542 struct g_mirror_disk *disk, *dp; 1543 struct g_consumer *cp; 1544 struct bio *cbp; 1545 int prio, best; 1546 1547 /* Find a disk with the smallest load. */ 1548 disk = NULL; 1549 best = INT_MAX; 1550 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1551 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1552 continue; 1553 prio = dp->load; 1554 /* If disk head is precisely in position - highly prefer it. */ 1555 if (dp->d_last_offset == bp->bio_offset) 1556 prio -= 2 * LOAD_SCALE; 1557 else 1558 /* If disk head is close to position - prefer it. 
		 */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember the last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
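	 * If any clone allocation fails, all clones created so far are
	 * destroyed and the parent bio is completed with ENOMEM.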
1618 */ 1619 left = bp->bio_length; 1620 offset = bp->bio_offset; 1621 data = bp->bio_data; 1622 TAILQ_INIT(&queue); 1623 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1624 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1625 continue; 1626 cbp = g_clone_bio(bp); 1627 if (cbp == NULL) { 1628 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1629 TAILQ_REMOVE(&queue, cbp, bio_queue); 1630 g_destroy_bio(cbp); 1631 } 1632 if (bp->bio_error == 0) 1633 bp->bio_error = ENOMEM; 1634 g_io_deliver(bp, bp->bio_error); 1635 return; 1636 } 1637 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1638 cbp->bio_done = g_mirror_done; 1639 cbp->bio_caller1 = disk; 1640 cbp->bio_to = disk->d_consumer->provider; 1641 cbp->bio_offset = offset; 1642 cbp->bio_data = data; 1643 cbp->bio_length = MIN(left, slice); 1644 left -= cbp->bio_length; 1645 if (left == 0) 1646 break; 1647 offset += cbp->bio_length; 1648 data += cbp->bio_length; 1649 } 1650 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1651 TAILQ_REMOVE(&queue, cbp, bio_queue); 1652 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1653 disk = cbp->bio_caller1; 1654 cbp->bio_caller1 = NULL; 1655 cp = disk->d_consumer; 1656 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1657 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1658 cp->acr, cp->acw, cp->ace)); 1659 disk->d_consumer->index++; 1660 g_io_request(cbp, disk->d_consumer); 1661 } 1662 } 1663 1664 static void 1665 g_mirror_register_request(struct g_mirror_softc *sc, struct bio *bp) 1666 { 1667 struct bio_queue queue; 1668 struct bio *cbp; 1669 struct g_consumer *cp; 1670 struct g_mirror_disk *disk; 1671 1672 sx_assert(&sc->sc_lock, SA_XLOCKED); 1673 1674 /* 1675 * To avoid ordering issues, if a write is deferred because of a 1676 * collision with a sync request, all I/O is deferred until that 1677 * write is initiated. 1678 */ 1679 if (bp->bio_from->geom != sc->sc_sync.ds_geom && 1680 !TAILQ_EMPTY(&sc->sc_regular_delayed)) { 1681 g_mirror_regular_delay(sc, bp); 1682 return; 1683 } 1684 1685 switch (bp->bio_cmd) { 1686 case BIO_READ: 1687 switch (sc->sc_balance) { 1688 case G_MIRROR_BALANCE_LOAD: 1689 g_mirror_request_load(sc, bp); 1690 break; 1691 case G_MIRROR_BALANCE_PREFER: 1692 g_mirror_request_prefer(sc, bp); 1693 break; 1694 case G_MIRROR_BALANCE_ROUND_ROBIN: 1695 g_mirror_request_round_robin(sc, bp); 1696 break; 1697 case G_MIRROR_BALANCE_SPLIT: 1698 g_mirror_request_split(sc, bp); 1699 break; 1700 } 1701 return; 1702 case BIO_WRITE: 1703 case BIO_DELETE: 1704 /* 1705 * Delay the request if it is colliding with a synchronization 1706 * request. 1707 */ 1708 if (g_mirror_sync_collision(sc, bp)) { 1709 g_mirror_regular_delay(sc, bp); 1710 return; 1711 } 1712 1713 if (sc->sc_idle) 1714 g_mirror_unidle(sc); 1715 else 1716 sc->sc_last_write = time_uptime; 1717 1718 /* 1719 * Bump syncid on first write. 1720 */ 1721 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1722 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1723 g_mirror_bump_syncid(sc); 1724 } 1725 1726 /* 1727 * Allocate all bios before sending any request, so we can 1728 * return ENOMEM in nice and clean way. 
1729 */ 1730 TAILQ_INIT(&queue); 1731 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1732 switch (disk->d_state) { 1733 case G_MIRROR_DISK_STATE_ACTIVE: 1734 break; 1735 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1736 if (bp->bio_offset >= disk->d_sync.ds_offset) 1737 continue; 1738 break; 1739 default: 1740 continue; 1741 } 1742 if (bp->bio_cmd == BIO_DELETE && 1743 (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0) 1744 continue; 1745 cbp = g_clone_bio(bp); 1746 if (cbp == NULL) { 1747 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1748 TAILQ_REMOVE(&queue, cbp, bio_queue); 1749 g_destroy_bio(cbp); 1750 } 1751 if (bp->bio_error == 0) 1752 bp->bio_error = ENOMEM; 1753 g_io_deliver(bp, bp->bio_error); 1754 return; 1755 } 1756 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1757 cbp->bio_done = g_mirror_done; 1758 cp = disk->d_consumer; 1759 cbp->bio_caller1 = cp; 1760 cbp->bio_to = cp->provider; 1761 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1762 ("Consumer %s not opened (r%dw%de%d).", 1763 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1764 } 1765 if (TAILQ_EMPTY(&queue)) { 1766 KASSERT(bp->bio_cmd == BIO_DELETE, 1767 ("No consumers for regular request %p", bp)); 1768 g_io_deliver(bp, EOPNOTSUPP); 1769 return; 1770 } 1771 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1772 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1773 TAILQ_REMOVE(&queue, cbp, bio_queue); 1774 cp = cbp->bio_caller1; 1775 cbp->bio_caller1 = NULL; 1776 cp->index++; 1777 sc->sc_writes++; 1778 g_io_request(cbp, cp); 1779 } 1780 /* 1781 * Put request onto inflight queue, so we can check if new 1782 * synchronization requests don't collide with it. 1783 */ 1784 TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue); 1785 return; 1786 case BIO_FLUSH: 1787 TAILQ_INIT(&queue); 1788 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1789 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1790 continue; 1791 cbp = g_clone_bio(bp); 1792 if (cbp == NULL) { 1793 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1794 TAILQ_REMOVE(&queue, cbp, bio_queue); 1795 g_destroy_bio(cbp); 1796 } 1797 if (bp->bio_error == 0) 1798 bp->bio_error = ENOMEM; 1799 g_io_deliver(bp, bp->bio_error); 1800 return; 1801 } 1802 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1803 cbp->bio_done = g_mirror_done; 1804 cbp->bio_caller1 = disk; 1805 cbp->bio_to = disk->d_consumer->provider; 1806 } 1807 KASSERT(!TAILQ_EMPTY(&queue), 1808 ("No consumers for regular request %p", bp)); 1809 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1810 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1811 TAILQ_REMOVE(&queue, cbp, bio_queue); 1812 disk = cbp->bio_caller1; 1813 cbp->bio_caller1 = NULL; 1814 cp = disk->d_consumer; 1815 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1816 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1817 cp->acr, cp->acw, cp->ace)); 1818 cp->index++; 1819 g_io_request(cbp, cp); 1820 } 1821 break; 1822 default: 1823 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1824 bp->bio_cmd, sc->sc_name)); 1825 break; 1826 } 1827 } 1828 1829 static int 1830 g_mirror_can_destroy(struct g_mirror_softc *sc) 1831 { 1832 struct g_geom *gp; 1833 struct g_consumer *cp; 1834 1835 g_topology_assert(); 1836 gp = sc->sc_geom; 1837 if (gp->softc == NULL) 1838 return (1); 1839 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0) 1840 return (0); 1841 LIST_FOREACH(cp, &gp->consumer, consumer) { 1842 if (g_mirror_is_busy(sc, cp)) 1843 return (0); 1844 } 1845 gp = sc->sc_sync.ds_geom; 1846 LIST_FOREACH(cp, &gp->consumer, consumer) { 1847 if (g_mirror_is_busy(sc, 
		    cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * It is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_first(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, true);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, false);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.",
				    __func__, ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}

		/*
		 * Check whether we can mark the array as CLEAN and, if we
		 * cannot, how many seconds we should wait before trying again.
		 */
		timeout = g_mirror_idle(sc, -1);

		/*
		 * Handle I/O requests.
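		 * The queue is drained one bio at a time; when it is empty
		 * the worker sleeps for at most "timeout" seconds, waiting
		 * for new requests or events to arrive.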
1961 */ 1962 mtx_lock(&sc->sc_queue_mtx); 1963 bp = TAILQ_FIRST(&sc->sc_queue); 1964 if (bp != NULL) 1965 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 1966 else { 1967 if ((sc->sc_flags & 1968 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1969 mtx_unlock(&sc->sc_queue_mtx); 1970 if (g_mirror_try_destroy(sc)) { 1971 curthread->td_pflags &= ~TDP_GEOM; 1972 G_MIRROR_DEBUG(1, "Thread exiting."); 1973 kproc_exit(0); 1974 } 1975 mtx_lock(&sc->sc_queue_mtx); 1976 if (!TAILQ_EMPTY(&sc->sc_queue)) { 1977 mtx_unlock(&sc->sc_queue_mtx); 1978 continue; 1979 } 1980 } 1981 if (g_mirror_event_first(sc) != NULL) { 1982 mtx_unlock(&sc->sc_queue_mtx); 1983 continue; 1984 } 1985 sx_xunlock(&sc->sc_lock); 1986 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1987 timeout * hz); 1988 sx_xlock(&sc->sc_lock); 1989 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1990 continue; 1991 } 1992 mtx_unlock(&sc->sc_queue_mtx); 1993 1994 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 1995 (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) { 1996 /* 1997 * Handle completion of the first half (the read) of a 1998 * block synchronization operation. 1999 */ 2000 g_mirror_sync_request(sc, bp); 2001 } else if (bp->bio_to != sc->sc_provider) { 2002 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 2003 /* 2004 * Handle completion of a regular I/O request. 2005 */ 2006 g_mirror_regular_request(sc, bp); 2007 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 2008 /* 2009 * Handle completion of the second half (the 2010 * write) of a block synchronization operation. 2011 */ 2012 g_mirror_sync_request(sc, bp); 2013 else { 2014 KASSERT(0, 2015 ("Invalid request cflags=0x%hx to=%s.", 2016 bp->bio_cflags, bp->bio_to->name)); 2017 } 2018 } else { 2019 /* 2020 * Initiate an I/O request. 2021 */ 2022 g_mirror_register_request(sc, bp); 2023 } 2024 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 2025 } 2026 } 2027 2028 static void 2029 g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 2030 { 2031 2032 sx_assert(&sc->sc_lock, SX_LOCKED); 2033 2034 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 2035 return; 2036 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2037 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 2038 g_mirror_get_diskname(disk), sc->sc_name); 2039 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2040 } else if (sc->sc_idle && 2041 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2042 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 2043 g_mirror_get_diskname(disk), sc->sc_name); 2044 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2045 } 2046 } 2047 2048 static void 2049 g_mirror_sync_reinit(const struct g_mirror_disk *disk, struct bio *bp, 2050 off_t offset) 2051 { 2052 void *data; 2053 int idx; 2054 2055 data = bp->bio_data; 2056 idx = (int)(uintptr_t)bp->bio_caller1; 2057 g_reset_bio(bp); 2058 2059 bp->bio_cmd = BIO_READ; 2060 bp->bio_data = data; 2061 bp->bio_done = g_mirror_sync_done; 2062 bp->bio_from = disk->d_sync.ds_consumer; 2063 bp->bio_to = disk->d_softc->sc_provider; 2064 bp->bio_caller1 = (void *)(uintptr_t)idx; 2065 bp->bio_offset = offset; 2066 bp->bio_length = MIN(MAXPHYS, 2067 disk->d_softc->sc_mediasize - bp->bio_offset); 2068 } 2069 2070 static void 2071 g_mirror_sync_start(struct g_mirror_disk *disk) 2072 { 2073 struct g_mirror_softc *sc; 2074 struct g_mirror_disk_sync *sync; 2075 struct g_consumer *cp; 2076 struct bio *bp; 2077 int error, i; 2078 2079 g_topology_assert_not(); 2080 sc = disk->d_softc; 2081 sync = &disk->d_sync; 
2082 sx_assert(&sc->sc_lock, SX_LOCKED); 2083 2084 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2085 ("Disk %s is not marked for synchronization.", 2086 g_mirror_get_diskname(disk))); 2087 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2088 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 2089 sc->sc_state)); 2090 2091 sx_xunlock(&sc->sc_lock); 2092 g_topology_lock(); 2093 cp = g_new_consumer(sc->sc_sync.ds_geom); 2094 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 2095 error = g_attach(cp, sc->sc_provider); 2096 KASSERT(error == 0, 2097 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2098 error = g_access(cp, 1, 0, 0); 2099 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2100 g_topology_unlock(); 2101 sx_xlock(&sc->sc_lock); 2102 2103 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2104 g_mirror_get_diskname(disk)); 2105 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0) 2106 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2107 KASSERT(sync->ds_consumer == NULL, 2108 ("Sync consumer already exists (device=%s, disk=%s).", 2109 sc->sc_name, g_mirror_get_diskname(disk))); 2110 2111 sync->ds_consumer = cp; 2112 sync->ds_consumer->private = disk; 2113 sync->ds_consumer->index = 0; 2114 2115 /* 2116 * Allocate memory for synchronization bios and initialize them. 2117 */ 2118 sync->ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 2119 M_MIRROR, M_WAITOK); 2120 for (i = 0; i < g_mirror_syncreqs; i++) { 2121 bp = g_alloc_bio(); 2122 sync->ds_bios[i] = bp; 2123 2124 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 2125 bp->bio_caller1 = (void *)(uintptr_t)i; 2126 g_mirror_sync_reinit(disk, bp, sync->ds_offset); 2127 sync->ds_offset += bp->bio_length; 2128 } 2129 2130 /* Increase the number of disks in SYNCHRONIZING state. */ 2131 sc->sc_sync.ds_ndisks++; 2132 /* Set the number of in-flight synchronization requests. */ 2133 sync->ds_inflight = g_mirror_syncreqs; 2134 2135 /* 2136 * Fire off first synchronization requests. 2137 */ 2138 for (i = 0; i < g_mirror_syncreqs; i++) { 2139 bp = sync->ds_bios[i]; 2140 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 2141 sync->ds_consumer->index++; 2142 /* 2143 * Delay the request if it is colliding with a regular request. 2144 */ 2145 if (g_mirror_regular_collision(sc, bp)) 2146 g_mirror_sync_delay(sc, bp); 2147 else 2148 g_io_request(bp, sync->ds_consumer); 2149 } 2150 } 2151 2152 /* 2153 * Stop synchronization process. 
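 * The preallocated synchronization BIOs are freed and the synchronization
 * consumer is torn down.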
2154 * type: 0 - synchronization finished 2155 * 1 - synchronization stopped 2156 */ 2157 static void 2158 g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2159 { 2160 struct g_mirror_softc *sc; 2161 struct g_consumer *cp; 2162 2163 g_topology_assert_not(); 2164 sc = disk->d_softc; 2165 sx_assert(&sc->sc_lock, SX_LOCKED); 2166 2167 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2168 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2169 g_mirror_disk_state2str(disk->d_state))); 2170 if (disk->d_sync.ds_consumer == NULL) 2171 return; 2172 2173 if (type == 0) { 2174 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2175 sc->sc_name, g_mirror_get_diskname(disk)); 2176 } else /* if (type == 1) */ { 2177 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2178 sc->sc_name, g_mirror_get_diskname(disk)); 2179 } 2180 g_mirror_regular_release(sc); 2181 free(disk->d_sync.ds_bios, M_MIRROR); 2182 disk->d_sync.ds_bios = NULL; 2183 cp = disk->d_sync.ds_consumer; 2184 disk->d_sync.ds_consumer = NULL; 2185 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2186 sc->sc_sync.ds_ndisks--; 2187 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 2188 g_topology_lock(); 2189 g_mirror_kill_consumer(sc, cp); 2190 g_topology_unlock(); 2191 sx_xlock(&sc->sc_lock); 2192 } 2193 2194 static void 2195 g_mirror_launch_provider(struct g_mirror_softc *sc) 2196 { 2197 struct g_mirror_disk *disk; 2198 struct g_provider *pp, *dp; 2199 2200 sx_assert(&sc->sc_lock, SX_LOCKED); 2201 2202 g_topology_lock(); 2203 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2204 pp->flags |= G_PF_DIRECT_RECEIVE; 2205 pp->mediasize = sc->sc_mediasize; 2206 pp->sectorsize = sc->sc_sectorsize; 2207 pp->stripesize = 0; 2208 pp->stripeoffset = 0; 2209 2210 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2211 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2212 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2213 2214 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2215 if (disk->d_consumer && disk->d_consumer->provider) { 2216 dp = disk->d_consumer->provider; 2217 if (dp->stripesize > pp->stripesize) { 2218 pp->stripesize = dp->stripesize; 2219 pp->stripeoffset = dp->stripeoffset; 2220 } 2221 /* A provider underneath us doesn't support unmapped */ 2222 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2223 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2224 "because of %s.", dp->name); 2225 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2226 } 2227 } 2228 } 2229 pp->private = sc; 2230 sc->sc_refcnt++; 2231 sc->sc_provider = pp; 2232 g_error_provider(pp, 0); 2233 g_topology_unlock(); 2234 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2235 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2236 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2237 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2238 g_mirror_sync_start(disk); 2239 } 2240 } 2241 2242 static void 2243 g_mirror_destroy_provider(struct g_mirror_softc *sc) 2244 { 2245 struct g_mirror_disk *disk; 2246 struct bio *bp; 2247 2248 g_topology_assert_not(); 2249 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2250 sc->sc_name)); 2251 2252 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2253 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2254 g_mirror_sync_stop(disk, 1); 2255 } 2256 2257 g_topology_lock(); 2258 g_error_provider(sc->sc_provider, ENXIO); 2259 mtx_lock(&sc->sc_queue_mtx); 2260 while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) { 2261 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 
2262 /* 2263 * Abort any pending I/O that wasn't generated by us. 2264 * Synchronization requests and requests destined for individual 2265 * mirror components can be destroyed immediately. 2266 */ 2267 if (bp->bio_to == sc->sc_provider && 2268 bp->bio_from->geom != sc->sc_sync.ds_geom) { 2269 g_io_deliver(bp, ENXIO); 2270 } else { 2271 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 2272 free(bp->bio_data, M_MIRROR); 2273 g_destroy_bio(bp); 2274 } 2275 } 2276 mtx_unlock(&sc->sc_queue_mtx); 2277 g_wither_provider(sc->sc_provider, ENXIO); 2278 sc->sc_provider = NULL; 2279 G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name); 2280 g_topology_unlock(); 2281 } 2282 2283 static void 2284 g_mirror_go(void *arg) 2285 { 2286 struct g_mirror_softc *sc; 2287 2288 sc = arg; 2289 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2290 g_mirror_event_send(sc, 0, 2291 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2292 } 2293 2294 static u_int 2295 g_mirror_determine_state(struct g_mirror_disk *disk) 2296 { 2297 struct g_mirror_softc *sc; 2298 u_int state; 2299 2300 sc = disk->d_softc; 2301 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2302 if ((disk->d_flags & 2303 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 && 2304 (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 || 2305 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) { 2306 /* Disk does not need synchronization. */ 2307 state = G_MIRROR_DISK_STATE_ACTIVE; 2308 } else { 2309 if ((sc->sc_flags & 2310 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2311 (disk->d_flags & 2312 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2313 /* 2314 * We can start synchronization from 2315 * the stored offset. 2316 */ 2317 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2318 } else { 2319 state = G_MIRROR_DISK_STATE_STALE; 2320 } 2321 } 2322 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2323 /* 2324 * Reset all synchronization data for this disk, 2325 * because if it even was synchronized, it was 2326 * synchronized to disks with different syncid. 2327 */ 2328 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2329 disk->d_sync.ds_offset = 0; 2330 disk->d_sync.ds_offset_done = 0; 2331 disk->d_sync.ds_syncid = sc->sc_syncid; 2332 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2333 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2334 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2335 } else { 2336 state = G_MIRROR_DISK_STATE_STALE; 2337 } 2338 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2339 /* 2340 * Not good, NOT GOOD! 2341 * It means that mirror was started on stale disks 2342 * and more fresh disk just arrive. 2343 * If there were writes, mirror is broken, sorry. 2344 * I think the best choice here is don't touch 2345 * this disk and inform the user loudly. 2346 */ 2347 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2348 "disk (%s) arrives!! It will not be connected to the " 2349 "running device.", sc->sc_name, 2350 g_mirror_get_diskname(disk)); 2351 g_mirror_destroy_disk(disk); 2352 state = G_MIRROR_DISK_STATE_NONE; 2353 /* Return immediately, because disk was destroyed. */ 2354 return (state); 2355 } 2356 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2357 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2358 return (state); 2359 } 2360 2361 /* 2362 * Update device state. 
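 * Runs in the worker thread with sc_lock held exclusively.  In the STARTING
 * state it decides whether enough components have been tasted to go RUNNING;
 * in the RUNNING state it launches the provider once active disks are
 * present, marks the device for destruction when no usable disks remain, and
 * applies any pending genid/syncid bumps.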
2363 */ 2364 static void 2365 g_mirror_update_device(struct g_mirror_softc *sc, bool force) 2366 { 2367 struct g_mirror_disk *disk; 2368 u_int state; 2369 2370 sx_assert(&sc->sc_lock, SX_XLOCKED); 2371 2372 switch (sc->sc_state) { 2373 case G_MIRROR_DEVICE_STATE_STARTING: 2374 { 2375 struct g_mirror_disk *pdisk, *tdisk; 2376 const char *mismatch; 2377 uintmax_t found, newest; 2378 u_int dirty, ndisks; 2379 2380 /* Pre-flight checks */ 2381 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2382 /* 2383 * Confirm we already detected the newest genid. 2384 */ 2385 KASSERT(sc->sc_genid >= disk->d_genid, 2386 ("%s: found newer genid %u (sc:%p had %u).", __func__, 2387 disk->d_genid, sc, sc->sc_genid)); 2388 2389 /* Kick out any previously tasted stale components. */ 2390 if (disk->d_genid < sc->sc_genid) { 2391 G_MIRROR_DEBUG(0, "Stale 'genid' field on %s " 2392 "(device %s) (component=%u latest=%u), skipping.", 2393 g_mirror_get_diskname(disk), sc->sc_name, 2394 disk->d_genid, sc->sc_genid); 2395 g_mirror_destroy_disk(disk); 2396 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2397 continue; 2398 } 2399 2400 /* 2401 * Confirm we already detected the newest syncid. 2402 */ 2403 KASSERT(sc->sc_syncid >= disk->d_sync.ds_syncid, 2404 ("%s: found newer syncid %u (sc:%p had %u).", 2405 __func__, disk->d_sync.ds_syncid, sc, 2406 sc->sc_syncid)); 2407 2408 #define DETECT_MISMATCH(field, name) \ 2409 if (mismatch == NULL && \ 2410 disk->d_init_ ## field != sc->sc_ ## field) { \ 2411 mismatch = name; \ 2412 found = (intmax_t)disk->d_init_ ## field; \ 2413 newest = (intmax_t)sc->sc_ ## field; \ 2414 } 2415 mismatch = NULL; 2416 DETECT_MISMATCH(ndisks, "md_all"); 2417 DETECT_MISMATCH(balance, "md_balance"); 2418 DETECT_MISMATCH(slice, "md_slice"); 2419 DETECT_MISMATCH(mediasize, "md_mediasize"); 2420 #undef DETECT_MISMATCH 2421 if (mismatch != NULL) { 2422 G_MIRROR_DEBUG(0, "Found a mismatching '%s' " 2423 "field on %s (device %s) (found=%ju " 2424 "newest=%ju).", mismatch, 2425 g_mirror_get_diskname(disk), sc->sc_name, 2426 found, newest); 2427 g_mirror_destroy_disk(disk); 2428 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2429 continue; 2430 } 2431 } 2432 2433 KASSERT(sc->sc_provider == NULL, 2434 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2435 /* 2436 * Are we ready? If the timeout (force is true) has expired, and 2437 * any disks are present, then yes. If we're permitted to launch 2438 * before the timeout has expired and the expected number of 2439 * current-generation mirror disks have been tasted, then yes. 2440 */ 2441 ndisks = g_mirror_ndisks(sc, -1); 2442 if ((force && ndisks > 0) || 2443 (g_launch_mirror_before_timeout && ndisks == sc->sc_ndisks)) { 2444 ; 2445 } else if (ndisks == 0) { 2446 /* 2447 * Disks went down in starting phase, so destroy 2448 * device. 2449 */ 2450 callout_drain(&sc->sc_callout); 2451 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2452 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2453 sc->sc_rootmount); 2454 root_mount_rel(sc->sc_rootmount); 2455 sc->sc_rootmount = NULL; 2456 return; 2457 } else { 2458 return; 2459 } 2460 2461 /* 2462 * Activate all disks with the biggest syncid. 2463 */ 2464 if (force) { 2465 /* 2466 * If 'force' is true, we have been called due to 2467 * timeout, so don't bother canceling timeout. 
2468 */ 2469 ndisks = 0; 2470 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2471 if ((disk->d_flags & 2472 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2473 ndisks++; 2474 } 2475 } 2476 if (ndisks == 0) { 2477 /* No valid disks found, destroy device. */ 2478 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2479 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2480 __LINE__, sc->sc_rootmount); 2481 root_mount_rel(sc->sc_rootmount); 2482 sc->sc_rootmount = NULL; 2483 return; 2484 } 2485 } else { 2486 /* Cancel timeout. */ 2487 callout_drain(&sc->sc_callout); 2488 } 2489 2490 /* 2491 * Here we need to look for dirty disks and if all disks 2492 * with the biggest syncid are dirty, we have to choose 2493 * one with the biggest priority and rebuild the rest. 2494 */ 2495 /* 2496 * Find the number of dirty disks with the biggest syncid. 2497 * Find the number of disks with the biggest syncid. 2498 * While here, find a disk with the biggest priority. 2499 */ 2500 dirty = ndisks = 0; 2501 pdisk = NULL; 2502 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2503 if (disk->d_sync.ds_syncid != sc->sc_syncid) 2504 continue; 2505 if ((disk->d_flags & 2506 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2507 continue; 2508 } 2509 ndisks++; 2510 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2511 dirty++; 2512 if (pdisk == NULL || 2513 pdisk->d_priority < disk->d_priority) { 2514 pdisk = disk; 2515 } 2516 } 2517 } 2518 if (dirty == 0) { 2519 /* No dirty disks at all, great. */ 2520 } else if (dirty == ndisks) { 2521 /* 2522 * Force synchronization for all dirty disks except one 2523 * with the biggest priority. 2524 */ 2525 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2526 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2527 "master disk for synchronization.", 2528 g_mirror_get_diskname(pdisk), sc->sc_name); 2529 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2530 if (disk->d_sync.ds_syncid != sc->sc_syncid) 2531 continue; 2532 if ((disk->d_flags & 2533 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2534 continue; 2535 } 2536 KASSERT((disk->d_flags & 2537 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2538 ("Disk %s isn't marked as dirty.", 2539 g_mirror_get_diskname(disk))); 2540 /* Skip the disk with the biggest priority. */ 2541 if (disk == pdisk) 2542 continue; 2543 disk->d_sync.ds_syncid = 0; 2544 } 2545 } else if (dirty < ndisks) { 2546 /* 2547 * Force synchronization for all dirty disks. 2548 * We have some non-dirty disks. 2549 */ 2550 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2551 if (disk->d_sync.ds_syncid != sc->sc_syncid) 2552 continue; 2553 if ((disk->d_flags & 2554 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2555 continue; 2556 } 2557 if ((disk->d_flags & 2558 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2559 continue; 2560 } 2561 disk->d_sync.ds_syncid = 0; 2562 } 2563 } 2564 2565 /* Reset hint. */ 2566 sc->sc_hint = NULL; 2567 if (force) { 2568 /* Remember to bump syncid on first write. 
*/ 2569 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2570 } 2571 state = G_MIRROR_DEVICE_STATE_RUNNING; 2572 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2573 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2574 g_mirror_device_state2str(state)); 2575 sc->sc_state = state; 2576 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2577 state = g_mirror_determine_state(disk); 2578 g_mirror_event_send(disk, state, 2579 G_MIRROR_EVENT_DONTWAIT); 2580 if (state == G_MIRROR_DISK_STATE_STALE) 2581 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2582 } 2583 break; 2584 } 2585 case G_MIRROR_DEVICE_STATE_RUNNING: 2586 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2587 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2588 /* 2589 * No usable disks, so destroy the device. 2590 */ 2591 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2592 break; 2593 } else if (g_mirror_ndisks(sc, 2594 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2595 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2596 /* 2597 * We have active disks, launch provider if it doesn't 2598 * exist. 2599 */ 2600 if (sc->sc_provider == NULL) 2601 g_mirror_launch_provider(sc); 2602 if (sc->sc_rootmount != NULL) { 2603 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2604 __LINE__, sc->sc_rootmount); 2605 root_mount_rel(sc->sc_rootmount); 2606 sc->sc_rootmount = NULL; 2607 } 2608 } 2609 /* 2610 * Genid should be bumped immediately, so do it here. 2611 */ 2612 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2613 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2614 g_mirror_bump_genid(sc); 2615 } 2616 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) { 2617 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW; 2618 g_mirror_bump_syncid(sc); 2619 } 2620 break; 2621 default: 2622 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2623 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2624 break; 2625 } 2626 } 2627 2628 /* 2629 * Update disk state and device state if needed. 2630 */ 2631 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2632 "Disk %s state changed from %s to %s (device %s).", \ 2633 g_mirror_get_diskname(disk), \ 2634 g_mirror_disk_state2str(disk->d_state), \ 2635 g_mirror_disk_state2str(state), sc->sc_name) 2636 static int 2637 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2638 { 2639 struct g_mirror_softc *sc; 2640 2641 sc = disk->d_softc; 2642 sx_assert(&sc->sc_lock, SX_XLOCKED); 2643 2644 again: 2645 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2646 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2647 g_mirror_disk_state2str(state)); 2648 switch (state) { 2649 case G_MIRROR_DISK_STATE_NEW: 2650 /* 2651 * Possible scenarios: 2652 * 1. New disk arrive. 2653 */ 2654 /* Previous state should be NONE. 
*/ 2655 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2656 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2657 g_mirror_disk_state2str(disk->d_state))); 2658 DISK_STATE_CHANGED(); 2659 2660 disk->d_state = state; 2661 g_topology_lock(); 2662 if (LIST_EMPTY(&sc->sc_disks)) 2663 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2664 else { 2665 struct g_mirror_disk *dp; 2666 2667 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2668 if (disk->d_priority >= dp->d_priority) { 2669 LIST_INSERT_BEFORE(dp, disk, d_next); 2670 dp = NULL; 2671 break; 2672 } 2673 if (LIST_NEXT(dp, d_next) == NULL) 2674 break; 2675 } 2676 if (dp != NULL) 2677 LIST_INSERT_AFTER(dp, disk, d_next); 2678 } 2679 g_topology_unlock(); 2680 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2681 sc->sc_name, g_mirror_get_diskname(disk)); 2682 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2683 break; 2684 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2685 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2686 g_mirror_device_state2str(sc->sc_state), 2687 g_mirror_get_diskname(disk), 2688 g_mirror_disk_state2str(disk->d_state))); 2689 state = g_mirror_determine_state(disk); 2690 if (state != G_MIRROR_DISK_STATE_NONE) 2691 goto again; 2692 break; 2693 case G_MIRROR_DISK_STATE_ACTIVE: 2694 /* 2695 * Possible scenarios: 2696 * 1. New disk does not need synchronization. 2697 * 2. Synchronization process finished successfully. 2698 */ 2699 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2700 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2701 g_mirror_device_state2str(sc->sc_state), 2702 g_mirror_get_diskname(disk), 2703 g_mirror_disk_state2str(disk->d_state))); 2704 /* Previous state should be NEW or SYNCHRONIZING. */ 2705 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2706 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2707 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2708 g_mirror_disk_state2str(disk->d_state))); 2709 DISK_STATE_CHANGED(); 2710 2711 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2712 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2713 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2714 g_mirror_sync_stop(disk, 0); 2715 } 2716 disk->d_state = state; 2717 disk->d_sync.ds_offset = 0; 2718 disk->d_sync.ds_offset_done = 0; 2719 g_mirror_update_idle(sc, disk); 2720 g_mirror_update_metadata(disk); 2721 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2722 sc->sc_name, g_mirror_get_diskname(disk)); 2723 break; 2724 case G_MIRROR_DISK_STATE_STALE: 2725 /* 2726 * Possible scenarios: 2727 * 1. Stale disk was connected. 2728 */ 2729 /* Previous state should be NEW. */ 2730 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2731 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2732 g_mirror_disk_state2str(disk->d_state))); 2733 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2734 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2735 g_mirror_device_state2str(sc->sc_state), 2736 g_mirror_get_diskname(disk), 2737 g_mirror_disk_state2str(disk->d_state))); 2738 /* 2739 * STALE state is only possible if device is marked 2740 * NOAUTOSYNC. 
*/ 2742 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2743 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2744 g_mirror_device_state2str(sc->sc_state), 2745 g_mirror_get_diskname(disk), 2746 g_mirror_disk_state2str(disk->d_state))); 2747 DISK_STATE_CHANGED(); 2748 2749 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2750 disk->d_state = state; 2751 g_mirror_update_metadata(disk); 2752 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2753 sc->sc_name, g_mirror_get_diskname(disk)); 2754 break; 2755 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2756 /* 2757 * Possible scenarios: 2758 * 1. A disk that needs synchronization was connected. 2759 */ 2760 /* Previous state should be NEW. */ 2761 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2762 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2763 g_mirror_disk_state2str(disk->d_state))); 2764 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2765 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2766 g_mirror_device_state2str(sc->sc_state), 2767 g_mirror_get_diskname(disk), 2768 g_mirror_disk_state2str(disk->d_state))); 2769 DISK_STATE_CHANGED(); 2770 2771 if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2772 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2773 disk->d_state = state; 2774 if (sc->sc_provider != NULL) { 2775 g_mirror_sync_start(disk); 2776 g_mirror_update_metadata(disk); 2777 } 2778 break; 2779 case G_MIRROR_DISK_STATE_DISCONNECTED: 2780 /* 2781 * Possible scenarios: 2782 * 1. Device wasn't running yet, but a disk disappeared. 2783 * 2. Disk was active and disappeared. 2784 * 3. Disk disappeared during the synchronization process. 2785 */ 2786 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2787 /* 2788 * Previous state should be ACTIVE, STALE or 2789 * SYNCHRONIZING. 2790 */ 2791 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2792 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2793 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2794 ("Wrong disk state (%s, %s).", 2795 g_mirror_get_diskname(disk), 2796 g_mirror_disk_state2str(disk->d_state))); 2797 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2798 /* Previous state should be NEW. */ 2799 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2800 ("Wrong disk state (%s, %s).", 2801 g_mirror_get_diskname(disk), 2802 g_mirror_disk_state2str(disk->d_state))); 2803 /* 2804 * Reset the pending syncid bump if a disk disappeared in the 2805 * STARTING state. 
2806 */ 2807 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2808 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2809 #ifdef INVARIANTS 2810 } else { 2811 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2812 sc->sc_name, 2813 g_mirror_device_state2str(sc->sc_state), 2814 g_mirror_get_diskname(disk), 2815 g_mirror_disk_state2str(disk->d_state))); 2816 #endif 2817 } 2818 DISK_STATE_CHANGED(); 2819 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2820 sc->sc_name, g_mirror_get_diskname(disk)); 2821 2822 g_mirror_destroy_disk(disk); 2823 break; 2824 case G_MIRROR_DISK_STATE_DESTROY: 2825 { 2826 int error; 2827 2828 error = g_mirror_clear_metadata(disk); 2829 if (error != 0) { 2830 G_MIRROR_DEBUG(0, 2831 "Device %s: failed to clear metadata on %s: %d.", 2832 sc->sc_name, g_mirror_get_diskname(disk), error); 2833 break; 2834 } 2835 DISK_STATE_CHANGED(); 2836 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2837 sc->sc_name, g_mirror_get_diskname(disk)); 2838 2839 g_mirror_destroy_disk(disk); 2840 sc->sc_ndisks--; 2841 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2842 g_mirror_update_metadata(disk); 2843 } 2844 break; 2845 } 2846 default: 2847 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2848 break; 2849 } 2850 return (0); 2851 } 2852 #undef DISK_STATE_CHANGED 2853 2854 int 2855 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2856 { 2857 struct g_provider *pp; 2858 u_char *buf; 2859 int error; 2860 2861 g_topology_assert(); 2862 2863 error = g_access(cp, 1, 0, 0); 2864 if (error != 0) 2865 return (error); 2866 pp = cp->provider; 2867 g_topology_unlock(); 2868 /* Metadata are stored on last sector. */ 2869 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2870 &error); 2871 g_topology_lock(); 2872 g_access(cp, -1, 0, 0); 2873 if (buf == NULL) { 2874 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2875 cp->provider->name, error); 2876 return (error); 2877 } 2878 2879 /* Decode metadata. 
*/ 2880 error = mirror_metadata_decode(buf, md); 2881 g_free(buf); 2882 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2883 return (EINVAL); 2884 if (md->md_version > G_MIRROR_VERSION) { 2885 G_MIRROR_DEBUG(0, 2886 "Kernel module is too old to handle metadata from %s.", 2887 cp->provider->name); 2888 return (EINVAL); 2889 } 2890 if (error != 0) { 2891 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2892 cp->provider->name); 2893 return (error); 2894 } 2895 2896 return (0); 2897 } 2898 2899 static int 2900 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2901 struct g_mirror_metadata *md) 2902 { 2903 2904 G_MIRROR_DEBUG(2, "%s: md_did 0x%u disk %s device %s md_all 0x%x " 2905 "sc_ndisks 0x%x md_slice 0x%x sc_slice 0x%x md_balance 0x%x " 2906 "sc_balance 0x%x sc_mediasize 0x%jx pp_mediasize 0x%jx " 2907 "md_sectorsize 0x%x sc_sectorsize 0x%x md_mflags 0x%jx " 2908 "md_dflags 0x%jx md_syncid 0x%x md_genid 0x%x md_priority 0x%x " 2909 "sc_state 0x%x.", 2910 __func__, md->md_did, pp->name, sc->sc_name, md->md_all, 2911 sc->sc_ndisks, md->md_slice, sc->sc_slice, md->md_balance, 2912 sc->sc_balance, (uintmax_t)sc->sc_mediasize, 2913 (uintmax_t)pp->mediasize, md->md_sectorsize, sc->sc_sectorsize, 2914 (uintmax_t)md->md_mflags, (uintmax_t)md->md_dflags, md->md_syncid, 2915 md->md_genid, md->md_priority, sc->sc_state); 2916 2917 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2918 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2919 pp->name, md->md_did); 2920 return (EEXIST); 2921 } 2922 if (sc->sc_mediasize > pp->mediasize) { 2923 G_MIRROR_DEBUG(1, 2924 "Invalid size of disk %s (device %s), skipping.", pp->name, 2925 sc->sc_name); 2926 return (EINVAL); 2927 } 2928 if (md->md_sectorsize != sc->sc_sectorsize) { 2929 G_MIRROR_DEBUG(1, 2930 "Invalid '%s' field on disk %s (device %s), skipping.", 2931 "md_sectorsize", pp->name, sc->sc_name); 2932 return (EINVAL); 2933 } 2934 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2935 G_MIRROR_DEBUG(1, 2936 "Invalid sector size of disk %s (device %s), skipping.", 2937 pp->name, sc->sc_name); 2938 return (EINVAL); 2939 } 2940 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2941 G_MIRROR_DEBUG(1, 2942 "Invalid device flags on disk %s (device %s), skipping.", 2943 pp->name, sc->sc_name); 2944 return (EINVAL); 2945 } 2946 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2947 G_MIRROR_DEBUG(1, 2948 "Invalid disk flags on disk %s (device %s), skipping.", 2949 pp->name, sc->sc_name); 2950 return (EINVAL); 2951 } 2952 return (0); 2953 } 2954 2955 int 2956 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2957 struct g_mirror_metadata *md) 2958 { 2959 struct g_mirror_disk *disk; 2960 int error; 2961 2962 g_topology_assert_not(); 2963 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2964 2965 error = g_mirror_check_metadata(sc, pp, md); 2966 if (error != 0) 2967 return (error); 2968 2969 if (md->md_genid < sc->sc_genid) { 2970 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2971 pp->name, sc->sc_name); 2972 return (EINVAL); 2973 } 2974 2975 /* 2976 * If the component disk we're tasting has newer metadata than the 2977 * STARTING gmirror device, refresh the device from the component. 
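 * g_mirror_refresh_device() below re-reads the device-wide parameters
 * (genid, syncid, number of components, balance algorithm, slice size,
 * media size) from this component's metadata.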
2978 */ 2979 error = g_mirror_refresh_device(sc, pp, md); 2980 if (error != 0) 2981 return (error); 2982 2983 disk = g_mirror_init_disk(sc, pp, md, &error); 2984 if (disk == NULL) 2985 return (error); 2986 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2987 G_MIRROR_EVENT_WAIT); 2988 if (error != 0) 2989 return (error); 2990 if (md->md_version < G_MIRROR_VERSION) { 2991 G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 2992 pp->name, md->md_version, G_MIRROR_VERSION); 2993 g_mirror_update_metadata(disk); 2994 } 2995 return (0); 2996 } 2997 2998 static void 2999 g_mirror_destroy_delayed(void *arg, int flag) 3000 { 3001 struct g_mirror_softc *sc; 3002 int error; 3003 3004 if (flag == EV_CANCEL) { 3005 G_MIRROR_DEBUG(1, "Destroying canceled."); 3006 return; 3007 } 3008 sc = arg; 3009 g_topology_unlock(); 3010 sx_xlock(&sc->sc_lock); 3011 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 3012 ("DESTROY flag set on %s.", sc->sc_name)); 3013 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0, 3014 ("CLOSEWAIT flag not set on %s.", sc->sc_name)); 3015 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 3016 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 3017 if (error != 0) { 3018 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 3019 sc->sc_name, error); 3020 sx_xunlock(&sc->sc_lock); 3021 } 3022 g_topology_lock(); 3023 } 3024 3025 static int 3026 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 3027 { 3028 struct g_mirror_softc *sc; 3029 int error = 0; 3030 3031 g_topology_assert(); 3032 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 3033 acw, ace); 3034 3035 sc = pp->private; 3036 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 3037 3038 g_topology_unlock(); 3039 sx_xlock(&sc->sc_lock); 3040 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 3041 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 || 3042 LIST_EMPTY(&sc->sc_disks)) { 3043 if (acr > 0 || acw > 0 || ace > 0) 3044 error = ENXIO; 3045 goto end; 3046 } 3047 sc->sc_provider_open += acr + acw + ace; 3048 if (pp->acw + acw == 0) 3049 g_mirror_idle(sc, 0); 3050 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 && 3051 sc->sc_provider_open == 0) 3052 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL); 3053 end: 3054 sx_xunlock(&sc->sc_lock); 3055 g_topology_lock(); 3056 return (error); 3057 } 3058 3059 static void 3060 g_mirror_reinit_from_metadata(struct g_mirror_softc *sc, 3061 const struct g_mirror_metadata *md) 3062 { 3063 3064 sc->sc_genid = md->md_genid; 3065 sc->sc_syncid = md->md_syncid; 3066 3067 sc->sc_slice = md->md_slice; 3068 sc->sc_balance = md->md_balance; 3069 sc->sc_mediasize = md->md_mediasize; 3070 sc->sc_ndisks = md->md_all; 3071 sc->sc_flags = md->md_mflags; 3072 } 3073 3074 struct g_geom * 3075 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md, 3076 u_int type) 3077 { 3078 struct g_mirror_softc *sc; 3079 struct g_geom *gp; 3080 int error, timeout; 3081 3082 g_topology_assert(); 3083 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 3084 md->md_mid); 3085 3086 /* One disk is minimum. */ 3087 if (md->md_all < 1) 3088 return (NULL); 3089 /* 3090 * Action geom. 
3091 */ 3092 gp = g_new_geomf(mp, "%s", md->md_name); 3093 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 3094 gp->start = g_mirror_start; 3095 gp->orphan = g_mirror_orphan; 3096 gp->access = g_mirror_access; 3097 gp->dumpconf = g_mirror_dumpconf; 3098 3099 sc->sc_type = type; 3100 sc->sc_id = md->md_mid; 3101 g_mirror_reinit_from_metadata(sc, md); 3102 sc->sc_sectorsize = md->md_sectorsize; 3103 sc->sc_bump_id = 0; 3104 sc->sc_idle = 1; 3105 sc->sc_last_write = time_uptime; 3106 sc->sc_writes = 0; 3107 sc->sc_refcnt = 1; 3108 sx_init(&sc->sc_lock, "gmirror:lock"); 3109 TAILQ_INIT(&sc->sc_queue); 3110 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 3111 TAILQ_INIT(&sc->sc_regular_delayed); 3112 TAILQ_INIT(&sc->sc_inflight); 3113 TAILQ_INIT(&sc->sc_sync_delayed); 3114 LIST_INIT(&sc->sc_disks); 3115 TAILQ_INIT(&sc->sc_events); 3116 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 3117 callout_init(&sc->sc_callout, 1); 3118 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 3119 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 3120 gp->softc = sc; 3121 sc->sc_geom = gp; 3122 sc->sc_provider = NULL; 3123 sc->sc_provider_open = 0; 3124 /* 3125 * Synchronization geom. 3126 */ 3127 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3128 gp->softc = sc; 3129 gp->orphan = g_mirror_orphan; 3130 sc->sc_sync.ds_geom = gp; 3131 sc->sc_sync.ds_ndisks = 0; 3132 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 3133 "g_mirror %s", md->md_name); 3134 if (error != 0) { 3135 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 3136 sc->sc_name); 3137 g_destroy_geom(sc->sc_sync.ds_geom); 3138 g_destroy_geom(sc->sc_geom); 3139 g_mirror_free_device(sc); 3140 return (NULL); 3141 } 3142 3143 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 3144 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3145 3146 sc->sc_rootmount = root_mount_hold("GMIRROR"); 3147 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3148 /* 3149 * Run timeout. 
3150 */ 3151 timeout = g_mirror_timeout * hz; 3152 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 3153 return (sc->sc_geom); 3154 } 3155 3156 int 3157 g_mirror_destroy(struct g_mirror_softc *sc, int how) 3158 { 3159 struct g_mirror_disk *disk; 3160 3161 g_topology_assert_not(); 3162 sx_assert(&sc->sc_lock, SX_XLOCKED); 3163 3164 if (sc->sc_provider_open != 0) { 3165 switch (how) { 3166 case G_MIRROR_DESTROY_SOFT: 3167 G_MIRROR_DEBUG(1, 3168 "Device %s is still open (%d).", sc->sc_name, 3169 sc->sc_provider_open); 3170 return (EBUSY); 3171 case G_MIRROR_DESTROY_DELAYED: 3172 G_MIRROR_DEBUG(1, 3173 "Device %s will be destroyed on last close.", 3174 sc->sc_name); 3175 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 3176 if (disk->d_state == 3177 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3178 g_mirror_sync_stop(disk, 1); 3179 } 3180 } 3181 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT; 3182 return (EBUSY); 3183 case G_MIRROR_DESTROY_HARD: 3184 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 3185 "can't be definitely removed.", sc->sc_name); 3186 } 3187 } 3188 3189 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3190 sx_xunlock(&sc->sc_lock); 3191 return (0); 3192 } 3193 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3194 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN; 3195 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3196 sx_xunlock(&sc->sc_lock); 3197 mtx_lock(&sc->sc_queue_mtx); 3198 wakeup(sc); 3199 mtx_unlock(&sc->sc_queue_mtx); 3200 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3201 while (sc->sc_worker != NULL) 3202 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3203 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3204 sx_xlock(&sc->sc_lock); 3205 g_mirror_destroy_device(sc); 3206 return (0); 3207 } 3208 3209 static void 3210 g_mirror_taste_orphan(struct g_consumer *cp) 3211 { 3212 3213 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3214 cp->provider->name)); 3215 } 3216 3217 static struct g_geom * 3218 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3219 { 3220 struct g_mirror_metadata md; 3221 struct g_mirror_softc *sc; 3222 struct g_consumer *cp; 3223 struct g_geom *gp; 3224 int error; 3225 3226 g_topology_assert(); 3227 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3228 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3229 3230 gp = g_new_geomf(mp, "mirror:taste"); 3231 /* 3232 * This orphan function should be never called. 3233 */ 3234 gp->orphan = g_mirror_taste_orphan; 3235 cp = g_new_consumer(gp); 3236 g_attach(cp, pp); 3237 error = g_mirror_read_metadata(cp, &md); 3238 g_detach(cp); 3239 g_destroy_consumer(cp); 3240 g_destroy_geom(gp); 3241 if (error != 0) 3242 return (NULL); 3243 gp = NULL; 3244 3245 if (md.md_provider[0] != '\0' && 3246 !g_compare_names(md.md_provider, pp->name)) 3247 return (NULL); 3248 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3249 return (NULL); 3250 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3251 G_MIRROR_DEBUG(0, 3252 "Device %s: provider %s marked as inactive, skipping.", 3253 md.md_name, pp->name); 3254 return (NULL); 3255 } 3256 if (g_mirror_debug >= 2) 3257 mirror_metadata_dump(&md); 3258 3259 /* 3260 * Let's check if device already exists. 
3261 */ 3262 sc = NULL; 3263 LIST_FOREACH(gp, &mp->geom, geom) { 3264 sc = gp->softc; 3265 if (sc == NULL) 3266 continue; 3267 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 3268 continue; 3269 if (sc->sc_sync.ds_geom == gp) 3270 continue; 3271 if (strcmp(md.md_name, sc->sc_name) != 0) 3272 continue; 3273 if (md.md_mid != sc->sc_id) { 3274 G_MIRROR_DEBUG(0, "Device %s already configured.", 3275 sc->sc_name); 3276 return (NULL); 3277 } 3278 break; 3279 } 3280 if (gp == NULL) { 3281 gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC); 3282 if (gp == NULL) { 3283 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3284 md.md_name); 3285 return (NULL); 3286 } 3287 sc = gp->softc; 3288 } 3289 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3290 g_topology_unlock(); 3291 sx_xlock(&sc->sc_lock); 3292 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3293 error = g_mirror_add_disk(sc, pp, &md); 3294 if (error != 0) { 3295 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3296 pp->name, gp->name, error); 3297 if (LIST_EMPTY(&sc->sc_disks)) { 3298 g_cancel_event(sc); 3299 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3300 g_topology_lock(); 3301 return (NULL); 3302 } 3303 gp = NULL; 3304 } 3305 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3306 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3307 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3308 g_topology_lock(); 3309 return (NULL); 3310 } 3311 sx_xunlock(&sc->sc_lock); 3312 g_topology_lock(); 3313 return (gp); 3314 } 3315 3316 static void 3317 g_mirror_resize(struct g_consumer *cp) 3318 { 3319 struct g_mirror_disk *disk; 3320 3321 g_topology_assert(); 3322 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3323 3324 disk = cp->private; 3325 if (disk == NULL) 3326 return; 3327 g_topology_unlock(); 3328 g_mirror_update_metadata(disk); 3329 g_topology_lock(); 3330 } 3331 3332 static int 3333 g_mirror_destroy_geom(struct gctl_req *req __unused, 3334 struct g_class *mp __unused, struct g_geom *gp) 3335 { 3336 struct g_mirror_softc *sc; 3337 int error; 3338 3339 g_topology_unlock(); 3340 sc = gp->softc; 3341 sx_xlock(&sc->sc_lock); 3342 g_cancel_event(sc); 3343 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3344 if (error != 0) 3345 sx_xunlock(&sc->sc_lock); 3346 g_topology_lock(); 3347 return (error); 3348 } 3349 3350 static void 3351 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3352 struct g_consumer *cp, struct g_provider *pp) 3353 { 3354 struct g_mirror_softc *sc; 3355 3356 g_topology_assert(); 3357 3358 sc = gp->softc; 3359 if (sc == NULL) 3360 return; 3361 /* Skip synchronization geom. */ 3362 if (gp == sc->sc_sync.ds_geom) 3363 return; 3364 if (pp != NULL) { 3365 /* Nothing here. 
*/ 3366 } else if (cp != NULL) { 3367 struct g_mirror_disk *disk; 3368 3369 disk = cp->private; 3370 if (disk == NULL) 3371 return; 3372 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3373 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3374 sbuf_printf(sb, "%s<Synchronized>", indent); 3375 if (disk->d_sync.ds_offset == 0) 3376 sbuf_printf(sb, "0%%"); 3377 else 3378 sbuf_printf(sb, "%u%%", 3379 (u_int)((disk->d_sync.ds_offset * 100) / 3380 sc->sc_mediasize)); 3381 sbuf_printf(sb, "</Synchronized>\n"); 3382 if (disk->d_sync.ds_offset > 0) 3383 sbuf_printf(sb, "%s<BytesSynced>%jd" 3384 "</BytesSynced>\n", indent, 3385 (intmax_t)disk->d_sync.ds_offset); 3386 } 3387 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3388 disk->d_sync.ds_syncid); 3389 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3390 disk->d_genid); 3391 sbuf_printf(sb, "%s<Flags>", indent); 3392 if (disk->d_flags == 0) 3393 sbuf_printf(sb, "NONE"); 3394 else { 3395 int first = 1; 3396 3397 #define ADD_FLAG(flag, name) do { \ 3398 if ((disk->d_flags & (flag)) != 0) { \ 3399 if (!first) \ 3400 sbuf_printf(sb, ", "); \ 3401 else \ 3402 first = 0; \ 3403 sbuf_printf(sb, name); \ 3404 } \ 3405 } while (0) 3406 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3407 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3408 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3409 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3410 "SYNCHRONIZING"); 3411 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3412 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3413 #undef ADD_FLAG 3414 } 3415 sbuf_printf(sb, "</Flags>\n"); 3416 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3417 disk->d_priority); 3418 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3419 g_mirror_disk_state2str(disk->d_state)); 3420 } else { 3421 sbuf_printf(sb, "%s<Type>", indent); 3422 switch (sc->sc_type) { 3423 case G_MIRROR_TYPE_AUTOMATIC: 3424 sbuf_printf(sb, "AUTOMATIC"); 3425 break; 3426 case G_MIRROR_TYPE_MANUAL: 3427 sbuf_printf(sb, "MANUAL"); 3428 break; 3429 default: 3430 sbuf_printf(sb, "UNKNOWN"); 3431 break; 3432 } 3433 sbuf_printf(sb, "</Type>\n"); 3434 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3435 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3436 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3437 sbuf_printf(sb, "%s<Flags>", indent); 3438 if (sc->sc_flags == 0) 3439 sbuf_printf(sb, "NONE"); 3440 else { 3441 int first = 1; 3442 3443 #define ADD_FLAG(flag, name) do { \ 3444 if ((sc->sc_flags & (flag)) != 0) { \ 3445 if (!first) \ 3446 sbuf_printf(sb, ", "); \ 3447 else \ 3448 first = 0; \ 3449 sbuf_printf(sb, name); \ 3450 } \ 3451 } while (0) 3452 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3453 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3454 #undef ADD_FLAG 3455 } 3456 sbuf_printf(sb, "</Flags>\n"); 3457 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3458 (u_int)sc->sc_slice); 3459 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3460 balance_name(sc->sc_balance)); 3461 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3462 sc->sc_ndisks); 3463 sbuf_printf(sb, "%s<State>", indent); 3464 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3465 sbuf_printf(sb, "%s", "STARTING"); 3466 else if (sc->sc_ndisks == 3467 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3468 sbuf_printf(sb, "%s", "COMPLETE"); 3469 else 3470 sbuf_printf(sb, "%s", "DEGRADED"); 3471 sbuf_printf(sb, "</State>\n"); 3472 } 3473 } 3474 3475 static void 3476 
g_mirror_shutdown_post_sync(void *arg, int howto) 3477 { 3478 struct g_class *mp; 3479 struct g_geom *gp, *gp2; 3480 struct g_mirror_softc *sc; 3481 int error; 3482 3483 if (panicstr != NULL) 3484 return; 3485 3486 mp = arg; 3487 g_topology_lock(); 3488 g_mirror_shutdown = 1; 3489 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3490 if ((sc = gp->softc) == NULL) 3491 continue; 3492 /* Skip synchronization geom. */ 3493 if (gp == sc->sc_sync.ds_geom) 3494 continue; 3495 g_topology_unlock(); 3496 sx_xlock(&sc->sc_lock); 3497 g_mirror_idle(sc, -1); 3498 g_cancel_event(sc); 3499 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3500 if (error != 0) 3501 sx_xunlock(&sc->sc_lock); 3502 g_topology_lock(); 3503 } 3504 g_topology_unlock(); 3505 } 3506 3507 static void 3508 g_mirror_init(struct g_class *mp) 3509 { 3510 3511 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3512 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3513 if (g_mirror_post_sync == NULL) 3514 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3515 } 3516 3517 static void 3518 g_mirror_fini(struct g_class *mp) 3519 { 3520 3521 if (g_mirror_post_sync != NULL) 3522 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3523 } 3524 3525 /* 3526 * Refresh the mirror device's metadata when gmirror encounters a newer 3527 * generation as the individual components are being added to the mirror set. 3528 */ 3529 static int 3530 g_mirror_refresh_device(struct g_mirror_softc *sc, const struct g_provider *pp, 3531 const struct g_mirror_metadata *md) 3532 { 3533 3534 g_topology_assert_not(); 3535 sx_assert(&sc->sc_lock, SX_XLOCKED); 3536 3537 KASSERT(sc->sc_genid <= md->md_genid, 3538 ("%s: attempted to refresh from stale component %s (device %s) " 3539 "(%u < %u).", __func__, pp->name, sc->sc_name, md->md_genid, 3540 sc->sc_genid)); 3541 3542 if (sc->sc_genid > md->md_genid || (sc->sc_genid == md->md_genid && 3543 sc->sc_syncid >= md->md_syncid)) 3544 return (0); 3545 3546 G_MIRROR_DEBUG(0, "Found newer version for device %s (genid: curr=%u " 3547 "new=%u; syncid: curr=%u new=%u; ndisks: curr=%u new=%u; " 3548 "provider=%s).", sc->sc_name, sc->sc_genid, md->md_genid, 3549 sc->sc_syncid, md->md_syncid, sc->sc_ndisks, md->md_all, pp->name); 3550 3551 if (sc->sc_state != G_MIRROR_DEVICE_STATE_STARTING) { 3552 /* Probable data corruption detected */ 3553 G_MIRROR_DEBUG(0, "Cannot refresh metadata in %s state " 3554 "(device=%s genid=%u). A stale mirror device was launched.", 3555 g_mirror_device_state2str(sc->sc_state), sc->sc_name, 3556 sc->sc_genid); 3557 return (EINVAL); 3558 } 3559 3560 /* Update softc */ 3561 g_mirror_reinit_from_metadata(sc, md); 3562 3563 G_MIRROR_DEBUG(1, "Refresh device %s (id=%u, state=%s) from disk %s " 3564 "(genid=%u syncid=%u md_all=%u).", sc->sc_name, md->md_mid, 3565 g_mirror_device_state2str(sc->sc_state), pp->name, md->md_genid, 3566 md->md_syncid, (unsigned)md->md_all); 3567 3568 return (0); 3569 } 3570 3571 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3572 MODULE_VERSION(geom_mirror, 0); 3573