1 /*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/fail.h> 33 #include <sys/kernel.h> 34 #include <sys/module.h> 35 #include <sys/limits.h> 36 #include <sys/lock.h> 37 #include <sys/mutex.h> 38 #include <sys/bio.h> 39 #include <sys/sbuf.h> 40 #include <sys/sysctl.h> 41 #include <sys/malloc.h> 42 #include <sys/eventhandler.h> 43 #include <vm/uma.h> 44 #include <geom/geom.h> 45 #include <sys/proc.h> 46 #include <sys/kthread.h> 47 #include <sys/sched.h> 48 #include <geom/mirror/g_mirror.h> 49 50 FEATURE(geom_mirror, "GEOM mirroring support"); 51 52 static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data"); 53 54 SYSCTL_DECL(_kern_geom); 55 static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, 56 "GEOM_MIRROR stuff"); 57 u_int g_mirror_debug = 0; 58 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0, 59 "Debug level"); 60 static u_int g_mirror_timeout = 4; 61 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout, 62 0, "Time to wait on all mirror components"); 63 static u_int g_mirror_idletime = 5; 64 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN, 65 &g_mirror_idletime, 0, "Mark components as clean when idling"); 66 static u_int g_mirror_disconnect_on_failure = 1; 67 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 68 &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 69 static u_int g_mirror_syncreqs = 2; 70 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 71 &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests."); 72 73 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 74 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 75 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 76 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 77 } while (0) 78 79 static eventhandler_tag g_mirror_post_sync = NULL; 80 static int g_mirror_shutdown = 0; 81 82 static g_ctl_destroy_geom_t g_mirror_destroy_geom; 83 static g_taste_t g_mirror_taste; 84 static g_init_t g_mirror_init; 85 static g_fini_t g_mirror_fini; 86 static g_provgone_t g_mirror_providergone; 87 static g_resize_t g_mirror_resize; 88 89 struct g_class g_mirror_class = { 90 .name = G_MIRROR_CLASS_NAME, 91 .version = G_VERSION, 92 .ctlreq = g_mirror_config, 93 .taste = g_mirror_taste, 94 .destroy_geom = g_mirror_destroy_geom, 95 .init = g_mirror_init, 96 .fini = g_mirror_fini, 97 .providergone = g_mirror_providergone, 98 .resize = g_mirror_resize 99 }; 100 101 102 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 103 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 104 static void g_mirror_update_device(struct g_mirror_softc *sc, bool force); 105 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 106 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 107 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 108 static void g_mirror_register_request(struct bio *bp); 109 static void g_mirror_sync_release(struct g_mirror_softc *sc); 110 111 112 static const char * 113 g_mirror_disk_state2str(int state) 114 { 115 116 switch (state) { 117 case G_MIRROR_DISK_STATE_NONE: 118 return ("NONE"); 119 case G_MIRROR_DISK_STATE_NEW: 120 return ("NEW"); 121 case G_MIRROR_DISK_STATE_ACTIVE: 122 return ("ACTIVE"); 123 case G_MIRROR_DISK_STATE_STALE: 124 return ("STALE"); 125 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 126 return ("SYNCHRONIZING"); 127 case G_MIRROR_DISK_STATE_DISCONNECTED: 128 return ("DISCONNECTED"); 129 case G_MIRROR_DISK_STATE_DESTROY: 130 return ("DESTROY"); 131 default: 132 return ("INVALID"); 133 } 134 } 135 136 static const char * 137 g_mirror_device_state2str(int state) 138 { 139 140 switch (state) { 141 case G_MIRROR_DEVICE_STATE_STARTING: 142 return ("STARTING"); 143 case G_MIRROR_DEVICE_STATE_RUNNING: 144 return ("RUNNING"); 145 default: 146 return ("INVALID"); 147 } 148 } 149 150 static const char * 151 g_mirror_get_diskname(struct g_mirror_disk *disk) 152 { 153 154 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 155 return ("[unknown]"); 156 return (disk->d_name); 157 } 158 159 /* 160 * --- Events handling functions --- 161 * Events in geom_mirror are used to maintain disks and device status 162 * from one thread to simplify locking. 163 */ 164 static void 165 g_mirror_event_free(struct g_mirror_event *ep) 166 { 167 168 free(ep, M_MIRROR); 169 } 170 171 int 172 g_mirror_event_send(void *arg, int state, int flags) 173 { 174 struct g_mirror_softc *sc; 175 struct g_mirror_disk *disk; 176 struct g_mirror_event *ep; 177 int error; 178 179 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 180 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 181 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 182 disk = NULL; 183 sc = arg; 184 } else { 185 disk = arg; 186 sc = disk->d_softc; 187 } 188 ep->e_disk = disk; 189 ep->e_state = state; 190 ep->e_flags = flags; 191 ep->e_error = 0; 192 mtx_lock(&sc->sc_events_mtx); 193 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 194 mtx_unlock(&sc->sc_events_mtx); 195 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 196 mtx_lock(&sc->sc_queue_mtx); 197 wakeup(sc); 198 mtx_unlock(&sc->sc_queue_mtx); 199 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 200 return (0); 201 sx_assert(&sc->sc_lock, SX_XLOCKED); 202 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 203 sx_xunlock(&sc->sc_lock); 204 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 205 mtx_lock(&sc->sc_events_mtx); 206 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 207 hz * 5); 208 } 209 error = ep->e_error; 210 g_mirror_event_free(ep); 211 sx_xlock(&sc->sc_lock); 212 return (error); 213 } 214 215 static struct g_mirror_event * 216 g_mirror_event_get(struct g_mirror_softc *sc) 217 { 218 struct g_mirror_event *ep; 219 220 mtx_lock(&sc->sc_events_mtx); 221 ep = TAILQ_FIRST(&sc->sc_events); 222 mtx_unlock(&sc->sc_events_mtx); 223 return (ep); 224 } 225 226 static void 227 g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 228 { 229 230 mtx_lock(&sc->sc_events_mtx); 231 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 232 mtx_unlock(&sc->sc_events_mtx); 233 } 234 235 static void 236 g_mirror_event_cancel(struct g_mirror_disk *disk) 237 { 238 struct g_mirror_softc *sc; 239 struct g_mirror_event *ep, *tmpep; 240 241 sc = disk->d_softc; 242 sx_assert(&sc->sc_lock, SX_XLOCKED); 243 244 mtx_lock(&sc->sc_events_mtx); 245 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 246 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 247 continue; 248 if (ep->e_disk != disk) 249 continue; 250 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 251 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 252 g_mirror_event_free(ep); 253 else { 254 ep->e_error = ECANCELED; 255 wakeup(ep); 256 } 257 } 258 mtx_unlock(&sc->sc_events_mtx); 259 } 260 261 /* 262 * Return the number of disks in given state. 263 * If state is equal to -1, count all connected disks. 264 */ 265 u_int 266 g_mirror_ndisks(struct g_mirror_softc *sc, int state) 267 { 268 struct g_mirror_disk *disk; 269 u_int n = 0; 270 271 sx_assert(&sc->sc_lock, SX_LOCKED); 272 273 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 274 if (state == -1 || disk->d_state == state) 275 n++; 276 } 277 return (n); 278 } 279 280 /* 281 * Find a disk in mirror by its disk ID. 282 */ 283 static struct g_mirror_disk * 284 g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) 285 { 286 struct g_mirror_disk *disk; 287 288 sx_assert(&sc->sc_lock, SX_XLOCKED); 289 290 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 291 if (disk->d_id == id) 292 return (disk); 293 } 294 return (NULL); 295 } 296 297 static u_int 298 g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp) 299 { 300 struct bio *bp; 301 u_int nreqs = 0; 302 303 mtx_lock(&sc->sc_queue_mtx); 304 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 305 if (bp->bio_from == cp) 306 nreqs++; 307 } 308 mtx_unlock(&sc->sc_queue_mtx); 309 return (nreqs); 310 } 311 312 static int 313 g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp) 314 { 315 316 if (cp->index > 0) { 317 G_MIRROR_DEBUG(2, 318 "I/O requests for %s exist, can't destroy it now.", 319 cp->provider->name); 320 return (1); 321 } 322 if (g_mirror_nrequests(sc, cp) > 0) { 323 G_MIRROR_DEBUG(2, 324 "I/O requests for %s in queue, can't destroy it now.", 325 cp->provider->name); 326 return (1); 327 } 328 return (0); 329 } 330 331 static void 332 g_mirror_destroy_consumer(void *arg, int flags __unused) 333 { 334 struct g_consumer *cp; 335 336 g_topology_assert(); 337 338 cp = arg; 339 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 340 g_detach(cp); 341 g_destroy_consumer(cp); 342 } 343 344 static void 345 g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 346 { 347 struct g_provider *pp; 348 int retaste_wait; 349 350 g_topology_assert(); 351 352 cp->private = NULL; 353 if (g_mirror_is_busy(sc, cp)) 354 return; 355 pp = cp->provider; 356 retaste_wait = 0; 357 if (cp->acw == 1) { 358 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 359 retaste_wait = 1; 360 } 361 G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 362 -cp->acw, -cp->ace, 0); 363 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 364 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 365 if (retaste_wait) { 366 /* 367 * After retaste event was send (inside g_access()), we can send 368 * event to detach and destroy consumer. 369 * A class, which has consumer to the given provider connected 370 * will not receive retaste event for the provider. 371 * This is the way how I ignore retaste events when I close 372 * consumers opened for write: I detach and destroy consumer 373 * after retaste event is sent. 374 */ 375 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 376 return; 377 } 378 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 379 g_detach(cp); 380 g_destroy_consumer(cp); 381 } 382 383 static int 384 g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 385 { 386 struct g_consumer *cp; 387 int error; 388 389 g_topology_assert_not(); 390 KASSERT(disk->d_consumer == NULL, 391 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 392 393 g_topology_lock(); 394 cp = g_new_consumer(disk->d_softc->sc_geom); 395 cp->flags |= G_CF_DIRECT_RECEIVE; 396 error = g_attach(cp, pp); 397 if (error != 0) { 398 g_destroy_consumer(cp); 399 g_topology_unlock(); 400 return (error); 401 } 402 error = g_access(cp, 1, 1, 1); 403 if (error != 0) { 404 g_detach(cp); 405 g_destroy_consumer(cp); 406 g_topology_unlock(); 407 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 408 pp->name, error); 409 return (error); 410 } 411 g_topology_unlock(); 412 disk->d_consumer = cp; 413 disk->d_consumer->private = disk; 414 disk->d_consumer->index = 0; 415 416 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 417 return (0); 418 } 419 420 static void 421 g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 422 { 423 424 g_topology_assert(); 425 426 if (cp == NULL) 427 return; 428 if (cp->provider != NULL) 429 g_mirror_kill_consumer(sc, cp); 430 else 431 g_destroy_consumer(cp); 432 } 433 434 /* 435 * Initialize disk. This means allocate memory, create consumer, attach it 436 * to the provider and open access (r1w1e1) to it. 437 */ 438 static struct g_mirror_disk * 439 g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 440 struct g_mirror_metadata *md, int *errorp) 441 { 442 struct g_mirror_disk *disk; 443 int i, error; 444 445 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 446 if (disk == NULL) { 447 error = ENOMEM; 448 goto fail; 449 } 450 disk->d_softc = sc; 451 error = g_mirror_connect_disk(disk, pp); 452 if (error != 0) 453 goto fail; 454 disk->d_id = md->md_did; 455 disk->d_state = G_MIRROR_DISK_STATE_NONE; 456 disk->d_priority = md->md_priority; 457 disk->d_flags = md->md_dflags; 458 error = g_getattr("GEOM::candelete", disk->d_consumer, &i); 459 if (error == 0 && i != 0) 460 disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE; 461 if (md->md_provider[0] != '\0') 462 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 463 disk->d_sync.ds_consumer = NULL; 464 disk->d_sync.ds_offset = md->md_sync_offset; 465 disk->d_sync.ds_offset_done = md->md_sync_offset; 466 disk->d_genid = md->md_genid; 467 disk->d_sync.ds_syncid = md->md_syncid; 468 if (errorp != NULL) 469 *errorp = 0; 470 return (disk); 471 fail: 472 if (errorp != NULL) 473 *errorp = error; 474 if (disk != NULL) 475 free(disk, M_MIRROR); 476 return (NULL); 477 } 478 479 static void 480 g_mirror_destroy_disk(struct g_mirror_disk *disk) 481 { 482 struct g_mirror_softc *sc; 483 484 g_topology_assert_not(); 485 sc = disk->d_softc; 486 sx_assert(&sc->sc_lock, SX_XLOCKED); 487 488 LIST_REMOVE(disk, d_next); 489 g_mirror_event_cancel(disk); 490 if (sc->sc_hint == disk) 491 sc->sc_hint = NULL; 492 switch (disk->d_state) { 493 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 494 g_mirror_sync_stop(disk, 1); 495 /* FALLTHROUGH */ 496 case G_MIRROR_DISK_STATE_NEW: 497 case G_MIRROR_DISK_STATE_STALE: 498 case G_MIRROR_DISK_STATE_ACTIVE: 499 g_topology_lock(); 500 g_mirror_disconnect_consumer(sc, disk->d_consumer); 501 g_topology_unlock(); 502 free(disk, M_MIRROR); 503 break; 504 default: 505 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 506 g_mirror_get_diskname(disk), 507 g_mirror_disk_state2str(disk->d_state))); 508 } 509 } 510 511 static void 512 g_mirror_free_device(struct g_mirror_softc *sc) 513 { 514 515 mtx_destroy(&sc->sc_queue_mtx); 516 mtx_destroy(&sc->sc_events_mtx); 517 mtx_destroy(&sc->sc_done_mtx); 518 sx_destroy(&sc->sc_lock); 519 free(sc, M_MIRROR); 520 } 521 522 static void 523 g_mirror_providergone(struct g_provider *pp) 524 { 525 struct g_mirror_softc *sc = pp->private; 526 527 if ((--sc->sc_refcnt) == 0) 528 g_mirror_free_device(sc); 529 } 530 531 static void 532 g_mirror_destroy_device(struct g_mirror_softc *sc) 533 { 534 struct g_mirror_disk *disk; 535 struct g_mirror_event *ep; 536 struct g_geom *gp; 537 struct g_consumer *cp, *tmpcp; 538 539 g_topology_assert_not(); 540 sx_assert(&sc->sc_lock, SX_XLOCKED); 541 542 gp = sc->sc_geom; 543 if (sc->sc_provider != NULL) 544 g_mirror_destroy_provider(sc); 545 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 546 disk = LIST_FIRST(&sc->sc_disks)) { 547 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 548 g_mirror_update_metadata(disk); 549 g_mirror_destroy_disk(disk); 550 } 551 while ((ep = g_mirror_event_get(sc)) != NULL) { 552 g_mirror_event_remove(sc, ep); 553 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 554 g_mirror_event_free(ep); 555 else { 556 ep->e_error = ECANCELED; 557 ep->e_flags |= G_MIRROR_EVENT_DONE; 558 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 559 mtx_lock(&sc->sc_events_mtx); 560 wakeup(ep); 561 mtx_unlock(&sc->sc_events_mtx); 562 } 563 } 564 callout_drain(&sc->sc_callout); 565 566 g_topology_lock(); 567 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 568 g_mirror_disconnect_consumer(sc, cp); 569 } 570 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 571 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 572 g_wither_geom(gp, ENXIO); 573 sx_xunlock(&sc->sc_lock); 574 if ((--sc->sc_refcnt) == 0) 575 g_mirror_free_device(sc); 576 g_topology_unlock(); 577 } 578 579 static void 580 g_mirror_orphan(struct g_consumer *cp) 581 { 582 struct g_mirror_disk *disk; 583 584 g_topology_assert(); 585 586 disk = cp->private; 587 if (disk == NULL) 588 return; 589 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 590 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 591 G_MIRROR_EVENT_DONTWAIT); 592 } 593 594 /* 595 * Function should return the next active disk on the list. 596 * It is possible that it will be the same disk as given. 597 * If there are no active disks on list, NULL is returned. 598 */ 599 static __inline struct g_mirror_disk * 600 g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 601 { 602 struct g_mirror_disk *dp; 603 604 for (dp = LIST_NEXT(disk, d_next); dp != disk; 605 dp = LIST_NEXT(dp, d_next)) { 606 if (dp == NULL) 607 dp = LIST_FIRST(&sc->sc_disks); 608 if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) 609 break; 610 } 611 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 612 return (NULL); 613 return (dp); 614 } 615 616 static struct g_mirror_disk * 617 g_mirror_get_disk(struct g_mirror_softc *sc) 618 { 619 struct g_mirror_disk *disk; 620 621 if (sc->sc_hint == NULL) { 622 sc->sc_hint = LIST_FIRST(&sc->sc_disks); 623 if (sc->sc_hint == NULL) 624 return (NULL); 625 } 626 disk = sc->sc_hint; 627 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { 628 disk = g_mirror_find_next(sc, disk); 629 if (disk == NULL) 630 return (NULL); 631 } 632 sc->sc_hint = g_mirror_find_next(sc, disk); 633 return (disk); 634 } 635 636 static int 637 g_mirror_write_metadata(struct g_mirror_disk *disk, 638 struct g_mirror_metadata *md) 639 { 640 struct g_mirror_softc *sc; 641 struct g_consumer *cp; 642 off_t offset, length; 643 u_char *sector; 644 int error = 0; 645 646 g_topology_assert_not(); 647 sc = disk->d_softc; 648 sx_assert(&sc->sc_lock, SX_LOCKED); 649 650 cp = disk->d_consumer; 651 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 652 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 653 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 654 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 655 cp->acw, cp->ace)); 656 length = cp->provider->sectorsize; 657 offset = cp->provider->mediasize - length; 658 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 659 if (md != NULL && 660 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) { 661 /* 662 * Handle the case, when the size of parent provider reduced. 663 */ 664 if (offset < md->md_mediasize) 665 error = ENOSPC; 666 else 667 mirror_metadata_encode(md, sector); 668 } 669 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error); 670 if (error == 0) 671 error = g_write_data(cp, offset, sector, length); 672 free(sector, M_MIRROR); 673 if (error != 0) { 674 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 675 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 676 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 677 "(device=%s, error=%d).", 678 g_mirror_get_diskname(disk), sc->sc_name, error); 679 } else { 680 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 681 "(device=%s, error=%d).", 682 g_mirror_get_diskname(disk), sc->sc_name, error); 683 } 684 if (g_mirror_disconnect_on_failure && 685 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 686 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 687 g_mirror_event_send(disk, 688 G_MIRROR_DISK_STATE_DISCONNECTED, 689 G_MIRROR_EVENT_DONTWAIT); 690 } 691 } 692 return (error); 693 } 694 695 static int 696 g_mirror_clear_metadata(struct g_mirror_disk *disk) 697 { 698 int error; 699 700 g_topology_assert_not(); 701 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 702 703 error = g_mirror_write_metadata(disk, NULL); 704 if (error == 0) { 705 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 706 g_mirror_get_diskname(disk)); 707 } else { 708 G_MIRROR_DEBUG(0, 709 "Cannot clear metadata on disk %s (error=%d).", 710 g_mirror_get_diskname(disk), error); 711 } 712 return (error); 713 } 714 715 void 716 g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 717 struct g_mirror_metadata *md) 718 { 719 720 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 721 md->md_version = G_MIRROR_VERSION; 722 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 723 md->md_mid = sc->sc_id; 724 md->md_all = sc->sc_ndisks; 725 md->md_slice = sc->sc_slice; 726 md->md_balance = sc->sc_balance; 727 md->md_genid = sc->sc_genid; 728 md->md_mediasize = sc->sc_mediasize; 729 md->md_sectorsize = sc->sc_sectorsize; 730 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 731 bzero(md->md_provider, sizeof(md->md_provider)); 732 if (disk == NULL) { 733 md->md_did = arc4random(); 734 md->md_priority = 0; 735 md->md_syncid = 0; 736 md->md_dflags = 0; 737 md->md_sync_offset = 0; 738 md->md_provsize = 0; 739 } else { 740 md->md_did = disk->d_id; 741 md->md_priority = disk->d_priority; 742 md->md_syncid = disk->d_sync.ds_syncid; 743 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 744 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 745 md->md_sync_offset = disk->d_sync.ds_offset_done; 746 else 747 md->md_sync_offset = 0; 748 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 749 strlcpy(md->md_provider, 750 disk->d_consumer->provider->name, 751 sizeof(md->md_provider)); 752 } 753 md->md_provsize = disk->d_consumer->provider->mediasize; 754 } 755 } 756 757 void 758 g_mirror_update_metadata(struct g_mirror_disk *disk) 759 { 760 struct g_mirror_softc *sc; 761 struct g_mirror_metadata md; 762 int error; 763 764 g_topology_assert_not(); 765 sc = disk->d_softc; 766 sx_assert(&sc->sc_lock, SX_LOCKED); 767 768 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) 769 g_mirror_fill_metadata(sc, disk, &md); 770 error = g_mirror_write_metadata(disk, &md); 771 if (error == 0) { 772 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 773 g_mirror_get_diskname(disk)); 774 } else { 775 G_MIRROR_DEBUG(0, 776 "Cannot update metadata on disk %s (error=%d).", 777 g_mirror_get_diskname(disk), error); 778 } 779 } 780 781 static void 782 g_mirror_bump_syncid(struct g_mirror_softc *sc) 783 { 784 struct g_mirror_disk *disk; 785 786 g_topology_assert_not(); 787 sx_assert(&sc->sc_lock, SX_XLOCKED); 788 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 789 ("%s called with no active disks (device=%s).", __func__, 790 sc->sc_name)); 791 792 sc->sc_syncid++; 793 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 794 sc->sc_syncid); 795 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 796 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 797 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 798 disk->d_sync.ds_syncid = sc->sc_syncid; 799 g_mirror_update_metadata(disk); 800 } 801 } 802 } 803 804 static void 805 g_mirror_bump_genid(struct g_mirror_softc *sc) 806 { 807 struct g_mirror_disk *disk; 808 809 g_topology_assert_not(); 810 sx_assert(&sc->sc_lock, SX_XLOCKED); 811 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 812 ("%s called with no active disks (device=%s).", __func__, 813 sc->sc_name)); 814 815 sc->sc_genid++; 816 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 817 sc->sc_genid); 818 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 819 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 820 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 821 disk->d_genid = sc->sc_genid; 822 g_mirror_update_metadata(disk); 823 } 824 } 825 } 826 827 static int 828 g_mirror_idle(struct g_mirror_softc *sc, int acw) 829 { 830 struct g_mirror_disk *disk; 831 int timeout; 832 833 g_topology_assert_not(); 834 sx_assert(&sc->sc_lock, SX_XLOCKED); 835 836 if (sc->sc_provider == NULL) 837 return (0); 838 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 839 return (0); 840 if (sc->sc_idle) 841 return (0); 842 if (sc->sc_writes > 0) 843 return (0); 844 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 845 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 846 if (!g_mirror_shutdown && timeout > 0) 847 return (timeout); 848 } 849 sc->sc_idle = 1; 850 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 851 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 852 continue; 853 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 854 g_mirror_get_diskname(disk), sc->sc_name); 855 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 856 g_mirror_update_metadata(disk); 857 } 858 return (0); 859 } 860 861 static void 862 g_mirror_unidle(struct g_mirror_softc *sc) 863 { 864 struct g_mirror_disk *disk; 865 866 g_topology_assert_not(); 867 sx_assert(&sc->sc_lock, SX_XLOCKED); 868 869 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 870 return; 871 sc->sc_idle = 0; 872 sc->sc_last_write = time_uptime; 873 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 874 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 875 continue; 876 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 877 g_mirror_get_diskname(disk), sc->sc_name); 878 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 879 g_mirror_update_metadata(disk); 880 } 881 } 882 883 static void 884 g_mirror_flush_done(struct bio *bp) 885 { 886 struct g_mirror_softc *sc; 887 struct bio *pbp; 888 889 pbp = bp->bio_parent; 890 sc = pbp->bio_to->private; 891 mtx_lock(&sc->sc_done_mtx); 892 if (pbp->bio_error == 0) 893 pbp->bio_error = bp->bio_error; 894 pbp->bio_completed += bp->bio_completed; 895 pbp->bio_inbed++; 896 if (pbp->bio_children == pbp->bio_inbed) { 897 mtx_unlock(&sc->sc_done_mtx); 898 g_io_deliver(pbp, pbp->bio_error); 899 } else 900 mtx_unlock(&sc->sc_done_mtx); 901 g_destroy_bio(bp); 902 } 903 904 static void 905 g_mirror_done(struct bio *bp) 906 { 907 struct g_mirror_softc *sc; 908 909 sc = bp->bio_from->geom->softc; 910 bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR; 911 mtx_lock(&sc->sc_queue_mtx); 912 bioq_insert_tail(&sc->sc_queue, bp); 913 mtx_unlock(&sc->sc_queue_mtx); 914 wakeup(sc); 915 } 916 917 static void 918 g_mirror_regular_request(struct bio *bp) 919 { 920 struct g_mirror_softc *sc; 921 struct g_mirror_disk *disk; 922 struct bio *pbp; 923 924 g_topology_assert_not(); 925 926 pbp = bp->bio_parent; 927 sc = pbp->bio_to->private; 928 bp->bio_from->index--; 929 if (bp->bio_cmd == BIO_WRITE) 930 sc->sc_writes--; 931 disk = bp->bio_from->private; 932 if (disk == NULL) { 933 g_topology_lock(); 934 g_mirror_kill_consumer(sc, bp->bio_from); 935 g_topology_unlock(); 936 } 937 938 if (bp->bio_cmd == BIO_READ) 939 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read, 940 bp->bio_error); 941 else if (bp->bio_cmd == BIO_WRITE) 942 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write, 943 bp->bio_error); 944 945 pbp->bio_inbed++; 946 KASSERT(pbp->bio_inbed <= pbp->bio_children, 947 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 948 pbp->bio_children)); 949 if (bp->bio_error == 0 && pbp->bio_error == 0) { 950 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 951 g_destroy_bio(bp); 952 if (pbp->bio_children == pbp->bio_inbed) { 953 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 954 pbp->bio_completed = pbp->bio_length; 955 if (pbp->bio_cmd == BIO_WRITE || 956 pbp->bio_cmd == BIO_DELETE) { 957 bioq_remove(&sc->sc_inflight, pbp); 958 /* Release delayed sync requests if possible. */ 959 g_mirror_sync_release(sc); 960 } 961 g_io_deliver(pbp, pbp->bio_error); 962 } 963 return; 964 } else if (bp->bio_error != 0) { 965 if (pbp->bio_error == 0) 966 pbp->bio_error = bp->bio_error; 967 if (disk != NULL) { 968 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 969 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 970 G_MIRROR_LOGREQ(0, bp, 971 "Request failed (error=%d).", 972 bp->bio_error); 973 } else { 974 G_MIRROR_LOGREQ(1, bp, 975 "Request failed (error=%d).", 976 bp->bio_error); 977 } 978 if (g_mirror_disconnect_on_failure && 979 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) 980 { 981 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 982 g_mirror_event_send(disk, 983 G_MIRROR_DISK_STATE_DISCONNECTED, 984 G_MIRROR_EVENT_DONTWAIT); 985 } 986 } 987 switch (pbp->bio_cmd) { 988 case BIO_DELETE: 989 case BIO_WRITE: 990 pbp->bio_inbed--; 991 pbp->bio_children--; 992 break; 993 } 994 } 995 g_destroy_bio(bp); 996 997 switch (pbp->bio_cmd) { 998 case BIO_READ: 999 if (pbp->bio_inbed < pbp->bio_children) 1000 break; 1001 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 1002 g_io_deliver(pbp, pbp->bio_error); 1003 else { 1004 pbp->bio_error = 0; 1005 mtx_lock(&sc->sc_queue_mtx); 1006 bioq_insert_tail(&sc->sc_queue, pbp); 1007 mtx_unlock(&sc->sc_queue_mtx); 1008 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1009 wakeup(sc); 1010 } 1011 break; 1012 case BIO_DELETE: 1013 case BIO_WRITE: 1014 if (pbp->bio_children == 0) { 1015 /* 1016 * All requests failed. 1017 */ 1018 } else if (pbp->bio_inbed < pbp->bio_children) { 1019 /* Do nothing. */ 1020 break; 1021 } else if (pbp->bio_children == pbp->bio_inbed) { 1022 /* Some requests succeeded. */ 1023 pbp->bio_error = 0; 1024 pbp->bio_completed = pbp->bio_length; 1025 } 1026 bioq_remove(&sc->sc_inflight, pbp); 1027 /* Release delayed sync requests if possible. */ 1028 g_mirror_sync_release(sc); 1029 g_io_deliver(pbp, pbp->bio_error); 1030 break; 1031 default: 1032 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 1033 break; 1034 } 1035 } 1036 1037 static void 1038 g_mirror_sync_done(struct bio *bp) 1039 { 1040 struct g_mirror_softc *sc; 1041 1042 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1043 sc = bp->bio_from->geom->softc; 1044 bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC; 1045 mtx_lock(&sc->sc_queue_mtx); 1046 bioq_insert_tail(&sc->sc_queue, bp); 1047 mtx_unlock(&sc->sc_queue_mtx); 1048 wakeup(sc); 1049 } 1050 1051 static void 1052 g_mirror_candelete(struct bio *bp) 1053 { 1054 struct g_mirror_softc *sc; 1055 struct g_mirror_disk *disk; 1056 int *val; 1057 1058 sc = bp->bio_to->private; 1059 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1060 if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) 1061 break; 1062 } 1063 val = (int *)bp->bio_data; 1064 *val = (disk != NULL); 1065 g_io_deliver(bp, 0); 1066 } 1067 1068 static void 1069 g_mirror_kernel_dump(struct bio *bp) 1070 { 1071 struct g_mirror_softc *sc; 1072 struct g_mirror_disk *disk; 1073 struct bio *cbp; 1074 struct g_kerneldump *gkd; 1075 1076 /* 1077 * We configure dumping to the first component, because this component 1078 * will be used for reading with 'prefer' balance algorithm. 1079 * If the component with the highest priority is currently disconnected 1080 * we will not be able to read the dump after the reboot if it will be 1081 * connected and synchronized later. Can we do something better? 1082 */ 1083 sc = bp->bio_to->private; 1084 disk = LIST_FIRST(&sc->sc_disks); 1085 1086 gkd = (struct g_kerneldump *)bp->bio_data; 1087 if (gkd->length > bp->bio_to->mediasize) 1088 gkd->length = bp->bio_to->mediasize; 1089 cbp = g_clone_bio(bp); 1090 if (cbp == NULL) { 1091 g_io_deliver(bp, ENOMEM); 1092 return; 1093 } 1094 cbp->bio_done = g_std_done; 1095 g_io_request(cbp, disk->d_consumer); 1096 G_MIRROR_DEBUG(1, "Kernel dump will go to %s.", 1097 g_mirror_get_diskname(disk)); 1098 } 1099 1100 static void 1101 g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp) 1102 { 1103 struct bio_queue_head queue; 1104 struct g_mirror_disk *disk; 1105 struct g_consumer *cp; 1106 struct bio *cbp; 1107 1108 bioq_init(&queue); 1109 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1110 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1111 continue; 1112 cbp = g_clone_bio(bp); 1113 if (cbp == NULL) { 1114 while ((cbp = bioq_takefirst(&queue)) != NULL) 1115 g_destroy_bio(cbp); 1116 if (bp->bio_error == 0) 1117 bp->bio_error = ENOMEM; 1118 g_io_deliver(bp, bp->bio_error); 1119 return; 1120 } 1121 bioq_insert_tail(&queue, cbp); 1122 cbp->bio_done = g_mirror_flush_done; 1123 cbp->bio_caller1 = disk; 1124 cbp->bio_to = disk->d_consumer->provider; 1125 } 1126 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1127 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1128 disk = cbp->bio_caller1; 1129 cbp->bio_caller1 = NULL; 1130 cp = disk->d_consumer; 1131 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1132 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1133 cp->acr, cp->acw, cp->ace)); 1134 g_io_request(cbp, disk->d_consumer); 1135 } 1136 } 1137 1138 static void 1139 g_mirror_start(struct bio *bp) 1140 { 1141 struct g_mirror_softc *sc; 1142 1143 sc = bp->bio_to->private; 1144 /* 1145 * If sc == NULL or there are no valid disks, provider's error 1146 * should be set and g_mirror_start() should not be called at all. 1147 */ 1148 KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1149 ("Provider's error should be set (error=%d)(mirror=%s).", 1150 bp->bio_to->error, bp->bio_to->name)); 1151 G_MIRROR_LOGREQ(3, bp, "Request received."); 1152 1153 switch (bp->bio_cmd) { 1154 case BIO_READ: 1155 case BIO_WRITE: 1156 case BIO_DELETE: 1157 break; 1158 case BIO_FLUSH: 1159 g_mirror_flush(sc, bp); 1160 return; 1161 case BIO_GETATTR: 1162 if (!strcmp(bp->bio_attribute, "GEOM::candelete")) { 1163 g_mirror_candelete(bp); 1164 return; 1165 } else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) { 1166 g_mirror_kernel_dump(bp); 1167 return; 1168 } 1169 /* FALLTHROUGH */ 1170 default: 1171 g_io_deliver(bp, EOPNOTSUPP); 1172 return; 1173 } 1174 mtx_lock(&sc->sc_queue_mtx); 1175 bioq_insert_tail(&sc->sc_queue, bp); 1176 mtx_unlock(&sc->sc_queue_mtx); 1177 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1178 wakeup(sc); 1179 } 1180 1181 /* 1182 * Return TRUE if the given request is colliding with a in-progress 1183 * synchronization request. 1184 */ 1185 static int 1186 g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp) 1187 { 1188 struct g_mirror_disk *disk; 1189 struct bio *sbp; 1190 off_t rstart, rend, sstart, send; 1191 u_int i; 1192 1193 if (sc->sc_sync.ds_ndisks == 0) 1194 return (0); 1195 rstart = bp->bio_offset; 1196 rend = bp->bio_offset + bp->bio_length; 1197 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1198 if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) 1199 continue; 1200 for (i = 0; i < g_mirror_syncreqs; i++) { 1201 sbp = disk->d_sync.ds_bios[i]; 1202 if (sbp == NULL) 1203 continue; 1204 sstart = sbp->bio_offset; 1205 send = sbp->bio_offset + sbp->bio_length; 1206 if (rend > sstart && rstart < send) 1207 return (1); 1208 } 1209 } 1210 return (0); 1211 } 1212 1213 /* 1214 * Return TRUE if the given sync request is colliding with a in-progress regular 1215 * request. 1216 */ 1217 static int 1218 g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp) 1219 { 1220 off_t rstart, rend, sstart, send; 1221 struct bio *bp; 1222 1223 if (sc->sc_sync.ds_ndisks == 0) 1224 return (0); 1225 sstart = sbp->bio_offset; 1226 send = sbp->bio_offset + sbp->bio_length; 1227 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 1228 rstart = bp->bio_offset; 1229 rend = bp->bio_offset + bp->bio_length; 1230 if (rend > sstart && rstart < send) 1231 return (1); 1232 } 1233 return (0); 1234 } 1235 1236 /* 1237 * Puts request onto delayed queue. 1238 */ 1239 static void 1240 g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1241 { 1242 1243 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1244 bioq_insert_head(&sc->sc_regular_delayed, bp); 1245 } 1246 1247 /* 1248 * Puts synchronization request onto delayed queue. 1249 */ 1250 static void 1251 g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1252 { 1253 1254 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1255 bioq_insert_tail(&sc->sc_sync_delayed, bp); 1256 } 1257 1258 /* 1259 * Releases delayed regular requests which don't collide anymore with sync 1260 * requests. 1261 */ 1262 static void 1263 g_mirror_regular_release(struct g_mirror_softc *sc) 1264 { 1265 struct bio *bp, *bp2; 1266 1267 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 1268 if (g_mirror_sync_collision(sc, bp)) 1269 continue; 1270 bioq_remove(&sc->sc_regular_delayed, bp); 1271 G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 1272 mtx_lock(&sc->sc_queue_mtx); 1273 bioq_insert_head(&sc->sc_queue, bp); 1274 mtx_unlock(&sc->sc_queue_mtx); 1275 } 1276 } 1277 1278 /* 1279 * Releases delayed sync requests which don't collide anymore with regular 1280 * requests. 1281 */ 1282 static void 1283 g_mirror_sync_release(struct g_mirror_softc *sc) 1284 { 1285 struct bio *bp, *bp2; 1286 1287 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) { 1288 if (g_mirror_regular_collision(sc, bp)) 1289 continue; 1290 bioq_remove(&sc->sc_sync_delayed, bp); 1291 G_MIRROR_LOGREQ(2, bp, 1292 "Releasing delayed synchronization request."); 1293 g_io_request(bp, bp->bio_from); 1294 } 1295 } 1296 1297 /* 1298 * Free a synchronization request and clear its slot in the array. 1299 */ 1300 static void 1301 g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp) 1302 { 1303 int i; 1304 1305 if (disk != NULL && disk->d_sync.ds_bios != NULL) { 1306 i = (int)(uintptr_t)bp->bio_caller1; 1307 disk->d_sync.ds_bios[i] = NULL; 1308 } 1309 free(bp->bio_data, M_MIRROR); 1310 g_destroy_bio(bp); 1311 } 1312 1313 /* 1314 * Handle synchronization requests. 1315 * Every synchronization request is two-steps process: first, READ request is 1316 * send to active provider and then WRITE request (with read data) to the provider 1317 * being synchronized. When WRITE is finished, new synchronization request is 1318 * send. 1319 */ 1320 static void 1321 g_mirror_sync_request(struct bio *bp) 1322 { 1323 struct g_mirror_softc *sc; 1324 struct g_mirror_disk *disk; 1325 struct g_mirror_disk_sync *sync; 1326 1327 bp->bio_from->index--; 1328 sc = bp->bio_from->geom->softc; 1329 disk = bp->bio_from->private; 1330 if (disk == NULL) { 1331 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1332 g_topology_lock(); 1333 g_mirror_kill_consumer(sc, bp->bio_from); 1334 g_topology_unlock(); 1335 g_mirror_sync_request_free(NULL, bp); 1336 sx_xlock(&sc->sc_lock); 1337 return; 1338 } 1339 1340 /* 1341 * Synchronization request. 1342 */ 1343 switch (bp->bio_cmd) { 1344 case BIO_READ: 1345 { 1346 struct g_consumer *cp; 1347 1348 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read, 1349 bp->bio_error); 1350 1351 if (bp->bio_error != 0) { 1352 G_MIRROR_LOGREQ(0, bp, 1353 "Synchronization request failed (error=%d).", 1354 bp->bio_error); 1355 g_mirror_sync_request_free(disk, bp); 1356 return; 1357 } 1358 G_MIRROR_LOGREQ(3, bp, 1359 "Synchronization request half-finished."); 1360 bp->bio_cmd = BIO_WRITE; 1361 bp->bio_cflags = 0; 1362 cp = disk->d_consumer; 1363 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1364 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1365 cp->acr, cp->acw, cp->ace)); 1366 cp->index++; 1367 g_io_request(bp, cp); 1368 return; 1369 } 1370 case BIO_WRITE: 1371 { 1372 off_t offset; 1373 void *data; 1374 int i; 1375 1376 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write, 1377 bp->bio_error); 1378 1379 if (bp->bio_error != 0) { 1380 G_MIRROR_LOGREQ(0, bp, 1381 "Synchronization request failed (error=%d).", 1382 bp->bio_error); 1383 g_mirror_sync_request_free(disk, bp); 1384 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1385 g_mirror_event_send(disk, 1386 G_MIRROR_DISK_STATE_DISCONNECTED, 1387 G_MIRROR_EVENT_DONTWAIT); 1388 return; 1389 } 1390 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1391 sync = &disk->d_sync; 1392 if (sync->ds_offset >= sc->sc_mediasize || 1393 sync->ds_consumer == NULL || 1394 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1395 /* Don't send more synchronization requests. */ 1396 sync->ds_inflight--; 1397 g_mirror_sync_request_free(disk, bp); 1398 if (sync->ds_inflight > 0) 1399 return; 1400 if (sync->ds_consumer == NULL || 1401 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1402 return; 1403 } 1404 /* Disk up-to-date, activate it. */ 1405 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1406 G_MIRROR_EVENT_DONTWAIT); 1407 return; 1408 } 1409 1410 /* Send next synchronization request. */ 1411 data = bp->bio_data; 1412 g_reset_bio(bp); 1413 bp->bio_cmd = BIO_READ; 1414 bp->bio_offset = sync->ds_offset; 1415 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1416 sync->ds_offset += bp->bio_length; 1417 bp->bio_done = g_mirror_sync_done; 1418 bp->bio_data = data; 1419 bp->bio_from = sync->ds_consumer; 1420 bp->bio_to = sc->sc_provider; 1421 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1422 sync->ds_consumer->index++; 1423 /* 1424 * Delay the request if it is colliding with a regular request. 1425 */ 1426 if (g_mirror_regular_collision(sc, bp)) 1427 g_mirror_sync_delay(sc, bp); 1428 else 1429 g_io_request(bp, sync->ds_consumer); 1430 1431 /* Release delayed requests if possible. */ 1432 g_mirror_regular_release(sc); 1433 1434 /* Find the smallest offset */ 1435 offset = sc->sc_mediasize; 1436 for (i = 0; i < g_mirror_syncreqs; i++) { 1437 bp = sync->ds_bios[i]; 1438 if (bp->bio_offset < offset) 1439 offset = bp->bio_offset; 1440 } 1441 if (sync->ds_offset_done + (MAXPHYS * 100) < offset) { 1442 /* Update offset_done on every 100 blocks. */ 1443 sync->ds_offset_done = offset; 1444 g_mirror_update_metadata(disk); 1445 } 1446 return; 1447 } 1448 default: 1449 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1450 bp->bio_cmd, sc->sc_name)); 1451 break; 1452 } 1453 } 1454 1455 static void 1456 g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1457 { 1458 struct g_mirror_disk *disk; 1459 struct g_consumer *cp; 1460 struct bio *cbp; 1461 1462 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1463 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1464 break; 1465 } 1466 if (disk == NULL) { 1467 if (bp->bio_error == 0) 1468 bp->bio_error = ENXIO; 1469 g_io_deliver(bp, bp->bio_error); 1470 return; 1471 } 1472 cbp = g_clone_bio(bp); 1473 if (cbp == NULL) { 1474 if (bp->bio_error == 0) 1475 bp->bio_error = ENOMEM; 1476 g_io_deliver(bp, bp->bio_error); 1477 return; 1478 } 1479 /* 1480 * Fill in the component buf structure. 1481 */ 1482 cp = disk->d_consumer; 1483 cbp->bio_done = g_mirror_done; 1484 cbp->bio_to = cp->provider; 1485 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1486 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1487 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1488 cp->acw, cp->ace)); 1489 cp->index++; 1490 g_io_request(cbp, cp); 1491 } 1492 1493 static void 1494 g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1495 { 1496 struct g_mirror_disk *disk; 1497 struct g_consumer *cp; 1498 struct bio *cbp; 1499 1500 disk = g_mirror_get_disk(sc); 1501 if (disk == NULL) { 1502 if (bp->bio_error == 0) 1503 bp->bio_error = ENXIO; 1504 g_io_deliver(bp, bp->bio_error); 1505 return; 1506 } 1507 cbp = g_clone_bio(bp); 1508 if (cbp == NULL) { 1509 if (bp->bio_error == 0) 1510 bp->bio_error = ENOMEM; 1511 g_io_deliver(bp, bp->bio_error); 1512 return; 1513 } 1514 /* 1515 * Fill in the component buf structure. 1516 */ 1517 cp = disk->d_consumer; 1518 cbp->bio_done = g_mirror_done; 1519 cbp->bio_to = cp->provider; 1520 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1521 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1522 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1523 cp->acw, cp->ace)); 1524 cp->index++; 1525 g_io_request(cbp, cp); 1526 } 1527 1528 #define TRACK_SIZE (1 * 1024 * 1024) 1529 #define LOAD_SCALE 256 1530 #define ABS(x) (((x) >= 0) ? (x) : (-(x))) 1531 1532 static void 1533 g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1534 { 1535 struct g_mirror_disk *disk, *dp; 1536 struct g_consumer *cp; 1537 struct bio *cbp; 1538 int prio, best; 1539 1540 /* Find a disk with the smallest load. */ 1541 disk = NULL; 1542 best = INT_MAX; 1543 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1544 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1545 continue; 1546 prio = dp->load; 1547 /* If disk head is precisely in position - highly prefer it. */ 1548 if (dp->d_last_offset == bp->bio_offset) 1549 prio -= 2 * LOAD_SCALE; 1550 else 1551 /* If disk head is close to position - prefer it. */ 1552 if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE) 1553 prio -= 1 * LOAD_SCALE; 1554 if (prio <= best) { 1555 disk = dp; 1556 best = prio; 1557 } 1558 } 1559 KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name)); 1560 cbp = g_clone_bio(bp); 1561 if (cbp == NULL) { 1562 if (bp->bio_error == 0) 1563 bp->bio_error = ENOMEM; 1564 g_io_deliver(bp, bp->bio_error); 1565 return; 1566 } 1567 /* 1568 * Fill in the component buf structure. 1569 */ 1570 cp = disk->d_consumer; 1571 cbp->bio_done = g_mirror_done; 1572 cbp->bio_to = cp->provider; 1573 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1574 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1575 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1576 cp->acw, cp->ace)); 1577 cp->index++; 1578 /* Remember last head position */ 1579 disk->d_last_offset = bp->bio_offset + bp->bio_length; 1580 /* Update loads. */ 1581 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1582 dp->load = (dp->d_consumer->index * LOAD_SCALE + 1583 dp->load * 7) / 8; 1584 } 1585 g_io_request(cbp, cp); 1586 } 1587 1588 static void 1589 g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp) 1590 { 1591 struct bio_queue_head queue; 1592 struct g_mirror_disk *disk; 1593 struct g_consumer *cp; 1594 struct bio *cbp; 1595 off_t left, mod, offset, slice; 1596 u_char *data; 1597 u_int ndisks; 1598 1599 if (bp->bio_length <= sc->sc_slice) { 1600 g_mirror_request_round_robin(sc, bp); 1601 return; 1602 } 1603 ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE); 1604 slice = bp->bio_length / ndisks; 1605 mod = slice % sc->sc_provider->sectorsize; 1606 if (mod != 0) 1607 slice += sc->sc_provider->sectorsize - mod; 1608 /* 1609 * Allocate all bios before sending any request, so we can 1610 * return ENOMEM in nice and clean way. 1611 */ 1612 left = bp->bio_length; 1613 offset = bp->bio_offset; 1614 data = bp->bio_data; 1615 bioq_init(&queue); 1616 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1617 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1618 continue; 1619 cbp = g_clone_bio(bp); 1620 if (cbp == NULL) { 1621 while ((cbp = bioq_takefirst(&queue)) != NULL) 1622 g_destroy_bio(cbp); 1623 if (bp->bio_error == 0) 1624 bp->bio_error = ENOMEM; 1625 g_io_deliver(bp, bp->bio_error); 1626 return; 1627 } 1628 bioq_insert_tail(&queue, cbp); 1629 cbp->bio_done = g_mirror_done; 1630 cbp->bio_caller1 = disk; 1631 cbp->bio_to = disk->d_consumer->provider; 1632 cbp->bio_offset = offset; 1633 cbp->bio_data = data; 1634 cbp->bio_length = MIN(left, slice); 1635 left -= cbp->bio_length; 1636 if (left == 0) 1637 break; 1638 offset += cbp->bio_length; 1639 data += cbp->bio_length; 1640 } 1641 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1642 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1643 disk = cbp->bio_caller1; 1644 cbp->bio_caller1 = NULL; 1645 cp = disk->d_consumer; 1646 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1647 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1648 cp->acr, cp->acw, cp->ace)); 1649 disk->d_consumer->index++; 1650 g_io_request(cbp, disk->d_consumer); 1651 } 1652 } 1653 1654 static void 1655 g_mirror_register_request(struct bio *bp) 1656 { 1657 struct g_mirror_softc *sc; 1658 1659 sc = bp->bio_to->private; 1660 switch (bp->bio_cmd) { 1661 case BIO_READ: 1662 switch (sc->sc_balance) { 1663 case G_MIRROR_BALANCE_LOAD: 1664 g_mirror_request_load(sc, bp); 1665 break; 1666 case G_MIRROR_BALANCE_PREFER: 1667 g_mirror_request_prefer(sc, bp); 1668 break; 1669 case G_MIRROR_BALANCE_ROUND_ROBIN: 1670 g_mirror_request_round_robin(sc, bp); 1671 break; 1672 case G_MIRROR_BALANCE_SPLIT: 1673 g_mirror_request_split(sc, bp); 1674 break; 1675 } 1676 return; 1677 case BIO_WRITE: 1678 case BIO_DELETE: 1679 { 1680 struct g_mirror_disk *disk; 1681 struct g_mirror_disk_sync *sync; 1682 struct bio_queue_head queue; 1683 struct g_consumer *cp; 1684 struct bio *cbp; 1685 1686 /* 1687 * Delay the request if it is colliding with a synchronization 1688 * request. 1689 */ 1690 if (g_mirror_sync_collision(sc, bp)) { 1691 g_mirror_regular_delay(sc, bp); 1692 return; 1693 } 1694 1695 if (sc->sc_idle) 1696 g_mirror_unidle(sc); 1697 else 1698 sc->sc_last_write = time_uptime; 1699 1700 /* 1701 * Bump syncid on first write. 1702 */ 1703 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1704 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1705 g_mirror_bump_syncid(sc); 1706 } 1707 1708 /* 1709 * Allocate all bios before sending any request, so we can 1710 * return ENOMEM in nice and clean way. 1711 */ 1712 bioq_init(&queue); 1713 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1714 sync = &disk->d_sync; 1715 switch (disk->d_state) { 1716 case G_MIRROR_DISK_STATE_ACTIVE: 1717 break; 1718 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1719 if (bp->bio_offset >= sync->ds_offset) 1720 continue; 1721 break; 1722 default: 1723 continue; 1724 } 1725 if (bp->bio_cmd == BIO_DELETE && 1726 (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0) 1727 continue; 1728 cbp = g_clone_bio(bp); 1729 if (cbp == NULL) { 1730 while ((cbp = bioq_takefirst(&queue)) != NULL) 1731 g_destroy_bio(cbp); 1732 if (bp->bio_error == 0) 1733 bp->bio_error = ENOMEM; 1734 g_io_deliver(bp, bp->bio_error); 1735 return; 1736 } 1737 bioq_insert_tail(&queue, cbp); 1738 cbp->bio_done = g_mirror_done; 1739 cp = disk->d_consumer; 1740 cbp->bio_caller1 = cp; 1741 cbp->bio_to = cp->provider; 1742 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1743 ("Consumer %s not opened (r%dw%de%d).", 1744 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1745 } 1746 if (bioq_first(&queue) == NULL) { 1747 g_io_deliver(bp, EOPNOTSUPP); 1748 return; 1749 } 1750 while ((cbp = bioq_takefirst(&queue)) != NULL) { 1751 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1752 cp = cbp->bio_caller1; 1753 cbp->bio_caller1 = NULL; 1754 cp->index++; 1755 sc->sc_writes++; 1756 g_io_request(cbp, cp); 1757 } 1758 /* 1759 * Put request onto inflight queue, so we can check if new 1760 * synchronization requests don't collide with it. 1761 */ 1762 bioq_insert_tail(&sc->sc_inflight, bp); 1763 return; 1764 } 1765 default: 1766 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1767 bp->bio_cmd, sc->sc_name)); 1768 break; 1769 } 1770 } 1771 1772 static int 1773 g_mirror_can_destroy(struct g_mirror_softc *sc) 1774 { 1775 struct g_geom *gp; 1776 struct g_consumer *cp; 1777 1778 g_topology_assert(); 1779 gp = sc->sc_geom; 1780 if (gp->softc == NULL) 1781 return (1); 1782 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0) 1783 return (0); 1784 LIST_FOREACH(cp, &gp->consumer, consumer) { 1785 if (g_mirror_is_busy(sc, cp)) 1786 return (0); 1787 } 1788 gp = sc->sc_sync.ds_geom; 1789 LIST_FOREACH(cp, &gp->consumer, consumer) { 1790 if (g_mirror_is_busy(sc, cp)) 1791 return (0); 1792 } 1793 G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1794 sc->sc_name); 1795 return (1); 1796 } 1797 1798 static int 1799 g_mirror_try_destroy(struct g_mirror_softc *sc) 1800 { 1801 1802 if (sc->sc_rootmount != NULL) { 1803 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1804 sc->sc_rootmount); 1805 root_mount_rel(sc->sc_rootmount); 1806 sc->sc_rootmount = NULL; 1807 } 1808 g_topology_lock(); 1809 if (!g_mirror_can_destroy(sc)) { 1810 g_topology_unlock(); 1811 return (0); 1812 } 1813 sc->sc_geom->softc = NULL; 1814 sc->sc_sync.ds_geom->softc = NULL; 1815 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) { 1816 g_topology_unlock(); 1817 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1818 &sc->sc_worker); 1819 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 1820 sx_xunlock(&sc->sc_lock); 1821 wakeup(&sc->sc_worker); 1822 sc->sc_worker = NULL; 1823 } else { 1824 g_topology_unlock(); 1825 g_mirror_destroy_device(sc); 1826 } 1827 return (1); 1828 } 1829 1830 /* 1831 * Worker thread. 1832 */ 1833 static void 1834 g_mirror_worker(void *arg) 1835 { 1836 struct g_mirror_softc *sc; 1837 struct g_mirror_event *ep; 1838 struct bio *bp; 1839 int timeout; 1840 1841 sc = arg; 1842 thread_lock(curthread); 1843 sched_prio(curthread, PRIBIO); 1844 thread_unlock(curthread); 1845 1846 sx_xlock(&sc->sc_lock); 1847 for (;;) { 1848 G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); 1849 /* 1850 * First take a look at events. 1851 * This is important to handle events before any I/O requests. 1852 */ 1853 ep = g_mirror_event_get(sc); 1854 if (ep != NULL) { 1855 g_mirror_event_remove(sc, ep); 1856 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { 1857 /* Update only device status. */ 1858 G_MIRROR_DEBUG(3, 1859 "Running event for device %s.", 1860 sc->sc_name); 1861 ep->e_error = 0; 1862 g_mirror_update_device(sc, true); 1863 } else { 1864 /* Update disk status. */ 1865 G_MIRROR_DEBUG(3, "Running event for disk %s.", 1866 g_mirror_get_diskname(ep->e_disk)); 1867 ep->e_error = g_mirror_update_disk(ep->e_disk, 1868 ep->e_state); 1869 if (ep->e_error == 0) 1870 g_mirror_update_device(sc, false); 1871 } 1872 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { 1873 KASSERT(ep->e_error == 0, 1874 ("Error cannot be handled.")); 1875 g_mirror_event_free(ep); 1876 } else { 1877 ep->e_flags |= G_MIRROR_EVENT_DONE; 1878 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1879 ep); 1880 mtx_lock(&sc->sc_events_mtx); 1881 wakeup(ep); 1882 mtx_unlock(&sc->sc_events_mtx); 1883 } 1884 if ((sc->sc_flags & 1885 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1886 if (g_mirror_try_destroy(sc)) { 1887 curthread->td_pflags &= ~TDP_GEOM; 1888 G_MIRROR_DEBUG(1, "Thread exiting."); 1889 kproc_exit(0); 1890 } 1891 } 1892 G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__); 1893 continue; 1894 } 1895 /* 1896 * Check if we can mark array as CLEAN and if we can't take 1897 * how much seconds should we wait. 1898 */ 1899 timeout = g_mirror_idle(sc, -1); 1900 /* 1901 * Now I/O requests. 1902 */ 1903 /* Get first request from the queue. */ 1904 mtx_lock(&sc->sc_queue_mtx); 1905 bp = bioq_takefirst(&sc->sc_queue); 1906 if (bp == NULL) { 1907 if ((sc->sc_flags & 1908 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1909 mtx_unlock(&sc->sc_queue_mtx); 1910 if (g_mirror_try_destroy(sc)) { 1911 curthread->td_pflags &= ~TDP_GEOM; 1912 G_MIRROR_DEBUG(1, "Thread exiting."); 1913 kproc_exit(0); 1914 } 1915 mtx_lock(&sc->sc_queue_mtx); 1916 } 1917 sx_xunlock(&sc->sc_lock); 1918 /* 1919 * XXX: We can miss an event here, because an event 1920 * can be added without sx-device-lock and without 1921 * mtx-queue-lock. Maybe I should just stop using 1922 * dedicated mutex for events synchronization and 1923 * stick with the queue lock? 1924 * The event will hang here until next I/O request 1925 * or next event is received. 1926 */ 1927 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1928 timeout * hz); 1929 sx_xlock(&sc->sc_lock); 1930 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1931 continue; 1932 } 1933 mtx_unlock(&sc->sc_queue_mtx); 1934 1935 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 1936 (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) { 1937 g_mirror_sync_request(bp); /* READ */ 1938 } else if (bp->bio_to != sc->sc_provider) { 1939 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 1940 g_mirror_regular_request(bp); 1941 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 1942 g_mirror_sync_request(bp); /* WRITE */ 1943 else { 1944 KASSERT(0, 1945 ("Invalid request cflags=0x%hx to=%s.", 1946 bp->bio_cflags, bp->bio_to->name)); 1947 } 1948 } else { 1949 g_mirror_register_request(bp); 1950 } 1951 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 1952 } 1953 } 1954 1955 static void 1956 g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 1957 { 1958 1959 sx_assert(&sc->sc_lock, SX_LOCKED); 1960 1961 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 1962 return; 1963 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 1964 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 1965 g_mirror_get_diskname(disk), sc->sc_name); 1966 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 1967 } else if (sc->sc_idle && 1968 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 1969 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 1970 g_mirror_get_diskname(disk), sc->sc_name); 1971 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 1972 } 1973 } 1974 1975 static void 1976 g_mirror_sync_start(struct g_mirror_disk *disk) 1977 { 1978 struct g_mirror_softc *sc; 1979 struct g_consumer *cp; 1980 struct bio *bp; 1981 int error, i; 1982 1983 g_topology_assert_not(); 1984 sc = disk->d_softc; 1985 sx_assert(&sc->sc_lock, SX_LOCKED); 1986 1987 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 1988 ("Disk %s is not marked for synchronization.", 1989 g_mirror_get_diskname(disk))); 1990 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1991 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 1992 sc->sc_state)); 1993 1994 sx_xunlock(&sc->sc_lock); 1995 g_topology_lock(); 1996 cp = g_new_consumer(sc->sc_sync.ds_geom); 1997 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 1998 error = g_attach(cp, sc->sc_provider); 1999 KASSERT(error == 0, 2000 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2001 error = g_access(cp, 1, 0, 0); 2002 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2003 g_topology_unlock(); 2004 sx_xlock(&sc->sc_lock); 2005 2006 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2007 g_mirror_get_diskname(disk)); 2008 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0) 2009 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2010 KASSERT(disk->d_sync.ds_consumer == NULL, 2011 ("Sync consumer already exists (device=%s, disk=%s).", 2012 sc->sc_name, g_mirror_get_diskname(disk))); 2013 2014 disk->d_sync.ds_consumer = cp; 2015 disk->d_sync.ds_consumer->private = disk; 2016 disk->d_sync.ds_consumer->index = 0; 2017 2018 /* 2019 * Allocate memory for synchronization bios and initialize them. 2020 */ 2021 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 2022 M_MIRROR, M_WAITOK); 2023 for (i = 0; i < g_mirror_syncreqs; i++) { 2024 bp = g_alloc_bio(); 2025 disk->d_sync.ds_bios[i] = bp; 2026 bp->bio_parent = NULL; 2027 bp->bio_cmd = BIO_READ; 2028 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 2029 bp->bio_cflags = 0; 2030 bp->bio_offset = disk->d_sync.ds_offset; 2031 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 2032 disk->d_sync.ds_offset += bp->bio_length; 2033 bp->bio_done = g_mirror_sync_done; 2034 bp->bio_from = disk->d_sync.ds_consumer; 2035 bp->bio_to = sc->sc_provider; 2036 bp->bio_caller1 = (void *)(uintptr_t)i; 2037 } 2038 2039 /* Increase the number of disks in SYNCHRONIZING state. */ 2040 sc->sc_sync.ds_ndisks++; 2041 /* Set the number of in-flight synchronization requests. */ 2042 disk->d_sync.ds_inflight = g_mirror_syncreqs; 2043 2044 /* 2045 * Fire off first synchronization requests. 2046 */ 2047 for (i = 0; i < g_mirror_syncreqs; i++) { 2048 bp = disk->d_sync.ds_bios[i]; 2049 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 2050 disk->d_sync.ds_consumer->index++; 2051 /* 2052 * Delay the request if it is colliding with a regular request. 2053 */ 2054 if (g_mirror_regular_collision(sc, bp)) 2055 g_mirror_sync_delay(sc, bp); 2056 else 2057 g_io_request(bp, disk->d_sync.ds_consumer); 2058 } 2059 } 2060 2061 /* 2062 * Stop synchronization process. 2063 * type: 0 - synchronization finished 2064 * 1 - synchronization stopped 2065 */ 2066 static void 2067 g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2068 { 2069 struct g_mirror_softc *sc; 2070 struct g_consumer *cp; 2071 2072 g_topology_assert_not(); 2073 sc = disk->d_softc; 2074 sx_assert(&sc->sc_lock, SX_LOCKED); 2075 2076 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2077 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2078 g_mirror_disk_state2str(disk->d_state))); 2079 if (disk->d_sync.ds_consumer == NULL) 2080 return; 2081 2082 if (type == 0) { 2083 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2084 sc->sc_name, g_mirror_get_diskname(disk)); 2085 } else /* if (type == 1) */ { 2086 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2087 sc->sc_name, g_mirror_get_diskname(disk)); 2088 } 2089 g_mirror_regular_release(sc); 2090 free(disk->d_sync.ds_bios, M_MIRROR); 2091 disk->d_sync.ds_bios = NULL; 2092 cp = disk->d_sync.ds_consumer; 2093 disk->d_sync.ds_consumer = NULL; 2094 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2095 sc->sc_sync.ds_ndisks--; 2096 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 2097 g_topology_lock(); 2098 g_mirror_kill_consumer(sc, cp); 2099 g_topology_unlock(); 2100 sx_xlock(&sc->sc_lock); 2101 } 2102 2103 static void 2104 g_mirror_launch_provider(struct g_mirror_softc *sc) 2105 { 2106 struct g_mirror_disk *disk; 2107 struct g_provider *pp, *dp; 2108 2109 sx_assert(&sc->sc_lock, SX_LOCKED); 2110 2111 g_topology_lock(); 2112 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2113 pp->flags |= G_PF_DIRECT_RECEIVE; 2114 pp->mediasize = sc->sc_mediasize; 2115 pp->sectorsize = sc->sc_sectorsize; 2116 pp->stripesize = 0; 2117 pp->stripeoffset = 0; 2118 2119 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2120 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2121 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2122 2123 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2124 if (disk->d_consumer && disk->d_consumer->provider) { 2125 dp = disk->d_consumer->provider; 2126 if (dp->stripesize > pp->stripesize) { 2127 pp->stripesize = dp->stripesize; 2128 pp->stripeoffset = dp->stripeoffset; 2129 } 2130 /* A provider underneath us doesn't support unmapped */ 2131 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2132 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2133 "because of %s.", dp->name); 2134 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2135 } 2136 } 2137 } 2138 pp->private = sc; 2139 sc->sc_refcnt++; 2140 sc->sc_provider = pp; 2141 g_error_provider(pp, 0); 2142 g_topology_unlock(); 2143 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2144 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2145 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2146 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2147 g_mirror_sync_start(disk); 2148 } 2149 } 2150 2151 static void 2152 g_mirror_destroy_provider(struct g_mirror_softc *sc) 2153 { 2154 struct g_mirror_disk *disk; 2155 struct bio *bp; 2156 2157 g_topology_assert_not(); 2158 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2159 sc->sc_name)); 2160 2161 g_topology_lock(); 2162 g_error_provider(sc->sc_provider, ENXIO); 2163 mtx_lock(&sc->sc_queue_mtx); 2164 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) { 2165 /* 2166 * Abort any pending I/O that wasn't generated by us. 2167 * Synchronization requests and requests destined for individual 2168 * mirror components can be destroyed immediately. 2169 */ 2170 if (bp->bio_to == sc->sc_provider && 2171 bp->bio_from->geom != sc->sc_sync.ds_geom) { 2172 g_io_deliver(bp, ENXIO); 2173 } else { 2174 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 2175 free(bp->bio_data, M_MIRROR); 2176 g_destroy_bio(bp); 2177 } 2178 } 2179 mtx_unlock(&sc->sc_queue_mtx); 2180 g_wither_provider(sc->sc_provider, ENXIO); 2181 sc->sc_provider = NULL; 2182 G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name); 2183 g_topology_unlock(); 2184 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2185 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2186 g_mirror_sync_stop(disk, 1); 2187 } 2188 } 2189 2190 static void 2191 g_mirror_go(void *arg) 2192 { 2193 struct g_mirror_softc *sc; 2194 2195 sc = arg; 2196 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2197 g_mirror_event_send(sc, 0, 2198 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2199 } 2200 2201 static u_int 2202 g_mirror_determine_state(struct g_mirror_disk *disk) 2203 { 2204 struct g_mirror_softc *sc; 2205 u_int state; 2206 2207 sc = disk->d_softc; 2208 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2209 if ((disk->d_flags & 2210 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2211 /* Disk does not need synchronization. */ 2212 state = G_MIRROR_DISK_STATE_ACTIVE; 2213 } else { 2214 if ((sc->sc_flags & 2215 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2216 (disk->d_flags & 2217 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2218 /* 2219 * We can start synchronization from 2220 * the stored offset. 2221 */ 2222 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2223 } else { 2224 state = G_MIRROR_DISK_STATE_STALE; 2225 } 2226 } 2227 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2228 /* 2229 * Reset all synchronization data for this disk, 2230 * because if it even was synchronized, it was 2231 * synchronized to disks with different syncid. 2232 */ 2233 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2234 disk->d_sync.ds_offset = 0; 2235 disk->d_sync.ds_offset_done = 0; 2236 disk->d_sync.ds_syncid = sc->sc_syncid; 2237 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2238 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2239 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2240 } else { 2241 state = G_MIRROR_DISK_STATE_STALE; 2242 } 2243 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2244 /* 2245 * Not good, NOT GOOD! 2246 * It means that mirror was started on stale disks 2247 * and more fresh disk just arrive. 2248 * If there were writes, mirror is broken, sorry. 2249 * I think the best choice here is don't touch 2250 * this disk and inform the user loudly. 2251 */ 2252 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2253 "disk (%s) arrives!! It will not be connected to the " 2254 "running device.", sc->sc_name, 2255 g_mirror_get_diskname(disk)); 2256 g_mirror_destroy_disk(disk); 2257 state = G_MIRROR_DISK_STATE_NONE; 2258 /* Return immediately, because disk was destroyed. */ 2259 return (state); 2260 } 2261 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2262 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2263 return (state); 2264 } 2265 2266 /* 2267 * Update device state. 2268 */ 2269 static void 2270 g_mirror_update_device(struct g_mirror_softc *sc, bool force) 2271 { 2272 struct g_mirror_disk *disk; 2273 u_int state; 2274 2275 sx_assert(&sc->sc_lock, SX_XLOCKED); 2276 2277 switch (sc->sc_state) { 2278 case G_MIRROR_DEVICE_STATE_STARTING: 2279 { 2280 struct g_mirror_disk *pdisk, *tdisk; 2281 u_int dirty, ndisks, genid, syncid; 2282 bool broken; 2283 2284 KASSERT(sc->sc_provider == NULL, 2285 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2286 /* 2287 * Are we ready? We are, if all disks are connected or 2288 * if we have any disks and 'force' is true. 2289 */ 2290 ndisks = g_mirror_ndisks(sc, -1); 2291 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2292 ; 2293 } else if (ndisks == 0) { 2294 /* 2295 * Disks went down in starting phase, so destroy 2296 * device. 2297 */ 2298 callout_drain(&sc->sc_callout); 2299 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2300 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2301 sc->sc_rootmount); 2302 root_mount_rel(sc->sc_rootmount); 2303 sc->sc_rootmount = NULL; 2304 return; 2305 } else { 2306 return; 2307 } 2308 2309 /* 2310 * Activate all disks with the biggest syncid. 2311 */ 2312 if (force) { 2313 /* 2314 * If 'force' is true, we have been called due to 2315 * timeout, so don't bother canceling timeout. 2316 */ 2317 ndisks = 0; 2318 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2319 if ((disk->d_flags & 2320 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2321 ndisks++; 2322 } 2323 } 2324 if (ndisks == 0) { 2325 /* No valid disks found, destroy device. */ 2326 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2327 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2328 __LINE__, sc->sc_rootmount); 2329 root_mount_rel(sc->sc_rootmount); 2330 sc->sc_rootmount = NULL; 2331 return; 2332 } 2333 } else { 2334 /* Cancel timeout. */ 2335 callout_drain(&sc->sc_callout); 2336 } 2337 2338 /* 2339 * Find the biggest genid. 2340 */ 2341 genid = 0; 2342 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2343 if (disk->d_genid > genid) 2344 genid = disk->d_genid; 2345 } 2346 sc->sc_genid = genid; 2347 /* 2348 * Remove all disks without the biggest genid. 2349 */ 2350 broken = false; 2351 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2352 if (disk->d_genid < genid) { 2353 G_MIRROR_DEBUG(0, 2354 "Component %s (device %s) broken, skipping.", 2355 g_mirror_get_diskname(disk), sc->sc_name); 2356 g_mirror_destroy_disk(disk); 2357 /* 2358 * Bump the syncid in case we discover a healthy 2359 * replacement disk after starting the mirror. 2360 */ 2361 broken = true; 2362 } 2363 } 2364 2365 /* 2366 * Find the biggest syncid. 2367 */ 2368 syncid = 0; 2369 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2370 if (disk->d_sync.ds_syncid > syncid) 2371 syncid = disk->d_sync.ds_syncid; 2372 } 2373 2374 /* 2375 * Here we need to look for dirty disks and if all disks 2376 * with the biggest syncid are dirty, we have to choose 2377 * one with the biggest priority and rebuild the rest. 2378 */ 2379 /* 2380 * Find the number of dirty disks with the biggest syncid. 2381 * Find the number of disks with the biggest syncid. 2382 * While here, find a disk with the biggest priority. 2383 */ 2384 dirty = ndisks = 0; 2385 pdisk = NULL; 2386 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2387 if (disk->d_sync.ds_syncid != syncid) 2388 continue; 2389 if ((disk->d_flags & 2390 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2391 continue; 2392 } 2393 ndisks++; 2394 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2395 dirty++; 2396 if (pdisk == NULL || 2397 pdisk->d_priority < disk->d_priority) { 2398 pdisk = disk; 2399 } 2400 } 2401 } 2402 if (dirty == 0) { 2403 /* No dirty disks at all, great. */ 2404 } else if (dirty == ndisks) { 2405 /* 2406 * Force synchronization for all dirty disks except one 2407 * with the biggest priority. 2408 */ 2409 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2410 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2411 "master disk for synchronization.", 2412 g_mirror_get_diskname(pdisk), sc->sc_name); 2413 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2414 if (disk->d_sync.ds_syncid != syncid) 2415 continue; 2416 if ((disk->d_flags & 2417 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2418 continue; 2419 } 2420 KASSERT((disk->d_flags & 2421 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2422 ("Disk %s isn't marked as dirty.", 2423 g_mirror_get_diskname(disk))); 2424 /* Skip the disk with the biggest priority. */ 2425 if (disk == pdisk) 2426 continue; 2427 disk->d_sync.ds_syncid = 0; 2428 } 2429 } else if (dirty < ndisks) { 2430 /* 2431 * Force synchronization for all dirty disks. 2432 * We have some non-dirty disks. 2433 */ 2434 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2435 if (disk->d_sync.ds_syncid != syncid) 2436 continue; 2437 if ((disk->d_flags & 2438 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2439 continue; 2440 } 2441 if ((disk->d_flags & 2442 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2443 continue; 2444 } 2445 disk->d_sync.ds_syncid = 0; 2446 } 2447 } 2448 2449 /* Reset hint. */ 2450 sc->sc_hint = NULL; 2451 sc->sc_syncid = syncid; 2452 if (force || broken) { 2453 /* Remember to bump syncid on first write. */ 2454 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2455 } 2456 state = G_MIRROR_DEVICE_STATE_RUNNING; 2457 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2458 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2459 g_mirror_device_state2str(state)); 2460 sc->sc_state = state; 2461 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2462 state = g_mirror_determine_state(disk); 2463 g_mirror_event_send(disk, state, 2464 G_MIRROR_EVENT_DONTWAIT); 2465 if (state == G_MIRROR_DISK_STATE_STALE) 2466 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2467 } 2468 break; 2469 } 2470 case G_MIRROR_DEVICE_STATE_RUNNING: 2471 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2472 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2473 /* 2474 * No active disks or no disks at all, 2475 * so destroy device. 2476 */ 2477 if (sc->sc_provider != NULL) 2478 g_mirror_destroy_provider(sc); 2479 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2480 break; 2481 } else if (g_mirror_ndisks(sc, 2482 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2483 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2484 /* 2485 * We have active disks, launch provider if it doesn't 2486 * exist. 2487 */ 2488 if (sc->sc_provider == NULL) 2489 g_mirror_launch_provider(sc); 2490 if (sc->sc_rootmount != NULL) { 2491 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2492 __LINE__, sc->sc_rootmount); 2493 root_mount_rel(sc->sc_rootmount); 2494 sc->sc_rootmount = NULL; 2495 } 2496 } 2497 /* 2498 * Genid should be bumped immediately, so do it here. 2499 */ 2500 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2501 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2502 g_mirror_bump_genid(sc); 2503 } 2504 break; 2505 default: 2506 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2507 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2508 break; 2509 } 2510 } 2511 2512 /* 2513 * Update disk state and device state if needed. 2514 */ 2515 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2516 "Disk %s state changed from %s to %s (device %s).", \ 2517 g_mirror_get_diskname(disk), \ 2518 g_mirror_disk_state2str(disk->d_state), \ 2519 g_mirror_disk_state2str(state), sc->sc_name) 2520 static int 2521 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2522 { 2523 struct g_mirror_softc *sc; 2524 2525 sc = disk->d_softc; 2526 sx_assert(&sc->sc_lock, SX_XLOCKED); 2527 2528 again: 2529 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2530 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2531 g_mirror_disk_state2str(state)); 2532 switch (state) { 2533 case G_MIRROR_DISK_STATE_NEW: 2534 /* 2535 * Possible scenarios: 2536 * 1. New disk arrive. 2537 */ 2538 /* Previous state should be NONE. */ 2539 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2540 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2541 g_mirror_disk_state2str(disk->d_state))); 2542 DISK_STATE_CHANGED(); 2543 2544 disk->d_state = state; 2545 if (LIST_EMPTY(&sc->sc_disks)) 2546 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2547 else { 2548 struct g_mirror_disk *dp; 2549 2550 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2551 if (disk->d_priority >= dp->d_priority) { 2552 LIST_INSERT_BEFORE(dp, disk, d_next); 2553 dp = NULL; 2554 break; 2555 } 2556 if (LIST_NEXT(dp, d_next) == NULL) 2557 break; 2558 } 2559 if (dp != NULL) 2560 LIST_INSERT_AFTER(dp, disk, d_next); 2561 } 2562 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2563 sc->sc_name, g_mirror_get_diskname(disk)); 2564 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2565 break; 2566 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2567 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2568 g_mirror_device_state2str(sc->sc_state), 2569 g_mirror_get_diskname(disk), 2570 g_mirror_disk_state2str(disk->d_state))); 2571 state = g_mirror_determine_state(disk); 2572 if (state != G_MIRROR_DISK_STATE_NONE) 2573 goto again; 2574 break; 2575 case G_MIRROR_DISK_STATE_ACTIVE: 2576 /* 2577 * Possible scenarios: 2578 * 1. New disk does not need synchronization. 2579 * 2. Synchronization process finished successfully. 2580 */ 2581 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2582 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2583 g_mirror_device_state2str(sc->sc_state), 2584 g_mirror_get_diskname(disk), 2585 g_mirror_disk_state2str(disk->d_state))); 2586 /* Previous state should be NEW or SYNCHRONIZING. */ 2587 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2588 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2589 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2590 g_mirror_disk_state2str(disk->d_state))); 2591 DISK_STATE_CHANGED(); 2592 2593 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2594 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2595 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2596 g_mirror_sync_stop(disk, 0); 2597 } 2598 disk->d_state = state; 2599 disk->d_sync.ds_offset = 0; 2600 disk->d_sync.ds_offset_done = 0; 2601 g_mirror_update_idle(sc, disk); 2602 g_mirror_update_metadata(disk); 2603 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2604 sc->sc_name, g_mirror_get_diskname(disk)); 2605 break; 2606 case G_MIRROR_DISK_STATE_STALE: 2607 /* 2608 * Possible scenarios: 2609 * 1. Stale disk was connected. 2610 */ 2611 /* Previous state should be NEW. */ 2612 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2613 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2614 g_mirror_disk_state2str(disk->d_state))); 2615 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2616 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2617 g_mirror_device_state2str(sc->sc_state), 2618 g_mirror_get_diskname(disk), 2619 g_mirror_disk_state2str(disk->d_state))); 2620 /* 2621 * STALE state is only possible if device is marked 2622 * NOAUTOSYNC. 2623 */ 2624 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2625 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2626 g_mirror_device_state2str(sc->sc_state), 2627 g_mirror_get_diskname(disk), 2628 g_mirror_disk_state2str(disk->d_state))); 2629 DISK_STATE_CHANGED(); 2630 2631 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2632 disk->d_state = state; 2633 g_mirror_update_metadata(disk); 2634 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2635 sc->sc_name, g_mirror_get_diskname(disk)); 2636 break; 2637 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2638 /* 2639 * Possible scenarios: 2640 * 1. Disk which needs synchronization was connected. 2641 */ 2642 /* Previous state should be NEW. */ 2643 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2644 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2645 g_mirror_disk_state2str(disk->d_state))); 2646 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2647 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2648 g_mirror_device_state2str(sc->sc_state), 2649 g_mirror_get_diskname(disk), 2650 g_mirror_disk_state2str(disk->d_state))); 2651 DISK_STATE_CHANGED(); 2652 2653 if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2654 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2655 disk->d_state = state; 2656 if (sc->sc_provider != NULL) { 2657 g_mirror_sync_start(disk); 2658 g_mirror_update_metadata(disk); 2659 } 2660 break; 2661 case G_MIRROR_DISK_STATE_DISCONNECTED: 2662 /* 2663 * Possible scenarios: 2664 * 1. Device wasn't running yet, but disk disappear. 2665 * 2. Disk was active and disapppear. 2666 * 3. Disk disappear during synchronization process. 2667 */ 2668 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2669 /* 2670 * Previous state should be ACTIVE, STALE or 2671 * SYNCHRONIZING. 2672 */ 2673 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2674 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2675 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2676 ("Wrong disk state (%s, %s).", 2677 g_mirror_get_diskname(disk), 2678 g_mirror_disk_state2str(disk->d_state))); 2679 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2680 /* Previous state should be NEW. */ 2681 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2682 ("Wrong disk state (%s, %s).", 2683 g_mirror_get_diskname(disk), 2684 g_mirror_disk_state2str(disk->d_state))); 2685 /* 2686 * Reset bumping syncid if disk disappeared in STARTING 2687 * state. 2688 */ 2689 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2690 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2691 #ifdef INVARIANTS 2692 } else { 2693 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2694 sc->sc_name, 2695 g_mirror_device_state2str(sc->sc_state), 2696 g_mirror_get_diskname(disk), 2697 g_mirror_disk_state2str(disk->d_state))); 2698 #endif 2699 } 2700 DISK_STATE_CHANGED(); 2701 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2702 sc->sc_name, g_mirror_get_diskname(disk)); 2703 2704 g_mirror_destroy_disk(disk); 2705 break; 2706 case G_MIRROR_DISK_STATE_DESTROY: 2707 { 2708 int error; 2709 2710 error = g_mirror_clear_metadata(disk); 2711 if (error != 0) { 2712 G_MIRROR_DEBUG(0, 2713 "Device %s: failed to clear metadata on %s: %d.", 2714 sc->sc_name, g_mirror_get_diskname(disk), error); 2715 break; 2716 } 2717 DISK_STATE_CHANGED(); 2718 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2719 sc->sc_name, g_mirror_get_diskname(disk)); 2720 2721 g_mirror_destroy_disk(disk); 2722 sc->sc_ndisks--; 2723 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2724 g_mirror_update_metadata(disk); 2725 } 2726 break; 2727 } 2728 default: 2729 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2730 break; 2731 } 2732 return (0); 2733 } 2734 #undef DISK_STATE_CHANGED 2735 2736 int 2737 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2738 { 2739 struct g_provider *pp; 2740 u_char *buf; 2741 int error; 2742 2743 g_topology_assert(); 2744 2745 error = g_access(cp, 1, 0, 0); 2746 if (error != 0) 2747 return (error); 2748 pp = cp->provider; 2749 g_topology_unlock(); 2750 /* Metadata are stored on last sector. */ 2751 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2752 &error); 2753 g_topology_lock(); 2754 g_access(cp, -1, 0, 0); 2755 if (buf == NULL) { 2756 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2757 cp->provider->name, error); 2758 return (error); 2759 } 2760 2761 /* Decode metadata. */ 2762 error = mirror_metadata_decode(buf, md); 2763 g_free(buf); 2764 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2765 return (EINVAL); 2766 if (md->md_version > G_MIRROR_VERSION) { 2767 G_MIRROR_DEBUG(0, 2768 "Kernel module is too old to handle metadata from %s.", 2769 cp->provider->name); 2770 return (EINVAL); 2771 } 2772 if (error != 0) { 2773 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2774 cp->provider->name); 2775 return (error); 2776 } 2777 2778 return (0); 2779 } 2780 2781 static int 2782 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2783 struct g_mirror_metadata *md) 2784 { 2785 2786 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2787 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2788 pp->name, md->md_did); 2789 return (EEXIST); 2790 } 2791 if (md->md_all != sc->sc_ndisks) { 2792 G_MIRROR_DEBUG(1, 2793 "Invalid '%s' field on disk %s (device %s), skipping.", 2794 "md_all", pp->name, sc->sc_name); 2795 return (EINVAL); 2796 } 2797 if (md->md_slice != sc->sc_slice) { 2798 G_MIRROR_DEBUG(1, 2799 "Invalid '%s' field on disk %s (device %s), skipping.", 2800 "md_slice", pp->name, sc->sc_name); 2801 return (EINVAL); 2802 } 2803 if (md->md_balance != sc->sc_balance) { 2804 G_MIRROR_DEBUG(1, 2805 "Invalid '%s' field on disk %s (device %s), skipping.", 2806 "md_balance", pp->name, sc->sc_name); 2807 return (EINVAL); 2808 } 2809 #if 0 2810 if (md->md_mediasize != sc->sc_mediasize) { 2811 G_MIRROR_DEBUG(1, 2812 "Invalid '%s' field on disk %s (device %s), skipping.", 2813 "md_mediasize", pp->name, sc->sc_name); 2814 return (EINVAL); 2815 } 2816 #endif 2817 if (sc->sc_mediasize > pp->mediasize) { 2818 G_MIRROR_DEBUG(1, 2819 "Invalid size of disk %s (device %s), skipping.", pp->name, 2820 sc->sc_name); 2821 return (EINVAL); 2822 } 2823 if (md->md_sectorsize != sc->sc_sectorsize) { 2824 G_MIRROR_DEBUG(1, 2825 "Invalid '%s' field on disk %s (device %s), skipping.", 2826 "md_sectorsize", pp->name, sc->sc_name); 2827 return (EINVAL); 2828 } 2829 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2830 G_MIRROR_DEBUG(1, 2831 "Invalid sector size of disk %s (device %s), skipping.", 2832 pp->name, sc->sc_name); 2833 return (EINVAL); 2834 } 2835 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2836 G_MIRROR_DEBUG(1, 2837 "Invalid device flags on disk %s (device %s), skipping.", 2838 pp->name, sc->sc_name); 2839 return (EINVAL); 2840 } 2841 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2842 G_MIRROR_DEBUG(1, 2843 "Invalid disk flags on disk %s (device %s), skipping.", 2844 pp->name, sc->sc_name); 2845 return (EINVAL); 2846 } 2847 return (0); 2848 } 2849 2850 int 2851 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2852 struct g_mirror_metadata *md) 2853 { 2854 struct g_mirror_disk *disk; 2855 int error; 2856 2857 g_topology_assert_not(); 2858 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2859 2860 error = g_mirror_check_metadata(sc, pp, md); 2861 if (error != 0) 2862 return (error); 2863 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2864 md->md_genid < sc->sc_genid) { 2865 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2866 pp->name, sc->sc_name); 2867 return (EINVAL); 2868 } 2869 disk = g_mirror_init_disk(sc, pp, md, &error); 2870 if (disk == NULL) 2871 return (error); 2872 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2873 G_MIRROR_EVENT_WAIT); 2874 if (error != 0) 2875 return (error); 2876 if (md->md_version < G_MIRROR_VERSION) { 2877 G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 2878 pp->name, md->md_version, G_MIRROR_VERSION); 2879 g_mirror_update_metadata(disk); 2880 } 2881 return (0); 2882 } 2883 2884 static void 2885 g_mirror_destroy_delayed(void *arg, int flag) 2886 { 2887 struct g_mirror_softc *sc; 2888 int error; 2889 2890 if (flag == EV_CANCEL) { 2891 G_MIRROR_DEBUG(1, "Destroying canceled."); 2892 return; 2893 } 2894 sc = arg; 2895 g_topology_unlock(); 2896 sx_xlock(&sc->sc_lock); 2897 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2898 ("DESTROY flag set on %s.", sc->sc_name)); 2899 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2900 ("DESTROYING flag not set on %s.", sc->sc_name)); 2901 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2902 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2903 if (error != 0) { 2904 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2905 sc->sc_name, error); 2906 sx_xunlock(&sc->sc_lock); 2907 } 2908 g_topology_lock(); 2909 } 2910 2911 static int 2912 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2913 { 2914 struct g_mirror_softc *sc; 2915 int error = 0; 2916 2917 g_topology_assert(); 2918 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2919 acw, ace); 2920 2921 sc = pp->private; 2922 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2923 2924 g_topology_unlock(); 2925 sx_xlock(&sc->sc_lock); 2926 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2927 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 || 2928 LIST_EMPTY(&sc->sc_disks)) { 2929 if (acr > 0 || acw > 0 || ace > 0) 2930 error = ENXIO; 2931 goto end; 2932 } 2933 sc->sc_provider_open += acr + acw + ace; 2934 if (pp->acw + acw == 0) 2935 g_mirror_idle(sc, 0); 2936 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 && 2937 sc->sc_provider_open == 0) 2938 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL); 2939 end: 2940 sx_xunlock(&sc->sc_lock); 2941 g_topology_lock(); 2942 return (error); 2943 } 2944 2945 static struct g_geom * 2946 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2947 { 2948 struct g_mirror_softc *sc; 2949 struct g_geom *gp; 2950 int error, timeout; 2951 2952 g_topology_assert(); 2953 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2954 md->md_mid); 2955 2956 /* One disk is minimum. */ 2957 if (md->md_all < 1) 2958 return (NULL); 2959 /* 2960 * Action geom. 2961 */ 2962 gp = g_new_geomf(mp, "%s", md->md_name); 2963 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2964 gp->start = g_mirror_start; 2965 gp->orphan = g_mirror_orphan; 2966 gp->access = g_mirror_access; 2967 gp->dumpconf = g_mirror_dumpconf; 2968 2969 sc->sc_id = md->md_mid; 2970 sc->sc_slice = md->md_slice; 2971 sc->sc_balance = md->md_balance; 2972 sc->sc_mediasize = md->md_mediasize; 2973 sc->sc_sectorsize = md->md_sectorsize; 2974 sc->sc_ndisks = md->md_all; 2975 sc->sc_flags = md->md_mflags; 2976 sc->sc_bump_id = 0; 2977 sc->sc_idle = 1; 2978 sc->sc_last_write = time_uptime; 2979 sc->sc_writes = 0; 2980 sc->sc_refcnt = 1; 2981 sx_init(&sc->sc_lock, "gmirror:lock"); 2982 bioq_init(&sc->sc_queue); 2983 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2984 bioq_init(&sc->sc_regular_delayed); 2985 bioq_init(&sc->sc_inflight); 2986 bioq_init(&sc->sc_sync_delayed); 2987 LIST_INIT(&sc->sc_disks); 2988 TAILQ_INIT(&sc->sc_events); 2989 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2990 callout_init(&sc->sc_callout, 1); 2991 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 2992 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2993 gp->softc = sc; 2994 sc->sc_geom = gp; 2995 sc->sc_provider = NULL; 2996 sc->sc_provider_open = 0; 2997 /* 2998 * Synchronization geom. 2999 */ 3000 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3001 gp->softc = sc; 3002 gp->orphan = g_mirror_orphan; 3003 sc->sc_sync.ds_geom = gp; 3004 sc->sc_sync.ds_ndisks = 0; 3005 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 3006 "g_mirror %s", md->md_name); 3007 if (error != 0) { 3008 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 3009 sc->sc_name); 3010 g_destroy_geom(sc->sc_sync.ds_geom); 3011 g_destroy_geom(sc->sc_geom); 3012 g_mirror_free_device(sc); 3013 return (NULL); 3014 } 3015 3016 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 3017 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3018 3019 sc->sc_rootmount = root_mount_hold("GMIRROR"); 3020 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3021 /* 3022 * Run timeout. 3023 */ 3024 timeout = g_mirror_timeout * hz; 3025 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 3026 return (sc->sc_geom); 3027 } 3028 3029 int 3030 g_mirror_destroy(struct g_mirror_softc *sc, int how) 3031 { 3032 struct g_mirror_disk *disk; 3033 3034 g_topology_assert_not(); 3035 sx_assert(&sc->sc_lock, SX_XLOCKED); 3036 3037 if (sc->sc_provider_open != 0 || SCHEDULER_STOPPED()) { 3038 switch (how) { 3039 case G_MIRROR_DESTROY_SOFT: 3040 G_MIRROR_DEBUG(1, 3041 "Device %s is still open (%d).", sc->sc_name, 3042 sc->sc_provider_open); 3043 return (EBUSY); 3044 case G_MIRROR_DESTROY_DELAYED: 3045 G_MIRROR_DEBUG(1, 3046 "Device %s will be destroyed on last close.", 3047 sc->sc_name); 3048 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 3049 if (disk->d_state == 3050 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3051 g_mirror_sync_stop(disk, 1); 3052 } 3053 } 3054 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 3055 return (EBUSY); 3056 case G_MIRROR_DESTROY_HARD: 3057 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 3058 "can't be definitely removed.", sc->sc_name); 3059 } 3060 } 3061 3062 g_topology_lock(); 3063 if (sc->sc_geom->softc == NULL) { 3064 g_topology_unlock(); 3065 return (0); 3066 } 3067 sc->sc_geom->softc = NULL; 3068 sc->sc_sync.ds_geom->softc = NULL; 3069 g_topology_unlock(); 3070 3071 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3072 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 3073 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3074 sx_xunlock(&sc->sc_lock); 3075 mtx_lock(&sc->sc_queue_mtx); 3076 wakeup(sc); 3077 mtx_unlock(&sc->sc_queue_mtx); 3078 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3079 while (sc->sc_worker != NULL) 3080 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3081 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3082 sx_xlock(&sc->sc_lock); 3083 g_mirror_destroy_device(sc); 3084 return (0); 3085 } 3086 3087 static void 3088 g_mirror_taste_orphan(struct g_consumer *cp) 3089 { 3090 3091 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3092 cp->provider->name)); 3093 } 3094 3095 static struct g_geom * 3096 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3097 { 3098 struct g_mirror_metadata md; 3099 struct g_mirror_softc *sc; 3100 struct g_consumer *cp; 3101 struct g_geom *gp; 3102 int error; 3103 3104 g_topology_assert(); 3105 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3106 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3107 3108 gp = g_new_geomf(mp, "mirror:taste"); 3109 /* 3110 * This orphan function should be never called. 3111 */ 3112 gp->orphan = g_mirror_taste_orphan; 3113 cp = g_new_consumer(gp); 3114 g_attach(cp, pp); 3115 error = g_mirror_read_metadata(cp, &md); 3116 g_detach(cp); 3117 g_destroy_consumer(cp); 3118 g_destroy_geom(gp); 3119 if (error != 0) 3120 return (NULL); 3121 gp = NULL; 3122 3123 if (md.md_provider[0] != '\0' && 3124 !g_compare_names(md.md_provider, pp->name)) 3125 return (NULL); 3126 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3127 return (NULL); 3128 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3129 G_MIRROR_DEBUG(0, 3130 "Device %s: provider %s marked as inactive, skipping.", 3131 md.md_name, pp->name); 3132 return (NULL); 3133 } 3134 if (g_mirror_debug >= 2) 3135 mirror_metadata_dump(&md); 3136 3137 /* 3138 * Let's check if device already exists. 3139 */ 3140 sc = NULL; 3141 LIST_FOREACH(gp, &mp->geom, geom) { 3142 sc = gp->softc; 3143 if (sc == NULL) 3144 continue; 3145 if (sc->sc_sync.ds_geom == gp) 3146 continue; 3147 if (strcmp(md.md_name, sc->sc_name) != 0) 3148 continue; 3149 if (md.md_mid != sc->sc_id) { 3150 G_MIRROR_DEBUG(0, "Device %s already configured.", 3151 sc->sc_name); 3152 return (NULL); 3153 } 3154 break; 3155 } 3156 if (gp == NULL) { 3157 gp = g_mirror_create(mp, &md); 3158 if (gp == NULL) { 3159 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3160 md.md_name); 3161 return (NULL); 3162 } 3163 sc = gp->softc; 3164 } 3165 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3166 g_topology_unlock(); 3167 sx_xlock(&sc->sc_lock); 3168 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3169 error = g_mirror_add_disk(sc, pp, &md); 3170 if (error != 0) { 3171 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3172 pp->name, gp->name, error); 3173 if (LIST_EMPTY(&sc->sc_disks)) { 3174 g_cancel_event(sc); 3175 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3176 g_topology_lock(); 3177 return (NULL); 3178 } 3179 gp = NULL; 3180 } 3181 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3182 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3183 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3184 g_topology_lock(); 3185 return (NULL); 3186 } 3187 sx_xunlock(&sc->sc_lock); 3188 g_topology_lock(); 3189 return (gp); 3190 } 3191 3192 static void 3193 g_mirror_resize(struct g_consumer *cp) 3194 { 3195 struct g_mirror_disk *disk; 3196 3197 g_topology_assert(); 3198 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3199 3200 disk = cp->private; 3201 if (disk == NULL) 3202 return; 3203 g_topology_unlock(); 3204 g_mirror_update_metadata(disk); 3205 g_topology_lock(); 3206 } 3207 3208 static int 3209 g_mirror_destroy_geom(struct gctl_req *req __unused, 3210 struct g_class *mp __unused, struct g_geom *gp) 3211 { 3212 struct g_mirror_softc *sc; 3213 int error; 3214 3215 g_topology_unlock(); 3216 sc = gp->softc; 3217 sx_xlock(&sc->sc_lock); 3218 g_cancel_event(sc); 3219 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3220 if (error != 0) 3221 sx_xunlock(&sc->sc_lock); 3222 g_topology_lock(); 3223 return (error); 3224 } 3225 3226 static void 3227 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3228 struct g_consumer *cp, struct g_provider *pp) 3229 { 3230 struct g_mirror_softc *sc; 3231 3232 g_topology_assert(); 3233 3234 sc = gp->softc; 3235 if (sc == NULL) 3236 return; 3237 /* Skip synchronization geom. */ 3238 if (gp == sc->sc_sync.ds_geom) 3239 return; 3240 if (pp != NULL) { 3241 /* Nothing here. */ 3242 } else if (cp != NULL) { 3243 struct g_mirror_disk *disk; 3244 3245 disk = cp->private; 3246 if (disk == NULL) 3247 return; 3248 g_topology_unlock(); 3249 sx_xlock(&sc->sc_lock); 3250 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3251 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3252 sbuf_printf(sb, "%s<Synchronized>", indent); 3253 if (disk->d_sync.ds_offset == 0) 3254 sbuf_printf(sb, "0%%"); 3255 else { 3256 sbuf_printf(sb, "%u%%", 3257 (u_int)((disk->d_sync.ds_offset * 100) / 3258 sc->sc_provider->mediasize)); 3259 } 3260 sbuf_printf(sb, "</Synchronized>\n"); 3261 if (disk->d_sync.ds_offset > 0) { 3262 sbuf_printf(sb, "%s<BytesSynced>%jd" 3263 "</BytesSynced>\n", indent, 3264 (intmax_t)disk->d_sync.ds_offset); 3265 } 3266 } 3267 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3268 disk->d_sync.ds_syncid); 3269 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3270 disk->d_genid); 3271 sbuf_printf(sb, "%s<Flags>", indent); 3272 if (disk->d_flags == 0) 3273 sbuf_printf(sb, "NONE"); 3274 else { 3275 int first = 1; 3276 3277 #define ADD_FLAG(flag, name) do { \ 3278 if ((disk->d_flags & (flag)) != 0) { \ 3279 if (!first) \ 3280 sbuf_printf(sb, ", "); \ 3281 else \ 3282 first = 0; \ 3283 sbuf_printf(sb, name); \ 3284 } \ 3285 } while (0) 3286 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3287 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3288 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3289 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3290 "SYNCHRONIZING"); 3291 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3292 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3293 #undef ADD_FLAG 3294 } 3295 sbuf_printf(sb, "</Flags>\n"); 3296 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3297 disk->d_priority); 3298 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3299 g_mirror_disk_state2str(disk->d_state)); 3300 sx_xunlock(&sc->sc_lock); 3301 g_topology_lock(); 3302 } else { 3303 g_topology_unlock(); 3304 sx_xlock(&sc->sc_lock); 3305 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3306 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3307 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3308 sbuf_printf(sb, "%s<Flags>", indent); 3309 if (sc->sc_flags == 0) 3310 sbuf_printf(sb, "NONE"); 3311 else { 3312 int first = 1; 3313 3314 #define ADD_FLAG(flag, name) do { \ 3315 if ((sc->sc_flags & (flag)) != 0) { \ 3316 if (!first) \ 3317 sbuf_printf(sb, ", "); \ 3318 else \ 3319 first = 0; \ 3320 sbuf_printf(sb, name); \ 3321 } \ 3322 } while (0) 3323 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3324 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3325 #undef ADD_FLAG 3326 } 3327 sbuf_printf(sb, "</Flags>\n"); 3328 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3329 (u_int)sc->sc_slice); 3330 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3331 balance_name(sc->sc_balance)); 3332 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3333 sc->sc_ndisks); 3334 sbuf_printf(sb, "%s<State>", indent); 3335 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3336 sbuf_printf(sb, "%s", "STARTING"); 3337 else if (sc->sc_ndisks == 3338 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3339 sbuf_printf(sb, "%s", "COMPLETE"); 3340 else 3341 sbuf_printf(sb, "%s", "DEGRADED"); 3342 sbuf_printf(sb, "</State>\n"); 3343 sx_xunlock(&sc->sc_lock); 3344 g_topology_lock(); 3345 } 3346 } 3347 3348 static void 3349 g_mirror_shutdown_post_sync(void *arg, int howto) 3350 { 3351 struct g_class *mp; 3352 struct g_geom *gp, *gp2; 3353 struct g_mirror_softc *sc; 3354 int error; 3355 3356 mp = arg; 3357 g_topology_lock(); 3358 g_mirror_shutdown = 1; 3359 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3360 if ((sc = gp->softc) == NULL) 3361 continue; 3362 /* Skip synchronization geom. */ 3363 if (gp == sc->sc_sync.ds_geom) 3364 continue; 3365 g_topology_unlock(); 3366 sx_xlock(&sc->sc_lock); 3367 g_mirror_idle(sc, -1); 3368 g_cancel_event(sc); 3369 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3370 if (error != 0) 3371 sx_xunlock(&sc->sc_lock); 3372 g_topology_lock(); 3373 } 3374 g_topology_unlock(); 3375 } 3376 3377 static void 3378 g_mirror_init(struct g_class *mp) 3379 { 3380 3381 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3382 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3383 if (g_mirror_post_sync == NULL) 3384 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3385 } 3386 3387 static void 3388 g_mirror_fini(struct g_class *mp) 3389 { 3390 3391 if (g_mirror_post_sync != NULL) 3392 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3393 } 3394 3395 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3396