/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static g_ctl_destroy_geom_t g_mirror_destroy_geom;
static g_taste_t g_mirror_taste;
static g_init_t g_mirror_init;
static g_fini_t g_mirror_fini;
static g_provgone_t g_mirror_providergone;
static g_resize_t g_mirror_resize;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.providergone = g_mirror_providergone,
	.resize = g_mirror_resize
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, bool force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);


static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Events handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from a single thread, which simplifies locking.
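 *
 * g_mirror_event_send() allocates an event, queues it on sc_events and wakes
 * the worker thread; unless G_MIRROR_EVENT_DONTWAIT is set it then sleeps
 * until the worker marks the event done and returns the worker's error code.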
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can post an event to detach and destroy the consumer.
		 * A class which still has a consumer attached to the given
		 * provider will not receive a retaste event for that provider.
		 * This is how retaste events are ignored when consumers opened
		 * for write are closed: the consumer is detached and destroyed
		 * only after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_free_device(struct g_mirror_softc *sc)
{

	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_destroy(&sc->sc_lock);
	free(sc, M_MIRROR);
}

static void
g_mirror_providergone(struct g_provider *pp)
{
	struct g_mirror_softc *sc = pp->private;

	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	sx_xunlock(&sc->sc_lock);
	if ((--sc->sc_refcnt) == 0)
		g_mirror_free_device(sc);
	g_topology_unlock();
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * This function returns the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
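 * The list is treated as circular: the search wraps around to LIST_FIRST()
 * when it falls off the end.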
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * was reduced.
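		 * Metadata lives in the provider's last sector; if the
		 * provider shrank so that this sector now falls inside the
		 * mirrored data area, return ENOSPC instead of writing it.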
		 */
		if (offset < md->md_mediasize)
			error = ENOSPC;
		else
			mirror_metadata_encode(md, sector);
	}
	KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return (0);
	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
		return;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_flush_done(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->private;
	mtx_lock(&sc->sc_done_mtx);
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	pbp->bio_completed += bp->bio_completed;
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		mtx_unlock(&sc->sc_done_mtx);
		g_io_deliver(pbp, pbp->bio_error);
	} else
		mtx_unlock(&sc->sc_done_mtx);
	g_destroy_bio(bp);
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->private;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	if (bp->bio_cmd == BIO_READ)
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
		    bp->bio_error);
	else if (bp->bio_cmd == BIO_WRITE)
		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
		    bp->bio_error);

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE ||
			    pbp->bio_cmd == BIO_DELETE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_insert_tail(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_candelete(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	int *val;

	sc = bp->bio_to->private;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
			break;
	}
	val = (int *)bp->bio_data;
	*val = (disk != NULL);
	g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * is the one used for reads with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected
	 * we will not be able to read the dump after the reboot if it is
	 * reconnected and synchronized later.  Can we do something better?
	 */
	sc = bp->bio_to->private;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_flush_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
			g_mirror_candelete(bp);
			return;
		} else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	u_int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests that no longer collide with
 * synchronization requests.
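 * Released requests are re-queued at the head of sc_queue so the worker
 * thread picks them up before newly arrived I/O.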
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed synchronization requests that no longer collide with
 * regular requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Free a synchronization request and clear its slot in the array.
 */
static void
g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp)
{
	int idx;

	if (disk != NULL && disk->d_sync.ds_bios != NULL) {
		idx = (int)(uintptr_t)bp->bio_caller1;
		KASSERT(disk->d_sync.ds_bios[idx] == bp,
		    ("unexpected sync BIO at %p:%d", disk, idx));
		disk->d_sync.ds_bios[idx] = NULL;
	}
	free(bp->bio_data, M_MIRROR);
	g_destroy_bio(bp);
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider, and then a WRITE request (with the data
 * just read) is sent to the provider being synchronized.  When the WRITE is
 * finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_disk_sync *sync;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		g_mirror_sync_request_free(NULL, bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
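	 * A completed BIO_READ carries data read through the sync consumer
	 * and is converted in place into a BIO_WRITE aimed at the disk being
	 * synchronized; a completed BIO_WRITE either retires its slot or is
	 * recycled into the next BIO_READ starting at ds_offset.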
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_mirror_sync_request_free(disk, bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		off_t offset;
		void *data;
		int i, idx;

		KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
		    bp->bio_error);

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_mirror_sync_request_free(disk, bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			g_mirror_sync_request_free(disk, bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		idx = (int)(uintptr_t)bp->bio_caller1;
		g_reset_bio(bp);
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)idx;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp != NULL && bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->private;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
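		 * Delayed writes are re-issued by g_mirror_regular_release()
		 * once the colliding synchronization requests complete.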
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			if (bp->bio_cmd == BIO_DELETE &&
			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = bioq_takefirst(&queue)) != NULL)
					g_destroy_bio(cbp);
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		if (bioq_first(&queue) == NULL) {
			g_io_deliver(bp, EOPNOTSUPP);
			return;
		}
		while ((cbp = bioq_takefirst(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
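		 * The parent BIO is removed from sc_inflight in
		 * g_mirror_regular_request() once all of its children have
		 * completed.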
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * It is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, true);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, false);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check whether we can mark the array as CLEAN and, if we
		 * can't, how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_takefirst(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
				if (bioq_first(&sc->sc_queue) != NULL) {
					mtx_unlock(&sc->sc_queue_mtx);
					continue;
				}
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx-device-lock and without
			 * the mtx-queue-lock.  Maybe I should just stop using
			 * a dedicated mutex for events synchronization and
			 * stick with the queue lock?
			 * The event will hang here until the next I/O request
			 * or the next event is received.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
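	 * Each of the g_mirror_syncreqs BIOs reads up to MAXPHYS bytes
	 * starting at ds_offset; the slot index is stored in bio_caller1 so
	 * a completed request can be matched back to ds_bios[].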

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off the first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it collides with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}
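
/*
 * A rough sketch of the pipeline set up above, assuming the default
 * kern.geom.mirror.sync_requests of 2:
 *
 *	ds_bios[0]: BIO_READ, offset 0,       length MIN(MAXPHYS, left)
 *	ds_bios[1]: BIO_READ, offset MAXPHYS, length MIN(MAXPHYS, left)
 *
 * Each read pulls valid data from the mirror provider through the
 * synchronization consumer; g_mirror_sync_done() hands the completed
 * bio back to the worker thread, which writes the data out to the
 * disk being rebuilt and reuses the slot for the next chunk, until
 * ds_offset reaches sc_mediasize.
 */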

/*
 * Stop the synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	g_mirror_regular_release(sc);
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock);	/* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

static void
g_mirror_launch_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp, *dp;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
	pp->flags |= G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;

	/* Splitting of unmapped BIOs could work but is not implemented yet. */
	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_consumer && disk->d_consumer->provider) {
			dp = disk->d_consumer->provider;
			if (dp->stripesize > pp->stripesize) {
				pp->stripesize = dp->stripesize;
				pp->stripeoffset = dp->stripeoffset;
			}
			/* A provider underneath us doesn't support unmapped. */
			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
				G_MIRROR_DEBUG(0, "Cancelling unmapped "
				    "because of %s.", dp->name);
				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
			}
		}
	}
	pp->private = sc;
	sc->sc_refcnt++;
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_start(disk);
	}
}
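
/*
 * The provider created above is what userland sees: a mirror named
 * "data" shows up as /dev/mirror/data.  Illustrative usage, see
 * gmirror(8):
 *
 *	gmirror label -v data da0 da1
 *	newfs /dev/mirror/data
 *
 * Stripe size and offset are inherited from the largest stripesize
 * among the components, and unmapped I/O is only advertised when
 * every component below us accepts it.
 */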

static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		/*
		 * Abort any pending I/O that wasn't generated by us.
		 * Synchronization requests and requests destined for
		 * individual mirror components can be destroyed immediately.
		 */
		if (bp->bio_to == sc->sc_provider &&
		    bp->bio_from->geom != sc->sc_sync.ds_geom) {
			g_io_deliver(bp, ENXIO);
		} else {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
		}
	}
	mtx_unlock(&sc->sc_queue_mtx);
	g_wither_provider(sc->sc_provider, ENXIO);
	sc->sc_provider = NULL;
	G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name);
	g_topology_unlock();
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_stop(disk, 1);
	}
}

static void
g_mirror_go(void *arg)
{
	struct g_mirror_softc *sc;

	sc = arg;
	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
	g_mirror_event_send(sc, 0,
	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
}

static u_int
g_mirror_determine_state(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	u_int state;

	sc = disk->d_softc;
	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
		if ((disk->d_flags &
		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
			/* Disk does not need synchronization. */
			state = G_MIRROR_DISK_STATE_ACTIVE;
		} else {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
			    (disk->d_flags &
			    G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
				/*
				 * We can start synchronization from
				 * the stored offset.
				 */
				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
			} else {
				state = G_MIRROR_DISK_STATE_STALE;
			}
		}
	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
		/*
		 * Reset all synchronization data for this disk, because
		 * even if it was synchronized, it was synchronized against
		 * disks with a different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		} else {
			state = G_MIRROR_DISK_STATE_STALE;
		}
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means the mirror was started on stale disks and a
		 * fresher disk has just arrived.
		 * If there were writes, the mirror is broken, sorry.
		 * The best choice here is to leave this disk untouched
		 * and inform the user loudly.
		 */
		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
		    "disk (%s) arrived! It will not be connected to the "
		    "running device.", sc->sc_name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
		/* Return immediately, because the disk was destroyed. */
		return (state);
	}
	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}
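
/*
 * An informal summary of the decision made above:
 *
 *	syncid equal,   not SYNCHRONIZING	-> ACTIVE
 *	syncid equal,   SYNCHRONIZING		-> SYNCHRONIZING, or STALE
 *						   when NOAUTOSYNC is set and
 *						   FORCE_SYNC is not
 *	syncid smaller (disk is older)		-> SYNCHRONIZING from offset 0,
 *						   or STALE as above
 *	syncid bigger  (disk is fresher)	-> disk is dropped; connecting
 *						   it would hide the fact that
 *						   the mirror ran without it
 */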

/*
 * Update device state.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, bool force)
{
	struct g_mirror_disk *disk;
	u_int state;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	switch (sc->sc_state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
	    {
		struct g_mirror_disk *pdisk, *tdisk;
		u_int dirty, ndisks, genid, syncid;
		bool broken;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
		/*
		 * Are we ready? We are, if all disks are connected or
		 * if we have any disks and 'force' is true.
		 */
		ndisks = g_mirror_ndisks(sc, -1);
		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
			;
		} else if (ndisks == 0) {
			/*
			 * All disks went away during the starting phase,
			 * so destroy the device.
			 */
			callout_drain(&sc->sc_callout);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
			    sc->sc_rootmount);
			root_mount_rel(sc->sc_rootmount);
			sc->sc_rootmount = NULL;
			return;
		} else {
			return;
		}

		/*
		 * Activate all disks with the biggest syncid.
		 */
		if (force) {
			/*
			 * If 'force' is true, we have been called due to
			 * a timeout, so don't bother canceling the timeout.
			 */
			ndisks = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
					ndisks++;
				}
			}
			if (ndisks == 0) {
				/* No valid disks found, destroy the device. */
				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
				return;
			}
		} else {
			/* Cancel the timeout. */
			callout_drain(&sc->sc_callout);
		}

		/*
		 * Find the biggest genid.
		 */
		genid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_genid > genid)
				genid = disk->d_genid;
		}
		sc->sc_genid = genid;
		/*
		 * Remove all disks without the biggest genid.
		 */
		broken = false;
		LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) {
			if (disk->d_genid < genid) {
				G_MIRROR_DEBUG(0,
				    "Component %s (device %s) broken, skipping.",
				    g_mirror_get_diskname(disk), sc->sc_name);
				g_mirror_destroy_disk(disk);
				/*
				 * Bump the syncid in case we discover a healthy
				 * replacement disk after starting the mirror.
				 */
				broken = true;
			}
		}

		/*
		 * Find the biggest syncid.
		 */
		syncid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid > syncid)
				syncid = disk->d_sync.ds_syncid;
		}

		/*
		 * Here we need to look for dirty disks and if all disks
		 * with the biggest syncid are dirty, we have to choose
		 * one with the biggest priority and rebuild the rest.
		 */
		/*
		 * Find the number of dirty disks with the biggest syncid.
		 * Find the number of disks with the biggest syncid.
		 * While here, find a disk with the biggest priority.
		 */
		dirty = ndisks = 0;
		pdisk = NULL;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid != syncid)
				continue;
			if ((disk->d_flags &
			    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
				continue;
			}
			ndisks++;
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
				dirty++;
				if (pdisk == NULL ||
				    pdisk->d_priority < disk->d_priority) {
					pdisk = disk;
				}
			}
		}
		if (dirty == 0) {
			/* No dirty disks at all, great. */
		} else if (dirty == ndisks) {
			/*
			 * Force synchronization for all dirty disks except one
			 * with the biggest priority.
			 */
			KASSERT(pdisk != NULL, ("pdisk == NULL"));
			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
			    "master disk for synchronization.",
			    g_mirror_get_diskname(pdisk), sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				KASSERT((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
				    ("Disk %s isn't marked as dirty.",
				    g_mirror_get_diskname(disk)));
				/* Skip the disk with the biggest priority. */
				if (disk == pdisk)
					continue;
				disk->d_sync.ds_syncid = 0;
			}
		} else if (dirty < ndisks) {
			/*
			 * Force synchronization for all dirty disks.
			 * We have some non-dirty disks.
			 */
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_sync.ds_syncid = 0;
			}
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force || broken) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
			if (state == G_MIRROR_DISK_STATE_STALE)
				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * No usable disks, so destroy the device.
			 */
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * We have active disks, launch provider if it doesn't
			 * exist.
			 */
			if (sc->sc_provider == NULL)
				g_mirror_launch_provider(sc);
			if (sc->sc_rootmount != NULL) {
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
		}
		/*
		 * Genid should be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
			g_mirror_bump_genid(sc);
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
		break;
	}
}
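
/*
 * A worked example of the STARTING-state election above: take three
 * components with (genid, syncid, dirty) = (5, 8, no), (5, 8, yes) and
 * (4, 7, no).  The third one has a smaller genid and is dropped as
 * broken.  The two survivors share the biggest syncid (8); only one of
 * them is dirty, so just that one gets ds_syncid reset to 0 and is
 * resynchronized from its clean sibling once the device is RUNNING.
 */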

/*
 * Update disk state and device state if needed.
 */
#define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,		\
	"Disk %s state changed from %s to %s (device %s).",	\
	g_mirror_get_diskname(disk),				\
	g_mirror_disk_state2str(disk->d_state),			\
	g_mirror_disk_state2str(state), sc->sc_name)
static int
g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
{
	struct g_mirror_softc *sc;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

again:
	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
	    g_mirror_disk_state2str(state));
	switch (state) {
	case G_MIRROR_DISK_STATE_NEW:
		/*
		 * Possible scenarios:
		 * 1. A new disk arrives.
		 */
		/* Previous state should be NONE. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_state = state;
		if (LIST_EMPTY(&sc->sc_disks))
			LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next);
		else {
			struct g_mirror_disk *dp;

			LIST_FOREACH(dp, &sc->sc_disks, d_next) {
				if (disk->d_priority >= dp->d_priority) {
					LIST_INSERT_BEFORE(dp, disk, d_next);
					dp = NULL;
					break;
				}
				if (LIST_NEXT(dp, d_next) == NULL)
					break;
			}
			if (dp != NULL)
				LIST_INSERT_AFTER(dp, disk, d_next);
		}
		G_MIRROR_DEBUG(1, "Device %s: provider %s detected.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			break;
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		state = g_mirror_determine_state(disk);
		if (state != G_MIRROR_DISK_STATE_NONE)
			goto again;
		break;
	case G_MIRROR_DISK_STATE_ACTIVE:
		/*
		 * Possible scenarios:
		 * 1. A new disk does not need synchronization.
		 * 2. The synchronization process finished successfully.
		 */
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/* Previous state should be NEW or SYNCHRONIZING. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING;
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC;
			g_mirror_sync_stop(disk, 0);
		}
		disk->d_state = state;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		g_mirror_update_idle(sc, disk);
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(1, "Device %s: provider %s activated.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. A stale disk was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * The STALE state is only possible if the device is marked
		 * NOAUTOSYNC.
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. A disk which needs synchronization was connected.
		 */
		/* Previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. The device wasn't running yet, but a disk disappeared.
		 * 2. A disk was active and disappeared.
		 * 3. A disk disappeared during the synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * Previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* Previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Reset bumping the syncid if the disk disappeared
			 * in STARTING state.
			 */
			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
#ifdef	INVARIANTS
		} else {
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		error = g_mirror_clear_metadata(disk);
		if (error != 0) {
			G_MIRROR_DEBUG(0,
			    "Device %s: failed to clear metadata on %s: %d.",
			    sc->sc_name, g_mirror_get_diskname(disk), error);
			break;
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED

int
g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

	/* Decode metadata. */
	error = mirror_metadata_decode(buf, md);
	g_free(buf);
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	if (md->md_version > G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	if (error != 0) {
		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}
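
/*
 * Layout note for the read above: the metadata occupies exactly one
 * sector at the very end of the component, at byte offset
 * mediasize - sectorsize (e.g. offset 511999488 for a 512000000-byte
 * provider with 512-byte sectors).  This is also why the usable size
 * of a mirror is one sector smaller than its smallest component.
 */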

static int
g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{

	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
		    pp->name, md->md_did);
		return (EEXIST);
	}
	if (md->md_all != sc->sc_ndisks) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_all", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_slice != sc->sc_slice) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_slice", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_balance != sc->sc_balance) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_balance", pp->name, sc->sc_name);
		return (EINVAL);
	}
#if 0
	if (md->md_mediasize != sc->sc_mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
#endif
	if (sc->sc_mediasize > pp->mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid size of disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if (md->md_sectorsize != sc->sc_sectorsize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid sector size of disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid device flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid disk flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	return (0);
}

int
g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{
	struct g_mirror_disk *disk;
	int error;

	g_topology_assert_not();
	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);

	error = g_mirror_check_metadata(sc, pp, md);
	if (error != 0)
		return (error);
	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
	    md->md_genid < sc->sc_genid) {
		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	disk = g_mirror_init_disk(sc, pp, md, &error);
	if (disk == NULL)
		return (error);
	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
	    G_MIRROR_EVENT_WAIT);
	if (error != 0)
		return (error);
	if (md->md_version < G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).",
		    pp->name, md->md_version, G_MIRROR_VERSION);
		g_mirror_update_metadata(disk);
	}
	return (0);
}
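
/*
 * The flow above is the single entry point for attaching a component:
 * validate the metadata against the device (g_mirror_check_metadata()),
 * reject components with an older generation, create the disk and hand
 * it to the worker as a NEW-state event; the worker then decides
 * whether it becomes ACTIVE, SYNCHRONIZING or STALE.
 */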
"Upgrading metadata on %s (v%d->v%d).", 2887 pp->name, md->md_version, G_MIRROR_VERSION); 2888 g_mirror_update_metadata(disk); 2889 } 2890 return (0); 2891 } 2892 2893 static void 2894 g_mirror_destroy_delayed(void *arg, int flag) 2895 { 2896 struct g_mirror_softc *sc; 2897 int error; 2898 2899 if (flag == EV_CANCEL) { 2900 G_MIRROR_DEBUG(1, "Destroying canceled."); 2901 return; 2902 } 2903 sc = arg; 2904 g_topology_unlock(); 2905 sx_xlock(&sc->sc_lock); 2906 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2907 ("DESTROY flag set on %s.", sc->sc_name)); 2908 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2909 ("DESTROYING flag not set on %s.", sc->sc_name)); 2910 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2911 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2912 if (error != 0) { 2913 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2914 sc->sc_name, error); 2915 sx_xunlock(&sc->sc_lock); 2916 } 2917 g_topology_lock(); 2918 } 2919 2920 static int 2921 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2922 { 2923 struct g_mirror_softc *sc; 2924 int error = 0; 2925 2926 g_topology_assert(); 2927 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2928 acw, ace); 2929 2930 sc = pp->private; 2931 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2932 2933 g_topology_unlock(); 2934 sx_xlock(&sc->sc_lock); 2935 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2936 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 || 2937 LIST_EMPTY(&sc->sc_disks)) { 2938 if (acr > 0 || acw > 0 || ace > 0) 2939 error = ENXIO; 2940 goto end; 2941 } 2942 sc->sc_provider_open += acr + acw + ace; 2943 if (pp->acw + acw == 0) 2944 g_mirror_idle(sc, 0); 2945 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0 && 2946 sc->sc_provider_open == 0) 2947 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL); 2948 end: 2949 sx_xunlock(&sc->sc_lock); 2950 g_topology_lock(); 2951 return (error); 2952 } 2953 2954 struct g_geom * 2955 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md, 2956 u_int type) 2957 { 2958 struct g_mirror_softc *sc; 2959 struct g_geom *gp; 2960 int error, timeout; 2961 2962 g_topology_assert(); 2963 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2964 md->md_mid); 2965 2966 /* One disk is minimum. */ 2967 if (md->md_all < 1) 2968 return (NULL); 2969 /* 2970 * Action geom. 

struct g_geom *
g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md,
    u_int type)
{
	struct g_mirror_softc *sc;
	struct g_geom *gp;
	int error, timeout;

	g_topology_assert();
	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
	    md->md_mid);

	/* At least one disk is required. */
	if (md->md_all < 1)
		return (NULL);
	/*
	 * Action geom.
	 */
	gp = g_new_geomf(mp, "%s", md->md_name);
	sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO);
	gp->start = g_mirror_start;
	gp->orphan = g_mirror_orphan;
	gp->access = g_mirror_access;
	gp->dumpconf = g_mirror_dumpconf;

	sc->sc_type = type;
	sc->sc_id = md->md_mid;
	sc->sc_slice = md->md_slice;
	sc->sc_balance = md->md_balance;
	sc->sc_mediasize = md->md_mediasize;
	sc->sc_sectorsize = md->md_sectorsize;
	sc->sc_ndisks = md->md_all;
	sc->sc_flags = md->md_mflags;
	sc->sc_bump_id = 0;
	sc->sc_idle = 1;
	sc->sc_last_write = time_uptime;
	sc->sc_writes = 0;
	sc->sc_refcnt = 1;
	sx_init(&sc->sc_lock, "gmirror:lock");
	bioq_init(&sc->sc_queue);
	mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF);
	bioq_init(&sc->sc_regular_delayed);
	bioq_init(&sc->sc_inflight);
	bioq_init(&sc->sc_sync_delayed);
	LIST_INIT(&sc->sc_disks);
	TAILQ_INIT(&sc->sc_events);
	mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF);
	callout_init(&sc->sc_callout, 1);
	mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF);
	sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING;
	gp->softc = sc;
	sc->sc_geom = gp;
	sc->sc_provider = NULL;
	sc->sc_provider_open = 0;
	/*
	 * Synchronization geom.
	 */
	gp = g_new_geomf(mp, "%s.sync", md->md_name);
	gp->softc = sc;
	gp->orphan = g_mirror_orphan;
	sc->sc_sync.ds_geom = gp;
	sc->sc_sync.ds_ndisks = 0;
	error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0,
	    "g_mirror %s", md->md_name);
	if (error != 0) {
		G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.",
		    sc->sc_name);
		g_destroy_geom(sc->sc_sync.ds_geom);
		g_destroy_geom(sc->sc_geom);
		g_mirror_free_device(sc);
		return (NULL);
	}

	G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).",
	    sc->sc_name, sc->sc_ndisks, sc->sc_id);

	sc->sc_rootmount = root_mount_hold("GMIRROR");
	G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount);
	/*
	 * Arm the startup timeout.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}
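
/*
 * Creating a device therefore produces two geoms and one kernel
 * process, e.g. for a mirror named "data" (names as constructed
 * above): geom "data" (the action geom carrying the provider), geom
 * "data.sync" (used only for synchronization consumers), and the
 * worker thread "g_mirror data".  If not all components show up,
 * g_mirror_go() force-starts the device after
 * kern.geom.mirror.timeout seconds (4 by default).
 */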

int
g_mirror_destroy(struct g_mirror_softc *sc, int how)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider_open != 0) {
		switch (how) {
		case G_MIRROR_DESTROY_SOFT:
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (%d).", sc->sc_name,
			    sc->sc_provider_open);
			return (EBUSY);
		case G_MIRROR_DESTROY_DELAYED:
			G_MIRROR_DEBUG(1,
			    "Device %s will be destroyed on last close.",
			    sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_state ==
				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
					g_mirror_sync_stop(disk, 1);
				}
			}
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
			return (EBUSY);
		case G_MIRROR_DESTROY_HARD:
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "cannot be removed definitively.", sc->sc_name);
		}
	}

	g_topology_lock();
	if (sc->sc_geom->softc == NULL) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	g_topology_unlock();

	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	sx_xunlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	sx_xlock(&sc->sc_lock);
	g_mirror_destroy_device(sc);
	return (0);
}

static void
g_mirror_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
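
/*
 * Tasting: GEOM calls this for every provider that appears.  We read
 * the provider's last sector, and if it decodes as gmirror metadata
 * with a matching name and ID we either join an existing device or
 * create a new one.  The throwaway "mirror:taste" geom below exists
 * only so that we have a consumer to read the metadata through.
 */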

static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should never be called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Check whether the device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC)
			continue;
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		if (LIST_EMPTY(&sc->sc_disks)) {
			g_cancel_event(sc);
			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
		g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
		g_topology_lock();
		return (NULL);
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}
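
/*
 * If an underlying provider changes size, its last sector moves, so
 * the on-disk metadata (which lives in the last sector) is rewritten
 * for the affected component via g_mirror_update_metadata() below.
 */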

static void
g_mirror_resize(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);

	disk = cp->private;
	if (disk == NULL)
		return;
	g_topology_unlock();
	g_mirror_update_metadata(disk);
	g_topology_lock();
}

static int
g_mirror_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}

static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
			if (disk->d_sync.ds_offset > 0) {
				sbuf_printf(sb, "%s<BytesSynced>%jd"
				    "</BytesSynced>\n", indent,
				    (intmax_t)disk->d_sync.ds_offset);
			}
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
		    disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {			\
	if ((disk->d_flags & (flag)) != 0) {		\
		if (!first)				\
			sbuf_printf(sb, ", ");		\
		else					\
			first = 0;			\
		sbuf_printf(sb, name);			\
	}						\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<Type>", indent);
		switch (sc->sc_type) {
		case G_MIRROR_TYPE_AUTOMATIC:
			sbuf_printf(sb, "AUTOMATIC");
			break;
		case G_MIRROR_TYPE_MANUAL:
			sbuf_printf(sb, "MANUAL");
			break;
		default:
			sbuf_printf(sb, "UNKNOWN");
			break;
		}
		sbuf_printf(sb, "</Type>\n");
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {			\
	if ((sc->sc_flags & (flag)) != 0) {		\
		if (!first)				\
			sbuf_printf(sb, ", ");		\
		else					\
			first = 0;			\
		sbuf_printf(sb, name);			\
	}						\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}
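
/*
 * The XML fragments emitted above end up in the kern.geom.confxml
 * sysctl, which gmirror(8) parses for "gmirror list".  A sketch of the
 * per-device fragment (values illustrative):
 *
 *	<Type>AUTOMATIC</Type>
 *	<ID>123456</ID>
 *	<SyncID>1</SyncID>
 *	<GenID>0</GenID>
 *	<Flags>NONE</Flags>
 *	<Slice>4096</Slice>
 *	<Balance>load</Balance>
 *	<Components>2</Components>
 *	<State>COMPLETE</State>
 */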

static void
g_mirror_shutdown_post_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_mirror_softc *sc;
	int error;

	if (panicstr != NULL)
		return;

	mp = arg;
	g_topology_lock();
	g_mirror_shutdown = 1;
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		/* Skip synchronization geom. */
		if (gp == sc->sc_sync.ds_geom)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		g_mirror_idle(sc, -1);
		g_cancel_event(sc);
		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
}

static void
g_mirror_init(struct g_class *mp)
{

	g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_mirror_post_sync == NULL)
		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_mirror_fini(struct g_class *mp)
{

	if (g_mirror_post_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
}

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);