/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));	\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_pre_sync = NULL;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Events handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from one thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * opened for writing: the consumer is detached and destroyed
		 * after the retaste event is sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize a disk: allocate memory, create a consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

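/*
 * Tear down the whole mirror device: destroy the provider (if any), mark
 * every remaining disk clean, update its metadata and destroy it, cancel
 * or complete all queued events, disconnect the synchronization consumers
 * and finally wither both geoms and destroy the softc locks.
 */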
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

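/*
 * Mark the mirror components as clean once the device has been idle
 * (no writes) for kern.geom.mirror.idletime seconds.  If it is too early
 * to do so, return the number of seconds to wait before trying again;
 * return 0 once the components have been marked clean or when marking
 * is not needed or not allowed.
 */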
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1)
			{
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently disconnected,
	 * we will not be able to read the dump after a reboot even if that
	 * component is connected and synchronized later.  Can we do something
	 * better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

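/*
 * Regular and synchronization requests that overlap in offset must not run
 * concurrently: an overlapping regular write is parked on sc_regular_delayed
 * and an overlapping synchronization request on sc_sync_delayed.  The
 * corresponding *_release() functions below requeue the parked requests once
 * the conflicting requests have completed.
 */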
/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider and then a WRITE request (with the data just
 * read) to the provider being synchronized.
 * When the WRITE is finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

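/*
 * The worker drains the event queue before looking at regular and
 * synchronization I/O, so disk and device state transitions are always
 * applied before the requests that depend on them.  When the queue is
 * empty it sleeps, to be woken by a new event, new I/O, or the idle
 * timeout.
 */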
/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we cannot,
		 * how many seconds we should wait before trying again.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx-device-lock and without
			 * the mtx-queue-lock.  Maybe we should just stop using
			 * a dedicated mutex for event synchronization and
			 * stick with the queue lock?
			 * Such an event will then wait here until the next
			 * I/O request or the next event wakes us up.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
*/ 2007 g_topology_lock(); 2008 g_mirror_kill_consumer(sc, cp); 2009 g_topology_unlock(); 2010 sx_xlock(&sc->sc_lock); 2011 } 2012 2013 static void 2014 g_mirror_launch_provider(struct g_mirror_softc *sc) 2015 { 2016 struct g_mirror_disk *disk; 2017 struct g_provider *pp; 2018 2019 sx_assert(&sc->sc_lock, SX_LOCKED); 2020 2021 g_topology_lock(); 2022 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2023 pp->mediasize = sc->sc_mediasize; 2024 pp->sectorsize = sc->sc_sectorsize; 2025 pp->stripesize = 0; 2026 pp->stripeoffset = 0; 2027 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2028 if (disk->d_consumer && disk->d_consumer->provider && 2029 disk->d_consumer->provider->stripesize > pp->stripesize) { 2030 pp->stripesize = disk->d_consumer->provider->stripesize; 2031 pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2032 } 2033 } 2034 sc->sc_provider = pp; 2035 g_error_provider(pp, 0); 2036 g_topology_unlock(); 2037 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2038 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2039 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2040 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2041 g_mirror_sync_start(disk); 2042 } 2043 } 2044 2045 static void 2046 g_mirror_destroy_provider(struct g_mirror_softc *sc) 2047 { 2048 struct g_mirror_disk *disk; 2049 struct bio *bp; 2050 2051 g_topology_assert_not(); 2052 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2053 sc->sc_name)); 2054 2055 g_topology_lock(); 2056 g_error_provider(sc->sc_provider, ENXIO); 2057 mtx_lock(&sc->sc_queue_mtx); 2058 while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 2059 bioq_remove(&sc->sc_queue, bp); 2060 g_io_deliver(bp, ENXIO); 2061 } 2062 mtx_unlock(&sc->sc_queue_mtx); 2063 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2064 sc->sc_provider->name); 2065 sc->sc_provider->flags |= G_PF_WITHER; 2066 g_orphan_provider(sc->sc_provider, ENXIO); 2067 g_topology_unlock(); 2068 sc->sc_provider = NULL; 2069 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2070 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2071 g_mirror_sync_stop(disk, 1); 2072 } 2073 } 2074 2075 static void 2076 g_mirror_go(void *arg) 2077 { 2078 struct g_mirror_softc *sc; 2079 2080 sc = arg; 2081 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2082 g_mirror_event_send(sc, 0, 2083 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2084 } 2085 2086 static u_int 2087 g_mirror_determine_state(struct g_mirror_disk *disk) 2088 { 2089 struct g_mirror_softc *sc; 2090 u_int state; 2091 2092 sc = disk->d_softc; 2093 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2094 if ((disk->d_flags & 2095 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2096 /* Disk does not need synchronization. */ 2097 state = G_MIRROR_DISK_STATE_ACTIVE; 2098 } else { 2099 if ((sc->sc_flags & 2100 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2101 (disk->d_flags & 2102 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2103 /* 2104 * We can start synchronization from 2105 * the stored offset. 2106 */ 2107 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2108 } else { 2109 state = G_MIRROR_DISK_STATE_STALE; 2110 } 2111 } 2112 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2113 /* 2114 * Reset all synchronization data for this disk, 2115 * because if it even was synchronized, it was 2116 * synchronized to disks with different syncid. 
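                 * In particular the recorded ds_offset belongs to that older
                 * generation, so synchronization has to restart from offset
                 * zero against the current contents of the mirror.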
                 */
                disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
                disk->d_sync.ds_offset = 0;
                disk->d_sync.ds_offset_done = 0;
                disk->d_sync.ds_syncid = sc->sc_syncid;
                if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
                    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
                        state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
                } else {
                        state = G_MIRROR_DISK_STATE_STALE;
                }
        } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
                /*
                 * Not good, NOT GOOD!
                 * It means that the mirror was started on stale disks
                 * and a fresher disk has just arrived.
                 * If there were writes, the mirror is broken, sorry.
                 * I think the best choice here is not to touch this
                 * disk and to inform the user loudly.
                 */
                G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
                    "disk (%s) arrived! It will not be connected to the "
                    "running device.", sc->sc_name,
                    g_mirror_get_diskname(disk));
                g_mirror_destroy_disk(disk);
                state = G_MIRROR_DISK_STATE_NONE;
                /* Return immediately, because the disk was destroyed. */
                return (state);
        }
        G_MIRROR_DEBUG(3, "State for %s disk: %s.",
            g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
        return (state);
}

/*
 * Update device state.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
{
        struct g_mirror_disk *disk;
        u_int state;

        sx_assert(&sc->sc_lock, SX_XLOCKED);

        switch (sc->sc_state) {
        case G_MIRROR_DEVICE_STATE_STARTING:
            {
                struct g_mirror_disk *pdisk, *tdisk;
                u_int dirty, ndisks, genid, syncid;

                KASSERT(sc->sc_provider == NULL,
                    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
                /*
                 * Are we ready? We are, if all disks are connected or
                 * if we have any disks and 'force' is true.
                 */
                ndisks = g_mirror_ndisks(sc, -1);
                if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
                        ;
                } else if (ndisks == 0) {
                        /*
                         * Disks went down in the starting phase, so destroy
                         * the device.
                         */
                        callout_drain(&sc->sc_callout);
                        sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
                        G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
                            sc->sc_rootmount);
                        root_mount_rel(sc->sc_rootmount);
                        sc->sc_rootmount = NULL;
                        return;
                } else {
                        return;
                }

                /*
                 * Activate all disks with the biggest syncid.
                 */
                if (force) {
                        /*
                         * If 'force' is true, we have been called due to
                         * timeout, so don't bother canceling the timeout.
                         */
                        ndisks = 0;
                        LIST_FOREACH(disk, &sc->sc_disks, d_next) {
                                if ((disk->d_flags &
                                    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
                                        ndisks++;
                                }
                        }
                        if (ndisks == 0) {
                                /* No valid disks found, destroy the device. */
                                sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
                                G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
                                    __LINE__, sc->sc_rootmount);
                                root_mount_rel(sc->sc_rootmount);
                                sc->sc_rootmount = NULL;
                                return;
                        }
                } else {
                        /* Cancel the timeout. */
                        callout_drain(&sc->sc_callout);
                }

                /*
                 * Find the biggest genid.
                 */
                genid = 0;
                LIST_FOREACH(disk, &sc->sc_disks, d_next) {
                        if (disk->d_genid > genid)
                                genid = disk->d_genid;
                }
                sc->sc_genid = genid;
                /*
                 * Remove all disks without the biggest genid.
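                 * (The generation id is only bumped when a component is
                 * dropped after I/O errors, so a smaller genid presumably
                 * means the disk was absent for such an event and its
                 * contents cannot be trusted.)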
2233 */ 2234 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2235 if (disk->d_genid < genid) { 2236 G_MIRROR_DEBUG(0, 2237 "Component %s (device %s) broken, skipping.", 2238 g_mirror_get_diskname(disk), sc->sc_name); 2239 g_mirror_destroy_disk(disk); 2240 } 2241 } 2242 2243 /* 2244 * Find the biggest syncid. 2245 */ 2246 syncid = 0; 2247 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2248 if (disk->d_sync.ds_syncid > syncid) 2249 syncid = disk->d_sync.ds_syncid; 2250 } 2251 2252 /* 2253 * Here we need to look for dirty disks and if all disks 2254 * with the biggest syncid are dirty, we have to choose 2255 * one with the biggest priority and rebuild the rest. 2256 */ 2257 /* 2258 * Find the number of dirty disks with the biggest syncid. 2259 * Find the number of disks with the biggest syncid. 2260 * While here, find a disk with the biggest priority. 2261 */ 2262 dirty = ndisks = 0; 2263 pdisk = NULL; 2264 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2265 if (disk->d_sync.ds_syncid != syncid) 2266 continue; 2267 if ((disk->d_flags & 2268 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2269 continue; 2270 } 2271 ndisks++; 2272 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2273 dirty++; 2274 if (pdisk == NULL || 2275 pdisk->d_priority < disk->d_priority) { 2276 pdisk = disk; 2277 } 2278 } 2279 } 2280 if (dirty == 0) { 2281 /* No dirty disks at all, great. */ 2282 } else if (dirty == ndisks) { 2283 /* 2284 * Force synchronization for all dirty disks except one 2285 * with the biggest priority. 2286 */ 2287 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2288 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2289 "master disk for synchronization.", 2290 g_mirror_get_diskname(pdisk), sc->sc_name); 2291 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2292 if (disk->d_sync.ds_syncid != syncid) 2293 continue; 2294 if ((disk->d_flags & 2295 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2296 continue; 2297 } 2298 KASSERT((disk->d_flags & 2299 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2300 ("Disk %s isn't marked as dirty.", 2301 g_mirror_get_diskname(disk))); 2302 /* Skip the disk with the biggest priority. */ 2303 if (disk == pdisk) 2304 continue; 2305 disk->d_sync.ds_syncid = 0; 2306 } 2307 } else if (dirty < ndisks) { 2308 /* 2309 * Force synchronization for all dirty disks. 2310 * We have some non-dirty disks. 2311 */ 2312 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2313 if (disk->d_sync.ds_syncid != syncid) 2314 continue; 2315 if ((disk->d_flags & 2316 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2317 continue; 2318 } 2319 if ((disk->d_flags & 2320 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2321 continue; 2322 } 2323 disk->d_sync.ds_syncid = 0; 2324 } 2325 } 2326 2327 /* Reset hint. */ 2328 sc->sc_hint = NULL; 2329 sc->sc_syncid = syncid; 2330 if (force) { 2331 /* Remember to bump syncid on first write. 
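                         * Deferring the bump until the first write means a
                         * force-started mirror that is never written to does
                         * not invalidate components which show up later.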
*/ 2332 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2333 } 2334 state = G_MIRROR_DEVICE_STATE_RUNNING; 2335 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2336 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2337 g_mirror_device_state2str(state)); 2338 sc->sc_state = state; 2339 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2340 state = g_mirror_determine_state(disk); 2341 g_mirror_event_send(disk, state, 2342 G_MIRROR_EVENT_DONTWAIT); 2343 if (state == G_MIRROR_DISK_STATE_STALE) 2344 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2345 } 2346 break; 2347 } 2348 case G_MIRROR_DEVICE_STATE_RUNNING: 2349 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2350 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2351 /* 2352 * No active disks or no disks at all, 2353 * so destroy device. 2354 */ 2355 if (sc->sc_provider != NULL) 2356 g_mirror_destroy_provider(sc); 2357 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2358 break; 2359 } else if (g_mirror_ndisks(sc, 2360 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2361 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2362 /* 2363 * We have active disks, launch provider if it doesn't 2364 * exist. 2365 */ 2366 if (sc->sc_provider == NULL) 2367 g_mirror_launch_provider(sc); 2368 if (sc->sc_rootmount != NULL) { 2369 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2370 __LINE__, sc->sc_rootmount); 2371 root_mount_rel(sc->sc_rootmount); 2372 sc->sc_rootmount = NULL; 2373 } 2374 } 2375 /* 2376 * Genid should be bumped immediately, so do it here. 2377 */ 2378 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2379 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2380 g_mirror_bump_genid(sc); 2381 } 2382 break; 2383 default: 2384 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2385 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2386 break; 2387 } 2388 } 2389 2390 /* 2391 * Update disk state and device state if needed. 2392 */ 2393 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2394 "Disk %s state changed from %s to %s (device %s).", \ 2395 g_mirror_get_diskname(disk), \ 2396 g_mirror_disk_state2str(disk->d_state), \ 2397 g_mirror_disk_state2str(state), sc->sc_name) 2398 static int 2399 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2400 { 2401 struct g_mirror_softc *sc; 2402 2403 sc = disk->d_softc; 2404 sx_assert(&sc->sc_lock, SX_XLOCKED); 2405 2406 again: 2407 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2408 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2409 g_mirror_disk_state2str(state)); 2410 switch (state) { 2411 case G_MIRROR_DISK_STATE_NEW: 2412 /* 2413 * Possible scenarios: 2414 * 1. New disk arrive. 2415 */ 2416 /* Previous state should be NONE. 
*/ 2417 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2418 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2419 g_mirror_disk_state2str(disk->d_state))); 2420 DISK_STATE_CHANGED(); 2421 2422 disk->d_state = state; 2423 if (LIST_EMPTY(&sc->sc_disks)) 2424 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2425 else { 2426 struct g_mirror_disk *dp; 2427 2428 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2429 if (disk->d_priority >= dp->d_priority) { 2430 LIST_INSERT_BEFORE(dp, disk, d_next); 2431 dp = NULL; 2432 break; 2433 } 2434 if (LIST_NEXT(dp, d_next) == NULL) 2435 break; 2436 } 2437 if (dp != NULL) 2438 LIST_INSERT_AFTER(dp, disk, d_next); 2439 } 2440 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2441 sc->sc_name, g_mirror_get_diskname(disk)); 2442 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2443 break; 2444 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2445 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2446 g_mirror_device_state2str(sc->sc_state), 2447 g_mirror_get_diskname(disk), 2448 g_mirror_disk_state2str(disk->d_state))); 2449 state = g_mirror_determine_state(disk); 2450 if (state != G_MIRROR_DISK_STATE_NONE) 2451 goto again; 2452 break; 2453 case G_MIRROR_DISK_STATE_ACTIVE: 2454 /* 2455 * Possible scenarios: 2456 * 1. New disk does not need synchronization. 2457 * 2. Synchronization process finished successfully. 2458 */ 2459 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2460 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2461 g_mirror_device_state2str(sc->sc_state), 2462 g_mirror_get_diskname(disk), 2463 g_mirror_disk_state2str(disk->d_state))); 2464 /* Previous state should be NEW or SYNCHRONIZING. */ 2465 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2466 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2467 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2468 g_mirror_disk_state2str(disk->d_state))); 2469 DISK_STATE_CHANGED(); 2470 2471 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2472 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2473 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2474 g_mirror_sync_stop(disk, 0); 2475 } 2476 disk->d_state = state; 2477 disk->d_sync.ds_offset = 0; 2478 disk->d_sync.ds_offset_done = 0; 2479 g_mirror_update_idle(sc, disk); 2480 g_mirror_update_metadata(disk); 2481 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2482 sc->sc_name, g_mirror_get_diskname(disk)); 2483 break; 2484 case G_MIRROR_DISK_STATE_STALE: 2485 /* 2486 * Possible scenarios: 2487 * 1. Stale disk was connected. 2488 */ 2489 /* Previous state should be NEW. */ 2490 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2491 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2492 g_mirror_disk_state2str(disk->d_state))); 2493 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2494 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2495 g_mirror_device_state2str(sc->sc_state), 2496 g_mirror_get_diskname(disk), 2497 g_mirror_disk_state2str(disk->d_state))); 2498 /* 2499 * STALE state is only possible if device is marked 2500 * NOAUTOSYNC. 
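                 * With autosynchronization enabled,
                 * g_mirror_determine_state() would have requested
                 * SYNCHRONIZING instead; the assertion below relies on that.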
                 */
                KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
                    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
                    g_mirror_device_state2str(sc->sc_state),
                    g_mirror_get_diskname(disk),
                    g_mirror_disk_state2str(disk->d_state)));
                DISK_STATE_CHANGED();

                disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
                disk->d_state = state;
                g_mirror_update_metadata(disk);
                G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
                    sc->sc_name, g_mirror_get_diskname(disk));
                break;
        case G_MIRROR_DISK_STATE_SYNCHRONIZING:
                /*
                 * Possible scenarios:
                 * 1. A disk which needs synchronization was connected.
                 */
                /* Previous state should be NEW. */
                KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
                    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
                    g_mirror_disk_state2str(disk->d_state)));
                KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
                    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
                    g_mirror_device_state2str(sc->sc_state),
                    g_mirror_get_diskname(disk),
                    g_mirror_disk_state2str(disk->d_state)));
                DISK_STATE_CHANGED();

                if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
                        disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
                disk->d_state = state;
                if (sc->sc_provider != NULL) {
                        g_mirror_sync_start(disk);
                        g_mirror_update_metadata(disk);
                }
                break;
        case G_MIRROR_DISK_STATE_DISCONNECTED:
                /*
                 * Possible scenarios:
                 * 1. The device wasn't running yet, but a disk disappeared.
                 * 2. A disk was active and disappeared.
                 * 3. A disk disappeared during the synchronization process.
                 */
                if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
                        /*
                         * Previous state should be ACTIVE, STALE or
                         * SYNCHRONIZING.
                         */
                        KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
                            disk->d_state == G_MIRROR_DISK_STATE_STALE ||
                            disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
                            ("Wrong disk state (%s, %s).",
                            g_mirror_get_diskname(disk),
                            g_mirror_disk_state2str(disk->d_state)));
                } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
                        /* Previous state should be NEW. */
                        KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
                            ("Wrong disk state (%s, %s).",
                            g_mirror_get_diskname(disk),
                            g_mirror_disk_state2str(disk->d_state)));
                        /*
                         * Reset bumping the syncid if the disk disappeared in
                         * STARTING state.
2566 */ 2567 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2568 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2569 #ifdef INVARIANTS 2570 } else { 2571 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2572 sc->sc_name, 2573 g_mirror_device_state2str(sc->sc_state), 2574 g_mirror_get_diskname(disk), 2575 g_mirror_disk_state2str(disk->d_state))); 2576 #endif 2577 } 2578 DISK_STATE_CHANGED(); 2579 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2580 sc->sc_name, g_mirror_get_diskname(disk)); 2581 2582 g_mirror_destroy_disk(disk); 2583 break; 2584 case G_MIRROR_DISK_STATE_DESTROY: 2585 { 2586 int error; 2587 2588 error = g_mirror_clear_metadata(disk); 2589 if (error != 0) 2590 return (error); 2591 DISK_STATE_CHANGED(); 2592 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2593 sc->sc_name, g_mirror_get_diskname(disk)); 2594 2595 g_mirror_destroy_disk(disk); 2596 sc->sc_ndisks--; 2597 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2598 g_mirror_update_metadata(disk); 2599 } 2600 break; 2601 } 2602 default: 2603 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2604 break; 2605 } 2606 return (0); 2607 } 2608 #undef DISK_STATE_CHANGED 2609 2610 int 2611 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2612 { 2613 struct g_provider *pp; 2614 u_char *buf; 2615 int error; 2616 2617 g_topology_assert(); 2618 2619 error = g_access(cp, 1, 0, 0); 2620 if (error != 0) 2621 return (error); 2622 pp = cp->provider; 2623 g_topology_unlock(); 2624 /* Metadata are stored on last sector. */ 2625 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2626 &error); 2627 g_topology_lock(); 2628 g_access(cp, -1, 0, 0); 2629 if (buf == NULL) { 2630 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2631 cp->provider->name, error); 2632 return (error); 2633 } 2634 2635 /* Decode metadata. 
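         * The buffer just read comes from the provider's last sector (byte
         * offset mediasize - sectorsize), e.g. the final 512 bytes of a
         * component with 512-byte sectors.  The magic string and on-disk
         * version are checked before the decoder's checksum result is acted
         * upon.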
*/ 2636 error = mirror_metadata_decode(buf, md); 2637 g_free(buf); 2638 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2639 return (EINVAL); 2640 if (md->md_version > G_MIRROR_VERSION) { 2641 G_MIRROR_DEBUG(0, 2642 "Kernel module is too old to handle metadata from %s.", 2643 cp->provider->name); 2644 return (EINVAL); 2645 } 2646 if (error != 0) { 2647 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2648 cp->provider->name); 2649 return (error); 2650 } 2651 2652 return (0); 2653 } 2654 2655 static int 2656 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2657 struct g_mirror_metadata *md) 2658 { 2659 2660 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2661 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2662 pp->name, md->md_did); 2663 return (EEXIST); 2664 } 2665 if (md->md_all != sc->sc_ndisks) { 2666 G_MIRROR_DEBUG(1, 2667 "Invalid '%s' field on disk %s (device %s), skipping.", 2668 "md_all", pp->name, sc->sc_name); 2669 return (EINVAL); 2670 } 2671 if (md->md_slice != sc->sc_slice) { 2672 G_MIRROR_DEBUG(1, 2673 "Invalid '%s' field on disk %s (device %s), skipping.", 2674 "md_slice", pp->name, sc->sc_name); 2675 return (EINVAL); 2676 } 2677 if (md->md_balance != sc->sc_balance) { 2678 G_MIRROR_DEBUG(1, 2679 "Invalid '%s' field on disk %s (device %s), skipping.", 2680 "md_balance", pp->name, sc->sc_name); 2681 return (EINVAL); 2682 } 2683 if (md->md_mediasize != sc->sc_mediasize) { 2684 G_MIRROR_DEBUG(1, 2685 "Invalid '%s' field on disk %s (device %s), skipping.", 2686 "md_mediasize", pp->name, sc->sc_name); 2687 return (EINVAL); 2688 } 2689 if (sc->sc_mediasize > pp->mediasize) { 2690 G_MIRROR_DEBUG(1, 2691 "Invalid size of disk %s (device %s), skipping.", pp->name, 2692 sc->sc_name); 2693 return (EINVAL); 2694 } 2695 if (md->md_sectorsize != sc->sc_sectorsize) { 2696 G_MIRROR_DEBUG(1, 2697 "Invalid '%s' field on disk %s (device %s), skipping.", 2698 "md_sectorsize", pp->name, sc->sc_name); 2699 return (EINVAL); 2700 } 2701 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2702 G_MIRROR_DEBUG(1, 2703 "Invalid sector size of disk %s (device %s), skipping.", 2704 pp->name, sc->sc_name); 2705 return (EINVAL); 2706 } 2707 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2708 G_MIRROR_DEBUG(1, 2709 "Invalid device flags on disk %s (device %s), skipping.", 2710 pp->name, sc->sc_name); 2711 return (EINVAL); 2712 } 2713 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2714 G_MIRROR_DEBUG(1, 2715 "Invalid disk flags on disk %s (device %s), skipping.", 2716 pp->name, sc->sc_name); 2717 return (EINVAL); 2718 } 2719 return (0); 2720 } 2721 2722 int 2723 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2724 struct g_mirror_metadata *md) 2725 { 2726 struct g_mirror_disk *disk; 2727 int error; 2728 2729 g_topology_assert_not(); 2730 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2731 2732 error = g_mirror_check_metadata(sc, pp, md); 2733 if (error != 0) 2734 return (error); 2735 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2736 md->md_genid < sc->sc_genid) { 2737 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2738 pp->name, sc->sc_name); 2739 return (EINVAL); 2740 } 2741 disk = g_mirror_init_disk(sc, pp, md, &error); 2742 if (disk == NULL) 2743 return (error); 2744 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2745 G_MIRROR_EVENT_WAIT); 2746 if (error != 0) 2747 return (error); 2748 if (md->md_version < G_MIRROR_VERSION) { 2749 G_MIRROR_DEBUG(0, "Upgrading metadata on %s 
(v%d->v%d).", 2750 pp->name, md->md_version, G_MIRROR_VERSION); 2751 g_mirror_update_metadata(disk); 2752 } 2753 return (0); 2754 } 2755 2756 static void 2757 g_mirror_destroy_delayed(void *arg, int flag) 2758 { 2759 struct g_mirror_softc *sc; 2760 int error; 2761 2762 if (flag == EV_CANCEL) { 2763 G_MIRROR_DEBUG(1, "Destroying canceled."); 2764 return; 2765 } 2766 sc = arg; 2767 g_topology_unlock(); 2768 sx_xlock(&sc->sc_lock); 2769 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2770 ("DESTROY flag set on %s.", sc->sc_name)); 2771 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2772 ("DESTROYING flag not set on %s.", sc->sc_name)); 2773 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2774 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2775 if (error != 0) { 2776 G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 2777 sx_xunlock(&sc->sc_lock); 2778 } 2779 g_topology_lock(); 2780 } 2781 2782 static int 2783 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2784 { 2785 struct g_mirror_softc *sc; 2786 int dcr, dcw, dce, error = 0; 2787 2788 g_topology_assert(); 2789 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2790 acw, ace); 2791 2792 sc = pp->geom->softc; 2793 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2794 return (0); 2795 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2796 2797 dcr = pp->acr + acr; 2798 dcw = pp->acw + acw; 2799 dce = pp->ace + ace; 2800 2801 g_topology_unlock(); 2802 sx_xlock(&sc->sc_lock); 2803 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2804 LIST_EMPTY(&sc->sc_disks)) { 2805 if (acr > 0 || acw > 0 || ace > 0) 2806 error = ENXIO; 2807 goto end; 2808 } 2809 if (dcw == 0 && !sc->sc_idle) 2810 g_mirror_idle(sc, dcw); 2811 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2812 if (acr > 0 || acw > 0 || ace > 0) { 2813 error = ENXIO; 2814 goto end; 2815 } 2816 if (dcr == 0 && dcw == 0 && dce == 0) { 2817 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2818 sc, NULL); 2819 } 2820 } 2821 end: 2822 sx_xunlock(&sc->sc_lock); 2823 g_topology_lock(); 2824 return (error); 2825 } 2826 2827 static struct g_geom * 2828 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2829 { 2830 struct g_mirror_softc *sc; 2831 struct g_geom *gp; 2832 int error, timeout; 2833 2834 g_topology_assert(); 2835 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2836 md->md_mid); 2837 2838 /* One disk is minimum. */ 2839 if (md->md_all < 1) 2840 return (NULL); 2841 /* 2842 * Action geom. 
2843 */ 2844 gp = g_new_geomf(mp, "%s", md->md_name); 2845 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2846 gp->start = g_mirror_start; 2847 gp->orphan = g_mirror_orphan; 2848 gp->access = g_mirror_access; 2849 gp->dumpconf = g_mirror_dumpconf; 2850 2851 sc->sc_id = md->md_mid; 2852 sc->sc_slice = md->md_slice; 2853 sc->sc_balance = md->md_balance; 2854 sc->sc_mediasize = md->md_mediasize; 2855 sc->sc_sectorsize = md->md_sectorsize; 2856 sc->sc_ndisks = md->md_all; 2857 sc->sc_flags = md->md_mflags; 2858 sc->sc_bump_id = 0; 2859 sc->sc_idle = 1; 2860 sc->sc_last_write = time_uptime; 2861 sc->sc_writes = 0; 2862 sx_init(&sc->sc_lock, "gmirror:lock"); 2863 bioq_init(&sc->sc_queue); 2864 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2865 bioq_init(&sc->sc_regular_delayed); 2866 bioq_init(&sc->sc_inflight); 2867 bioq_init(&sc->sc_sync_delayed); 2868 LIST_INIT(&sc->sc_disks); 2869 TAILQ_INIT(&sc->sc_events); 2870 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2871 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2872 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2873 gp->softc = sc; 2874 sc->sc_geom = gp; 2875 sc->sc_provider = NULL; 2876 /* 2877 * Synchronization geom. 2878 */ 2879 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2880 gp->softc = sc; 2881 gp->orphan = g_mirror_orphan; 2882 sc->sc_sync.ds_geom = gp; 2883 sc->sc_sync.ds_ndisks = 0; 2884 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2885 "g_mirror %s", md->md_name); 2886 if (error != 0) { 2887 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2888 sc->sc_name); 2889 g_destroy_geom(sc->sc_sync.ds_geom); 2890 mtx_destroy(&sc->sc_events_mtx); 2891 mtx_destroy(&sc->sc_queue_mtx); 2892 sx_destroy(&sc->sc_lock); 2893 g_destroy_geom(sc->sc_geom); 2894 free(sc, M_MIRROR); 2895 return (NULL); 2896 } 2897 2898 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2899 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2900 2901 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2902 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2903 /* 2904 * Run timeout. 
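         * The callout below arms g_mirror_go() for g_mirror_timeout seconds;
         * if some components never show up, the device is force-started from
         * whatever has been tasted by then.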
2905 */ 2906 timeout = g_mirror_timeout * hz; 2907 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 2908 return (sc->sc_geom); 2909 } 2910 2911 int 2912 g_mirror_destroy(struct g_mirror_softc *sc, int how) 2913 { 2914 struct g_mirror_disk *disk; 2915 struct g_provider *pp; 2916 2917 g_topology_assert_not(); 2918 if (sc == NULL) 2919 return (ENXIO); 2920 sx_assert(&sc->sc_lock, SX_XLOCKED); 2921 2922 pp = sc->sc_provider; 2923 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 2924 switch (how) { 2925 case G_MIRROR_DESTROY_SOFT: 2926 G_MIRROR_DEBUG(1, 2927 "Device %s is still open (r%dw%de%d).", pp->name, 2928 pp->acr, pp->acw, pp->ace); 2929 return (EBUSY); 2930 case G_MIRROR_DESTROY_DELAYED: 2931 G_MIRROR_DEBUG(1, 2932 "Device %s will be destroyed on last close.", 2933 pp->name); 2934 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2935 if (disk->d_state == 2936 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2937 g_mirror_sync_stop(disk, 1); 2938 } 2939 } 2940 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 2941 return (EBUSY); 2942 case G_MIRROR_DESTROY_HARD: 2943 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 2944 "can't be definitely removed.", pp->name); 2945 } 2946 } 2947 2948 g_topology_lock(); 2949 if (sc->sc_geom->softc == NULL) { 2950 g_topology_unlock(); 2951 return (0); 2952 } 2953 sc->sc_geom->softc = NULL; 2954 sc->sc_sync.ds_geom->softc = NULL; 2955 g_topology_unlock(); 2956 2957 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2958 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 2959 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 2960 sx_xunlock(&sc->sc_lock); 2961 mtx_lock(&sc->sc_queue_mtx); 2962 wakeup(sc); 2963 mtx_unlock(&sc->sc_queue_mtx); 2964 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 2965 while (sc->sc_worker != NULL) 2966 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 2967 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 2968 sx_xlock(&sc->sc_lock); 2969 g_mirror_destroy_device(sc); 2970 free(sc, M_MIRROR); 2971 return (0); 2972 } 2973 2974 static void 2975 g_mirror_taste_orphan(struct g_consumer *cp) 2976 { 2977 2978 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 2979 cp->provider->name)); 2980 } 2981 2982 static struct g_geom * 2983 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 2984 { 2985 struct g_mirror_metadata md; 2986 struct g_mirror_softc *sc; 2987 struct g_consumer *cp; 2988 struct g_geom *gp; 2989 int error; 2990 2991 g_topology_assert(); 2992 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 2993 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 2994 2995 gp = g_new_geomf(mp, "mirror:taste"); 2996 /* 2997 * This orphan function should be never called. 2998 */ 2999 gp->orphan = g_mirror_taste_orphan; 3000 cp = g_new_consumer(gp); 3001 g_attach(cp, pp); 3002 error = g_mirror_read_metadata(cp, &md); 3003 g_detach(cp); 3004 g_destroy_consumer(cp); 3005 g_destroy_geom(gp); 3006 if (error != 0) 3007 return (NULL); 3008 gp = NULL; 3009 3010 if (md.md_provider[0] != '\0' && 3011 !g_compare_names(md.md_provider, pp->name)) 3012 return (NULL); 3013 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3014 return (NULL); 3015 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3016 G_MIRROR_DEBUG(0, 3017 "Device %s: provider %s marked as inactive, skipping.", 3018 md.md_name, pp->name); 3019 return (NULL); 3020 } 3021 if (g_mirror_debug >= 2) 3022 mirror_metadata_dump(&md); 3023 3024 /* 3025 * Let's check if device already exists. 
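         * The scan below matches by device name, skipping the
         * synchronization geoms; a name match with a different mirror id is
         * refused rather than mixing components of two mirrors that happen
         * to share a name.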
3026 */ 3027 sc = NULL; 3028 LIST_FOREACH(gp, &mp->geom, geom) { 3029 sc = gp->softc; 3030 if (sc == NULL) 3031 continue; 3032 if (sc->sc_sync.ds_geom == gp) 3033 continue; 3034 if (strcmp(md.md_name, sc->sc_name) != 0) 3035 continue; 3036 if (md.md_mid != sc->sc_id) { 3037 G_MIRROR_DEBUG(0, "Device %s already configured.", 3038 sc->sc_name); 3039 return (NULL); 3040 } 3041 break; 3042 } 3043 if (gp == NULL) { 3044 gp = g_mirror_create(mp, &md); 3045 if (gp == NULL) { 3046 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3047 md.md_name); 3048 return (NULL); 3049 } 3050 sc = gp->softc; 3051 } 3052 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3053 g_topology_unlock(); 3054 sx_xlock(&sc->sc_lock); 3055 error = g_mirror_add_disk(sc, pp, &md); 3056 if (error != 0) { 3057 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3058 pp->name, gp->name, error); 3059 if (LIST_EMPTY(&sc->sc_disks)) { 3060 g_cancel_event(sc); 3061 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3062 g_topology_lock(); 3063 return (NULL); 3064 } 3065 gp = NULL; 3066 } 3067 sx_xunlock(&sc->sc_lock); 3068 g_topology_lock(); 3069 return (gp); 3070 } 3071 3072 static int 3073 g_mirror_destroy_geom(struct gctl_req *req __unused, 3074 struct g_class *mp __unused, struct g_geom *gp) 3075 { 3076 struct g_mirror_softc *sc; 3077 int error; 3078 3079 g_topology_unlock(); 3080 sc = gp->softc; 3081 sx_xlock(&sc->sc_lock); 3082 g_cancel_event(sc); 3083 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3084 if (error != 0) 3085 sx_xunlock(&sc->sc_lock); 3086 g_topology_lock(); 3087 return (error); 3088 } 3089 3090 static void 3091 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3092 struct g_consumer *cp, struct g_provider *pp) 3093 { 3094 struct g_mirror_softc *sc; 3095 3096 g_topology_assert(); 3097 3098 sc = gp->softc; 3099 if (sc == NULL) 3100 return; 3101 /* Skip synchronization geom. */ 3102 if (gp == sc->sc_sync.ds_geom) 3103 return; 3104 if (pp != NULL) { 3105 /* Nothing here. 
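                 * No provider-specific attributes are exported; the
                 * per-consumer and per-geom branches below report all of the
                 * component and device state.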
*/ 3106 } else if (cp != NULL) { 3107 struct g_mirror_disk *disk; 3108 3109 disk = cp->private; 3110 if (disk == NULL) 3111 return; 3112 g_topology_unlock(); 3113 sx_xlock(&sc->sc_lock); 3114 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3115 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3116 sbuf_printf(sb, "%s<Synchronized>", indent); 3117 if (disk->d_sync.ds_offset == 0) 3118 sbuf_printf(sb, "0%%"); 3119 else { 3120 sbuf_printf(sb, "%u%%", 3121 (u_int)((disk->d_sync.ds_offset * 100) / 3122 sc->sc_provider->mediasize)); 3123 } 3124 sbuf_printf(sb, "</Synchronized>\n"); 3125 } 3126 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3127 disk->d_sync.ds_syncid); 3128 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3129 disk->d_genid); 3130 sbuf_printf(sb, "%s<Flags>", indent); 3131 if (disk->d_flags == 0) 3132 sbuf_printf(sb, "NONE"); 3133 else { 3134 int first = 1; 3135 3136 #define ADD_FLAG(flag, name) do { \ 3137 if ((disk->d_flags & (flag)) != 0) { \ 3138 if (!first) \ 3139 sbuf_printf(sb, ", "); \ 3140 else \ 3141 first = 0; \ 3142 sbuf_printf(sb, name); \ 3143 } \ 3144 } while (0) 3145 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3146 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3147 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3148 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3149 "SYNCHRONIZING"); 3150 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3151 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3152 #undef ADD_FLAG 3153 } 3154 sbuf_printf(sb, "</Flags>\n"); 3155 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3156 disk->d_priority); 3157 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3158 g_mirror_disk_state2str(disk->d_state)); 3159 sx_xunlock(&sc->sc_lock); 3160 g_topology_lock(); 3161 } else { 3162 g_topology_unlock(); 3163 sx_xlock(&sc->sc_lock); 3164 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3165 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3166 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3167 sbuf_printf(sb, "%s<Flags>", indent); 3168 if (sc->sc_flags == 0) 3169 sbuf_printf(sb, "NONE"); 3170 else { 3171 int first = 1; 3172 3173 #define ADD_FLAG(flag, name) do { \ 3174 if ((sc->sc_flags & (flag)) != 0) { \ 3175 if (!first) \ 3176 sbuf_printf(sb, ", "); \ 3177 else \ 3178 first = 0; \ 3179 sbuf_printf(sb, name); \ 3180 } \ 3181 } while (0) 3182 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3183 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3184 #undef ADD_FLAG 3185 } 3186 sbuf_printf(sb, "</Flags>\n"); 3187 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3188 (u_int)sc->sc_slice); 3189 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3190 balance_name(sc->sc_balance)); 3191 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3192 sc->sc_ndisks); 3193 sbuf_printf(sb, "%s<State>", indent); 3194 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3195 sbuf_printf(sb, "%s", "STARTING"); 3196 else if (sc->sc_ndisks == 3197 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3198 sbuf_printf(sb, "%s", "COMPLETE"); 3199 else 3200 sbuf_printf(sb, "%s", "DEGRADED"); 3201 sbuf_printf(sb, "</State>\n"); 3202 sx_xunlock(&sc->sc_lock); 3203 g_topology_lock(); 3204 } 3205 } 3206 3207 static void 3208 g_mirror_shutdown_pre_sync(void *arg, int howto) 3209 { 3210 struct g_class *mp; 3211 struct g_geom *gp, *gp2; 3212 struct g_mirror_softc *sc; 3213 int error; 3214 3215 mp = arg; 3216 DROP_GIANT(); 3217 g_topology_lock(); 3218 
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3219 if ((sc = gp->softc) == NULL) 3220 continue; 3221 /* Skip synchronization geom. */ 3222 if (gp == sc->sc_sync.ds_geom) 3223 continue; 3224 g_topology_unlock(); 3225 sx_xlock(&sc->sc_lock); 3226 g_cancel_event(sc); 3227 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3228 if (error != 0) 3229 sx_xunlock(&sc->sc_lock); 3230 g_topology_lock(); 3231 } 3232 g_topology_unlock(); 3233 PICKUP_GIANT(); 3234 } 3235 3236 static void 3237 g_mirror_init(struct g_class *mp) 3238 { 3239 3240 g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3241 g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3242 if (g_mirror_pre_sync == NULL) 3243 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3244 } 3245 3246 static void 3247 g_mirror_fini(struct g_class *mp) 3248 { 3249 3250 if (g_mirror_pre_sync != NULL) 3251 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync); 3252 } 3253 3254 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3255