/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
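/*
 * All of the knobs above are loader tunables; those marked CTLFLAG_RW can
 * also be changed at runtime with sysctl(8), while sync_requests
 * (CTLFLAG_RDTUN) can only be set as a tunable.  For example, a loader.conf
 * could contain:
 *
 *	kern.geom.mirror.debug="2"
 *	kern.geom.mirror.sync_requests="4"
 */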
"%s: Woken up %p.", __func__, (ident)); \ 79 } while (0) 80 81 static eventhandler_tag g_mirror_pre_sync = NULL; 82 83 static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp, 84 struct g_geom *gp); 85 static g_taste_t g_mirror_taste; 86 static void g_mirror_init(struct g_class *mp); 87 static void g_mirror_fini(struct g_class *mp); 88 89 struct g_class g_mirror_class = { 90 .name = G_MIRROR_CLASS_NAME, 91 .version = G_VERSION, 92 .ctlreq = g_mirror_config, 93 .taste = g_mirror_taste, 94 .destroy_geom = g_mirror_destroy_geom, 95 .init = g_mirror_init, 96 .fini = g_mirror_fini 97 }; 98 99 100 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 101 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 102 static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force); 103 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 104 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 105 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 106 static void g_mirror_register_request(struct bio *bp); 107 static void g_mirror_sync_release(struct g_mirror_softc *sc); 108 109 110 static const char * 111 g_mirror_disk_state2str(int state) 112 { 113 114 switch (state) { 115 case G_MIRROR_DISK_STATE_NONE: 116 return ("NONE"); 117 case G_MIRROR_DISK_STATE_NEW: 118 return ("NEW"); 119 case G_MIRROR_DISK_STATE_ACTIVE: 120 return ("ACTIVE"); 121 case G_MIRROR_DISK_STATE_STALE: 122 return ("STALE"); 123 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 124 return ("SYNCHRONIZING"); 125 case G_MIRROR_DISK_STATE_DISCONNECTED: 126 return ("DISCONNECTED"); 127 case G_MIRROR_DISK_STATE_DESTROY: 128 return ("DESTROY"); 129 default: 130 return ("INVALID"); 131 } 132 } 133 134 static const char * 135 g_mirror_device_state2str(int state) 136 { 137 138 switch (state) { 139 case G_MIRROR_DEVICE_STATE_STARTING: 140 return ("STARTING"); 141 case G_MIRROR_DEVICE_STATE_RUNNING: 142 return ("RUNNING"); 143 default: 144 return ("INVALID"); 145 } 146 } 147 148 static const char * 149 g_mirror_get_diskname(struct g_mirror_disk *disk) 150 { 151 152 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 153 return ("[unknown]"); 154 return (disk->d_name); 155 } 156 157 /* 158 * --- Events handling functions --- 159 * Events in geom_mirror are used to maintain disks and device status 160 * from one thread to simplify locking. 
/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain the status of disks and of the
 * device from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}
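/*
 * g_mirror_event_send() is called either with a softc and
 * G_MIRROR_EVENT_DEVICE set, or with a disk and a target disk state.  With
 * G_MIRROR_EVENT_DONTWAIT the event is only queued and the caller returns
 * immediately; without it the caller must hold the exclusive sc_lock and
 * sleeps until the worker thread marks the event G_MIRROR_EVENT_DONE.
 * A typical fire-and-forget call looks like:
 *
 *	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
 *	    G_MIRROR_EVENT_DONTWAIT);
 */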
static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can post an event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how retaste events are ignored when consumers
		 * opened for write are closed: the consumer is detached and
		 * destroyed only after the retaste event has been sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}
static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize the disk: allocate memory, create a consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}
static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}
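/*
 * sc_hint implements the ROUND_ROBIN read balance: g_mirror_get_disk()
 * returns the hinted disk when it is ACTIVE and advances the hint to the
 * next active disk, so with two active components consecutive reads
 * alternate between them.
 */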
static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}
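/*
 * As the offset calculation above shows, the metadata occupies the very
 * last sector of each component (offset = mediasize - sectorsize).  Passing
 * md == NULL writes an all-zero sector, which is how
 * g_mirror_clear_metadata() erases a component's metadata.
 */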
void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}
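/*
 * Example: with the default kern.geom.mirror.idletime of 5 seconds, if the
 * last write happened 2 seconds ago, g_mirror_idle() returns 3 and the
 * worker thread sleeps up to 3 more seconds before retrying to mark the
 * components clean.
 */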
static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static __inline int
bintime_cmp(struct bintime *bt1, struct bintime *bt2)
{

	if (bt1->sec < bt2->sec)
		return (-1);
	else if (bt1->sec > bt2->sec)
		return (1);
	if (bt1->frac < bt2->frac)
		return (-1);
	else if (bt1->frac > bt2->frac)
		return (1);
	return (0);
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently
	 * disconnected, we will not be able to read the dump after the
	 * reboot, even if that component is connected and synchronized
	 * later.  Can we do something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Return TRUE if the given request collides with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}
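/*
 * The collision test treats requests as half-open ranges [start, end):
 * a regular request covering [0, 65536) and a synchronization request
 * covering [65536, 131072) do not collide (65536 > 65536 is false), while
 * [64000, 70000) and [65536, 131072) do.
 */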
/*
 * Return TRUE if the given synchronization request collides with an
 * in-progress regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed synchronization requests which no longer collide with
 * regular requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}
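/*
 * The two delayed queues are drained from opposite sides:
 * g_mirror_regular_release() runs after a synchronization write completes
 * (freeing the range it covered) and g_mirror_sync_release() runs when a
 * regular write completes, so each delayed request is re-examined as soon
 * as the I/O it collided with finishes.
 */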
/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider, and then a WRITE request (with the data
 * just read) is sent to the provider being synchronized.  When the WRITE is
 * finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS,
		    sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 MAXPHYS blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
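/*
 * Synchronization progress is checkpointed in the metadata only when the
 * slowest outstanding request is more than MAXPHYS * 100 bytes ahead of the
 * last recorded offset.  Assuming the historical MAXPHYS of 128 kB, that is
 * one metadata update per ~12.8 MB synchronized, bounding both metadata
 * write traffic and the amount of work repeated after a crash.
 */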
static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position. */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}
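/*
 * Worked example for the LOAD balance: with LOAD_SCALE == 256, a disk's
 * priority is its smoothed outstanding-request count scaled by 256 (see
 * the load = (index * LOAD_SCALE + load * 7) / 8 update above).  A read
 * landing exactly at d_last_offset earns a -512 bonus and one within
 * TRACK_SIZE (1 MB) of it earns -256, so a somewhat busier disk whose head
 * is already in position can still win the request.
 */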
static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}
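/*
 * Example of the SPLIT balance: a 192 kB read with two ACTIVE components
 * and 512-byte sectors yields slice = 96 kB (already sector-aligned), so
 * each component reads one 96 kB half in parallel.  Requests no larger
 * than sc_slice fall back to round-robin, as the check above shows.
 */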
static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * if new synchronization requests don't collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}
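/*
 * Note that writes and deletes are cloned to every ACTIVE disk, and also to
 * SYNCHRONIZING disks for offsets below ds_offset, i.e. the region that has
 * already been rebuilt.  This keeps the synchronized prefix of a rebuilding
 * disk up to date without blocking regular writes.
 */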
static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}
/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * Take a look at events first.  It is important to handle
		 * events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.",
				    __func__, ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we cannot,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx device lock and without
			 * the queue mutex.  Maybe we should just stop using a
			 * dedicated mutex for event synchronization and
			 * stick with the queue lock?
			 * The event will hang here until the next I/O request
			 * or the next event arrives.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}
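/*
 * The worker loop above handles, in order: pending state events, the idle
 * check that may mark components clean, and queued I/O.  Each bio is then
 * dispatched by its origin and cflags: completed sync reads go back to
 * g_mirror_sync_request(), completed component I/O goes to
 * g_mirror_regular_request(), and fresh requests addressed to the mirror
 * provider go to g_mirror_register_request().
 */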
static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;

	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS,
		    sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}
/*
 * Stop synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}
 */
2021 	g_topology_lock();
2022 	g_mirror_kill_consumer(sc, cp);
2023 	g_topology_unlock();
2024 	sx_xlock(&sc->sc_lock);
2025 }
2026 
2027 static void
2028 g_mirror_launch_provider(struct g_mirror_softc *sc)
2029 {
2030 	struct g_mirror_disk *disk;
2031 	struct g_provider *pp;
2032 
2033 	sx_assert(&sc->sc_lock, SX_LOCKED);
2034 
2035 	g_topology_lock();
2036 	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
2037 	pp->mediasize = sc->sc_mediasize;
2038 	pp->sectorsize = sc->sc_sectorsize;
2039 	sc->sc_provider = pp;
2040 	g_error_provider(pp, 0);
2041 	g_topology_unlock();
2042 	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2043 	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
2044 	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2045 		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2046 			g_mirror_sync_start(disk);
2047 	}
2048 }
2049 
2050 static void
2051 g_mirror_destroy_provider(struct g_mirror_softc *sc)
2052 {
2053 	struct g_mirror_disk *disk;
2054 	struct bio *bp;
2055 
2056 	g_topology_assert_not();
2057 	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2058 	    sc->sc_name));
2059 
2060 	g_topology_lock();
2061 	g_error_provider(sc->sc_provider, ENXIO);
2062 	mtx_lock(&sc->sc_queue_mtx);
2063 	while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2064 		bioq_remove(&sc->sc_queue, bp);
2065 		g_io_deliver(bp, ENXIO);
2066 	}
2067 	mtx_unlock(&sc->sc_queue_mtx);
2068 	G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2069 	    sc->sc_provider->name);
2070 	sc->sc_provider->flags |= G_PF_WITHER;
2071 	g_orphan_provider(sc->sc_provider, ENXIO);
2072 	g_topology_unlock();
2073 	sc->sc_provider = NULL;
2074 	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2075 		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2076 			g_mirror_sync_stop(disk, 1);
2077 	}
2078 }
2079 
2080 static void
2081 g_mirror_go(void *arg)
2082 {
2083 	struct g_mirror_softc *sc;
2084 
2085 	sc = arg;
2086 	G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2087 	g_mirror_event_send(sc, 0,
2088 	    G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2089 }
2090 
2091 static u_int
2092 g_mirror_determine_state(struct g_mirror_disk *disk)
2093 {
2094 	struct g_mirror_softc *sc;
2095 	u_int state;
2096 
2097 	sc = disk->d_softc;
2098 	if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2099 		if ((disk->d_flags &
2100 		    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2101 			/* Disk does not need synchronization. */
2102 			state = G_MIRROR_DISK_STATE_ACTIVE;
2103 		} else {
2104 			if ((sc->sc_flags &
2105 			    G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2106 			    (disk->d_flags &
2107 			    G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2108 				/*
2109 				 * We can start synchronization from
2110 				 * the stored offset.
2111 				 */
2112 				state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2113 			} else {
2114 				state = G_MIRROR_DISK_STATE_STALE;
2115 			}
2116 		}
2117 	} else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2118 		/*
2119 		 * Reset all synchronization data for this disk,
2120 		 * because even if it was synchronized, it was
2121 		 * synchronized to disks with a different syncid.
2122 		 */
2123 		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2124 		disk->d_sync.ds_offset = 0;
2125 		disk->d_sync.ds_offset_done = 0;
2126 		disk->d_sync.ds_syncid = sc->sc_syncid;
2127 		if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2128 		    (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2129 			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2130 		} else {
2131 			state = G_MIRROR_DISK_STATE_STALE;
2132 		}
2133 	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2134 		/*
2135 		 * Not good at all: the mirror was started on
2136 		 * stale disks and a fresher disk has just arrived.
2137 		 * If any writes happened in the meantime, the
2138 		 * mirror is now inconsistent. The safest choice is
2139 		 * to leave this disk untouched and to inform the
2140 		 * user loudly.
2141 		 */
2142 		G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2143 		    "disk (%s) arrived! It will not be connected to the "
2144 		    "running device.", sc->sc_name,
2145 		    g_mirror_get_diskname(disk));
2146 		g_mirror_destroy_disk(disk);
2147 		state = G_MIRROR_DISK_STATE_NONE;
2148 		/* Return immediately, because disk was destroyed. */
2149 		return (state);
2150 	}
2151 	G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2152 	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2153 	return (state);
2154 }
2155 
2156 /*
2157  * Update device state.
2158  */
2159 static void
2160 g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
2161 {
2162 	struct g_mirror_disk *disk;
2163 	u_int state;
2164 
2165 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2166 
2167 	switch (sc->sc_state) {
2168 	case G_MIRROR_DEVICE_STATE_STARTING:
2169 	    {
2170 		struct g_mirror_disk *pdisk, *tdisk;
2171 		u_int dirty, ndisks, genid, syncid;
2172 
2173 		KASSERT(sc->sc_provider == NULL,
2174 		    ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2175 		/*
2176 		 * Are we ready? We are, if all disks are connected or
2177 		 * if we have any disks and 'force' is true.
2178 		 */
2179 		ndisks = g_mirror_ndisks(sc, -1);
2180 		if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2181 			;
2182 		} else if (ndisks == 0) {
2183 			/*
2184 			 * Disks went down in starting phase, so destroy
2185 			 * device.
2186 			 */
2187 			callout_drain(&sc->sc_callout);
2188 			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2189 			G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2190 			    sc->sc_rootmount);
2191 			root_mount_rel(sc->sc_rootmount);
2192 			sc->sc_rootmount = NULL;
2193 			return;
2194 		} else {
2195 			return;
2196 		}
2197 
2198 		/*
2199 		 * Activate all disks with the biggest syncid.
2200 		 */
2201 		if (force) {
2202 			/*
2203 			 * If 'force' is true, we have been called due to
2204 			 * timeout, so don't bother canceling timeout.
2205 			 */
2206 			ndisks = 0;
2207 			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2208 				if ((disk->d_flags &
2209 				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2210 					ndisks++;
2211 				}
2212 			}
2213 			if (ndisks == 0) {
2214 				/* No valid disks found, destroy device. */
2215 				sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2216 				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2217 				    __LINE__, sc->sc_rootmount);
2218 				root_mount_rel(sc->sc_rootmount);
2219 				sc->sc_rootmount = NULL;
2220 				return;
2221 			}
2222 		} else {
2223 			/* Cancel timeout. */
2224 			callout_drain(&sc->sc_callout);
2225 		}
2226 
2227 		/*
2228 		 * Find the biggest genid.
2229 		 */
2230 		genid = 0;
2231 		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2232 			if (disk->d_genid > genid)
2233 				genid = disk->d_genid;
2234 		}
2235 		sc->sc_genid = genid;
2236 		/*
2237 		 * Remove all disks without the biggest genid.
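		 * The genid is bumped each time a component is
		 * disconnected because of an error, so a disk with a
		 * smaller genid missed at least one such event and its
		 * contents cannot be trusted.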
2238 */ 2239 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2240 if (disk->d_genid < genid) { 2241 G_MIRROR_DEBUG(0, 2242 "Component %s (device %s) broken, skipping.", 2243 g_mirror_get_diskname(disk), sc->sc_name); 2244 g_mirror_destroy_disk(disk); 2245 } 2246 } 2247 2248 /* 2249 * Find the biggest syncid. 2250 */ 2251 syncid = 0; 2252 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2253 if (disk->d_sync.ds_syncid > syncid) 2254 syncid = disk->d_sync.ds_syncid; 2255 } 2256 2257 /* 2258 * Here we need to look for dirty disks and if all disks 2259 * with the biggest syncid are dirty, we have to choose 2260 * one with the biggest priority and rebuild the rest. 2261 */ 2262 /* 2263 * Find the number of dirty disks with the biggest syncid. 2264 * Find the number of disks with the biggest syncid. 2265 * While here, find a disk with the biggest priority. 2266 */ 2267 dirty = ndisks = 0; 2268 pdisk = NULL; 2269 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2270 if (disk->d_sync.ds_syncid != syncid) 2271 continue; 2272 if ((disk->d_flags & 2273 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2274 continue; 2275 } 2276 ndisks++; 2277 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2278 dirty++; 2279 if (pdisk == NULL || 2280 pdisk->d_priority < disk->d_priority) { 2281 pdisk = disk; 2282 } 2283 } 2284 } 2285 if (dirty == 0) { 2286 /* No dirty disks at all, great. */ 2287 } else if (dirty == ndisks) { 2288 /* 2289 * Force synchronization for all dirty disks except one 2290 * with the biggest priority. 2291 */ 2292 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2293 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2294 "master disk for synchronization.", 2295 g_mirror_get_diskname(pdisk), sc->sc_name); 2296 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2297 if (disk->d_sync.ds_syncid != syncid) 2298 continue; 2299 if ((disk->d_flags & 2300 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2301 continue; 2302 } 2303 KASSERT((disk->d_flags & 2304 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2305 ("Disk %s isn't marked as dirty.", 2306 g_mirror_get_diskname(disk))); 2307 /* Skip the disk with the biggest priority. */ 2308 if (disk == pdisk) 2309 continue; 2310 disk->d_sync.ds_syncid = 0; 2311 } 2312 } else if (dirty < ndisks) { 2313 /* 2314 * Force synchronization for all dirty disks. 2315 * We have some non-dirty disks. 2316 */ 2317 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2318 if (disk->d_sync.ds_syncid != syncid) 2319 continue; 2320 if ((disk->d_flags & 2321 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2322 continue; 2323 } 2324 if ((disk->d_flags & 2325 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2326 continue; 2327 } 2328 disk->d_sync.ds_syncid = 0; 2329 } 2330 } 2331 2332 /* Reset hint. */ 2333 sc->sc_hint = NULL; 2334 sc->sc_syncid = syncid; 2335 if (force) { 2336 /* Remember to bump syncid on first write. 
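			 * The device was force-started without all of
			 * its components, so any component that appears
			 * later must be treated as out of date and
			 * resynchronized.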
 */
2337 			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2338 		}
2339 		state = G_MIRROR_DEVICE_STATE_RUNNING;
2340 		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2341 		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2342 		    g_mirror_device_state2str(state));
2343 		sc->sc_state = state;
2344 		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2345 			state = g_mirror_determine_state(disk);
2346 			g_mirror_event_send(disk, state,
2347 			    G_MIRROR_EVENT_DONTWAIT);
2348 			if (state == G_MIRROR_DISK_STATE_STALE)
2349 				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2350 		}
2351 		break;
2352 	    }
2353 	case G_MIRROR_DEVICE_STATE_RUNNING:
2354 		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2355 		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2356 			/*
2357 			 * No active disks or no disks at all,
2358 			 * so destroy device.
2359 			 */
2360 			if (sc->sc_provider != NULL)
2361 				g_mirror_destroy_provider(sc);
2362 			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2363 			break;
2364 		} else if (g_mirror_ndisks(sc,
2365 		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2366 		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2367 			/*
2368 			 * We have active disks, launch provider if it doesn't
2369 			 * exist.
2370 			 */
2371 			if (sc->sc_provider == NULL)
2372 				g_mirror_launch_provider(sc);
2373 			if (sc->sc_rootmount != NULL) {
2374 				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2375 				    __LINE__, sc->sc_rootmount);
2376 				root_mount_rel(sc->sc_rootmount);
2377 				sc->sc_rootmount = NULL;
2378 			}
2379 		}
2380 		/*
2381 		 * Genid should be bumped immediately, so do it here.
2382 		 */
2383 		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2384 			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2385 			g_mirror_bump_genid(sc);
2386 		}
2387 		break;
2388 	default:
2389 		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2390 		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2391 		break;
2392 	}
2393 }
2394 
2395 /*
2396  * Update disk state and device state if needed.
2397  */
2398 #define	DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2399 	"Disk %s state changed from %s to %s (device %s).",		\
2400 	g_mirror_get_diskname(disk),					\
2401 	g_mirror_disk_state2str(disk->d_state),				\
2402 	g_mirror_disk_state2str(state), sc->sc_name)
2403 static int
2404 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2405 {
2406 	struct g_mirror_softc *sc;
2407 
2408 	sc = disk->d_softc;
2409 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2410 
2411 again:
2412 	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2413 	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2414 	    g_mirror_disk_state2str(state));
2415 	switch (state) {
2416 	case G_MIRROR_DISK_STATE_NEW:
2417 		/*
2418 		 * Possible scenarios:
2419 		 * 1. A new disk arrives.
2420 		 */
2421 		/* Previous state should be NONE.
*/ 2422 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2423 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2424 g_mirror_disk_state2str(disk->d_state))); 2425 DISK_STATE_CHANGED(); 2426 2427 disk->d_state = state; 2428 if (LIST_EMPTY(&sc->sc_disks)) 2429 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2430 else { 2431 struct g_mirror_disk *dp; 2432 2433 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2434 if (disk->d_priority >= dp->d_priority) { 2435 LIST_INSERT_BEFORE(dp, disk, d_next); 2436 dp = NULL; 2437 break; 2438 } 2439 if (LIST_NEXT(dp, d_next) == NULL) 2440 break; 2441 } 2442 if (dp != NULL) 2443 LIST_INSERT_AFTER(dp, disk, d_next); 2444 } 2445 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2446 sc->sc_name, g_mirror_get_diskname(disk)); 2447 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2448 break; 2449 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2450 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2451 g_mirror_device_state2str(sc->sc_state), 2452 g_mirror_get_diskname(disk), 2453 g_mirror_disk_state2str(disk->d_state))); 2454 state = g_mirror_determine_state(disk); 2455 if (state != G_MIRROR_DISK_STATE_NONE) 2456 goto again; 2457 break; 2458 case G_MIRROR_DISK_STATE_ACTIVE: 2459 /* 2460 * Possible scenarios: 2461 * 1. New disk does not need synchronization. 2462 * 2. Synchronization process finished successfully. 2463 */ 2464 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2465 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2466 g_mirror_device_state2str(sc->sc_state), 2467 g_mirror_get_diskname(disk), 2468 g_mirror_disk_state2str(disk->d_state))); 2469 /* Previous state should be NEW or SYNCHRONIZING. */ 2470 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2471 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2472 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2473 g_mirror_disk_state2str(disk->d_state))); 2474 DISK_STATE_CHANGED(); 2475 2476 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2477 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2478 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2479 g_mirror_sync_stop(disk, 0); 2480 } 2481 disk->d_state = state; 2482 disk->d_sync.ds_offset = 0; 2483 disk->d_sync.ds_offset_done = 0; 2484 g_mirror_update_idle(sc, disk); 2485 g_mirror_update_metadata(disk); 2486 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2487 sc->sc_name, g_mirror_get_diskname(disk)); 2488 break; 2489 case G_MIRROR_DISK_STATE_STALE: 2490 /* 2491 * Possible scenarios: 2492 * 1. Stale disk was connected. 2493 */ 2494 /* Previous state should be NEW. */ 2495 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2496 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2497 g_mirror_disk_state2str(disk->d_state))); 2498 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2499 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2500 g_mirror_device_state2str(sc->sc_state), 2501 g_mirror_get_diskname(disk), 2502 g_mirror_disk_state2str(disk->d_state))); 2503 /* 2504 * STALE state is only possible if device is marked 2505 * NOAUTOSYNC. 
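	 * With NOAUTOSYNC, synchronization is never started
	 * automatically; the disk stays STALE until the user
	 * explicitly requests a rebuild (FORCE_SYNC).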
2506 		 */
2507 		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2508 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2509 		    g_mirror_device_state2str(sc->sc_state),
2510 		    g_mirror_get_diskname(disk),
2511 		    g_mirror_disk_state2str(disk->d_state)));
2512 		DISK_STATE_CHANGED();
2513 
2514 		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2515 		disk->d_state = state;
2516 		g_mirror_update_metadata(disk);
2517 		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2518 		    sc->sc_name, g_mirror_get_diskname(disk));
2519 		break;
2520 	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2521 		/*
2522 		 * Possible scenarios:
2523 		 * 1. Disk which needs synchronization was connected.
2524 		 */
2525 		/* Previous state should be NEW. */
2526 		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2527 		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2528 		    g_mirror_disk_state2str(disk->d_state)));
2529 		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2530 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2531 		    g_mirror_device_state2str(sc->sc_state),
2532 		    g_mirror_get_diskname(disk),
2533 		    g_mirror_disk_state2str(disk->d_state)));
2534 		DISK_STATE_CHANGED();
2535 
2536 		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2537 			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2538 		disk->d_state = state;
2539 		if (sc->sc_provider != NULL) {
2540 			g_mirror_sync_start(disk);
2541 			g_mirror_update_metadata(disk);
2542 		}
2543 		break;
2544 	case G_MIRROR_DISK_STATE_DISCONNECTED:
2545 		/*
2546 		 * Possible scenarios:
2547 		 * 1. Device wasn't running yet, but a disk disappeared.
2548 		 * 2. Disk was active and disappeared.
2549 		 * 3. Disk disappeared during the synchronization process.
2550 		 */
2551 		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2552 			/*
2553 			 * Previous state should be ACTIVE, STALE or
2554 			 * SYNCHRONIZING.
2555 			 */
2556 			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2557 			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2558 			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2559 			    ("Wrong disk state (%s, %s).",
2560 			    g_mirror_get_diskname(disk),
2561 			    g_mirror_disk_state2str(disk->d_state)));
2562 		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2563 			/* Previous state should be NEW. */
2564 			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2565 			    ("Wrong disk state (%s, %s).",
2566 			    g_mirror_get_diskname(disk),
2567 			    g_mirror_disk_state2str(disk->d_state)));
2568 			/*
2569 			 * Reset bumping syncid if disk disappeared in STARTING
2570 			 * state.
 */
2572 			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2573 				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2574 #ifdef	INVARIANTS
2575 		} else {
2576 			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2577 			    sc->sc_name,
2578 			    g_mirror_device_state2str(sc->sc_state),
2579 			    g_mirror_get_diskname(disk),
2580 			    g_mirror_disk_state2str(disk->d_state)));
2581 #endif
2582 		}
2583 		DISK_STATE_CHANGED();
2584 		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2585 		    sc->sc_name, g_mirror_get_diskname(disk));
2586 
2587 		g_mirror_destroy_disk(disk);
2588 		break;
2589 	case G_MIRROR_DISK_STATE_DESTROY:
2590 	    {
2591 		int error;
2592 
2593 		error = g_mirror_clear_metadata(disk);
2594 		if (error != 0)
2595 			return (error);
2596 		DISK_STATE_CHANGED();
2597 		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2598 		    sc->sc_name, g_mirror_get_diskname(disk));
2599 
2600 		g_mirror_destroy_disk(disk);
2601 		sc->sc_ndisks--;
2602 		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2603 			g_mirror_update_metadata(disk);
2604 		}
2605 		break;
2606 	    }
2607 	default:
2608 		KASSERT(1 == 0, ("Unknown state (%u).", state));
2609 		break;
2610 	}
2611 	return (0);
2612 }
2613 #undef	DISK_STATE_CHANGED
2614 
2615 int
2616 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2617 {
2618 	struct g_provider *pp;
2619 	u_char *buf;
2620 	int error;
2621 
2622 	g_topology_assert();
2623 
2624 	error = g_access(cp, 1, 0, 0);
2625 	if (error != 0)
2626 		return (error);
2627 	pp = cp->provider;
2628 	g_topology_unlock();
2629 	/* Metadata is stored in the last sector. */
2630 	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2631 	    &error);
2632 	g_topology_lock();
2633 	g_access(cp, -1, 0, 0);
2634 	if (buf == NULL) {
2635 		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2636 		    cp->provider->name, error);
2637 		return (error);
2638 	}
2639 
2640 	/* Decode metadata.
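	 * The magic string and version are checked before the decode
	 * error, so a provider that carries no gmirror metadata at all
	 * is rejected quietly, while a checksum mismatch on a real
	 * component is logged.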
*/ 2641 error = mirror_metadata_decode(buf, md); 2642 g_free(buf); 2643 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2644 return (EINVAL); 2645 if (md->md_version > G_MIRROR_VERSION) { 2646 G_MIRROR_DEBUG(0, 2647 "Kernel module is too old to handle metadata from %s.", 2648 cp->provider->name); 2649 return (EINVAL); 2650 } 2651 if (error != 0) { 2652 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2653 cp->provider->name); 2654 return (error); 2655 } 2656 2657 return (0); 2658 } 2659 2660 static int 2661 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2662 struct g_mirror_metadata *md) 2663 { 2664 2665 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2666 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2667 pp->name, md->md_did); 2668 return (EEXIST); 2669 } 2670 if (md->md_all != sc->sc_ndisks) { 2671 G_MIRROR_DEBUG(1, 2672 "Invalid '%s' field on disk %s (device %s), skipping.", 2673 "md_all", pp->name, sc->sc_name); 2674 return (EINVAL); 2675 } 2676 if (md->md_slice != sc->sc_slice) { 2677 G_MIRROR_DEBUG(1, 2678 "Invalid '%s' field on disk %s (device %s), skipping.", 2679 "md_slice", pp->name, sc->sc_name); 2680 return (EINVAL); 2681 } 2682 if (md->md_balance != sc->sc_balance) { 2683 G_MIRROR_DEBUG(1, 2684 "Invalid '%s' field on disk %s (device %s), skipping.", 2685 "md_balance", pp->name, sc->sc_name); 2686 return (EINVAL); 2687 } 2688 if (md->md_mediasize != sc->sc_mediasize) { 2689 G_MIRROR_DEBUG(1, 2690 "Invalid '%s' field on disk %s (device %s), skipping.", 2691 "md_mediasize", pp->name, sc->sc_name); 2692 return (EINVAL); 2693 } 2694 if (sc->sc_mediasize > pp->mediasize) { 2695 G_MIRROR_DEBUG(1, 2696 "Invalid size of disk %s (device %s), skipping.", pp->name, 2697 sc->sc_name); 2698 return (EINVAL); 2699 } 2700 if (md->md_sectorsize != sc->sc_sectorsize) { 2701 G_MIRROR_DEBUG(1, 2702 "Invalid '%s' field on disk %s (device %s), skipping.", 2703 "md_sectorsize", pp->name, sc->sc_name); 2704 return (EINVAL); 2705 } 2706 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2707 G_MIRROR_DEBUG(1, 2708 "Invalid sector size of disk %s (device %s), skipping.", 2709 pp->name, sc->sc_name); 2710 return (EINVAL); 2711 } 2712 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2713 G_MIRROR_DEBUG(1, 2714 "Invalid device flags on disk %s (device %s), skipping.", 2715 pp->name, sc->sc_name); 2716 return (EINVAL); 2717 } 2718 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2719 G_MIRROR_DEBUG(1, 2720 "Invalid disk flags on disk %s (device %s), skipping.", 2721 pp->name, sc->sc_name); 2722 return (EINVAL); 2723 } 2724 return (0); 2725 } 2726 2727 int 2728 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2729 struct g_mirror_metadata *md) 2730 { 2731 struct g_mirror_disk *disk; 2732 int error; 2733 2734 g_topology_assert_not(); 2735 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2736 2737 error = g_mirror_check_metadata(sc, pp, md); 2738 if (error != 0) 2739 return (error); 2740 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2741 md->md_genid < sc->sc_genid) { 2742 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2743 pp->name, sc->sc_name); 2744 return (EINVAL); 2745 } 2746 disk = g_mirror_init_disk(sc, pp, md, &error); 2747 if (disk == NULL) 2748 return (error); 2749 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2750 G_MIRROR_EVENT_WAIT); 2751 if (error != 0) 2752 return (error); 2753 if (md->md_version < G_MIRROR_VERSION) { 2754 G_MIRROR_DEBUG(0, "Upgrading metadata on %s 
(v%d->v%d).",
2755 		    pp->name, md->md_version, G_MIRROR_VERSION);
2756 		g_mirror_update_metadata(disk);
2757 	}
2758 	return (0);
2759 }
2760 
2761 static void
2762 g_mirror_destroy_delayed(void *arg, int flag)
2763 {
2764 	struct g_mirror_softc *sc;
2765 	int error;
2766 
2767 	if (flag == EV_CANCEL) {
2768 		G_MIRROR_DEBUG(1, "Destruction canceled.");
2769 		return;
2770 	}
2771 	sc = arg;
2772 	g_topology_unlock();
2773 	sx_xlock(&sc->sc_lock);
2774 	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0,
2775 	    ("DESTROY flag set on %s.", sc->sc_name));
2776 	KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0,
2777 	    ("DESTROYING flag not set on %s.", sc->sc_name));
2778 	G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name);
2779 	error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT);
2780 	if (error != 0) {
2781 		G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name);
2782 		sx_xunlock(&sc->sc_lock);
2783 	}
2784 	g_topology_lock();
2785 }
2786 
2787 static int
2788 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace)
2789 {
2790 	struct g_mirror_softc *sc;
2791 	int dcr, dcw, dce, error = 0;
2792 
2793 	g_topology_assert();
2794 	G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
2795 	    acw, ace);
2796 
2797 	sc = pp->geom->softc;
2798 	if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
2799 		return (0);
2800 	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
2801 
2802 	dcr = pp->acr + acr;
2803 	dcw = pp->acw + acw;
2804 	dce = pp->ace + ace;
2805 
2806 	g_topology_unlock();
2807 	sx_xlock(&sc->sc_lock);
2808 	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 ||
2809 	    LIST_EMPTY(&sc->sc_disks)) {
2810 		if (acr > 0 || acw > 0 || ace > 0)
2811 			error = ENXIO;
2812 		goto end;
2813 	}
2814 	if (dcw == 0 && !sc->sc_idle)
2815 		g_mirror_idle(sc, dcw);
2816 	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) {
2817 		if (acr > 0 || acw > 0 || ace > 0) {
2818 			error = ENXIO;
2819 			goto end;
2820 		}
2821 		if (dcr == 0 && dcw == 0 && dce == 0) {
2822 			g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK,
2823 			    sc, NULL);
2824 		}
2825 	}
2826 end:
2827 	sx_xunlock(&sc->sc_lock);
2828 	g_topology_lock();
2829 	return (error);
2830 }
2831 
2832 static struct g_geom *
2833 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md)
2834 {
2835 	struct g_mirror_softc *sc;
2836 	struct g_geom *gp;
2837 	int error, timeout;
2838 
2839 	g_topology_assert();
2840 	G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name,
2841 	    md->md_mid);
2842 
2843 	/* At least one disk is required. */
2844 	if (md->md_all < 1)
2845 		return (NULL);
2846 	/*
2847 	 * Action geom.
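	 * This geom handles regular I/O and will expose the
	 * mirror/<name> provider once the device is running; the
	 * synchronization geom created below owns the consumers that
	 * are used for rebuilds.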
2848 */ 2849 gp = g_new_geomf(mp, "%s", md->md_name); 2850 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2851 gp->start = g_mirror_start; 2852 gp->orphan = g_mirror_orphan; 2853 gp->access = g_mirror_access; 2854 gp->dumpconf = g_mirror_dumpconf; 2855 2856 sc->sc_id = md->md_mid; 2857 sc->sc_slice = md->md_slice; 2858 sc->sc_balance = md->md_balance; 2859 sc->sc_mediasize = md->md_mediasize; 2860 sc->sc_sectorsize = md->md_sectorsize; 2861 sc->sc_ndisks = md->md_all; 2862 sc->sc_flags = md->md_mflags; 2863 sc->sc_bump_id = 0; 2864 sc->sc_idle = 1; 2865 sc->sc_last_write = time_uptime; 2866 sc->sc_writes = 0; 2867 sx_init(&sc->sc_lock, "gmirror:lock"); 2868 bioq_init(&sc->sc_queue); 2869 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2870 bioq_init(&sc->sc_regular_delayed); 2871 bioq_init(&sc->sc_inflight); 2872 bioq_init(&sc->sc_sync_delayed); 2873 LIST_INIT(&sc->sc_disks); 2874 TAILQ_INIT(&sc->sc_events); 2875 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2876 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2877 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2878 gp->softc = sc; 2879 sc->sc_geom = gp; 2880 sc->sc_provider = NULL; 2881 /* 2882 * Synchronization geom. 2883 */ 2884 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2885 gp->softc = sc; 2886 gp->orphan = g_mirror_orphan; 2887 sc->sc_sync.ds_geom = gp; 2888 sc->sc_sync.ds_ndisks = 0; 2889 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2890 "g_mirror %s", md->md_name); 2891 if (error != 0) { 2892 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2893 sc->sc_name); 2894 g_destroy_geom(sc->sc_sync.ds_geom); 2895 mtx_destroy(&sc->sc_events_mtx); 2896 mtx_destroy(&sc->sc_queue_mtx); 2897 sx_destroy(&sc->sc_lock); 2898 g_destroy_geom(sc->sc_geom); 2899 free(sc, M_MIRROR); 2900 return (NULL); 2901 } 2902 2903 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2904 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2905 2906 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2907 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2908 /* 2909 * Run timeout. 
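	 * If not all components arrive within g_mirror_timeout
	 * seconds, g_mirror_go() forces the device to start with the
	 * components that are present.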
 */
2911 	timeout = g_mirror_timeout * hz;
2912 	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
2913 	return (sc->sc_geom);
2914 }
2915 
2916 int
2917 g_mirror_destroy(struct g_mirror_softc *sc, int how)
2918 {
2919 	struct g_mirror_disk *disk;
2920 	struct g_provider *pp;
2921 
2922 	g_topology_assert_not();
2923 	if (sc == NULL)
2924 		return (ENXIO);
2925 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2926 
2927 	pp = sc->sc_provider;
2928 	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
2929 		switch (how) {
2930 		case G_MIRROR_DESTROY_SOFT:
2931 			G_MIRROR_DEBUG(1,
2932 			    "Device %s is still open (r%dw%de%d).", pp->name,
2933 			    pp->acr, pp->acw, pp->ace);
2934 			return (EBUSY);
2935 		case G_MIRROR_DESTROY_DELAYED:
2936 			G_MIRROR_DEBUG(1,
2937 			    "Device %s will be destroyed on last close.",
2938 			    pp->name);
2939 			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2940 				if (disk->d_state ==
2941 				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2942 					g_mirror_sync_stop(disk, 1);
2943 				}
2944 			}
2945 			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
2946 			return (EBUSY);
2947 		case G_MIRROR_DESTROY_HARD:
2948 			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
2949 			    "cannot be removed definitively.", pp->name);
2950 		}
2951 	}
2952 
2953 	g_topology_lock();
2954 	if (sc->sc_geom->softc == NULL) {
2955 		g_topology_unlock();
2956 		return (0);
2957 	}
2958 	sc->sc_geom->softc = NULL;
2959 	sc->sc_sync.ds_geom->softc = NULL;
2960 	g_topology_unlock();
2961 
2962 	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2963 	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
2964 	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
2965 	sx_xunlock(&sc->sc_lock);
2966 	mtx_lock(&sc->sc_queue_mtx);
2967 	wakeup(sc);
2968 	mtx_unlock(&sc->sc_queue_mtx);
2969 	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
2970 	while (sc->sc_worker != NULL)
2971 		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
2972 	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
2973 	sx_xlock(&sc->sc_lock);
2974 	g_mirror_destroy_device(sc);
2975 	free(sc, M_MIRROR);
2976 	return (0);
2977 }
2978 
2979 static void
2980 g_mirror_taste_orphan(struct g_consumer *cp)
2981 {
2982 
2983 	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2984 	    cp->provider->name));
2985 }
2986 
2987 static struct g_geom *
2988 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2989 {
2990 	struct g_mirror_metadata md;
2991 	struct g_mirror_softc *sc;
2992 	struct g_consumer *cp;
2993 	struct g_geom *gp;
2994 	int error;
2995 
2996 	g_topology_assert();
2997 	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
2998 	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
2999 
3000 	gp = g_new_geomf(mp, "mirror:taste");
3001 	/*
3002 	 * This orphan function should never be called.
3003 	 */
3004 	gp->orphan = g_mirror_taste_orphan;
3005 	cp = g_new_consumer(gp);
3006 	g_attach(cp, pp);
3007 	error = g_mirror_read_metadata(cp, &md);
3008 	g_detach(cp);
3009 	g_destroy_consumer(cp);
3010 	g_destroy_geom(gp);
3011 	if (error != 0)
3012 		return (NULL);
3013 	gp = NULL;
3014 
3015 	if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
3016 		return (NULL);
3017 	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3018 		return (NULL);
3019 	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3020 		G_MIRROR_DEBUG(0,
3021 		    "Device %s: provider %s marked as inactive, skipping.",
3022 		    md.md_name, pp->name);
3023 		return (NULL);
3024 	}
3025 	if (g_mirror_debug >= 2)
3026 		mirror_metadata_dump(&md);
3027 
3028 	/*
3029 	 * Let's check if the device already exists.
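	 * Components are matched by device name; the same name with a
	 * different mirror ID means a name collision with an already
	 * configured device, and the new component is rejected.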
3030 */ 3031 sc = NULL; 3032 LIST_FOREACH(gp, &mp->geom, geom) { 3033 sc = gp->softc; 3034 if (sc == NULL) 3035 continue; 3036 if (sc->sc_sync.ds_geom == gp) 3037 continue; 3038 if (strcmp(md.md_name, sc->sc_name) != 0) 3039 continue; 3040 if (md.md_mid != sc->sc_id) { 3041 G_MIRROR_DEBUG(0, "Device %s already configured.", 3042 sc->sc_name); 3043 return (NULL); 3044 } 3045 break; 3046 } 3047 if (gp == NULL) { 3048 gp = g_mirror_create(mp, &md); 3049 if (gp == NULL) { 3050 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3051 md.md_name); 3052 return (NULL); 3053 } 3054 sc = gp->softc; 3055 } 3056 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3057 g_topology_unlock(); 3058 sx_xlock(&sc->sc_lock); 3059 error = g_mirror_add_disk(sc, pp, &md); 3060 if (error != 0) { 3061 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3062 pp->name, gp->name, error); 3063 if (LIST_EMPTY(&sc->sc_disks)) { 3064 g_cancel_event(sc); 3065 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3066 g_topology_lock(); 3067 return (NULL); 3068 } 3069 gp = NULL; 3070 } 3071 sx_xunlock(&sc->sc_lock); 3072 g_topology_lock(); 3073 return (gp); 3074 } 3075 3076 static int 3077 g_mirror_destroy_geom(struct gctl_req *req __unused, 3078 struct g_class *mp __unused, struct g_geom *gp) 3079 { 3080 struct g_mirror_softc *sc; 3081 int error; 3082 3083 g_topology_unlock(); 3084 sc = gp->softc; 3085 sx_xlock(&sc->sc_lock); 3086 g_cancel_event(sc); 3087 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3088 if (error != 0) 3089 sx_xunlock(&sc->sc_lock); 3090 g_topology_lock(); 3091 return (error); 3092 } 3093 3094 static void 3095 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3096 struct g_consumer *cp, struct g_provider *pp) 3097 { 3098 struct g_mirror_softc *sc; 3099 3100 g_topology_assert(); 3101 3102 sc = gp->softc; 3103 if (sc == NULL) 3104 return; 3105 /* Skip synchronization geom. */ 3106 if (gp == sc->sc_sync.ds_geom) 3107 return; 3108 if (pp != NULL) { 3109 /* Nothing here. 
*/ 3110 } else if (cp != NULL) { 3111 struct g_mirror_disk *disk; 3112 3113 disk = cp->private; 3114 if (disk == NULL) 3115 return; 3116 g_topology_unlock(); 3117 sx_xlock(&sc->sc_lock); 3118 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3119 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3120 sbuf_printf(sb, "%s<Synchronized>", indent); 3121 if (disk->d_sync.ds_offset == 0) 3122 sbuf_printf(sb, "0%%"); 3123 else { 3124 sbuf_printf(sb, "%u%%", 3125 (u_int)((disk->d_sync.ds_offset * 100) / 3126 sc->sc_provider->mediasize)); 3127 } 3128 sbuf_printf(sb, "</Synchronized>\n"); 3129 } 3130 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3131 disk->d_sync.ds_syncid); 3132 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3133 disk->d_genid); 3134 sbuf_printf(sb, "%s<Flags>", indent); 3135 if (disk->d_flags == 0) 3136 sbuf_printf(sb, "NONE"); 3137 else { 3138 int first = 1; 3139 3140 #define ADD_FLAG(flag, name) do { \ 3141 if ((disk->d_flags & (flag)) != 0) { \ 3142 if (!first) \ 3143 sbuf_printf(sb, ", "); \ 3144 else \ 3145 first = 0; \ 3146 sbuf_printf(sb, name); \ 3147 } \ 3148 } while (0) 3149 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3150 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3151 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3152 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3153 "SYNCHRONIZING"); 3154 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3155 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3156 #undef ADD_FLAG 3157 } 3158 sbuf_printf(sb, "</Flags>\n"); 3159 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3160 disk->d_priority); 3161 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3162 g_mirror_disk_state2str(disk->d_state)); 3163 sx_xunlock(&sc->sc_lock); 3164 g_topology_lock(); 3165 } else { 3166 g_topology_unlock(); 3167 sx_xlock(&sc->sc_lock); 3168 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3169 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3170 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3171 sbuf_printf(sb, "%s<Flags>", indent); 3172 if (sc->sc_flags == 0) 3173 sbuf_printf(sb, "NONE"); 3174 else { 3175 int first = 1; 3176 3177 #define ADD_FLAG(flag, name) do { \ 3178 if ((sc->sc_flags & (flag)) != 0) { \ 3179 if (!first) \ 3180 sbuf_printf(sb, ", "); \ 3181 else \ 3182 first = 0; \ 3183 sbuf_printf(sb, name); \ 3184 } \ 3185 } while (0) 3186 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3187 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3188 #undef ADD_FLAG 3189 } 3190 sbuf_printf(sb, "</Flags>\n"); 3191 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3192 (u_int)sc->sc_slice); 3193 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3194 balance_name(sc->sc_balance)); 3195 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3196 sc->sc_ndisks); 3197 sbuf_printf(sb, "%s<State>", indent); 3198 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3199 sbuf_printf(sb, "%s", "STARTING"); 3200 else if (sc->sc_ndisks == 3201 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3202 sbuf_printf(sb, "%s", "COMPLETE"); 3203 else 3204 sbuf_printf(sb, "%s", "DEGRADED"); 3205 sbuf_printf(sb, "</State>\n"); 3206 sx_xunlock(&sc->sc_lock); 3207 g_topology_lock(); 3208 } 3209 } 3210 3211 static void 3212 g_mirror_shutdown_pre_sync(void *arg, int howto) 3213 { 3214 struct g_class *mp; 3215 struct g_geom *gp, *gp2; 3216 struct g_mirror_softc *sc; 3217 int error; 3218 3219 mp = arg; 3220 DROP_GIANT(); 3221 g_topology_lock(); 3222 
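	/*
	 * Ask every mirror to destroy itself on last close, so that
	 * components are closed cleanly before the system syncs its
	 * disks and halts.
	 */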
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3223 if ((sc = gp->softc) == NULL) 3224 continue; 3225 /* Skip synchronization geom. */ 3226 if (gp == sc->sc_sync.ds_geom) 3227 continue; 3228 g_topology_unlock(); 3229 sx_xlock(&sc->sc_lock); 3230 g_cancel_event(sc); 3231 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3232 if (error != 0) 3233 sx_xunlock(&sc->sc_lock); 3234 g_topology_lock(); 3235 } 3236 g_topology_unlock(); 3237 PICKUP_GIANT(); 3238 } 3239 3240 static void 3241 g_mirror_init(struct g_class *mp) 3242 { 3243 3244 g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3245 g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3246 if (g_mirror_pre_sync == NULL) 3247 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3248 } 3249 3250 static void 3251 g_mirror_fini(struct g_class *mp) 3252 { 3253 3254 if (g_mirror_pre_sync != NULL) 3255 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync); 3256 } 3257 3258 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3259