/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>


static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.mirror.disconnect_on_failure",
    &g_mirror_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs);
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");
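
/*
 * For reference: the knobs above are standard loader tunables / sysctls,
 * e.g. (illustrative) kern.geom.mirror.debug="2" in /boot/loader.conf or
 * "sysctl kern.geom.mirror.idletime=10" at runtime.  sync_requests is
 * CTLFLAG_RDTUN, so it can only be set as a boot-time tunable.
 */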
"%s: Woken up %p.", __func__, (ident)); \ 79 } while (0) 80 81 static eventhandler_tag g_mirror_pre_sync = NULL; 82 83 static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp, 84 struct g_geom *gp); 85 static g_taste_t g_mirror_taste; 86 static void g_mirror_init(struct g_class *mp); 87 static void g_mirror_fini(struct g_class *mp); 88 89 struct g_class g_mirror_class = { 90 .name = G_MIRROR_CLASS_NAME, 91 .version = G_VERSION, 92 .ctlreq = g_mirror_config, 93 .taste = g_mirror_taste, 94 .destroy_geom = g_mirror_destroy_geom, 95 .init = g_mirror_init, 96 .fini = g_mirror_fini 97 }; 98 99 100 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 101 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 102 static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force); 103 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 104 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 105 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 106 static void g_mirror_register_request(struct bio *bp); 107 static void g_mirror_sync_release(struct g_mirror_softc *sc); 108 109 110 static const char * 111 g_mirror_disk_state2str(int state) 112 { 113 114 switch (state) { 115 case G_MIRROR_DISK_STATE_NONE: 116 return ("NONE"); 117 case G_MIRROR_DISK_STATE_NEW: 118 return ("NEW"); 119 case G_MIRROR_DISK_STATE_ACTIVE: 120 return ("ACTIVE"); 121 case G_MIRROR_DISK_STATE_STALE: 122 return ("STALE"); 123 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 124 return ("SYNCHRONIZING"); 125 case G_MIRROR_DISK_STATE_DISCONNECTED: 126 return ("DISCONNECTED"); 127 case G_MIRROR_DISK_STATE_DESTROY: 128 return ("DESTROY"); 129 default: 130 return ("INVALID"); 131 } 132 } 133 134 static const char * 135 g_mirror_device_state2str(int state) 136 { 137 138 switch (state) { 139 case G_MIRROR_DEVICE_STATE_STARTING: 140 return ("STARTING"); 141 case G_MIRROR_DEVICE_STATE_RUNNING: 142 return ("RUNNING"); 143 default: 144 return ("INVALID"); 145 } 146 } 147 148 static const char * 149 g_mirror_get_diskname(struct g_mirror_disk *disk) 150 { 151 152 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 153 return ("[unknown]"); 154 return (disk->d_name); 155 } 156 157 /* 158 * --- Events handling functions --- 159 * Events in geom_mirror are used to maintain disks and device status 160 * from one thread to simplify locking. 
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event has been sent (inside g_access()),
		 * we can send an event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive a retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * that were opened for writing: the consumer is detached and
		 * destroyed only after the retaste event has been sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocating memory, creating a consumer,
 * attaching it to the provider and opening access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_delay.sec = 0;
	disk->d_delay.frac = 0;
	binuptime(&disk->d_last_used);
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL)
		mirror_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}
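
/*
 * On-disk layout note, derived from g_mirror_write_metadata() above: the
 * metadata block occupies the provider's last sector, i.e. it is written
 * at offset mediasize - sectorsize.
 */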
void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}
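
/*
 * For reference, the two identifiers bumped above serve different purposes.
 * A syncid bump is requested (G_MIRROR_BUMP_SYNCID) when a component goes
 * away and is applied on the first subsequent write, so a disk that missed
 * writes can later be detected and resynchronized.  A genid bump is
 * requested (G_MIRROR_BUMP_GENID) on unrecoverable I/O errors before a
 * component is disconnected, so a component left with a stale genid can be
 * recognized and not trusted again automatically.
 */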
static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static __inline int
bintime_cmp(struct bintime *bt1, struct bintime *bt2)
{

	if (bt1->sec < bt2->sec)
		return (-1);
	else if (bt1->sec > bt2->sec)
		return (1);
	if (bt1->frac < bt2->frac)
		return (-1);
	else if (bt1->frac > bt2->frac)
		return (1);
	return (0);
}

static void
g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp)
{

	if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD)
		return;
	binuptime(&disk->d_delay);
	bintime_sub(&disk->d_delay, &bp->bio_t0);
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	} else {
		g_mirror_update_delay(disk, bp);
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
			mtx_unlock(&sc->sc_queue_mtx);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}
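
/*
 * For context: the BIO_GETATTR("GEOM::kerneldump") request handled by the
 * function below is issued when a mirror provider is configured as a dump
 * device, e.g. (illustrative) with "dumpon /dev/mirror/<name>".
 */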
static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this component
	 * will be used for reading with the 'prefer' balance algorithm.
	 * If the component with the highest priority is currently
	 * disconnected, we will not be able to read the dump after the reboot
	 * if that component is connected and synchronized later.  Can we do
	 * something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}
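
/*
 * Both collision checks above use the usual half-open interval overlap
 * test: [rstart, rend) and [sstart, send) intersect iff
 * rend > sstart && rstart < send.
 */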
/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Release delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider, and then a WRITE request (with the read
 * data) to the provider being synchronized.  When the WRITE is finished, a
 * new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset. */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}
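
/*
 * The remaining READ strategies: g_mirror_request_load() below picks the
 * component with the smallest measured delay (see g_mirror_update_delay())
 * and g_mirror_request_split() stripes a large request across all active
 * components.  The strategy is selected by sc->sc_balance in
 * g_mirror_register_request().
 */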
static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	struct bintime curtime;

	binuptime(&curtime);
	/*
	 * Find a disk with the smallest load.
	 */
	disk = NULL;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		/* If disk wasn't used for more than 2 sec, use it. */
		if (curtime.sec - dp->d_last_used.sec >= 2) {
			disk = dp;
			break;
		}
		if (disk == NULL ||
		    bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) {
			disk = dp;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	binuptime(&disk->d_last_used);
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				for (cbp = bioq_first(&queue); cbp != NULL;
				    cbp = bioq_first(&queue)) {
					bioq_remove(&queue, cbp);
					g_destroy_bio(cbp);
				}
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		for (cbp = bioq_first(&queue); cbp != NULL;
		    cbp = bioq_first(&queue)) {
			bioq_remove(&queue, cbp);
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * that new synchronization requests don't collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check if we can mark the array as CLEAN and, if we can't,
		 * how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx-device-lock and without
			 * the mtx-queue-lock.  Maybe I should just stop using
			 * a dedicated mutex for events synchronization and
			 * stick with the queue lock?
			 * The thread will hang here until the next I/O request
			 * or the next event arrives.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;
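
	/*
	 * Note: cp->index is used throughout this file as a count of
	 * in-flight requests issued to a consumer: it is incremented
	 * before each g_io_request() and decremented on completion, and
	 * g_mirror_is_busy() refuses to destroy a consumer while it is
	 * non-zero.
	 */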
	/*
	 * Allocate memory for synchronization bios and initialize them.
	 */
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}
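
/*
 * For context (illustrative, userland): the synchronization started above
 * is what runs after a new or stale component is added to a mirror, e.g.
 * with "gmirror insert <name> <provider>".
 */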
 */
2025 g_topology_lock();
2026 g_mirror_kill_consumer(sc, cp);
2027 g_topology_unlock();
2028 sx_xlock(&sc->sc_lock);
2029 }
2030
2031 static void
2032 g_mirror_launch_provider(struct g_mirror_softc *sc)
2033 {
2034 struct g_mirror_disk *disk;
2035 struct g_provider *pp;
2036
2037 sx_assert(&sc->sc_lock, SX_LOCKED);
2038
2039 g_topology_lock();
2040 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
2041 pp->mediasize = sc->sc_mediasize;
2042 pp->sectorsize = sc->sc_sectorsize;
2043 sc->sc_provider = pp;
2044 g_error_provider(pp, 0);
2045 g_topology_unlock();
2046 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
2047 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
2048 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2049 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2050 g_mirror_sync_start(disk);
2051 }
2052 }
2053
2054 static void
2055 g_mirror_destroy_provider(struct g_mirror_softc *sc)
2056 {
2057 struct g_mirror_disk *disk;
2058 struct bio *bp;
2059
2060 g_topology_assert_not();
2061 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
2062 sc->sc_name));
2063
2064 g_topology_lock();
2065 g_error_provider(sc->sc_provider, ENXIO);
2066 mtx_lock(&sc->sc_queue_mtx);
2067 while ((bp = bioq_first(&sc->sc_queue)) != NULL) {
2068 bioq_remove(&sc->sc_queue, bp);
2069 g_io_deliver(bp, ENXIO);
2070 }
2071 mtx_unlock(&sc->sc_queue_mtx);
2072 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name,
2073 sc->sc_provider->name);
2074 sc->sc_provider->flags |= G_PF_WITHER;
2075 g_orphan_provider(sc->sc_provider, ENXIO);
2076 g_topology_unlock();
2077 sc->sc_provider = NULL;
2078 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2079 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
2080 g_mirror_sync_stop(disk, 1);
2081 }
2082 }
2083
2084 static void
2085 g_mirror_go(void *arg)
2086 {
2087 struct g_mirror_softc *sc;
2088
2089 sc = arg;
2090 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name);
2091 g_mirror_event_send(sc, 0,
2092 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE);
2093 }
2094
2095 static u_int
2096 g_mirror_determine_state(struct g_mirror_disk *disk)
2097 {
2098 struct g_mirror_softc *sc;
2099 u_int state;
2100
2101 sc = disk->d_softc;
2102 if (sc->sc_syncid == disk->d_sync.ds_syncid) {
2103 if ((disk->d_flags &
2104 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2105 /* Disk does not need synchronization. */
2106 state = G_MIRROR_DISK_STATE_ACTIVE;
2107 } else {
2108 if ((sc->sc_flags &
2109 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2110 (disk->d_flags &
2111 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2112 /*
2113 * We can start synchronization from
2114 * the stored offset.
2115 */
2116 state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2117 } else {
2118 state = G_MIRROR_DISK_STATE_STALE;
2119 }
2120 }
2121 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) {
2122 /*
2123 * Reset all synchronization data for this disk,
2124 * because even if it was synchronized, it was
2125 * synchronized against disks with a different syncid.
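* The syncid counts synchronization epochs: a smaller stored syncid
* means the component missed writes, so any previously recorded
* progress is meaningless and the rebuild must restart from offset 0.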
2126 */
2127 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
2128 disk->d_sync.ds_offset = 0;
2129 disk->d_sync.ds_offset_done = 0;
2130 disk->d_sync.ds_syncid = sc->sc_syncid;
2131 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 ||
2132 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) {
2133 state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
2134 } else {
2135 state = G_MIRROR_DISK_STATE_STALE;
2136 }
2137 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
2138 /*
2139 * Not good, NOT GOOD!
2140 * It means that the mirror was started on stale disks
2141 * and a fresher disk has just arrived.
2142 * If there were writes, the mirror is broken, sorry.
2143 * I think the best choice here is not to touch
2144 * this disk and to inform the user loudly.
2145 */
2146 G_MIRROR_DEBUG(0, "Device %s was started before the freshest "
2147 "disk (%s) arrived! It will not be connected to the "
2148 "running device.", sc->sc_name,
2149 g_mirror_get_diskname(disk));
2150 g_mirror_destroy_disk(disk);
2151 state = G_MIRROR_DISK_STATE_NONE;
2152 /* Return immediately, because the disk was destroyed. */
2153 return (state);
2154 }
2155 G_MIRROR_DEBUG(3, "State for %s disk: %s.",
2156 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
2157 return (state);
2158 }
2159
2160 /*
2161 * Update device state.
2162 */
2163 static void
2164 g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
2165 {
2166 struct g_mirror_disk *disk;
2167 u_int state;
2168
2169 sx_assert(&sc->sc_lock, SX_XLOCKED);
2170
2171 switch (sc->sc_state) {
2172 case G_MIRROR_DEVICE_STATE_STARTING:
2173 {
2174 struct g_mirror_disk *pdisk, *tdisk;
2175 u_int dirty, ndisks, genid, syncid;
2176
2177 KASSERT(sc->sc_provider == NULL,
2178 ("Non-NULL provider in STARTING state (%s).", sc->sc_name));
2179 /*
2180 * Are we ready? We are, if all disks are connected or
2181 * if we have any disks and 'force' is true.
2182 */
2183 ndisks = g_mirror_ndisks(sc, -1);
2184 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) {
2185 ;
2186 } else if (ndisks == 0) {
2187 /*
2188 * Disks went down in the starting phase, so destroy
2189 * the device.
2190 */
2191 callout_drain(&sc->sc_callout);
2192 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2193 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
2194 sc->sc_rootmount);
2195 root_mount_rel(sc->sc_rootmount);
2196 sc->sc_rootmount = NULL;
2197 return;
2198 } else {
2199 return;
2200 }
2201
2202 /*
2203 * Activate all disks with the biggest syncid.
2204 */
2205 if (force) {
2206 /*
2207 * If 'force' is true, we have been called due to
2208 * timeout, so don't bother canceling the timeout.
2209 */
2210 ndisks = 0;
2211 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2212 if ((disk->d_flags &
2213 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
2214 ndisks++;
2215 }
2216 }
2217 if (ndisks == 0) {
2218 /* No valid disks found, destroy the device. */
2219 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2220 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2221 __LINE__, sc->sc_rootmount);
2222 root_mount_rel(sc->sc_rootmount);
2223 sc->sc_rootmount = NULL;
2224 return;
2225 }
2226 } else {
2227 /* Cancel the timeout. */
2228 callout_drain(&sc->sc_callout);
2229 }
2230
2231 /*
2232 * Find the biggest genid.
2233 */
2234 genid = 0;
2235 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2236 if (disk->d_genid > genid)
2237 genid = disk->d_genid;
2238 }
2239 sc->sc_genid = genid;
2240 /*
2241 * Remove all disks without the biggest genid.
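* The genid is a generation counter bumped when a component is
* disconnected on error; a component carrying an older genid missed
* such an event and its contents can no longer be trusted.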
2242 */ 2243 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2244 if (disk->d_genid < genid) { 2245 G_MIRROR_DEBUG(0, 2246 "Component %s (device %s) broken, skipping.", 2247 g_mirror_get_diskname(disk), sc->sc_name); 2248 g_mirror_destroy_disk(disk); 2249 } 2250 } 2251 2252 /* 2253 * Find the biggest syncid. 2254 */ 2255 syncid = 0; 2256 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2257 if (disk->d_sync.ds_syncid > syncid) 2258 syncid = disk->d_sync.ds_syncid; 2259 } 2260 2261 /* 2262 * Here we need to look for dirty disks and if all disks 2263 * with the biggest syncid are dirty, we have to choose 2264 * one with the biggest priority and rebuild the rest. 2265 */ 2266 /* 2267 * Find the number of dirty disks with the biggest syncid. 2268 * Find the number of disks with the biggest syncid. 2269 * While here, find a disk with the biggest priority. 2270 */ 2271 dirty = ndisks = 0; 2272 pdisk = NULL; 2273 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2274 if (disk->d_sync.ds_syncid != syncid) 2275 continue; 2276 if ((disk->d_flags & 2277 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2278 continue; 2279 } 2280 ndisks++; 2281 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2282 dirty++; 2283 if (pdisk == NULL || 2284 pdisk->d_priority < disk->d_priority) { 2285 pdisk = disk; 2286 } 2287 } 2288 } 2289 if (dirty == 0) { 2290 /* No dirty disks at all, great. */ 2291 } else if (dirty == ndisks) { 2292 /* 2293 * Force synchronization for all dirty disks except one 2294 * with the biggest priority. 2295 */ 2296 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2297 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2298 "master disk for synchronization.", 2299 g_mirror_get_diskname(pdisk), sc->sc_name); 2300 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2301 if (disk->d_sync.ds_syncid != syncid) 2302 continue; 2303 if ((disk->d_flags & 2304 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2305 continue; 2306 } 2307 KASSERT((disk->d_flags & 2308 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2309 ("Disk %s isn't marked as dirty.", 2310 g_mirror_get_diskname(disk))); 2311 /* Skip the disk with the biggest priority. */ 2312 if (disk == pdisk) 2313 continue; 2314 disk->d_sync.ds_syncid = 0; 2315 } 2316 } else if (dirty < ndisks) { 2317 /* 2318 * Force synchronization for all dirty disks. 2319 * We have some non-dirty disks. 2320 */ 2321 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2322 if (disk->d_sync.ds_syncid != syncid) 2323 continue; 2324 if ((disk->d_flags & 2325 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2326 continue; 2327 } 2328 if ((disk->d_flags & 2329 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2330 continue; 2331 } 2332 disk->d_sync.ds_syncid = 0; 2333 } 2334 } 2335 2336 /* Reset hint. */ 2337 sc->sc_hint = NULL; 2338 sc->sc_syncid = syncid; 2339 if (force) { 2340 /* Remember to bump syncid on first write. 
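* The device is being force-started with components missing; bumping
* the syncid on the first write ensures that the absent components
* will be recognized as stale if they show up later.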
 */
2341 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2342 }
2343 state = G_MIRROR_DEVICE_STATE_RUNNING;
2344 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2345 sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2346 g_mirror_device_state2str(state));
2347 sc->sc_state = state;
2348 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2349 state = g_mirror_determine_state(disk);
2350 g_mirror_event_send(disk, state,
2351 G_MIRROR_EVENT_DONTWAIT);
2352 if (state == G_MIRROR_DISK_STATE_STALE)
2353 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2354 }
2355 break;
2356 }
2357 case G_MIRROR_DEVICE_STATE_RUNNING:
2358 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2359 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2360 /*
2361 * No active disks or no disks at all,
2362 * so destroy the device.
2363 */
2364 if (sc->sc_provider != NULL)
2365 g_mirror_destroy_provider(sc);
2366 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2367 break;
2368 } else if (g_mirror_ndisks(sc,
2369 G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2370 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2371 /*
2372 * We have active disks, so launch the provider if it
2373 * doesn't exist.
2374 */
2375 if (sc->sc_provider == NULL)
2376 g_mirror_launch_provider(sc);
2377 if (sc->sc_rootmount != NULL) {
2378 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2379 __LINE__, sc->sc_rootmount);
2380 root_mount_rel(sc->sc_rootmount);
2381 sc->sc_rootmount = NULL;
2382 }
2383 }
2384 /*
2385 * The genid should be bumped immediately, so do it here.
2386 */
2387 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2388 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2389 g_mirror_bump_genid(sc);
2390 }
2391 break;
2392 default:
2393 KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2394 sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2395 break;
2396 }
2397 }
2398
2399 /*
2400 * Update disk state and device state if needed.
2401 */
2402 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \
2403 "Disk %s state changed from %s to %s (device %s).", \
2404 g_mirror_get_diskname(disk), \
2405 g_mirror_disk_state2str(disk->d_state), \
2406 g_mirror_disk_state2str(state), sc->sc_name)
2407 static int
2408 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2409 {
2410 struct g_mirror_softc *sc;
2411
2412 sc = disk->d_softc;
2413 sx_assert(&sc->sc_lock, SX_XLOCKED);
2414
2415 again:
2416 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2417 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2418 g_mirror_disk_state2str(state));
2419 switch (state) {
2420 case G_MIRROR_DISK_STATE_NEW:
2421 /*
2422 * Possible scenarios:
2423 * 1. New disk arrives.
2424 */
2425 /* Previous state should be NONE.
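* (The list insertion below keeps sc_disks ordered by descending
* d_priority, so higher-priority components sit at the head.)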
*/ 2426 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2427 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2428 g_mirror_disk_state2str(disk->d_state))); 2429 DISK_STATE_CHANGED(); 2430 2431 disk->d_state = state; 2432 if (LIST_EMPTY(&sc->sc_disks)) 2433 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2434 else { 2435 struct g_mirror_disk *dp; 2436 2437 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2438 if (disk->d_priority >= dp->d_priority) { 2439 LIST_INSERT_BEFORE(dp, disk, d_next); 2440 dp = NULL; 2441 break; 2442 } 2443 if (LIST_NEXT(dp, d_next) == NULL) 2444 break; 2445 } 2446 if (dp != NULL) 2447 LIST_INSERT_AFTER(dp, disk, d_next); 2448 } 2449 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2450 sc->sc_name, g_mirror_get_diskname(disk)); 2451 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2452 break; 2453 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2454 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2455 g_mirror_device_state2str(sc->sc_state), 2456 g_mirror_get_diskname(disk), 2457 g_mirror_disk_state2str(disk->d_state))); 2458 state = g_mirror_determine_state(disk); 2459 if (state != G_MIRROR_DISK_STATE_NONE) 2460 goto again; 2461 break; 2462 case G_MIRROR_DISK_STATE_ACTIVE: 2463 /* 2464 * Possible scenarios: 2465 * 1. New disk does not need synchronization. 2466 * 2. Synchronization process finished successfully. 2467 */ 2468 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2469 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2470 g_mirror_device_state2str(sc->sc_state), 2471 g_mirror_get_diskname(disk), 2472 g_mirror_disk_state2str(disk->d_state))); 2473 /* Previous state should be NEW or SYNCHRONIZING. */ 2474 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2475 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2476 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2477 g_mirror_disk_state2str(disk->d_state))); 2478 DISK_STATE_CHANGED(); 2479 2480 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2481 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2482 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2483 g_mirror_sync_stop(disk, 0); 2484 } 2485 disk->d_state = state; 2486 disk->d_sync.ds_offset = 0; 2487 disk->d_sync.ds_offset_done = 0; 2488 g_mirror_update_idle(sc, disk); 2489 g_mirror_update_metadata(disk); 2490 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2491 sc->sc_name, g_mirror_get_diskname(disk)); 2492 break; 2493 case G_MIRROR_DISK_STATE_STALE: 2494 /* 2495 * Possible scenarios: 2496 * 1. Stale disk was connected. 2497 */ 2498 /* Previous state should be NEW. */ 2499 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2500 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2501 g_mirror_disk_state2str(disk->d_state))); 2502 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2503 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2504 g_mirror_device_state2str(sc->sc_state), 2505 g_mirror_get_diskname(disk), 2506 g_mirror_disk_state2str(disk->d_state))); 2507 /* 2508 * STALE state is only possible if device is marked 2509 * NOAUTOSYNC. 
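* With automatic synchronization disabled, an out-of-date component is
* kept connected but unused until the operator forces a rebuild
* (G_MIRROR_DISK_FLAG_FORCE_SYNC).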
2510 */
2511 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2512 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2513 g_mirror_device_state2str(sc->sc_state),
2514 g_mirror_get_diskname(disk),
2515 g_mirror_disk_state2str(disk->d_state)));
2516 DISK_STATE_CHANGED();
2517
2518 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2519 disk->d_state = state;
2520 g_mirror_update_metadata(disk);
2521 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2522 sc->sc_name, g_mirror_get_diskname(disk));
2523 break;
2524 case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2525 /*
2526 * Possible scenarios:
2527 * 1. A disk which needs synchronization was connected.
2528 */
2529 /* Previous state should be NEW. */
2530 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2531 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2532 g_mirror_disk_state2str(disk->d_state)));
2533 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2534 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2535 g_mirror_device_state2str(sc->sc_state),
2536 g_mirror_get_diskname(disk),
2537 g_mirror_disk_state2str(disk->d_state)));
2538 DISK_STATE_CHANGED();
2539
2540 if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2541 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2542 disk->d_state = state;
2543 if (sc->sc_provider != NULL) {
2544 g_mirror_sync_start(disk);
2545 g_mirror_update_metadata(disk);
2546 }
2547 break;
2548 case G_MIRROR_DISK_STATE_DISCONNECTED:
2549 /*
2550 * Possible scenarios:
2551 * 1. Device wasn't running yet, but a disk disappeared.
2552 * 2. Disk was active and disappeared.
2553 * 3. Disk disappeared during the synchronization process.
2554 */
2555 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2556 /*
2557 * Previous state should be ACTIVE, STALE or
2558 * SYNCHRONIZING.
2559 */
2560 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2561 disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2562 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2563 ("Wrong disk state (%s, %s).",
2564 g_mirror_get_diskname(disk),
2565 g_mirror_disk_state2str(disk->d_state)));
2566 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2567 /* Previous state should be NEW. */
2568 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2569 ("Wrong disk state (%s, %s).",
2570 g_mirror_get_diskname(disk),
2571 g_mirror_disk_state2str(disk->d_state)));
2572 /*
2573 * Reset bumping the syncid if the disk disappeared in
2574 * the STARTING state.
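* A bump scheduled during assembly only serves to mark late components
* stale; once the component is gone there is nothing to invalidate.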
2575 */
2576 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
2577 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
2578 #ifdef INVARIANTS
2579 } else {
2580 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
2581 sc->sc_name,
2582 g_mirror_device_state2str(sc->sc_state),
2583 g_mirror_get_diskname(disk),
2584 g_mirror_disk_state2str(disk->d_state)));
2585 #endif
2586 }
2587 DISK_STATE_CHANGED();
2588 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
2589 sc->sc_name, g_mirror_get_diskname(disk));
2590
2591 g_mirror_destroy_disk(disk);
2592 break;
2593 case G_MIRROR_DISK_STATE_DESTROY:
2594 {
2595 int error;
2596
2597 error = g_mirror_clear_metadata(disk);
2598 if (error != 0)
2599 return (error);
2600 DISK_STATE_CHANGED();
2601 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
2602 sc->sc_name, g_mirror_get_diskname(disk));
2603
2604 g_mirror_destroy_disk(disk);
2605 sc->sc_ndisks--;
2606 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2607 g_mirror_update_metadata(disk);
2608 }
2609 break;
2610 }
2611 default:
2612 KASSERT(1 == 0, ("Unknown state (%u).", state));
2613 break;
2614 }
2615 return (0);
2616 }
2617 #undef DISK_STATE_CHANGED
2618
2619 int
2620 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
2621 {
2622 struct g_provider *pp;
2623 u_char *buf;
2624 int error;
2625
2626 g_topology_assert();
2627
2628 error = g_access(cp, 1, 0, 0);
2629 if (error != 0)
2630 return (error);
2631 pp = cp->provider;
2632 g_topology_unlock();
2633 /* Metadata is stored in the last sector. */
2634 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
2635 &error);
2636 g_topology_lock();
2637 g_access(cp, -1, 0, 0);
2638 if (buf == NULL) {
2639 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
2640 cp->provider->name, error);
2641 return (error);
2642 }
2643
2644 /* Decode metadata.
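* The magic and version are verified before the decode status so that a
* foreign sector or metadata from a newer module yields the specific
* diagnostic rather than a generic MD5 mismatch.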
*/ 2645 error = mirror_metadata_decode(buf, md); 2646 g_free(buf); 2647 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2648 return (EINVAL); 2649 if (md->md_version > G_MIRROR_VERSION) { 2650 G_MIRROR_DEBUG(0, 2651 "Kernel module is too old to handle metadata from %s.", 2652 cp->provider->name); 2653 return (EINVAL); 2654 } 2655 if (error != 0) { 2656 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2657 cp->provider->name); 2658 return (error); 2659 } 2660 2661 return (0); 2662 } 2663 2664 static int 2665 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2666 struct g_mirror_metadata *md) 2667 { 2668 2669 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2670 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2671 pp->name, md->md_did); 2672 return (EEXIST); 2673 } 2674 if (md->md_all != sc->sc_ndisks) { 2675 G_MIRROR_DEBUG(1, 2676 "Invalid '%s' field on disk %s (device %s), skipping.", 2677 "md_all", pp->name, sc->sc_name); 2678 return (EINVAL); 2679 } 2680 if (md->md_slice != sc->sc_slice) { 2681 G_MIRROR_DEBUG(1, 2682 "Invalid '%s' field on disk %s (device %s), skipping.", 2683 "md_slice", pp->name, sc->sc_name); 2684 return (EINVAL); 2685 } 2686 if (md->md_balance != sc->sc_balance) { 2687 G_MIRROR_DEBUG(1, 2688 "Invalid '%s' field on disk %s (device %s), skipping.", 2689 "md_balance", pp->name, sc->sc_name); 2690 return (EINVAL); 2691 } 2692 if (md->md_mediasize != sc->sc_mediasize) { 2693 G_MIRROR_DEBUG(1, 2694 "Invalid '%s' field on disk %s (device %s), skipping.", 2695 "md_mediasize", pp->name, sc->sc_name); 2696 return (EINVAL); 2697 } 2698 if (sc->sc_mediasize > pp->mediasize) { 2699 G_MIRROR_DEBUG(1, 2700 "Invalid size of disk %s (device %s), skipping.", pp->name, 2701 sc->sc_name); 2702 return (EINVAL); 2703 } 2704 if (md->md_sectorsize != sc->sc_sectorsize) { 2705 G_MIRROR_DEBUG(1, 2706 "Invalid '%s' field on disk %s (device %s), skipping.", 2707 "md_sectorsize", pp->name, sc->sc_name); 2708 return (EINVAL); 2709 } 2710 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2711 G_MIRROR_DEBUG(1, 2712 "Invalid sector size of disk %s (device %s), skipping.", 2713 pp->name, sc->sc_name); 2714 return (EINVAL); 2715 } 2716 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2717 G_MIRROR_DEBUG(1, 2718 "Invalid device flags on disk %s (device %s), skipping.", 2719 pp->name, sc->sc_name); 2720 return (EINVAL); 2721 } 2722 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2723 G_MIRROR_DEBUG(1, 2724 "Invalid disk flags on disk %s (device %s), skipping.", 2725 pp->name, sc->sc_name); 2726 return (EINVAL); 2727 } 2728 return (0); 2729 } 2730 2731 int 2732 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2733 struct g_mirror_metadata *md) 2734 { 2735 struct g_mirror_disk *disk; 2736 int error; 2737 2738 g_topology_assert_not(); 2739 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2740 2741 error = g_mirror_check_metadata(sc, pp, md); 2742 if (error != 0) 2743 return (error); 2744 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2745 md->md_genid < sc->sc_genid) { 2746 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2747 pp->name, sc->sc_name); 2748 return (EINVAL); 2749 } 2750 disk = g_mirror_init_disk(sc, pp, md, &error); 2751 if (disk == NULL) 2752 return (error); 2753 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2754 G_MIRROR_EVENT_WAIT); 2755 if (error != 0) 2756 return (error); 2757 if (md->md_version < G_MIRROR_VERSION) { 2758 G_MIRROR_DEBUG(0, "Upgrading metadata on %s 
(v%d->v%d).", 2759 pp->name, md->md_version, G_MIRROR_VERSION); 2760 g_mirror_update_metadata(disk); 2761 } 2762 return (0); 2763 } 2764 2765 static void 2766 g_mirror_destroy_delayed(void *arg, int flag) 2767 { 2768 struct g_mirror_softc *sc; 2769 int error; 2770 2771 if (flag == EV_CANCEL) { 2772 G_MIRROR_DEBUG(1, "Destroying canceled."); 2773 return; 2774 } 2775 sc = arg; 2776 g_topology_unlock(); 2777 sx_xlock(&sc->sc_lock); 2778 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2779 ("DESTROY flag set on %s.", sc->sc_name)); 2780 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2781 ("DESTROYING flag not set on %s.", sc->sc_name)); 2782 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2783 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2784 if (error != 0) { 2785 G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 2786 sx_xunlock(&sc->sc_lock); 2787 } 2788 g_topology_lock(); 2789 } 2790 2791 static int 2792 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2793 { 2794 struct g_mirror_softc *sc; 2795 int dcr, dcw, dce, error = 0; 2796 2797 g_topology_assert(); 2798 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2799 acw, ace); 2800 2801 sc = pp->geom->softc; 2802 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2803 return (0); 2804 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2805 2806 dcr = pp->acr + acr; 2807 dcw = pp->acw + acw; 2808 dce = pp->ace + ace; 2809 2810 g_topology_unlock(); 2811 sx_xlock(&sc->sc_lock); 2812 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2813 LIST_EMPTY(&sc->sc_disks)) { 2814 if (acr > 0 || acw > 0 || ace > 0) 2815 error = ENXIO; 2816 goto end; 2817 } 2818 if (dcw == 0 && !sc->sc_idle) 2819 g_mirror_idle(sc, dcw); 2820 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2821 if (acr > 0 || acw > 0 || ace > 0) { 2822 error = ENXIO; 2823 goto end; 2824 } 2825 if (dcr == 0 && dcw == 0 && dce == 0) { 2826 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2827 sc, NULL); 2828 } 2829 } 2830 end: 2831 sx_xunlock(&sc->sc_lock); 2832 g_topology_lock(); 2833 return (error); 2834 } 2835 2836 static struct g_geom * 2837 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2838 { 2839 struct g_mirror_softc *sc; 2840 struct g_geom *gp; 2841 int error, timeout; 2842 2843 g_topology_assert(); 2844 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2845 md->md_mid); 2846 2847 /* One disk is minimum. */ 2848 if (md->md_all < 1) 2849 return (NULL); 2850 /* 2851 * Action geom. 
2852 */ 2853 gp = g_new_geomf(mp, "%s", md->md_name); 2854 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2855 gp->start = g_mirror_start; 2856 gp->orphan = g_mirror_orphan; 2857 gp->access = g_mirror_access; 2858 gp->dumpconf = g_mirror_dumpconf; 2859 2860 sc->sc_id = md->md_mid; 2861 sc->sc_slice = md->md_slice; 2862 sc->sc_balance = md->md_balance; 2863 sc->sc_mediasize = md->md_mediasize; 2864 sc->sc_sectorsize = md->md_sectorsize; 2865 sc->sc_ndisks = md->md_all; 2866 sc->sc_flags = md->md_mflags; 2867 sc->sc_bump_id = 0; 2868 sc->sc_idle = 1; 2869 sc->sc_last_write = time_uptime; 2870 sc->sc_writes = 0; 2871 sx_init(&sc->sc_lock, "gmirror:lock"); 2872 bioq_init(&sc->sc_queue); 2873 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2874 bioq_init(&sc->sc_regular_delayed); 2875 bioq_init(&sc->sc_inflight); 2876 bioq_init(&sc->sc_sync_delayed); 2877 LIST_INIT(&sc->sc_disks); 2878 TAILQ_INIT(&sc->sc_events); 2879 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2880 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2881 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2882 gp->softc = sc; 2883 sc->sc_geom = gp; 2884 sc->sc_provider = NULL; 2885 /* 2886 * Synchronization geom. 2887 */ 2888 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2889 gp->softc = sc; 2890 gp->orphan = g_mirror_orphan; 2891 sc->sc_sync.ds_geom = gp; 2892 sc->sc_sync.ds_ndisks = 0; 2893 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2894 "g_mirror %s", md->md_name); 2895 if (error != 0) { 2896 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2897 sc->sc_name); 2898 g_destroy_geom(sc->sc_sync.ds_geom); 2899 mtx_destroy(&sc->sc_events_mtx); 2900 mtx_destroy(&sc->sc_queue_mtx); 2901 sx_destroy(&sc->sc_lock); 2902 g_destroy_geom(sc->sc_geom); 2903 free(sc, M_MIRROR); 2904 return (NULL); 2905 } 2906 2907 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2908 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2909 2910 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2911 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2912 /* 2913 * Run timeout. 
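* If not all components appear within kern.geom.mirror.timeout seconds,
* g_mirror_go() forces the device to start with whatever has been
* tasted so far.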
2914 */
2915 timeout = g_mirror_timeout * hz;
2916 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
2917 return (sc->sc_geom);
2918 }
2919
2920 int
2921 g_mirror_destroy(struct g_mirror_softc *sc, int how)
2922 {
2923 struct g_mirror_disk *disk;
2924 struct g_provider *pp;
2925
2926 g_topology_assert_not();
2927 if (sc == NULL)
2928 return (ENXIO);
2929 sx_assert(&sc->sc_lock, SX_XLOCKED);
2930
2931 pp = sc->sc_provider;
2932 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) {
2933 switch (how) {
2934 case G_MIRROR_DESTROY_SOFT:
2935 G_MIRROR_DEBUG(1,
2936 "Device %s is still open (r%dw%de%d).", pp->name,
2937 pp->acr, pp->acw, pp->ace);
2938 return (EBUSY);
2939 case G_MIRROR_DESTROY_DELAYED:
2940 G_MIRROR_DEBUG(1,
2941 "Device %s will be destroyed on last close.",
2942 pp->name);
2943 LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2944 if (disk->d_state ==
2945 G_MIRROR_DISK_STATE_SYNCHRONIZING) {
2946 g_mirror_sync_stop(disk, 1);
2947 }
2948 }
2949 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
2950 return (EBUSY);
2951 case G_MIRROR_DESTROY_HARD:
2952 G_MIRROR_DEBUG(1, "Device %s is still open, so it "
2953 "can't be removed definitively.", pp->name);
2954 }
2955 }
2956
2957 g_topology_lock();
2958 if (sc->sc_geom->softc == NULL) {
2959 g_topology_unlock();
2960 return (0);
2961 }
2962 sc->sc_geom->softc = NULL;
2963 sc->sc_sync.ds_geom->softc = NULL;
2964 g_topology_unlock();
2965
2966 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2967 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
2968 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
2969 sx_xunlock(&sc->sc_lock);
2970 mtx_lock(&sc->sc_queue_mtx);
2971 wakeup(sc);
2972 mtx_unlock(&sc->sc_queue_mtx);
2973 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
2974 while (sc->sc_worker != NULL)
2975 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
2976 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
2977 sx_xlock(&sc->sc_lock);
2978 g_mirror_destroy_device(sc);
2979 free(sc, M_MIRROR);
2980 return (0);
2981 }
2982
2983 static void
2984 g_mirror_taste_orphan(struct g_consumer *cp)
2985 {
2986
2987 KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
2988 cp->provider->name));
2989 }
2990
2991 static struct g_geom *
2992 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
2993 {
2994 struct g_mirror_metadata md;
2995 struct g_mirror_softc *sc;
2996 struct g_consumer *cp;
2997 struct g_geom *gp;
2998 int error;
2999
3000 g_topology_assert();
3001 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
3002 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);
3003
3004 gp = g_new_geomf(mp, "mirror:taste");
3005 /*
3006 * This orphan function should never be called.
3007 */
3008 gp->orphan = g_mirror_taste_orphan;
3009 cp = g_new_consumer(gp);
3010 g_attach(cp, pp);
3011 error = g_mirror_read_metadata(cp, &md);
3012 g_detach(cp);
3013 g_destroy_consumer(cp);
3014 g_destroy_geom(gp);
3015 if (error != 0)
3016 return (NULL);
3017 gp = NULL;
3018
3019 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0)
3020 return (NULL);
3021 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
3022 return (NULL);
3023 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
3024 G_MIRROR_DEBUG(0,
3025 "Device %s: provider %s marked as inactive, skipping.",
3026 md.md_name, pp->name);
3027 return (NULL);
3028 }
3029 if (g_mirror_debug >= 2)
3030 mirror_metadata_dump(&md);
3031
3032 /*
3033 * Let's check if the device already exists.
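* A match requires both the device name and the mirror id to agree;
* a name collision with a different id is rejected below so that two
* unrelated mirrors cannot be merged.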
3034 */ 3035 sc = NULL; 3036 LIST_FOREACH(gp, &mp->geom, geom) { 3037 sc = gp->softc; 3038 if (sc == NULL) 3039 continue; 3040 if (sc->sc_sync.ds_geom == gp) 3041 continue; 3042 if (strcmp(md.md_name, sc->sc_name) != 0) 3043 continue; 3044 if (md.md_mid != sc->sc_id) { 3045 G_MIRROR_DEBUG(0, "Device %s already configured.", 3046 sc->sc_name); 3047 return (NULL); 3048 } 3049 break; 3050 } 3051 if (gp == NULL) { 3052 gp = g_mirror_create(mp, &md); 3053 if (gp == NULL) { 3054 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3055 md.md_name); 3056 return (NULL); 3057 } 3058 sc = gp->softc; 3059 } 3060 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3061 g_topology_unlock(); 3062 sx_xlock(&sc->sc_lock); 3063 error = g_mirror_add_disk(sc, pp, &md); 3064 if (error != 0) { 3065 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3066 pp->name, gp->name, error); 3067 if (LIST_EMPTY(&sc->sc_disks)) { 3068 g_cancel_event(sc); 3069 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3070 g_topology_lock(); 3071 return (NULL); 3072 } 3073 gp = NULL; 3074 } 3075 sx_xunlock(&sc->sc_lock); 3076 g_topology_lock(); 3077 return (gp); 3078 } 3079 3080 static int 3081 g_mirror_destroy_geom(struct gctl_req *req __unused, 3082 struct g_class *mp __unused, struct g_geom *gp) 3083 { 3084 struct g_mirror_softc *sc; 3085 int error; 3086 3087 g_topology_unlock(); 3088 sc = gp->softc; 3089 sx_xlock(&sc->sc_lock); 3090 g_cancel_event(sc); 3091 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3092 if (error != 0) 3093 sx_xunlock(&sc->sc_lock); 3094 g_topology_lock(); 3095 return (error); 3096 } 3097 3098 static void 3099 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3100 struct g_consumer *cp, struct g_provider *pp) 3101 { 3102 struct g_mirror_softc *sc; 3103 3104 g_topology_assert(); 3105 3106 sc = gp->softc; 3107 if (sc == NULL) 3108 return; 3109 /* Skip synchronization geom. */ 3110 if (gp == sc->sc_sync.ds_geom) 3111 return; 3112 if (pp != NULL) { 3113 /* Nothing here. 
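* (No provider-specific XML is emitted; the interesting state is
* reported per consumer and per geom below.)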
*/ 3114 } else if (cp != NULL) { 3115 struct g_mirror_disk *disk; 3116 3117 disk = cp->private; 3118 if (disk == NULL) 3119 return; 3120 g_topology_unlock(); 3121 sx_xlock(&sc->sc_lock); 3122 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3123 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3124 sbuf_printf(sb, "%s<Synchronized>", indent); 3125 if (disk->d_sync.ds_offset == 0) 3126 sbuf_printf(sb, "0%%"); 3127 else { 3128 sbuf_printf(sb, "%u%%", 3129 (u_int)((disk->d_sync.ds_offset * 100) / 3130 sc->sc_provider->mediasize)); 3131 } 3132 sbuf_printf(sb, "</Synchronized>\n"); 3133 } 3134 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3135 disk->d_sync.ds_syncid); 3136 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3137 disk->d_genid); 3138 sbuf_printf(sb, "%s<Flags>", indent); 3139 if (disk->d_flags == 0) 3140 sbuf_printf(sb, "NONE"); 3141 else { 3142 int first = 1; 3143 3144 #define ADD_FLAG(flag, name) do { \ 3145 if ((disk->d_flags & (flag)) != 0) { \ 3146 if (!first) \ 3147 sbuf_printf(sb, ", "); \ 3148 else \ 3149 first = 0; \ 3150 sbuf_printf(sb, name); \ 3151 } \ 3152 } while (0) 3153 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3154 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3155 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3156 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3157 "SYNCHRONIZING"); 3158 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3159 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3160 #undef ADD_FLAG 3161 } 3162 sbuf_printf(sb, "</Flags>\n"); 3163 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3164 disk->d_priority); 3165 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3166 g_mirror_disk_state2str(disk->d_state)); 3167 sx_xunlock(&sc->sc_lock); 3168 g_topology_lock(); 3169 } else { 3170 g_topology_unlock(); 3171 sx_xlock(&sc->sc_lock); 3172 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3173 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3174 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3175 sbuf_printf(sb, "%s<Flags>", indent); 3176 if (sc->sc_flags == 0) 3177 sbuf_printf(sb, "NONE"); 3178 else { 3179 int first = 1; 3180 3181 #define ADD_FLAG(flag, name) do { \ 3182 if ((sc->sc_flags & (flag)) != 0) { \ 3183 if (!first) \ 3184 sbuf_printf(sb, ", "); \ 3185 else \ 3186 first = 0; \ 3187 sbuf_printf(sb, name); \ 3188 } \ 3189 } while (0) 3190 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3191 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3192 #undef ADD_FLAG 3193 } 3194 sbuf_printf(sb, "</Flags>\n"); 3195 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3196 (u_int)sc->sc_slice); 3197 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3198 balance_name(sc->sc_balance)); 3199 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3200 sc->sc_ndisks); 3201 sbuf_printf(sb, "%s<State>", indent); 3202 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3203 sbuf_printf(sb, "%s", "STARTING"); 3204 else if (sc->sc_ndisks == 3205 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3206 sbuf_printf(sb, "%s", "COMPLETE"); 3207 else 3208 sbuf_printf(sb, "%s", "DEGRADED"); 3209 sbuf_printf(sb, "</State>\n"); 3210 sx_xunlock(&sc->sc_lock); 3211 g_topology_lock(); 3212 } 3213 } 3214 3215 static void 3216 g_mirror_shutdown_pre_sync(void *arg, int howto) 3217 { 3218 struct g_class *mp; 3219 struct g_geom *gp, *gp2; 3220 struct g_mirror_softc *sc; 3221 int error; 3222 3223 mp = arg; 3224 DROP_GIANT(); 3225 g_topology_lock(); 3226 
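/*
* Ask every mirror to destroy itself: idle devices go away immediately,
* while devices that are still open are flagged DESTROYING and torn
* down on last close (see g_mirror_access()).
*/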
LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3227 if ((sc = gp->softc) == NULL) 3228 continue; 3229 /* Skip synchronization geom. */ 3230 if (gp == sc->sc_sync.ds_geom) 3231 continue; 3232 g_topology_unlock(); 3233 sx_xlock(&sc->sc_lock); 3234 g_cancel_event(sc); 3235 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3236 if (error != 0) 3237 sx_xunlock(&sc->sc_lock); 3238 g_topology_lock(); 3239 } 3240 g_topology_unlock(); 3241 PICKUP_GIANT(); 3242 } 3243 3244 static void 3245 g_mirror_init(struct g_class *mp) 3246 { 3247 3248 g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3249 g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3250 if (g_mirror_pre_sync == NULL) 3251 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3252 } 3253 3254 static void 3255 g_mirror_fini(struct g_class *mp) 3256 { 3257 3258 if (g_mirror_pre_sync != NULL) 3259 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync); 3260 } 3261 3262 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3263