1 /*- 2 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/kernel.h> 33 #include <sys/module.h> 34 #include <sys/limits.h> 35 #include <sys/lock.h> 36 #include <sys/mutex.h> 37 #include <sys/bio.h> 38 #include <sys/sysctl.h> 39 #include <sys/malloc.h> 40 #include <sys/eventhandler.h> 41 #include <vm/uma.h> 42 #include <geom/geom.h> 43 #include <sys/proc.h> 44 #include <sys/kthread.h> 45 #include <sys/sched.h> 46 #include <geom/mirror/g_mirror.h> 47 48 49 static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data"); 50 51 SYSCTL_DECL(_kern_geom); 52 SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff"); 53 u_int g_mirror_debug = 0; 54 TUNABLE_INT("kern.geom.mirror.debug", &g_mirror_debug); 55 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0, 56 "Debug level"); 57 static u_int g_mirror_timeout = 4; 58 TUNABLE_INT("kern.geom.mirror.timeout", &g_mirror_timeout); 59 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW, &g_mirror_timeout, 60 0, "Time to wait on all mirror components"); 61 static u_int g_mirror_idletime = 5; 62 TUNABLE_INT("kern.geom.mirror.idletime", &g_mirror_idletime); 63 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RW, 64 &g_mirror_idletime, 0, "Mark components as clean when idling"); 65 static u_int g_mirror_disconnect_on_failure = 1; 66 TUNABLE_INT("kern.geom.mirror.disconnect_on_failure", 67 &g_mirror_disconnect_on_failure); 68 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RW, 69 &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 70 static u_int g_mirror_syncreqs = 2; 71 TUNABLE_INT("kern.geom.mirror.sync_requests", &g_mirror_syncreqs); 72 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 73 &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests."); 74 75 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 76 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 77 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 78 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 79 } while (0) 80 81 static eventhandler_tag g_mirror_pre_sync = NULL; 82 83 static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp, 84 struct g_geom *gp); 85 static g_taste_t g_mirror_taste; 86 static void g_mirror_init(struct g_class *mp); 87 static void g_mirror_fini(struct g_class *mp); 88 89 struct g_class g_mirror_class = { 90 .name = G_MIRROR_CLASS_NAME, 91 .version = G_VERSION, 92 .ctlreq = g_mirror_config, 93 .taste = g_mirror_taste, 94 .destroy_geom = g_mirror_destroy_geom, 95 .init = g_mirror_init, 96 .fini = g_mirror_fini 97 }; 98 99 100 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 101 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 102 static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force); 103 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 104 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 105 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 106 static void g_mirror_register_request(struct bio *bp); 107 static void g_mirror_sync_release(struct g_mirror_softc *sc); 108 109 110 static const char * 111 g_mirror_disk_state2str(int state) 112 { 113 114 switch (state) { 115 case G_MIRROR_DISK_STATE_NONE: 116 return ("NONE"); 117 case G_MIRROR_DISK_STATE_NEW: 118 return ("NEW"); 119 case G_MIRROR_DISK_STATE_ACTIVE: 120 return ("ACTIVE"); 121 case G_MIRROR_DISK_STATE_STALE: 122 return ("STALE"); 123 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 124 return ("SYNCHRONIZING"); 125 case G_MIRROR_DISK_STATE_DISCONNECTED: 126 return ("DISCONNECTED"); 127 case G_MIRROR_DISK_STATE_DESTROY: 128 return ("DESTROY"); 129 default: 130 return ("INVALID"); 131 } 132 } 133 134 static const char * 135 g_mirror_device_state2str(int state) 136 { 137 138 switch (state) { 139 case G_MIRROR_DEVICE_STATE_STARTING: 140 return ("STARTING"); 141 case G_MIRROR_DEVICE_STATE_RUNNING: 142 return ("RUNNING"); 143 default: 144 return ("INVALID"); 145 } 146 } 147 148 static const char * 149 g_mirror_get_diskname(struct g_mirror_disk *disk) 150 { 151 152 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 153 return ("[unknown]"); 154 return (disk->d_name); 155 } 156 157 /* 158 * --- Events handling functions --- 159 * Events in geom_mirror are used to maintain disks and device status 160 * from one thread to simplify locking. 161 */ 162 static void 163 g_mirror_event_free(struct g_mirror_event *ep) 164 { 165 166 free(ep, M_MIRROR); 167 } 168 169 int 170 g_mirror_event_send(void *arg, int state, int flags) 171 { 172 struct g_mirror_softc *sc; 173 struct g_mirror_disk *disk; 174 struct g_mirror_event *ep; 175 int error; 176 177 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 178 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 179 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 180 disk = NULL; 181 sc = arg; 182 } else { 183 disk = arg; 184 sc = disk->d_softc; 185 } 186 ep->e_disk = disk; 187 ep->e_state = state; 188 ep->e_flags = flags; 189 ep->e_error = 0; 190 mtx_lock(&sc->sc_events_mtx); 191 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 192 mtx_unlock(&sc->sc_events_mtx); 193 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 194 mtx_lock(&sc->sc_queue_mtx); 195 wakeup(sc); 196 mtx_unlock(&sc->sc_queue_mtx); 197 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 198 return (0); 199 sx_assert(&sc->sc_lock, SX_XLOCKED); 200 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 201 sx_xunlock(&sc->sc_lock); 202 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 203 mtx_lock(&sc->sc_events_mtx); 204 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 205 hz * 5); 206 } 207 error = ep->e_error; 208 g_mirror_event_free(ep); 209 sx_xlock(&sc->sc_lock); 210 return (error); 211 } 212 213 static struct g_mirror_event * 214 g_mirror_event_get(struct g_mirror_softc *sc) 215 { 216 struct g_mirror_event *ep; 217 218 mtx_lock(&sc->sc_events_mtx); 219 ep = TAILQ_FIRST(&sc->sc_events); 220 mtx_unlock(&sc->sc_events_mtx); 221 return (ep); 222 } 223 224 static void 225 g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 226 { 227 228 mtx_lock(&sc->sc_events_mtx); 229 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 230 mtx_unlock(&sc->sc_events_mtx); 231 } 232 233 static void 234 g_mirror_event_cancel(struct g_mirror_disk *disk) 235 { 236 struct g_mirror_softc *sc; 237 struct g_mirror_event *ep, *tmpep; 238 239 sc = disk->d_softc; 240 sx_assert(&sc->sc_lock, SX_XLOCKED); 241 242 mtx_lock(&sc->sc_events_mtx); 243 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 244 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 245 continue; 246 if (ep->e_disk != disk) 247 continue; 248 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 249 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 250 g_mirror_event_free(ep); 251 else { 252 ep->e_error = ECANCELED; 253 wakeup(ep); 254 } 255 } 256 mtx_unlock(&sc->sc_events_mtx); 257 } 258 259 /* 260 * Return the number of disks in given state. 261 * If state is equal to -1, count all connected disks. 262 */ 263 u_int 264 g_mirror_ndisks(struct g_mirror_softc *sc, int state) 265 { 266 struct g_mirror_disk *disk; 267 u_int n = 0; 268 269 sx_assert(&sc->sc_lock, SX_LOCKED); 270 271 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 272 if (state == -1 || disk->d_state == state) 273 n++; 274 } 275 return (n); 276 } 277 278 /* 279 * Find a disk in mirror by its disk ID. 280 */ 281 static struct g_mirror_disk * 282 g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) 283 { 284 struct g_mirror_disk *disk; 285 286 sx_assert(&sc->sc_lock, SX_XLOCKED); 287 288 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 289 if (disk->d_id == id) 290 return (disk); 291 } 292 return (NULL); 293 } 294 295 static u_int 296 g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp) 297 { 298 struct bio *bp; 299 u_int nreqs = 0; 300 301 mtx_lock(&sc->sc_queue_mtx); 302 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 303 if (bp->bio_from == cp) 304 nreqs++; 305 } 306 mtx_unlock(&sc->sc_queue_mtx); 307 return (nreqs); 308 } 309 310 static int 311 g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp) 312 { 313 314 if (cp->index > 0) { 315 G_MIRROR_DEBUG(2, 316 "I/O requests for %s exist, can't destroy it now.", 317 cp->provider->name); 318 return (1); 319 } 320 if (g_mirror_nrequests(sc, cp) > 0) { 321 G_MIRROR_DEBUG(2, 322 "I/O requests for %s in queue, can't destroy it now.", 323 cp->provider->name); 324 return (1); 325 } 326 return (0); 327 } 328 329 static void 330 g_mirror_destroy_consumer(void *arg, int flags __unused) 331 { 332 struct g_consumer *cp; 333 334 g_topology_assert(); 335 336 cp = arg; 337 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 338 g_detach(cp); 339 g_destroy_consumer(cp); 340 } 341 342 static void 343 g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 344 { 345 struct g_provider *pp; 346 int retaste_wait; 347 348 g_topology_assert(); 349 350 cp->private = NULL; 351 if (g_mirror_is_busy(sc, cp)) 352 return; 353 pp = cp->provider; 354 retaste_wait = 0; 355 if (cp->acw == 1) { 356 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 357 retaste_wait = 1; 358 } 359 G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 360 -cp->acw, -cp->ace, 0); 361 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 362 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 363 if (retaste_wait) { 364 /* 365 * After retaste event was send (inside g_access()), we can send 366 * event to detach and destroy consumer. 367 * A class, which has consumer to the given provider connected 368 * will not receive retaste event for the provider. 369 * This is the way how I ignore retaste events when I close 370 * consumers opened for write: I detach and destroy consumer 371 * after retaste event is sent. 372 */ 373 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 374 return; 375 } 376 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 377 g_detach(cp); 378 g_destroy_consumer(cp); 379 } 380 381 static int 382 g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 383 { 384 struct g_consumer *cp; 385 int error; 386 387 g_topology_assert_not(); 388 KASSERT(disk->d_consumer == NULL, 389 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 390 391 g_topology_lock(); 392 cp = g_new_consumer(disk->d_softc->sc_geom); 393 error = g_attach(cp, pp); 394 if (error != 0) { 395 g_destroy_consumer(cp); 396 g_topology_unlock(); 397 return (error); 398 } 399 error = g_access(cp, 1, 1, 1); 400 if (error != 0) { 401 g_detach(cp); 402 g_destroy_consumer(cp); 403 g_topology_unlock(); 404 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 405 pp->name, error); 406 return (error); 407 } 408 g_topology_unlock(); 409 disk->d_consumer = cp; 410 disk->d_consumer->private = disk; 411 disk->d_consumer->index = 0; 412 413 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 414 return (0); 415 } 416 417 static void 418 g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 419 { 420 421 g_topology_assert(); 422 423 if (cp == NULL) 424 return; 425 if (cp->provider != NULL) 426 g_mirror_kill_consumer(sc, cp); 427 else 428 g_destroy_consumer(cp); 429 } 430 431 /* 432 * Initialize disk. This means allocate memory, create consumer, attach it 433 * to the provider and open access (r1w1e1) to it. 434 */ 435 static struct g_mirror_disk * 436 g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 437 struct g_mirror_metadata *md, int *errorp) 438 { 439 struct g_mirror_disk *disk; 440 int error; 441 442 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 443 if (disk == NULL) { 444 error = ENOMEM; 445 goto fail; 446 } 447 disk->d_softc = sc; 448 error = g_mirror_connect_disk(disk, pp); 449 if (error != 0) 450 goto fail; 451 disk->d_id = md->md_did; 452 disk->d_state = G_MIRROR_DISK_STATE_NONE; 453 disk->d_priority = md->md_priority; 454 disk->d_delay.sec = 0; 455 disk->d_delay.frac = 0; 456 binuptime(&disk->d_last_used); 457 disk->d_flags = md->md_dflags; 458 if (md->md_provider[0] != '\0') 459 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 460 disk->d_sync.ds_consumer = NULL; 461 disk->d_sync.ds_offset = md->md_sync_offset; 462 disk->d_sync.ds_offset_done = md->md_sync_offset; 463 disk->d_genid = md->md_genid; 464 disk->d_sync.ds_syncid = md->md_syncid; 465 if (errorp != NULL) 466 *errorp = 0; 467 return (disk); 468 fail: 469 if (errorp != NULL) 470 *errorp = error; 471 if (disk != NULL) 472 free(disk, M_MIRROR); 473 return (NULL); 474 } 475 476 static void 477 g_mirror_destroy_disk(struct g_mirror_disk *disk) 478 { 479 struct g_mirror_softc *sc; 480 481 g_topology_assert_not(); 482 sc = disk->d_softc; 483 sx_assert(&sc->sc_lock, SX_XLOCKED); 484 485 LIST_REMOVE(disk, d_next); 486 g_mirror_event_cancel(disk); 487 if (sc->sc_hint == disk) 488 sc->sc_hint = NULL; 489 switch (disk->d_state) { 490 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 491 g_mirror_sync_stop(disk, 1); 492 /* FALLTHROUGH */ 493 case G_MIRROR_DISK_STATE_NEW: 494 case G_MIRROR_DISK_STATE_STALE: 495 case G_MIRROR_DISK_STATE_ACTIVE: 496 g_topology_lock(); 497 g_mirror_disconnect_consumer(sc, disk->d_consumer); 498 g_topology_unlock(); 499 free(disk, M_MIRROR); 500 break; 501 default: 502 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 503 g_mirror_get_diskname(disk), 504 g_mirror_disk_state2str(disk->d_state))); 505 } 506 } 507 508 static void 509 g_mirror_destroy_device(struct g_mirror_softc *sc) 510 { 511 struct g_mirror_disk *disk; 512 struct g_mirror_event *ep; 513 struct g_geom *gp; 514 struct g_consumer *cp, *tmpcp; 515 516 g_topology_assert_not(); 517 sx_assert(&sc->sc_lock, SX_XLOCKED); 518 519 gp = sc->sc_geom; 520 if (sc->sc_provider != NULL) 521 g_mirror_destroy_provider(sc); 522 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 523 disk = LIST_FIRST(&sc->sc_disks)) { 524 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 525 g_mirror_update_metadata(disk); 526 g_mirror_destroy_disk(disk); 527 } 528 while ((ep = g_mirror_event_get(sc)) != NULL) { 529 g_mirror_event_remove(sc, ep); 530 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 531 g_mirror_event_free(ep); 532 else { 533 ep->e_error = ECANCELED; 534 ep->e_flags |= G_MIRROR_EVENT_DONE; 535 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 536 mtx_lock(&sc->sc_events_mtx); 537 wakeup(ep); 538 mtx_unlock(&sc->sc_events_mtx); 539 } 540 } 541 callout_drain(&sc->sc_callout); 542 543 g_topology_lock(); 544 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 545 g_mirror_disconnect_consumer(sc, cp); 546 } 547 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 548 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 549 g_wither_geom(gp, ENXIO); 550 g_topology_unlock(); 551 mtx_destroy(&sc->sc_queue_mtx); 552 mtx_destroy(&sc->sc_events_mtx); 553 sx_xunlock(&sc->sc_lock); 554 sx_destroy(&sc->sc_lock); 555 } 556 557 static void 558 g_mirror_orphan(struct g_consumer *cp) 559 { 560 struct g_mirror_disk *disk; 561 562 g_topology_assert(); 563 564 disk = cp->private; 565 if (disk == NULL) 566 return; 567 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 568 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 569 G_MIRROR_EVENT_DONTWAIT); 570 } 571 572 /* 573 * Function should return the next active disk on the list. 574 * It is possible that it will be the same disk as given. 575 * If there are no active disks on list, NULL is returned. 576 */ 577 static __inline struct g_mirror_disk * 578 g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 579 { 580 struct g_mirror_disk *dp; 581 582 for (dp = LIST_NEXT(disk, d_next); dp != disk; 583 dp = LIST_NEXT(dp, d_next)) { 584 if (dp == NULL) 585 dp = LIST_FIRST(&sc->sc_disks); 586 if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) 587 break; 588 } 589 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 590 return (NULL); 591 return (dp); 592 } 593 594 static struct g_mirror_disk * 595 g_mirror_get_disk(struct g_mirror_softc *sc) 596 { 597 struct g_mirror_disk *disk; 598 599 if (sc->sc_hint == NULL) { 600 sc->sc_hint = LIST_FIRST(&sc->sc_disks); 601 if (sc->sc_hint == NULL) 602 return (NULL); 603 } 604 disk = sc->sc_hint; 605 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { 606 disk = g_mirror_find_next(sc, disk); 607 if (disk == NULL) 608 return (NULL); 609 } 610 sc->sc_hint = g_mirror_find_next(sc, disk); 611 return (disk); 612 } 613 614 static int 615 g_mirror_write_metadata(struct g_mirror_disk *disk, 616 struct g_mirror_metadata *md) 617 { 618 struct g_mirror_softc *sc; 619 struct g_consumer *cp; 620 off_t offset, length; 621 u_char *sector; 622 int error = 0; 623 624 g_topology_assert_not(); 625 sc = disk->d_softc; 626 sx_assert(&sc->sc_lock, SX_LOCKED); 627 628 cp = disk->d_consumer; 629 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 630 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 631 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 632 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 633 cp->acw, cp->ace)); 634 length = cp->provider->sectorsize; 635 offset = cp->provider->mediasize - length; 636 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 637 if (md != NULL) 638 mirror_metadata_encode(md, sector); 639 error = g_write_data(cp, offset, sector, length); 640 free(sector, M_MIRROR); 641 if (error != 0) { 642 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 643 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 644 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 645 "(device=%s, error=%d).", 646 g_mirror_get_diskname(disk), sc->sc_name, error); 647 } else { 648 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 649 "(device=%s, error=%d).", 650 g_mirror_get_diskname(disk), sc->sc_name, error); 651 } 652 if (g_mirror_disconnect_on_failure && 653 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 654 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 655 g_mirror_event_send(disk, 656 G_MIRROR_DISK_STATE_DISCONNECTED, 657 G_MIRROR_EVENT_DONTWAIT); 658 } 659 } 660 return (error); 661 } 662 663 static int 664 g_mirror_clear_metadata(struct g_mirror_disk *disk) 665 { 666 int error; 667 668 g_topology_assert_not(); 669 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 670 671 error = g_mirror_write_metadata(disk, NULL); 672 if (error == 0) { 673 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 674 g_mirror_get_diskname(disk)); 675 } else { 676 G_MIRROR_DEBUG(0, 677 "Cannot clear metadata on disk %s (error=%d).", 678 g_mirror_get_diskname(disk), error); 679 } 680 return (error); 681 } 682 683 void 684 g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 685 struct g_mirror_metadata *md) 686 { 687 688 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 689 md->md_version = G_MIRROR_VERSION; 690 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 691 md->md_mid = sc->sc_id; 692 md->md_all = sc->sc_ndisks; 693 md->md_slice = sc->sc_slice; 694 md->md_balance = sc->sc_balance; 695 md->md_genid = sc->sc_genid; 696 md->md_mediasize = sc->sc_mediasize; 697 md->md_sectorsize = sc->sc_sectorsize; 698 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 699 bzero(md->md_provider, sizeof(md->md_provider)); 700 if (disk == NULL) { 701 md->md_did = arc4random(); 702 md->md_priority = 0; 703 md->md_syncid = 0; 704 md->md_dflags = 0; 705 md->md_sync_offset = 0; 706 md->md_provsize = 0; 707 } else { 708 md->md_did = disk->d_id; 709 md->md_priority = disk->d_priority; 710 md->md_syncid = disk->d_sync.ds_syncid; 711 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 712 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 713 md->md_sync_offset = disk->d_sync.ds_offset_done; 714 else 715 md->md_sync_offset = 0; 716 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 717 strlcpy(md->md_provider, 718 disk->d_consumer->provider->name, 719 sizeof(md->md_provider)); 720 } 721 md->md_provsize = disk->d_consumer->provider->mediasize; 722 } 723 } 724 725 void 726 g_mirror_update_metadata(struct g_mirror_disk *disk) 727 { 728 struct g_mirror_softc *sc; 729 struct g_mirror_metadata md; 730 int error; 731 732 g_topology_assert_not(); 733 sc = disk->d_softc; 734 sx_assert(&sc->sc_lock, SX_LOCKED); 735 736 g_mirror_fill_metadata(sc, disk, &md); 737 error = g_mirror_write_metadata(disk, &md); 738 if (error == 0) { 739 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 740 g_mirror_get_diskname(disk)); 741 } else { 742 G_MIRROR_DEBUG(0, 743 "Cannot update metadata on disk %s (error=%d).", 744 g_mirror_get_diskname(disk), error); 745 } 746 } 747 748 static void 749 g_mirror_bump_syncid(struct g_mirror_softc *sc) 750 { 751 struct g_mirror_disk *disk; 752 753 g_topology_assert_not(); 754 sx_assert(&sc->sc_lock, SX_XLOCKED); 755 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 756 ("%s called with no active disks (device=%s).", __func__, 757 sc->sc_name)); 758 759 sc->sc_syncid++; 760 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 761 sc->sc_syncid); 762 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 763 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 764 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 765 disk->d_sync.ds_syncid = sc->sc_syncid; 766 g_mirror_update_metadata(disk); 767 } 768 } 769 } 770 771 static void 772 g_mirror_bump_genid(struct g_mirror_softc *sc) 773 { 774 struct g_mirror_disk *disk; 775 776 g_topology_assert_not(); 777 sx_assert(&sc->sc_lock, SX_XLOCKED); 778 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 779 ("%s called with no active disks (device=%s).", __func__, 780 sc->sc_name)); 781 782 sc->sc_genid++; 783 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 784 sc->sc_genid); 785 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 786 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 787 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 788 disk->d_genid = sc->sc_genid; 789 g_mirror_update_metadata(disk); 790 } 791 } 792 } 793 794 static int 795 g_mirror_idle(struct g_mirror_softc *sc, int acw) 796 { 797 struct g_mirror_disk *disk; 798 int timeout; 799 800 g_topology_assert_not(); 801 sx_assert(&sc->sc_lock, SX_XLOCKED); 802 803 if (sc->sc_provider == NULL) 804 return (0); 805 if (sc->sc_idle) 806 return (0); 807 if (sc->sc_writes > 0) 808 return (0); 809 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 810 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 811 if (timeout > 0) 812 return (timeout); 813 } 814 sc->sc_idle = 1; 815 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 816 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 817 continue; 818 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.", 819 g_mirror_get_diskname(disk), sc->sc_name); 820 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 821 g_mirror_update_metadata(disk); 822 } 823 return (0); 824 } 825 826 static void 827 g_mirror_unidle(struct g_mirror_softc *sc) 828 { 829 struct g_mirror_disk *disk; 830 831 g_topology_assert_not(); 832 sx_assert(&sc->sc_lock, SX_XLOCKED); 833 834 sc->sc_idle = 0; 835 sc->sc_last_write = time_uptime; 836 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 837 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 838 continue; 839 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.", 840 g_mirror_get_diskname(disk), sc->sc_name); 841 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 842 g_mirror_update_metadata(disk); 843 } 844 } 845 846 static __inline int 847 bintime_cmp(struct bintime *bt1, struct bintime *bt2) 848 { 849 850 if (bt1->sec < bt2->sec) 851 return (-1); 852 else if (bt1->sec > bt2->sec) 853 return (1); 854 if (bt1->frac < bt2->frac) 855 return (-1); 856 else if (bt1->frac > bt2->frac) 857 return (1); 858 return (0); 859 } 860 861 static void 862 g_mirror_update_delay(struct g_mirror_disk *disk, struct bio *bp) 863 { 864 865 if (disk->d_softc->sc_balance != G_MIRROR_BALANCE_LOAD) 866 return; 867 binuptime(&disk->d_delay); 868 bintime_sub(&disk->d_delay, &bp->bio_t0); 869 } 870 871 static void 872 g_mirror_done(struct bio *bp) 873 { 874 struct g_mirror_softc *sc; 875 876 sc = bp->bio_from->geom->softc; 877 bp->bio_cflags |= G_MIRROR_BIO_FLAG_REGULAR; 878 mtx_lock(&sc->sc_queue_mtx); 879 bioq_disksort(&sc->sc_queue, bp); 880 wakeup(sc); 881 mtx_unlock(&sc->sc_queue_mtx); 882 } 883 884 static void 885 g_mirror_regular_request(struct bio *bp) 886 { 887 struct g_mirror_softc *sc; 888 struct g_mirror_disk *disk; 889 struct bio *pbp; 890 891 g_topology_assert_not(); 892 893 pbp = bp->bio_parent; 894 sc = pbp->bio_to->geom->softc; 895 bp->bio_from->index--; 896 if (bp->bio_cmd == BIO_WRITE) 897 sc->sc_writes--; 898 disk = bp->bio_from->private; 899 if (disk == NULL) { 900 g_topology_lock(); 901 g_mirror_kill_consumer(sc, bp->bio_from); 902 g_topology_unlock(); 903 } else { 904 g_mirror_update_delay(disk, bp); 905 } 906 907 pbp->bio_inbed++; 908 KASSERT(pbp->bio_inbed <= pbp->bio_children, 909 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 910 pbp->bio_children)); 911 if (bp->bio_error == 0 && pbp->bio_error == 0) { 912 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 913 g_destroy_bio(bp); 914 if (pbp->bio_children == pbp->bio_inbed) { 915 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 916 pbp->bio_completed = pbp->bio_length; 917 if (pbp->bio_cmd == BIO_WRITE) { 918 bioq_remove(&sc->sc_inflight, pbp); 919 /* Release delayed sync requests if possible. */ 920 g_mirror_sync_release(sc); 921 } 922 g_io_deliver(pbp, pbp->bio_error); 923 } 924 return; 925 } else if (bp->bio_error != 0) { 926 if (pbp->bio_error == 0) 927 pbp->bio_error = bp->bio_error; 928 if (disk != NULL) { 929 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 930 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 931 G_MIRROR_LOGREQ(0, bp, 932 "Request failed (error=%d).", 933 bp->bio_error); 934 } else { 935 G_MIRROR_LOGREQ(1, bp, 936 "Request failed (error=%d).", 937 bp->bio_error); 938 } 939 if (g_mirror_disconnect_on_failure && 940 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) 941 { 942 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 943 g_mirror_event_send(disk, 944 G_MIRROR_DISK_STATE_DISCONNECTED, 945 G_MIRROR_EVENT_DONTWAIT); 946 } 947 } 948 switch (pbp->bio_cmd) { 949 case BIO_DELETE: 950 case BIO_WRITE: 951 pbp->bio_inbed--; 952 pbp->bio_children--; 953 break; 954 } 955 } 956 g_destroy_bio(bp); 957 958 switch (pbp->bio_cmd) { 959 case BIO_READ: 960 if (pbp->bio_inbed < pbp->bio_children) 961 break; 962 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 963 g_io_deliver(pbp, pbp->bio_error); 964 else { 965 pbp->bio_error = 0; 966 mtx_lock(&sc->sc_queue_mtx); 967 bioq_disksort(&sc->sc_queue, pbp); 968 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 969 wakeup(sc); 970 mtx_unlock(&sc->sc_queue_mtx); 971 } 972 break; 973 case BIO_DELETE: 974 case BIO_WRITE: 975 if (pbp->bio_children == 0) { 976 /* 977 * All requests failed. 978 */ 979 } else if (pbp->bio_inbed < pbp->bio_children) { 980 /* Do nothing. */ 981 break; 982 } else if (pbp->bio_children == pbp->bio_inbed) { 983 /* Some requests succeeded. */ 984 pbp->bio_error = 0; 985 pbp->bio_completed = pbp->bio_length; 986 } 987 bioq_remove(&sc->sc_inflight, pbp); 988 /* Release delayed sync requests if possible. */ 989 g_mirror_sync_release(sc); 990 g_io_deliver(pbp, pbp->bio_error); 991 break; 992 default: 993 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 994 break; 995 } 996 } 997 998 static void 999 g_mirror_sync_done(struct bio *bp) 1000 { 1001 struct g_mirror_softc *sc; 1002 1003 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1004 sc = bp->bio_from->geom->softc; 1005 bp->bio_cflags |= G_MIRROR_BIO_FLAG_SYNC; 1006 mtx_lock(&sc->sc_queue_mtx); 1007 bioq_disksort(&sc->sc_queue, bp); 1008 wakeup(sc); 1009 mtx_unlock(&sc->sc_queue_mtx); 1010 } 1011 1012 static void 1013 g_mirror_kernel_dump(struct bio *bp) 1014 { 1015 struct g_mirror_softc *sc; 1016 struct g_mirror_disk *disk; 1017 struct bio *cbp; 1018 struct g_kerneldump *gkd; 1019 1020 /* 1021 * We configure dumping to the first component, because this component 1022 * will be used for reading with 'prefer' balance algorithm. 1023 * If the component with the higest priority is currently disconnected 1024 * we will not be able to read the dump after the reboot if it will be 1025 * connected and synchronized later. Can we do something better? 1026 */ 1027 sc = bp->bio_to->geom->softc; 1028 disk = LIST_FIRST(&sc->sc_disks); 1029 1030 gkd = (struct g_kerneldump *)bp->bio_data; 1031 if (gkd->length > bp->bio_to->mediasize) 1032 gkd->length = bp->bio_to->mediasize; 1033 cbp = g_clone_bio(bp); 1034 if (cbp == NULL) { 1035 g_io_deliver(bp, ENOMEM); 1036 return; 1037 } 1038 cbp->bio_done = g_std_done; 1039 g_io_request(cbp, disk->d_consumer); 1040 G_MIRROR_DEBUG(1, "Kernel dump will go to %s.", 1041 g_mirror_get_diskname(disk)); 1042 } 1043 1044 static void 1045 g_mirror_start(struct bio *bp) 1046 { 1047 struct g_mirror_softc *sc; 1048 1049 sc = bp->bio_to->geom->softc; 1050 /* 1051 * If sc == NULL or there are no valid disks, provider's error 1052 * should be set and g_mirror_start() should not be called at all. 1053 */ 1054 KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1055 ("Provider's error should be set (error=%d)(mirror=%s).", 1056 bp->bio_to->error, bp->bio_to->name)); 1057 G_MIRROR_LOGREQ(3, bp, "Request received."); 1058 1059 switch (bp->bio_cmd) { 1060 case BIO_READ: 1061 case BIO_WRITE: 1062 case BIO_DELETE: 1063 break; 1064 case BIO_GETATTR: 1065 if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) { 1066 g_mirror_kernel_dump(bp); 1067 return; 1068 } 1069 /* FALLTHROUGH */ 1070 default: 1071 g_io_deliver(bp, EOPNOTSUPP); 1072 return; 1073 } 1074 mtx_lock(&sc->sc_queue_mtx); 1075 bioq_disksort(&sc->sc_queue, bp); 1076 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1077 wakeup(sc); 1078 mtx_unlock(&sc->sc_queue_mtx); 1079 } 1080 1081 /* 1082 * Return TRUE if the given request is colliding with a in-progress 1083 * synchronization request. 1084 */ 1085 static int 1086 g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp) 1087 { 1088 struct g_mirror_disk *disk; 1089 struct bio *sbp; 1090 off_t rstart, rend, sstart, send; 1091 int i; 1092 1093 if (sc->sc_sync.ds_ndisks == 0) 1094 return (0); 1095 rstart = bp->bio_offset; 1096 rend = bp->bio_offset + bp->bio_length; 1097 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1098 if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) 1099 continue; 1100 for (i = 0; i < g_mirror_syncreqs; i++) { 1101 sbp = disk->d_sync.ds_bios[i]; 1102 if (sbp == NULL) 1103 continue; 1104 sstart = sbp->bio_offset; 1105 send = sbp->bio_offset + sbp->bio_length; 1106 if (rend > sstart && rstart < send) 1107 return (1); 1108 } 1109 } 1110 return (0); 1111 } 1112 1113 /* 1114 * Return TRUE if the given sync request is colliding with a in-progress regular 1115 * request. 1116 */ 1117 static int 1118 g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp) 1119 { 1120 off_t rstart, rend, sstart, send; 1121 struct bio *bp; 1122 1123 if (sc->sc_sync.ds_ndisks == 0) 1124 return (0); 1125 sstart = sbp->bio_offset; 1126 send = sbp->bio_offset + sbp->bio_length; 1127 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 1128 rstart = bp->bio_offset; 1129 rend = bp->bio_offset + bp->bio_length; 1130 if (rend > sstart && rstart < send) 1131 return (1); 1132 } 1133 return (0); 1134 } 1135 1136 /* 1137 * Puts request onto delayed queue. 1138 */ 1139 static void 1140 g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1141 { 1142 1143 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1144 bioq_insert_head(&sc->sc_regular_delayed, bp); 1145 } 1146 1147 /* 1148 * Puts synchronization request onto delayed queue. 1149 */ 1150 static void 1151 g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1152 { 1153 1154 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1155 bioq_insert_tail(&sc->sc_sync_delayed, bp); 1156 } 1157 1158 /* 1159 * Releases delayed regular requests which don't collide anymore with sync 1160 * requests. 1161 */ 1162 static void 1163 g_mirror_regular_release(struct g_mirror_softc *sc) 1164 { 1165 struct bio *bp, *bp2; 1166 1167 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 1168 if (g_mirror_sync_collision(sc, bp)) 1169 continue; 1170 bioq_remove(&sc->sc_regular_delayed, bp); 1171 G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 1172 mtx_lock(&sc->sc_queue_mtx); 1173 bioq_insert_head(&sc->sc_queue, bp); 1174 #if 0 1175 /* 1176 * wakeup() is not needed, because this function is called from 1177 * the worker thread. 1178 */ 1179 wakeup(&sc->sc_queue); 1180 #endif 1181 mtx_unlock(&sc->sc_queue_mtx); 1182 } 1183 } 1184 1185 /* 1186 * Releases delayed sync requests which don't collide anymore with regular 1187 * requests. 1188 */ 1189 static void 1190 g_mirror_sync_release(struct g_mirror_softc *sc) 1191 { 1192 struct bio *bp, *bp2; 1193 1194 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) { 1195 if (g_mirror_regular_collision(sc, bp)) 1196 continue; 1197 bioq_remove(&sc->sc_sync_delayed, bp); 1198 G_MIRROR_LOGREQ(2, bp, 1199 "Releasing delayed synchronization request."); 1200 g_io_request(bp, bp->bio_from); 1201 } 1202 } 1203 1204 /* 1205 * Handle synchronization requests. 1206 * Every synchronization request is two-steps process: first, READ request is 1207 * send to active provider and then WRITE request (with read data) to the provider 1208 * beeing synchronized. When WRITE is finished, new synchronization request is 1209 * send. 1210 */ 1211 static void 1212 g_mirror_sync_request(struct bio *bp) 1213 { 1214 struct g_mirror_softc *sc; 1215 struct g_mirror_disk *disk; 1216 1217 bp->bio_from->index--; 1218 sc = bp->bio_from->geom->softc; 1219 disk = bp->bio_from->private; 1220 if (disk == NULL) { 1221 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1222 g_topology_lock(); 1223 g_mirror_kill_consumer(sc, bp->bio_from); 1224 g_topology_unlock(); 1225 free(bp->bio_data, M_MIRROR); 1226 g_destroy_bio(bp); 1227 sx_xlock(&sc->sc_lock); 1228 return; 1229 } 1230 1231 /* 1232 * Synchronization request. 1233 */ 1234 switch (bp->bio_cmd) { 1235 case BIO_READ: 1236 { 1237 struct g_consumer *cp; 1238 1239 if (bp->bio_error != 0) { 1240 G_MIRROR_LOGREQ(0, bp, 1241 "Synchronization request failed (error=%d).", 1242 bp->bio_error); 1243 g_destroy_bio(bp); 1244 return; 1245 } 1246 G_MIRROR_LOGREQ(3, bp, 1247 "Synchronization request half-finished."); 1248 bp->bio_cmd = BIO_WRITE; 1249 bp->bio_cflags = 0; 1250 cp = disk->d_consumer; 1251 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1252 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1253 cp->acr, cp->acw, cp->ace)); 1254 cp->index++; 1255 g_io_request(bp, cp); 1256 return; 1257 } 1258 case BIO_WRITE: 1259 { 1260 struct g_mirror_disk_sync *sync; 1261 off_t offset; 1262 void *data; 1263 int i; 1264 1265 if (bp->bio_error != 0) { 1266 G_MIRROR_LOGREQ(0, bp, 1267 "Synchronization request failed (error=%d).", 1268 bp->bio_error); 1269 g_destroy_bio(bp); 1270 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1271 g_mirror_event_send(disk, 1272 G_MIRROR_DISK_STATE_DISCONNECTED, 1273 G_MIRROR_EVENT_DONTWAIT); 1274 return; 1275 } 1276 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1277 sync = &disk->d_sync; 1278 if (sync->ds_offset == sc->sc_mediasize || 1279 sync->ds_consumer == NULL || 1280 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1281 /* Don't send more synchronization requests. */ 1282 sync->ds_inflight--; 1283 if (sync->ds_bios != NULL) { 1284 i = (int)(uintptr_t)bp->bio_caller1; 1285 sync->ds_bios[i] = NULL; 1286 } 1287 free(bp->bio_data, M_MIRROR); 1288 g_destroy_bio(bp); 1289 if (sync->ds_inflight > 0) 1290 return; 1291 if (sync->ds_consumer == NULL || 1292 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1293 return; 1294 } 1295 /* Disk up-to-date, activate it. */ 1296 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1297 G_MIRROR_EVENT_DONTWAIT); 1298 return; 1299 } 1300 1301 /* Send next synchronization request. */ 1302 data = bp->bio_data; 1303 bzero(bp, sizeof(*bp)); 1304 bp->bio_cmd = BIO_READ; 1305 bp->bio_offset = sync->ds_offset; 1306 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1307 sync->ds_offset += bp->bio_length; 1308 bp->bio_done = g_mirror_sync_done; 1309 bp->bio_data = data; 1310 bp->bio_from = sync->ds_consumer; 1311 bp->bio_to = sc->sc_provider; 1312 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1313 sync->ds_consumer->index++; 1314 /* 1315 * Delay the request if it is colliding with a regular request. 1316 */ 1317 if (g_mirror_regular_collision(sc, bp)) 1318 g_mirror_sync_delay(sc, bp); 1319 else 1320 g_io_request(bp, sync->ds_consumer); 1321 1322 /* Release delayed requests if possible. */ 1323 g_mirror_regular_release(sc); 1324 1325 /* Find the smallest offset */ 1326 offset = sc->sc_mediasize; 1327 for (i = 0; i < g_mirror_syncreqs; i++) { 1328 bp = sync->ds_bios[i]; 1329 if (bp->bio_offset < offset) 1330 offset = bp->bio_offset; 1331 } 1332 if (sync->ds_offset_done + (MAXPHYS * 100) < offset) { 1333 /* Update offset_done on every 100 blocks. */ 1334 sync->ds_offset_done = offset; 1335 g_mirror_update_metadata(disk); 1336 } 1337 return; 1338 } 1339 default: 1340 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1341 bp->bio_cmd, sc->sc_name)); 1342 break; 1343 } 1344 } 1345 1346 static void 1347 g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1348 { 1349 struct g_mirror_disk *disk; 1350 struct g_consumer *cp; 1351 struct bio *cbp; 1352 1353 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1354 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1355 break; 1356 } 1357 if (disk == NULL) { 1358 if (bp->bio_error == 0) 1359 bp->bio_error = ENXIO; 1360 g_io_deliver(bp, bp->bio_error); 1361 return; 1362 } 1363 cbp = g_clone_bio(bp); 1364 if (cbp == NULL) { 1365 if (bp->bio_error == 0) 1366 bp->bio_error = ENOMEM; 1367 g_io_deliver(bp, bp->bio_error); 1368 return; 1369 } 1370 /* 1371 * Fill in the component buf structure. 1372 */ 1373 cp = disk->d_consumer; 1374 cbp->bio_done = g_mirror_done; 1375 cbp->bio_to = cp->provider; 1376 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1377 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1378 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1379 cp->acw, cp->ace)); 1380 cp->index++; 1381 g_io_request(cbp, cp); 1382 } 1383 1384 static void 1385 g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1386 { 1387 struct g_mirror_disk *disk; 1388 struct g_consumer *cp; 1389 struct bio *cbp; 1390 1391 disk = g_mirror_get_disk(sc); 1392 if (disk == NULL) { 1393 if (bp->bio_error == 0) 1394 bp->bio_error = ENXIO; 1395 g_io_deliver(bp, bp->bio_error); 1396 return; 1397 } 1398 cbp = g_clone_bio(bp); 1399 if (cbp == NULL) { 1400 if (bp->bio_error == 0) 1401 bp->bio_error = ENOMEM; 1402 g_io_deliver(bp, bp->bio_error); 1403 return; 1404 } 1405 /* 1406 * Fill in the component buf structure. 1407 */ 1408 cp = disk->d_consumer; 1409 cbp->bio_done = g_mirror_done; 1410 cbp->bio_to = cp->provider; 1411 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1412 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1413 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1414 cp->acw, cp->ace)); 1415 cp->index++; 1416 g_io_request(cbp, cp); 1417 } 1418 1419 static void 1420 g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1421 { 1422 struct g_mirror_disk *disk, *dp; 1423 struct g_consumer *cp; 1424 struct bio *cbp; 1425 struct bintime curtime; 1426 1427 binuptime(&curtime); 1428 /* 1429 * Find a disk which the smallest load. 1430 */ 1431 disk = NULL; 1432 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1433 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1434 continue; 1435 /* If disk wasn't used for more than 2 sec, use it. */ 1436 if (curtime.sec - dp->d_last_used.sec >= 2) { 1437 disk = dp; 1438 break; 1439 } 1440 if (disk == NULL || 1441 bintime_cmp(&dp->d_delay, &disk->d_delay) < 0) { 1442 disk = dp; 1443 } 1444 } 1445 KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name)); 1446 cbp = g_clone_bio(bp); 1447 if (cbp == NULL) { 1448 if (bp->bio_error == 0) 1449 bp->bio_error = ENOMEM; 1450 g_io_deliver(bp, bp->bio_error); 1451 return; 1452 } 1453 /* 1454 * Fill in the component buf structure. 1455 */ 1456 cp = disk->d_consumer; 1457 cbp->bio_done = g_mirror_done; 1458 cbp->bio_to = cp->provider; 1459 binuptime(&disk->d_last_used); 1460 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1461 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1462 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1463 cp->acw, cp->ace)); 1464 cp->index++; 1465 g_io_request(cbp, cp); 1466 } 1467 1468 static void 1469 g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp) 1470 { 1471 struct bio_queue_head queue; 1472 struct g_mirror_disk *disk; 1473 struct g_consumer *cp; 1474 struct bio *cbp; 1475 off_t left, mod, offset, slice; 1476 u_char *data; 1477 u_int ndisks; 1478 1479 if (bp->bio_length <= sc->sc_slice) { 1480 g_mirror_request_round_robin(sc, bp); 1481 return; 1482 } 1483 ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE); 1484 slice = bp->bio_length / ndisks; 1485 mod = slice % sc->sc_provider->sectorsize; 1486 if (mod != 0) 1487 slice += sc->sc_provider->sectorsize - mod; 1488 /* 1489 * Allocate all bios before sending any request, so we can 1490 * return ENOMEM in nice and clean way. 1491 */ 1492 left = bp->bio_length; 1493 offset = bp->bio_offset; 1494 data = bp->bio_data; 1495 bioq_init(&queue); 1496 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1497 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1498 continue; 1499 cbp = g_clone_bio(bp); 1500 if (cbp == NULL) { 1501 for (cbp = bioq_first(&queue); cbp != NULL; 1502 cbp = bioq_first(&queue)) { 1503 bioq_remove(&queue, cbp); 1504 g_destroy_bio(cbp); 1505 } 1506 if (bp->bio_error == 0) 1507 bp->bio_error = ENOMEM; 1508 g_io_deliver(bp, bp->bio_error); 1509 return; 1510 } 1511 bioq_insert_tail(&queue, cbp); 1512 cbp->bio_done = g_mirror_done; 1513 cbp->bio_caller1 = disk; 1514 cbp->bio_to = disk->d_consumer->provider; 1515 cbp->bio_offset = offset; 1516 cbp->bio_data = data; 1517 cbp->bio_length = MIN(left, slice); 1518 left -= cbp->bio_length; 1519 if (left == 0) 1520 break; 1521 offset += cbp->bio_length; 1522 data += cbp->bio_length; 1523 } 1524 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) { 1525 bioq_remove(&queue, cbp); 1526 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1527 disk = cbp->bio_caller1; 1528 cbp->bio_caller1 = NULL; 1529 cp = disk->d_consumer; 1530 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1531 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1532 cp->acr, cp->acw, cp->ace)); 1533 disk->d_consumer->index++; 1534 g_io_request(cbp, disk->d_consumer); 1535 } 1536 } 1537 1538 static void 1539 g_mirror_register_request(struct bio *bp) 1540 { 1541 struct g_mirror_softc *sc; 1542 1543 sc = bp->bio_to->geom->softc; 1544 switch (bp->bio_cmd) { 1545 case BIO_READ: 1546 switch (sc->sc_balance) { 1547 case G_MIRROR_BALANCE_LOAD: 1548 g_mirror_request_load(sc, bp); 1549 break; 1550 case G_MIRROR_BALANCE_PREFER: 1551 g_mirror_request_prefer(sc, bp); 1552 break; 1553 case G_MIRROR_BALANCE_ROUND_ROBIN: 1554 g_mirror_request_round_robin(sc, bp); 1555 break; 1556 case G_MIRROR_BALANCE_SPLIT: 1557 g_mirror_request_split(sc, bp); 1558 break; 1559 } 1560 return; 1561 case BIO_WRITE: 1562 case BIO_DELETE: 1563 { 1564 struct g_mirror_disk *disk; 1565 struct g_mirror_disk_sync *sync; 1566 struct bio_queue_head queue; 1567 struct g_consumer *cp; 1568 struct bio *cbp; 1569 1570 /* 1571 * Delay the request if it is colliding with a synchronization 1572 * request. 1573 */ 1574 if (g_mirror_sync_collision(sc, bp)) { 1575 g_mirror_regular_delay(sc, bp); 1576 return; 1577 } 1578 1579 if (sc->sc_idle) 1580 g_mirror_unidle(sc); 1581 else 1582 sc->sc_last_write = time_uptime; 1583 1584 /* 1585 * Allocate all bios before sending any request, so we can 1586 * return ENOMEM in nice and clean way. 1587 */ 1588 bioq_init(&queue); 1589 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1590 sync = &disk->d_sync; 1591 switch (disk->d_state) { 1592 case G_MIRROR_DISK_STATE_ACTIVE: 1593 break; 1594 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1595 if (bp->bio_offset >= sync->ds_offset) 1596 continue; 1597 break; 1598 default: 1599 continue; 1600 } 1601 cbp = g_clone_bio(bp); 1602 if (cbp == NULL) { 1603 for (cbp = bioq_first(&queue); cbp != NULL; 1604 cbp = bioq_first(&queue)) { 1605 bioq_remove(&queue, cbp); 1606 g_destroy_bio(cbp); 1607 } 1608 if (bp->bio_error == 0) 1609 bp->bio_error = ENOMEM; 1610 g_io_deliver(bp, bp->bio_error); 1611 return; 1612 } 1613 bioq_insert_tail(&queue, cbp); 1614 cbp->bio_done = g_mirror_done; 1615 cp = disk->d_consumer; 1616 cbp->bio_caller1 = cp; 1617 cbp->bio_to = cp->provider; 1618 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1619 ("Consumer %s not opened (r%dw%de%d).", 1620 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1621 } 1622 for (cbp = bioq_first(&queue); cbp != NULL; 1623 cbp = bioq_first(&queue)) { 1624 bioq_remove(&queue, cbp); 1625 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1626 cp = cbp->bio_caller1; 1627 cbp->bio_caller1 = NULL; 1628 cp->index++; 1629 sc->sc_writes++; 1630 g_io_request(cbp, cp); 1631 } 1632 /* 1633 * Put request onto inflight queue, so we can check if new 1634 * synchronization requests don't collide with it. 1635 */ 1636 bioq_insert_tail(&sc->sc_inflight, bp); 1637 /* 1638 * Bump syncid on first write. 1639 */ 1640 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1641 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1642 g_mirror_bump_syncid(sc); 1643 } 1644 return; 1645 } 1646 default: 1647 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1648 bp->bio_cmd, sc->sc_name)); 1649 break; 1650 } 1651 } 1652 1653 static int 1654 g_mirror_can_destroy(struct g_mirror_softc *sc) 1655 { 1656 struct g_geom *gp; 1657 struct g_consumer *cp; 1658 1659 g_topology_assert(); 1660 gp = sc->sc_geom; 1661 if (gp->softc == NULL) 1662 return (1); 1663 LIST_FOREACH(cp, &gp->consumer, consumer) { 1664 if (g_mirror_is_busy(sc, cp)) 1665 return (0); 1666 } 1667 gp = sc->sc_sync.ds_geom; 1668 LIST_FOREACH(cp, &gp->consumer, consumer) { 1669 if (g_mirror_is_busy(sc, cp)) 1670 return (0); 1671 } 1672 G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1673 sc->sc_name); 1674 return (1); 1675 } 1676 1677 static int 1678 g_mirror_try_destroy(struct g_mirror_softc *sc) 1679 { 1680 1681 if (sc->sc_rootmount != NULL) { 1682 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1683 sc->sc_rootmount); 1684 root_mount_rel(sc->sc_rootmount); 1685 sc->sc_rootmount = NULL; 1686 } 1687 g_topology_lock(); 1688 if (!g_mirror_can_destroy(sc)) { 1689 g_topology_unlock(); 1690 return (0); 1691 } 1692 sc->sc_geom->softc = NULL; 1693 sc->sc_sync.ds_geom->softc = NULL; 1694 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) { 1695 g_topology_unlock(); 1696 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1697 &sc->sc_worker); 1698 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 1699 sx_xunlock(&sc->sc_lock); 1700 wakeup(&sc->sc_worker); 1701 sc->sc_worker = NULL; 1702 } else { 1703 g_topology_unlock(); 1704 g_mirror_destroy_device(sc); 1705 free(sc, M_MIRROR); 1706 } 1707 return (1); 1708 } 1709 1710 /* 1711 * Worker thread. 1712 */ 1713 static void 1714 g_mirror_worker(void *arg) 1715 { 1716 struct g_mirror_softc *sc; 1717 struct g_mirror_event *ep; 1718 struct bio *bp; 1719 int timeout; 1720 1721 sc = arg; 1722 mtx_lock_spin(&sched_lock); 1723 sched_prio(curthread, PRIBIO); 1724 mtx_unlock_spin(&sched_lock); 1725 1726 sx_xlock(&sc->sc_lock); 1727 for (;;) { 1728 G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); 1729 /* 1730 * First take a look at events. 1731 * This is important to handle events before any I/O requests. 1732 */ 1733 ep = g_mirror_event_get(sc); 1734 if (ep != NULL) { 1735 g_mirror_event_remove(sc, ep); 1736 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { 1737 /* Update only device status. */ 1738 G_MIRROR_DEBUG(3, 1739 "Running event for device %s.", 1740 sc->sc_name); 1741 ep->e_error = 0; 1742 g_mirror_update_device(sc, 1); 1743 } else { 1744 /* Update disk status. */ 1745 G_MIRROR_DEBUG(3, "Running event for disk %s.", 1746 g_mirror_get_diskname(ep->e_disk)); 1747 ep->e_error = g_mirror_update_disk(ep->e_disk, 1748 ep->e_state); 1749 if (ep->e_error == 0) 1750 g_mirror_update_device(sc, 0); 1751 } 1752 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { 1753 KASSERT(ep->e_error == 0, 1754 ("Error cannot be handled.")); 1755 g_mirror_event_free(ep); 1756 } else { 1757 ep->e_flags |= G_MIRROR_EVENT_DONE; 1758 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1759 ep); 1760 mtx_lock(&sc->sc_events_mtx); 1761 wakeup(ep); 1762 mtx_unlock(&sc->sc_events_mtx); 1763 } 1764 if ((sc->sc_flags & 1765 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1766 if (g_mirror_try_destroy(sc)) { 1767 curthread->td_pflags &= ~TDP_GEOM; 1768 G_MIRROR_DEBUG(1, "Thread exiting."); 1769 kthread_exit(0); 1770 } 1771 } 1772 G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__); 1773 continue; 1774 } 1775 /* 1776 * Check if we can mark array as CLEAN and if we can't take 1777 * how much seconds should we wait. 1778 */ 1779 timeout = g_mirror_idle(sc, -1); 1780 /* 1781 * Now I/O requests. 1782 */ 1783 /* Get first request from the queue. */ 1784 mtx_lock(&sc->sc_queue_mtx); 1785 bp = bioq_first(&sc->sc_queue); 1786 if (bp == NULL) { 1787 if ((sc->sc_flags & 1788 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1789 mtx_unlock(&sc->sc_queue_mtx); 1790 if (g_mirror_try_destroy(sc)) { 1791 curthread->td_pflags &= ~TDP_GEOM; 1792 G_MIRROR_DEBUG(1, "Thread exiting."); 1793 kthread_exit(0); 1794 } 1795 mtx_lock(&sc->sc_queue_mtx); 1796 } 1797 sx_xunlock(&sc->sc_lock); 1798 /* 1799 * XXX: We can miss an event here, because an event 1800 * can be added without sx-device-lock and without 1801 * mtx-queue-lock. Maybe I should just stop using 1802 * dedicated mutex for events synchronization and 1803 * stick with the queue lock? 1804 * The event will hang here until next I/O request 1805 * or next event is received. 1806 */ 1807 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1808 timeout * hz); 1809 sx_xlock(&sc->sc_lock); 1810 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1811 continue; 1812 } 1813 bioq_remove(&sc->sc_queue, bp); 1814 mtx_unlock(&sc->sc_queue_mtx); 1815 1816 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 1817 g_mirror_regular_request(bp); 1818 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 1819 g_mirror_sync_request(bp); 1820 else 1821 g_mirror_register_request(bp); 1822 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 1823 } 1824 } 1825 1826 static void 1827 g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 1828 { 1829 1830 sx_assert(&sc->sc_lock, SX_LOCKED); 1831 1832 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 1833 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.", 1834 g_mirror_get_diskname(disk), sc->sc_name); 1835 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 1836 } else if (sc->sc_idle && 1837 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 1838 G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.", 1839 g_mirror_get_diskname(disk), sc->sc_name); 1840 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 1841 } 1842 } 1843 1844 static void 1845 g_mirror_sync_start(struct g_mirror_disk *disk) 1846 { 1847 struct g_mirror_softc *sc; 1848 struct g_consumer *cp; 1849 struct bio *bp; 1850 int error, i; 1851 1852 g_topology_assert_not(); 1853 sc = disk->d_softc; 1854 sx_assert(&sc->sc_lock, SX_LOCKED); 1855 1856 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 1857 ("Disk %s is not marked for synchronization.", 1858 g_mirror_get_diskname(disk))); 1859 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1860 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 1861 sc->sc_state)); 1862 1863 sx_xunlock(&sc->sc_lock); 1864 g_topology_lock(); 1865 cp = g_new_consumer(sc->sc_sync.ds_geom); 1866 error = g_attach(cp, sc->sc_provider); 1867 KASSERT(error == 0, 1868 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 1869 error = g_access(cp, 1, 0, 0); 1870 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 1871 g_topology_unlock(); 1872 sx_xlock(&sc->sc_lock); 1873 1874 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 1875 g_mirror_get_diskname(disk)); 1876 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 1877 KASSERT(disk->d_sync.ds_consumer == NULL, 1878 ("Sync consumer already exists (device=%s, disk=%s).", 1879 sc->sc_name, g_mirror_get_diskname(disk))); 1880 1881 disk->d_sync.ds_consumer = cp; 1882 disk->d_sync.ds_consumer->private = disk; 1883 disk->d_sync.ds_consumer->index = 0; 1884 1885 /* 1886 * Allocate memory for synchronization bios and initialize them. 1887 */ 1888 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 1889 M_MIRROR, M_WAITOK); 1890 for (i = 0; i < g_mirror_syncreqs; i++) { 1891 bp = g_alloc_bio(); 1892 disk->d_sync.ds_bios[i] = bp; 1893 bp->bio_parent = NULL; 1894 bp->bio_cmd = BIO_READ; 1895 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 1896 bp->bio_cflags = 0; 1897 bp->bio_offset = disk->d_sync.ds_offset; 1898 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1899 disk->d_sync.ds_offset += bp->bio_length; 1900 bp->bio_done = g_mirror_sync_done; 1901 bp->bio_from = disk->d_sync.ds_consumer; 1902 bp->bio_to = sc->sc_provider; 1903 bp->bio_caller1 = (void *)(uintptr_t)i; 1904 } 1905 1906 /* Increase the number of disks in SYNCHRONIZING state. */ 1907 sc->sc_sync.ds_ndisks++; 1908 /* Set the number of in-flight synchronization requests. */ 1909 disk->d_sync.ds_inflight = g_mirror_syncreqs; 1910 1911 /* 1912 * Fire off first synchronization requests. 1913 */ 1914 for (i = 0; i < g_mirror_syncreqs; i++) { 1915 bp = disk->d_sync.ds_bios[i]; 1916 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1917 disk->d_sync.ds_consumer->index++; 1918 /* 1919 * Delay the request if it is colliding with a regular request. 1920 */ 1921 if (g_mirror_regular_collision(sc, bp)) 1922 g_mirror_sync_delay(sc, bp); 1923 else 1924 g_io_request(bp, disk->d_sync.ds_consumer); 1925 } 1926 } 1927 1928 /* 1929 * Stop synchronization process. 1930 * type: 0 - synchronization finished 1931 * 1 - synchronization stopped 1932 */ 1933 static void 1934 g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 1935 { 1936 struct g_mirror_softc *sc; 1937 struct g_consumer *cp; 1938 1939 g_topology_assert_not(); 1940 sc = disk->d_softc; 1941 sx_assert(&sc->sc_lock, SX_LOCKED); 1942 1943 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 1944 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 1945 g_mirror_disk_state2str(disk->d_state))); 1946 if (disk->d_sync.ds_consumer == NULL) 1947 return; 1948 1949 if (type == 0) { 1950 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 1951 sc->sc_name, g_mirror_get_diskname(disk)); 1952 } else /* if (type == 1) */ { 1953 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 1954 sc->sc_name, g_mirror_get_diskname(disk)); 1955 } 1956 free(disk->d_sync.ds_bios, M_MIRROR); 1957 disk->d_sync.ds_bios = NULL; 1958 cp = disk->d_sync.ds_consumer; 1959 disk->d_sync.ds_consumer = NULL; 1960 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 1961 sc->sc_sync.ds_ndisks--; 1962 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1963 g_topology_lock(); 1964 g_mirror_kill_consumer(sc, cp); 1965 g_topology_unlock(); 1966 sx_xlock(&sc->sc_lock); 1967 } 1968 1969 static void 1970 g_mirror_launch_provider(struct g_mirror_softc *sc) 1971 { 1972 struct g_mirror_disk *disk; 1973 struct g_provider *pp; 1974 1975 sx_assert(&sc->sc_lock, SX_LOCKED); 1976 1977 g_topology_lock(); 1978 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 1979 pp->mediasize = sc->sc_mediasize; 1980 pp->sectorsize = sc->sc_sectorsize; 1981 sc->sc_provider = pp; 1982 g_error_provider(pp, 0); 1983 g_topology_unlock(); 1984 G_MIRROR_DEBUG(0, "Device %s: provider %s launched.", sc->sc_name, 1985 pp->name); 1986 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1987 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 1988 g_mirror_sync_start(disk); 1989 } 1990 } 1991 1992 static void 1993 g_mirror_destroy_provider(struct g_mirror_softc *sc) 1994 { 1995 struct g_mirror_disk *disk; 1996 struct bio *bp; 1997 1998 g_topology_assert_not(); 1999 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2000 sc->sc_name)); 2001 2002 g_topology_lock(); 2003 g_error_provider(sc->sc_provider, ENXIO); 2004 mtx_lock(&sc->sc_queue_mtx); 2005 while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 2006 bioq_remove(&sc->sc_queue, bp); 2007 g_io_deliver(bp, ENXIO); 2008 } 2009 mtx_unlock(&sc->sc_queue_mtx); 2010 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2011 sc->sc_provider->name); 2012 sc->sc_provider->flags |= G_PF_WITHER; 2013 g_orphan_provider(sc->sc_provider, ENXIO); 2014 g_topology_unlock(); 2015 sc->sc_provider = NULL; 2016 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2017 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2018 g_mirror_sync_stop(disk, 1); 2019 } 2020 } 2021 2022 static void 2023 g_mirror_go(void *arg) 2024 { 2025 struct g_mirror_softc *sc; 2026 2027 sc = arg; 2028 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2029 g_mirror_event_send(sc, 0, 2030 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2031 } 2032 2033 static u_int 2034 g_mirror_determine_state(struct g_mirror_disk *disk) 2035 { 2036 struct g_mirror_softc *sc; 2037 u_int state; 2038 2039 sc = disk->d_softc; 2040 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2041 if ((disk->d_flags & 2042 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2043 /* Disk does not need synchronization. */ 2044 state = G_MIRROR_DISK_STATE_ACTIVE; 2045 } else { 2046 if ((sc->sc_flags & 2047 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2048 (disk->d_flags & 2049 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2050 /* 2051 * We can start synchronization from 2052 * the stored offset. 2053 */ 2054 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2055 } else { 2056 state = G_MIRROR_DISK_STATE_STALE; 2057 } 2058 } 2059 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2060 /* 2061 * Reset all synchronization data for this disk, 2062 * because if it even was synchronized, it was 2063 * synchronized to disks with different syncid. 2064 */ 2065 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2066 disk->d_sync.ds_offset = 0; 2067 disk->d_sync.ds_offset_done = 0; 2068 disk->d_sync.ds_syncid = sc->sc_syncid; 2069 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2070 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2071 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2072 } else { 2073 state = G_MIRROR_DISK_STATE_STALE; 2074 } 2075 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2076 /* 2077 * Not good, NOT GOOD! 2078 * It means that mirror was started on stale disks 2079 * and more fresh disk just arrive. 2080 * If there were writes, mirror is fucked up, sorry. 2081 * I think the best choice here is don't touch 2082 * this disk and inform the user laudly. 2083 */ 2084 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2085 "disk (%s) arrives!! It will not be connected to the " 2086 "running device.", sc->sc_name, 2087 g_mirror_get_diskname(disk)); 2088 g_mirror_destroy_disk(disk); 2089 state = G_MIRROR_DISK_STATE_NONE; 2090 /* Return immediately, because disk was destroyed. */ 2091 return (state); 2092 } 2093 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2094 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2095 return (state); 2096 } 2097 2098 /* 2099 * Update device state. 2100 */ 2101 static void 2102 g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force) 2103 { 2104 struct g_mirror_disk *disk; 2105 u_int state; 2106 2107 sx_assert(&sc->sc_lock, SX_XLOCKED); 2108 2109 switch (sc->sc_state) { 2110 case G_MIRROR_DEVICE_STATE_STARTING: 2111 { 2112 struct g_mirror_disk *pdisk, *tdisk; 2113 u_int dirty, ndisks, genid, syncid; 2114 2115 KASSERT(sc->sc_provider == NULL, 2116 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2117 /* 2118 * Are we ready? We are, if all disks are connected or 2119 * if we have any disks and 'force' is true. 2120 */ 2121 ndisks = g_mirror_ndisks(sc, -1); 2122 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2123 ; 2124 } else if (ndisks == 0) { 2125 /* 2126 * Disks went down in starting phase, so destroy 2127 * device. 2128 */ 2129 callout_drain(&sc->sc_callout); 2130 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2131 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2132 sc->sc_rootmount); 2133 root_mount_rel(sc->sc_rootmount); 2134 sc->sc_rootmount = NULL; 2135 return; 2136 } else { 2137 return; 2138 } 2139 2140 /* 2141 * Activate all disks with the biggest syncid. 2142 */ 2143 if (force) { 2144 /* 2145 * If 'force' is true, we have been called due to 2146 * timeout, so don't bother canceling timeout. 2147 */ 2148 ndisks = 0; 2149 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2150 if ((disk->d_flags & 2151 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2152 ndisks++; 2153 } 2154 } 2155 if (ndisks == 0) { 2156 /* No valid disks found, destroy device. */ 2157 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2158 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2159 __LINE__, sc->sc_rootmount); 2160 root_mount_rel(sc->sc_rootmount); 2161 sc->sc_rootmount = NULL; 2162 return; 2163 } 2164 } else { 2165 /* Cancel timeout. */ 2166 callout_drain(&sc->sc_callout); 2167 } 2168 2169 /* 2170 * Find the biggest genid. 2171 */ 2172 genid = 0; 2173 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2174 if (disk->d_genid > genid) 2175 genid = disk->d_genid; 2176 } 2177 sc->sc_genid = genid; 2178 /* 2179 * Remove all disks without the biggest genid. 2180 */ 2181 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2182 if (disk->d_genid < genid) { 2183 G_MIRROR_DEBUG(0, 2184 "Component %s (device %s) broken, skipping.", 2185 g_mirror_get_diskname(disk), sc->sc_name); 2186 g_mirror_destroy_disk(disk); 2187 } 2188 } 2189 2190 /* 2191 * Find the biggest syncid. 2192 */ 2193 syncid = 0; 2194 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2195 if (disk->d_sync.ds_syncid > syncid) 2196 syncid = disk->d_sync.ds_syncid; 2197 } 2198 2199 /* 2200 * Here we need to look for dirty disks and if all disks 2201 * with the biggest syncid are dirty, we have to choose 2202 * one with the biggest priority and rebuild the rest. 2203 */ 2204 /* 2205 * Find the number of dirty disks with the biggest syncid. 2206 * Find the number of disks with the biggest syncid. 2207 * While here, find a disk with the biggest priority. 2208 */ 2209 dirty = ndisks = 0; 2210 pdisk = NULL; 2211 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2212 if (disk->d_sync.ds_syncid != syncid) 2213 continue; 2214 if ((disk->d_flags & 2215 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2216 continue; 2217 } 2218 ndisks++; 2219 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2220 dirty++; 2221 if (pdisk == NULL || 2222 pdisk->d_priority < disk->d_priority) { 2223 pdisk = disk; 2224 } 2225 } 2226 } 2227 if (dirty == 0) { 2228 /* No dirty disks at all, great. */ 2229 } else if (dirty == ndisks) { 2230 /* 2231 * Force synchronization for all dirty disks except one 2232 * with the biggest priority. 2233 */ 2234 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2235 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2236 "master disk for synchronization.", 2237 g_mirror_get_diskname(pdisk), sc->sc_name); 2238 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2239 if (disk->d_sync.ds_syncid != syncid) 2240 continue; 2241 if ((disk->d_flags & 2242 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2243 continue; 2244 } 2245 KASSERT((disk->d_flags & 2246 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2247 ("Disk %s isn't marked as dirty.", 2248 g_mirror_get_diskname(disk))); 2249 /* Skip the disk with the biggest priority. */ 2250 if (disk == pdisk) 2251 continue; 2252 disk->d_sync.ds_syncid = 0; 2253 } 2254 } else if (dirty < ndisks) { 2255 /* 2256 * Force synchronization for all dirty disks. 2257 * We have some non-dirty disks. 2258 */ 2259 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2260 if (disk->d_sync.ds_syncid != syncid) 2261 continue; 2262 if ((disk->d_flags & 2263 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2264 continue; 2265 } 2266 if ((disk->d_flags & 2267 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2268 continue; 2269 } 2270 disk->d_sync.ds_syncid = 0; 2271 } 2272 } 2273 2274 /* Reset hint. */ 2275 sc->sc_hint = NULL; 2276 sc->sc_syncid = syncid; 2277 if (force) { 2278 /* Remember to bump syncid on first write. */ 2279 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2280 } 2281 state = G_MIRROR_DEVICE_STATE_RUNNING; 2282 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2283 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2284 g_mirror_device_state2str(state)); 2285 sc->sc_state = state; 2286 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2287 state = g_mirror_determine_state(disk); 2288 g_mirror_event_send(disk, state, 2289 G_MIRROR_EVENT_DONTWAIT); 2290 if (state == G_MIRROR_DISK_STATE_STALE) 2291 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2292 } 2293 break; 2294 } 2295 case G_MIRROR_DEVICE_STATE_RUNNING: 2296 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2297 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2298 /* 2299 * No active disks or no disks at all, 2300 * so destroy device. 2301 */ 2302 if (sc->sc_provider != NULL) 2303 g_mirror_destroy_provider(sc); 2304 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2305 break; 2306 } else if (g_mirror_ndisks(sc, 2307 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2308 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2309 /* 2310 * We have active disks, launch provider if it doesn't 2311 * exist. 2312 */ 2313 if (sc->sc_provider == NULL) 2314 g_mirror_launch_provider(sc); 2315 if (sc->sc_rootmount != NULL) { 2316 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2317 __LINE__, sc->sc_rootmount); 2318 root_mount_rel(sc->sc_rootmount); 2319 sc->sc_rootmount = NULL; 2320 } 2321 } 2322 /* 2323 * Genid should be bumped immediately, so do it here. 2324 */ 2325 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2326 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2327 g_mirror_bump_genid(sc); 2328 } 2329 break; 2330 default: 2331 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2332 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2333 break; 2334 } 2335 } 2336 2337 /* 2338 * Update disk state and device state if needed. 2339 */ 2340 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2341 "Disk %s state changed from %s to %s (device %s).", \ 2342 g_mirror_get_diskname(disk), \ 2343 g_mirror_disk_state2str(disk->d_state), \ 2344 g_mirror_disk_state2str(state), sc->sc_name) 2345 static int 2346 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2347 { 2348 struct g_mirror_softc *sc; 2349 2350 sc = disk->d_softc; 2351 sx_assert(&sc->sc_lock, SX_XLOCKED); 2352 2353 again: 2354 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2355 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2356 g_mirror_disk_state2str(state)); 2357 switch (state) { 2358 case G_MIRROR_DISK_STATE_NEW: 2359 /* 2360 * Possible scenarios: 2361 * 1. New disk arrive. 2362 */ 2363 /* Previous state should be NONE. */ 2364 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2365 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2366 g_mirror_disk_state2str(disk->d_state))); 2367 DISK_STATE_CHANGED(); 2368 2369 disk->d_state = state; 2370 if (LIST_EMPTY(&sc->sc_disks)) 2371 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2372 else { 2373 struct g_mirror_disk *dp; 2374 2375 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2376 if (disk->d_priority >= dp->d_priority) { 2377 LIST_INSERT_BEFORE(dp, disk, d_next); 2378 dp = NULL; 2379 break; 2380 } 2381 if (LIST_NEXT(dp, d_next) == NULL) 2382 break; 2383 } 2384 if (dp != NULL) 2385 LIST_INSERT_AFTER(dp, disk, d_next); 2386 } 2387 G_MIRROR_DEBUG(0, "Device %s: provider %s detected.", 2388 sc->sc_name, g_mirror_get_diskname(disk)); 2389 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2390 break; 2391 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2392 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2393 g_mirror_device_state2str(sc->sc_state), 2394 g_mirror_get_diskname(disk), 2395 g_mirror_disk_state2str(disk->d_state))); 2396 state = g_mirror_determine_state(disk); 2397 if (state != G_MIRROR_DISK_STATE_NONE) 2398 goto again; 2399 break; 2400 case G_MIRROR_DISK_STATE_ACTIVE: 2401 /* 2402 * Possible scenarios: 2403 * 1. New disk does not need synchronization. 2404 * 2. Synchronization process finished successfully. 2405 */ 2406 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2407 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2408 g_mirror_device_state2str(sc->sc_state), 2409 g_mirror_get_diskname(disk), 2410 g_mirror_disk_state2str(disk->d_state))); 2411 /* Previous state should be NEW or SYNCHRONIZING. */ 2412 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2413 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2414 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2415 g_mirror_disk_state2str(disk->d_state))); 2416 DISK_STATE_CHANGED(); 2417 2418 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2419 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2420 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2421 g_mirror_sync_stop(disk, 0); 2422 } 2423 disk->d_state = state; 2424 disk->d_sync.ds_offset = 0; 2425 disk->d_sync.ds_offset_done = 0; 2426 g_mirror_update_idle(sc, disk); 2427 g_mirror_update_metadata(disk); 2428 G_MIRROR_DEBUG(0, "Device %s: provider %s activated.", 2429 sc->sc_name, g_mirror_get_diskname(disk)); 2430 break; 2431 case G_MIRROR_DISK_STATE_STALE: 2432 /* 2433 * Possible scenarios: 2434 * 1. Stale disk was connected. 2435 */ 2436 /* Previous state should be NEW. */ 2437 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2438 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2439 g_mirror_disk_state2str(disk->d_state))); 2440 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2441 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2442 g_mirror_device_state2str(sc->sc_state), 2443 g_mirror_get_diskname(disk), 2444 g_mirror_disk_state2str(disk->d_state))); 2445 /* 2446 * STALE state is only possible if device is marked 2447 * NOAUTOSYNC. 2448 */ 2449 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2450 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2451 g_mirror_device_state2str(sc->sc_state), 2452 g_mirror_get_diskname(disk), 2453 g_mirror_disk_state2str(disk->d_state))); 2454 DISK_STATE_CHANGED(); 2455 2456 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2457 disk->d_state = state; 2458 g_mirror_update_metadata(disk); 2459 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2460 sc->sc_name, g_mirror_get_diskname(disk)); 2461 break; 2462 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2463 /* 2464 * Possible scenarios: 2465 * 1. Disk which needs synchronization was connected. 2466 */ 2467 /* Previous state should be NEW. */ 2468 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2469 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2470 g_mirror_disk_state2str(disk->d_state))); 2471 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2472 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2473 g_mirror_device_state2str(sc->sc_state), 2474 g_mirror_get_diskname(disk), 2475 g_mirror_disk_state2str(disk->d_state))); 2476 DISK_STATE_CHANGED(); 2477 2478 if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2479 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2480 disk->d_state = state; 2481 if (sc->sc_provider != NULL) { 2482 g_mirror_sync_start(disk); 2483 g_mirror_update_metadata(disk); 2484 } 2485 break; 2486 case G_MIRROR_DISK_STATE_DISCONNECTED: 2487 /* 2488 * Possible scenarios: 2489 * 1. Device wasn't running yet, but disk disappear. 2490 * 2. Disk was active and disapppear. 2491 * 3. Disk disappear during synchronization process. 2492 */ 2493 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2494 /* 2495 * Previous state should be ACTIVE, STALE or 2496 * SYNCHRONIZING. 2497 */ 2498 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2499 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2500 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2501 ("Wrong disk state (%s, %s).", 2502 g_mirror_get_diskname(disk), 2503 g_mirror_disk_state2str(disk->d_state))); 2504 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2505 /* Previous state should be NEW. */ 2506 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2507 ("Wrong disk state (%s, %s).", 2508 g_mirror_get_diskname(disk), 2509 g_mirror_disk_state2str(disk->d_state))); 2510 /* 2511 * Reset bumping syncid if disk disappeared in STARTING 2512 * state. 2513 */ 2514 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2515 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2516 #ifdef INVARIANTS 2517 } else { 2518 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2519 sc->sc_name, 2520 g_mirror_device_state2str(sc->sc_state), 2521 g_mirror_get_diskname(disk), 2522 g_mirror_disk_state2str(disk->d_state))); 2523 #endif 2524 } 2525 DISK_STATE_CHANGED(); 2526 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2527 sc->sc_name, g_mirror_get_diskname(disk)); 2528 2529 g_mirror_destroy_disk(disk); 2530 break; 2531 case G_MIRROR_DISK_STATE_DESTROY: 2532 { 2533 int error; 2534 2535 error = g_mirror_clear_metadata(disk); 2536 if (error != 0) 2537 return (error); 2538 DISK_STATE_CHANGED(); 2539 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2540 sc->sc_name, g_mirror_get_diskname(disk)); 2541 2542 g_mirror_destroy_disk(disk); 2543 sc->sc_ndisks--; 2544 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2545 g_mirror_update_metadata(disk); 2546 } 2547 break; 2548 } 2549 default: 2550 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2551 break; 2552 } 2553 return (0); 2554 } 2555 #undef DISK_STATE_CHANGED 2556 2557 int 2558 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2559 { 2560 struct g_provider *pp; 2561 u_char *buf; 2562 int error; 2563 2564 g_topology_assert(); 2565 2566 error = g_access(cp, 1, 0, 0); 2567 if (error != 0) 2568 return (error); 2569 pp = cp->provider; 2570 g_topology_unlock(); 2571 /* Metadata are stored on last sector. */ 2572 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2573 &error); 2574 g_topology_lock(); 2575 g_access(cp, -1, 0, 0); 2576 if (buf == NULL) { 2577 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2578 cp->provider->name, error); 2579 return (error); 2580 } 2581 2582 /* Decode metadata. */ 2583 error = mirror_metadata_decode(buf, md); 2584 g_free(buf); 2585 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2586 return (EINVAL); 2587 if (md->md_version > G_MIRROR_VERSION) { 2588 G_MIRROR_DEBUG(0, 2589 "Kernel module is too old to handle metadata from %s.", 2590 cp->provider->name); 2591 return (EINVAL); 2592 } 2593 if (error != 0) { 2594 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2595 cp->provider->name); 2596 return (error); 2597 } 2598 2599 return (0); 2600 } 2601 2602 static int 2603 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2604 struct g_mirror_metadata *md) 2605 { 2606 2607 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2608 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2609 pp->name, md->md_did); 2610 return (EEXIST); 2611 } 2612 if (md->md_all != sc->sc_ndisks) { 2613 G_MIRROR_DEBUG(1, 2614 "Invalid '%s' field on disk %s (device %s), skipping.", 2615 "md_all", pp->name, sc->sc_name); 2616 return (EINVAL); 2617 } 2618 if (md->md_slice != sc->sc_slice) { 2619 G_MIRROR_DEBUG(1, 2620 "Invalid '%s' field on disk %s (device %s), skipping.", 2621 "md_slice", pp->name, sc->sc_name); 2622 return (EINVAL); 2623 } 2624 if (md->md_balance != sc->sc_balance) { 2625 G_MIRROR_DEBUG(1, 2626 "Invalid '%s' field on disk %s (device %s), skipping.", 2627 "md_balance", pp->name, sc->sc_name); 2628 return (EINVAL); 2629 } 2630 if (md->md_mediasize != sc->sc_mediasize) { 2631 G_MIRROR_DEBUG(1, 2632 "Invalid '%s' field on disk %s (device %s), skipping.", 2633 "md_mediasize", pp->name, sc->sc_name); 2634 return (EINVAL); 2635 } 2636 if (sc->sc_mediasize > pp->mediasize) { 2637 G_MIRROR_DEBUG(1, 2638 "Invalid size of disk %s (device %s), skipping.", pp->name, 2639 sc->sc_name); 2640 return (EINVAL); 2641 } 2642 if (md->md_sectorsize != sc->sc_sectorsize) { 2643 G_MIRROR_DEBUG(1, 2644 "Invalid '%s' field on disk %s (device %s), skipping.", 2645 "md_sectorsize", pp->name, sc->sc_name); 2646 return (EINVAL); 2647 } 2648 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2649 G_MIRROR_DEBUG(1, 2650 "Invalid sector size of disk %s (device %s), skipping.", 2651 pp->name, sc->sc_name); 2652 return (EINVAL); 2653 } 2654 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2655 G_MIRROR_DEBUG(1, 2656 "Invalid device flags on disk %s (device %s), skipping.", 2657 pp->name, sc->sc_name); 2658 return (EINVAL); 2659 } 2660 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2661 G_MIRROR_DEBUG(1, 2662 "Invalid disk flags on disk %s (device %s), skipping.", 2663 pp->name, sc->sc_name); 2664 return (EINVAL); 2665 } 2666 return (0); 2667 } 2668 2669 int 2670 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2671 struct g_mirror_metadata *md) 2672 { 2673 struct g_mirror_disk *disk; 2674 int error; 2675 2676 g_topology_assert_not(); 2677 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2678 2679 error = g_mirror_check_metadata(sc, pp, md); 2680 if (error != 0) 2681 return (error); 2682 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2683 md->md_genid < sc->sc_genid) { 2684 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2685 pp->name, sc->sc_name); 2686 return (EINVAL); 2687 } 2688 disk = g_mirror_init_disk(sc, pp, md, &error); 2689 if (disk == NULL) 2690 return (error); 2691 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2692 G_MIRROR_EVENT_WAIT); 2693 if (error != 0) 2694 return (error); 2695 if (md->md_version < G_MIRROR_VERSION) { 2696 G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 2697 pp->name, md->md_version, G_MIRROR_VERSION); 2698 g_mirror_update_metadata(disk); 2699 } 2700 return (0); 2701 } 2702 2703 static void 2704 g_mirror_destroy_delayed(void *arg, int flag) 2705 { 2706 struct g_mirror_softc *sc; 2707 int error; 2708 2709 if (flag == EV_CANCEL) { 2710 G_MIRROR_DEBUG(1, "Destroying canceled."); 2711 return; 2712 } 2713 sc = arg; 2714 g_topology_unlock(); 2715 sx_xlock(&sc->sc_lock); 2716 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2717 ("DESTROY flag set on %s.", sc->sc_name)); 2718 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2719 ("DESTROYING flag not set on %s.", sc->sc_name)); 2720 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2721 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2722 if (error != 0) { 2723 G_MIRROR_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 2724 sx_xunlock(&sc->sc_lock); 2725 } 2726 g_topology_lock(); 2727 } 2728 2729 static int 2730 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2731 { 2732 struct g_mirror_softc *sc; 2733 int dcr, dcw, dce, error = 0; 2734 2735 g_topology_assert(); 2736 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2737 acw, ace); 2738 2739 sc = pp->geom->softc; 2740 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2741 return (0); 2742 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2743 2744 dcr = pp->acr + acr; 2745 dcw = pp->acw + acw; 2746 dce = pp->ace + ace; 2747 2748 g_topology_unlock(); 2749 sx_xlock(&sc->sc_lock); 2750 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2751 LIST_EMPTY(&sc->sc_disks)) { 2752 if (acr > 0 || acw > 0 || ace > 0) 2753 error = ENXIO; 2754 goto end; 2755 } 2756 if (dcw == 0 && !sc->sc_idle) 2757 g_mirror_idle(sc, dcw); 2758 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2759 if (acr > 0 || acw > 0 || ace > 0) { 2760 error = ENXIO; 2761 goto end; 2762 } 2763 if (dcr == 0 && dcw == 0 && dce == 0) { 2764 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2765 sc, NULL); 2766 } 2767 } 2768 end: 2769 sx_xunlock(&sc->sc_lock); 2770 g_topology_lock(); 2771 return (error); 2772 } 2773 2774 static struct g_geom * 2775 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2776 { 2777 struct g_mirror_softc *sc; 2778 struct g_geom *gp; 2779 int error, timeout; 2780 2781 g_topology_assert(); 2782 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2783 md->md_mid); 2784 2785 /* One disk is minimum. */ 2786 if (md->md_all < 1) 2787 return (NULL); 2788 /* 2789 * Action geom. 2790 */ 2791 gp = g_new_geomf(mp, "%s", md->md_name); 2792 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2793 gp->start = g_mirror_start; 2794 gp->orphan = g_mirror_orphan; 2795 gp->access = g_mirror_access; 2796 gp->dumpconf = g_mirror_dumpconf; 2797 2798 sc->sc_id = md->md_mid; 2799 sc->sc_slice = md->md_slice; 2800 sc->sc_balance = md->md_balance; 2801 sc->sc_mediasize = md->md_mediasize; 2802 sc->sc_sectorsize = md->md_sectorsize; 2803 sc->sc_ndisks = md->md_all; 2804 sc->sc_flags = md->md_mflags; 2805 sc->sc_bump_id = 0; 2806 sc->sc_idle = 1; 2807 sc->sc_last_write = time_uptime; 2808 sc->sc_writes = 0; 2809 sx_init(&sc->sc_lock, "gmirror:lock"); 2810 bioq_init(&sc->sc_queue); 2811 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2812 bioq_init(&sc->sc_regular_delayed); 2813 bioq_init(&sc->sc_inflight); 2814 bioq_init(&sc->sc_sync_delayed); 2815 LIST_INIT(&sc->sc_disks); 2816 TAILQ_INIT(&sc->sc_events); 2817 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2818 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2819 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2820 gp->softc = sc; 2821 sc->sc_geom = gp; 2822 sc->sc_provider = NULL; 2823 /* 2824 * Synchronization geom. 2825 */ 2826 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2827 gp->softc = sc; 2828 gp->orphan = g_mirror_orphan; 2829 sc->sc_sync.ds_geom = gp; 2830 sc->sc_sync.ds_ndisks = 0; 2831 error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2832 "g_mirror %s", md->md_name); 2833 if (error != 0) { 2834 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2835 sc->sc_name); 2836 g_destroy_geom(sc->sc_sync.ds_geom); 2837 mtx_destroy(&sc->sc_events_mtx); 2838 mtx_destroy(&sc->sc_queue_mtx); 2839 sx_destroy(&sc->sc_lock); 2840 g_destroy_geom(sc->sc_geom); 2841 free(sc, M_MIRROR); 2842 return (NULL); 2843 } 2844 2845 G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_name, sc->sc_id); 2846 2847 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2848 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2849 /* 2850 * Run timeout. 2851 */ 2852 timeout = g_mirror_timeout * hz; 2853 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 2854 return (sc->sc_geom); 2855 } 2856 2857 int 2858 g_mirror_destroy(struct g_mirror_softc *sc, int how) 2859 { 2860 struct g_mirror_disk *disk; 2861 struct g_provider *pp; 2862 2863 g_topology_assert_not(); 2864 if (sc == NULL) 2865 return (ENXIO); 2866 sx_assert(&sc->sc_lock, SX_XLOCKED); 2867 2868 pp = sc->sc_provider; 2869 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 2870 switch (how) { 2871 case G_MIRROR_DESTROY_SOFT: 2872 G_MIRROR_DEBUG(1, 2873 "Device %s is still open (r%dw%de%d).", pp->name, 2874 pp->acr, pp->acw, pp->ace); 2875 return (EBUSY); 2876 case G_MIRROR_DESTROY_DELAYED: 2877 G_MIRROR_DEBUG(1, 2878 "Device %s will be destroyed on last close.", 2879 pp->name); 2880 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2881 if (disk->d_state == 2882 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2883 g_mirror_sync_stop(disk, 1); 2884 } 2885 } 2886 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 2887 return (EBUSY); 2888 case G_MIRROR_DESTROY_HARD: 2889 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 2890 "can't be definitely removed.", pp->name); 2891 } 2892 } 2893 2894 g_topology_lock(); 2895 if (sc->sc_geom->softc == NULL) { 2896 g_topology_unlock(); 2897 return (0); 2898 } 2899 sc->sc_geom->softc = NULL; 2900 sc->sc_sync.ds_geom->softc = NULL; 2901 g_topology_unlock(); 2902 2903 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2904 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 2905 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 2906 sx_xunlock(&sc->sc_lock); 2907 mtx_lock(&sc->sc_queue_mtx); 2908 wakeup(sc); 2909 mtx_unlock(&sc->sc_queue_mtx); 2910 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 2911 while (sc->sc_worker != NULL) 2912 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 2913 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 2914 sx_xlock(&sc->sc_lock); 2915 g_mirror_destroy_device(sc); 2916 free(sc, M_MIRROR); 2917 return (0); 2918 } 2919 2920 static void 2921 g_mirror_taste_orphan(struct g_consumer *cp) 2922 { 2923 2924 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 2925 cp->provider->name)); 2926 } 2927 2928 static struct g_geom * 2929 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 2930 { 2931 struct g_mirror_metadata md; 2932 struct g_mirror_softc *sc; 2933 struct g_consumer *cp; 2934 struct g_geom *gp; 2935 int error; 2936 2937 g_topology_assert(); 2938 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 2939 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 2940 2941 gp = g_new_geomf(mp, "mirror:taste"); 2942 /* 2943 * This orphan function should be never called. 2944 */ 2945 gp->orphan = g_mirror_taste_orphan; 2946 cp = g_new_consumer(gp); 2947 g_attach(cp, pp); 2948 error = g_mirror_read_metadata(cp, &md); 2949 g_detach(cp); 2950 g_destroy_consumer(cp); 2951 g_destroy_geom(gp); 2952 if (error != 0) 2953 return (NULL); 2954 gp = NULL; 2955 2956 if (md.md_provider[0] != '\0' && strcmp(md.md_provider, pp->name) != 0) 2957 return (NULL); 2958 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 2959 return (NULL); 2960 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 2961 G_MIRROR_DEBUG(0, 2962 "Device %s: provider %s marked as inactive, skipping.", 2963 md.md_name, pp->name); 2964 return (NULL); 2965 } 2966 if (g_mirror_debug >= 2) 2967 mirror_metadata_dump(&md); 2968 2969 /* 2970 * Let's check if device already exists. 2971 */ 2972 sc = NULL; 2973 LIST_FOREACH(gp, &mp->geom, geom) { 2974 sc = gp->softc; 2975 if (sc == NULL) 2976 continue; 2977 if (sc->sc_sync.ds_geom == gp) 2978 continue; 2979 if (strcmp(md.md_name, sc->sc_name) != 0) 2980 continue; 2981 if (md.md_mid != sc->sc_id) { 2982 G_MIRROR_DEBUG(0, "Device %s already configured.", 2983 sc->sc_name); 2984 return (NULL); 2985 } 2986 break; 2987 } 2988 if (gp == NULL) { 2989 gp = g_mirror_create(mp, &md); 2990 if (gp == NULL) { 2991 G_MIRROR_DEBUG(0, "Cannot create device %s.", 2992 md.md_name); 2993 return (NULL); 2994 } 2995 sc = gp->softc; 2996 } 2997 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 2998 g_topology_unlock(); 2999 sx_xlock(&sc->sc_lock); 3000 error = g_mirror_add_disk(sc, pp, &md); 3001 if (error != 0) { 3002 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3003 pp->name, gp->name, error); 3004 if (LIST_EMPTY(&sc->sc_disks)) { 3005 g_cancel_event(sc); 3006 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3007 g_topology_lock(); 3008 return (NULL); 3009 } 3010 gp = NULL; 3011 } 3012 sx_xunlock(&sc->sc_lock); 3013 g_topology_lock(); 3014 return (gp); 3015 } 3016 3017 static int 3018 g_mirror_destroy_geom(struct gctl_req *req __unused, 3019 struct g_class *mp __unused, struct g_geom *gp) 3020 { 3021 struct g_mirror_softc *sc; 3022 int error; 3023 3024 g_topology_unlock(); 3025 sc = gp->softc; 3026 sx_xlock(&sc->sc_lock); 3027 g_cancel_event(sc); 3028 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3029 if (error != 0) 3030 sx_xunlock(&sc->sc_lock); 3031 g_topology_lock(); 3032 return (error); 3033 } 3034 3035 static void 3036 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3037 struct g_consumer *cp, struct g_provider *pp) 3038 { 3039 struct g_mirror_softc *sc; 3040 3041 g_topology_assert(); 3042 3043 sc = gp->softc; 3044 if (sc == NULL) 3045 return; 3046 /* Skip synchronization geom. */ 3047 if (gp == sc->sc_sync.ds_geom) 3048 return; 3049 if (pp != NULL) { 3050 /* Nothing here. */ 3051 } else if (cp != NULL) { 3052 struct g_mirror_disk *disk; 3053 3054 disk = cp->private; 3055 if (disk == NULL) 3056 return; 3057 g_topology_unlock(); 3058 sx_xlock(&sc->sc_lock); 3059 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3060 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3061 sbuf_printf(sb, "%s<Synchronized>", indent); 3062 if (disk->d_sync.ds_offset == 0) 3063 sbuf_printf(sb, "0%%"); 3064 else { 3065 sbuf_printf(sb, "%u%%", 3066 (u_int)((disk->d_sync.ds_offset * 100) / 3067 sc->sc_provider->mediasize)); 3068 } 3069 sbuf_printf(sb, "</Synchronized>\n"); 3070 } 3071 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3072 disk->d_sync.ds_syncid); 3073 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3074 disk->d_genid); 3075 sbuf_printf(sb, "%s<Flags>", indent); 3076 if (disk->d_flags == 0) 3077 sbuf_printf(sb, "NONE"); 3078 else { 3079 int first = 1; 3080 3081 #define ADD_FLAG(flag, name) do { \ 3082 if ((disk->d_flags & (flag)) != 0) { \ 3083 if (!first) \ 3084 sbuf_printf(sb, ", "); \ 3085 else \ 3086 first = 0; \ 3087 sbuf_printf(sb, name); \ 3088 } \ 3089 } while (0) 3090 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3091 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3092 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3093 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3094 "SYNCHRONIZING"); 3095 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3096 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3097 #undef ADD_FLAG 3098 } 3099 sbuf_printf(sb, "</Flags>\n"); 3100 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3101 disk->d_priority); 3102 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3103 g_mirror_disk_state2str(disk->d_state)); 3104 sx_xunlock(&sc->sc_lock); 3105 g_topology_lock(); 3106 } else { 3107 g_topology_unlock(); 3108 sx_xlock(&sc->sc_lock); 3109 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3110 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3111 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3112 sbuf_printf(sb, "%s<Flags>", indent); 3113 if (sc->sc_flags == 0) 3114 sbuf_printf(sb, "NONE"); 3115 else { 3116 int first = 1; 3117 3118 #define ADD_FLAG(flag, name) do { \ 3119 if ((sc->sc_flags & (flag)) != 0) { \ 3120 if (!first) \ 3121 sbuf_printf(sb, ", "); \ 3122 else \ 3123 first = 0; \ 3124 sbuf_printf(sb, name); \ 3125 } \ 3126 } while (0) 3127 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3128 #undef ADD_FLAG 3129 } 3130 sbuf_printf(sb, "</Flags>\n"); 3131 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3132 (u_int)sc->sc_slice); 3133 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3134 balance_name(sc->sc_balance)); 3135 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3136 sc->sc_ndisks); 3137 sbuf_printf(sb, "%s<State>", indent); 3138 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3139 sbuf_printf(sb, "%s", "STARTING"); 3140 else if (sc->sc_ndisks == 3141 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3142 sbuf_printf(sb, "%s", "COMPLETE"); 3143 else 3144 sbuf_printf(sb, "%s", "DEGRADED"); 3145 sbuf_printf(sb, "</State>\n"); 3146 sx_xunlock(&sc->sc_lock); 3147 g_topology_lock(); 3148 } 3149 } 3150 3151 static void 3152 g_mirror_shutdown_pre_sync(void *arg, int howto) 3153 { 3154 struct g_class *mp; 3155 struct g_geom *gp, *gp2; 3156 struct g_mirror_softc *sc; 3157 int error; 3158 3159 mp = arg; 3160 DROP_GIANT(); 3161 g_topology_lock(); 3162 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3163 if ((sc = gp->softc) == NULL) 3164 continue; 3165 /* Skip synchronization geom. */ 3166 if (gp == sc->sc_sync.ds_geom) 3167 continue; 3168 g_topology_unlock(); 3169 sx_xlock(&sc->sc_lock); 3170 g_cancel_event(sc); 3171 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3172 if (error != 0) 3173 sx_xunlock(&sc->sc_lock); 3174 g_topology_lock(); 3175 } 3176 g_topology_unlock(); 3177 PICKUP_GIANT(); 3178 } 3179 3180 static void 3181 g_mirror_init(struct g_class *mp) 3182 { 3183 3184 g_mirror_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3185 g_mirror_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3186 if (g_mirror_pre_sync == NULL) 3187 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3188 } 3189 3190 static void 3191 g_mirror_fini(struct g_class *mp) 3192 { 3193 3194 if (g_mirror_pre_sync != NULL) 3195 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_mirror_pre_sync); 3196 } 3197 3198 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3199