1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/fail.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #include <sys/limits.h> 38 #include <sys/lock.h> 39 #include <sys/mutex.h> 40 #include <sys/bio.h> 41 #include <sys/sbuf.h> 42 #include <sys/sysctl.h> 43 #include <sys/malloc.h> 44 #include <sys/eventhandler.h> 45 #include <vm/uma.h> 46 #include <geom/geom.h> 47 #include <sys/proc.h> 48 #include <sys/kthread.h> 49 #include <sys/sched.h> 50 #include <geom/mirror/g_mirror.h> 51 52 FEATURE(geom_mirror, "GEOM mirroring support"); 53 54 static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data"); 55 56 SYSCTL_DECL(_kern_geom); 57 static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, 58 "GEOM_MIRROR stuff"); 59 int g_mirror_debug = 0; 60 SYSCTL_INT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0, 61 "Debug level"); 62 static u_int g_mirror_timeout = 4; 63 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout, 64 0, "Time to wait on all mirror components"); 65 static u_int g_mirror_idletime = 5; 66 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN, 67 &g_mirror_idletime, 0, "Mark components as clean when idling"); 68 static u_int g_mirror_disconnect_on_failure = 1; 69 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 70 &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 71 static u_int g_mirror_syncreqs = 2; 72 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 73 &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests."); 74 static u_int g_mirror_sync_period = 5; 75 SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_update_period, CTLFLAG_RWTUN, 76 &g_mirror_sync_period, 0, 77 "Metadata update period during synchronization, in seconds"); 78 79 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 80 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 81 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 82 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 83 } while (0) 84 85 static eventhandler_tag g_mirror_post_sync = NULL; 86 static int g_mirror_shutdown = 0; 87 88 static g_ctl_destroy_geom_t g_mirror_destroy_geom; 89 static g_taste_t g_mirror_taste; 90 static g_init_t g_mirror_init; 91 static g_fini_t g_mirror_fini; 92 static g_provgone_t g_mirror_providergone; 93 static g_resize_t g_mirror_resize; 94 95 struct g_class g_mirror_class = { 96 .name = G_MIRROR_CLASS_NAME, 97 .version = G_VERSION, 98 .ctlreq = g_mirror_config, 99 .taste = g_mirror_taste, 100 .destroy_geom = g_mirror_destroy_geom, 101 .init = g_mirror_init, 102 .fini = g_mirror_fini, 103 .providergone = g_mirror_providergone, 104 .resize = g_mirror_resize 105 }; 106 107 108 static void g_mirror_destroy_provider(struct g_mirror_softc *sc); 109 static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state); 110 static void g_mirror_update_device(struct g_mirror_softc *sc, bool force); 111 static void g_mirror_dumpconf(struct sbuf *sb, const char *indent, 112 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 113 static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type); 114 static void g_mirror_register_request(struct bio *bp); 115 static void g_mirror_sync_release(struct g_mirror_softc *sc); 116 117 118 static const char * 119 g_mirror_disk_state2str(int state) 120 { 121 122 switch (state) { 123 case G_MIRROR_DISK_STATE_NONE: 124 return ("NONE"); 125 case G_MIRROR_DISK_STATE_NEW: 126 return ("NEW"); 127 case G_MIRROR_DISK_STATE_ACTIVE: 128 return ("ACTIVE"); 129 case G_MIRROR_DISK_STATE_STALE: 130 return ("STALE"); 131 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 132 return ("SYNCHRONIZING"); 133 case G_MIRROR_DISK_STATE_DISCONNECTED: 134 return ("DISCONNECTED"); 135 case G_MIRROR_DISK_STATE_DESTROY: 136 return ("DESTROY"); 137 default: 138 return ("INVALID"); 139 } 140 } 141 142 static const char * 143 g_mirror_device_state2str(int state) 144 { 145 146 switch (state) { 147 case G_MIRROR_DEVICE_STATE_STARTING: 148 return ("STARTING"); 149 case G_MIRROR_DEVICE_STATE_RUNNING: 150 return ("RUNNING"); 151 default: 152 return ("INVALID"); 153 } 154 } 155 156 static const char * 157 g_mirror_get_diskname(struct g_mirror_disk *disk) 158 { 159 160 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 161 return ("[unknown]"); 162 return (disk->d_name); 163 } 164 165 /* 166 * --- Events handling functions --- 167 * Events in geom_mirror are used to maintain disks and device status 168 * from one thread to simplify locking. 169 */ 170 static void 171 g_mirror_event_free(struct g_mirror_event *ep) 172 { 173 174 free(ep, M_MIRROR); 175 } 176 177 int 178 g_mirror_event_send(void *arg, int state, int flags) 179 { 180 struct g_mirror_softc *sc; 181 struct g_mirror_disk *disk; 182 struct g_mirror_event *ep; 183 int error; 184 185 ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); 186 G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); 187 if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { 188 disk = NULL; 189 sc = arg; 190 } else { 191 disk = arg; 192 sc = disk->d_softc; 193 } 194 ep->e_disk = disk; 195 ep->e_state = state; 196 ep->e_flags = flags; 197 ep->e_error = 0; 198 mtx_lock(&sc->sc_events_mtx); 199 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 200 mtx_unlock(&sc->sc_events_mtx); 201 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 202 mtx_lock(&sc->sc_queue_mtx); 203 wakeup(sc); 204 mtx_unlock(&sc->sc_queue_mtx); 205 if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) 206 return (0); 207 sx_assert(&sc->sc_lock, SX_XLOCKED); 208 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 209 sx_xunlock(&sc->sc_lock); 210 while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { 211 mtx_lock(&sc->sc_events_mtx); 212 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event", 213 hz * 5); 214 } 215 error = ep->e_error; 216 g_mirror_event_free(ep); 217 sx_xlock(&sc->sc_lock); 218 return (error); 219 } 220 221 static struct g_mirror_event * 222 g_mirror_event_first(struct g_mirror_softc *sc) 223 { 224 struct g_mirror_event *ep; 225 226 mtx_lock(&sc->sc_events_mtx); 227 ep = TAILQ_FIRST(&sc->sc_events); 228 mtx_unlock(&sc->sc_events_mtx); 229 return (ep); 230 } 231 232 static void 233 g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep) 234 { 235 236 mtx_lock(&sc->sc_events_mtx); 237 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 238 mtx_unlock(&sc->sc_events_mtx); 239 } 240 241 static void 242 g_mirror_event_cancel(struct g_mirror_disk *disk) 243 { 244 struct g_mirror_softc *sc; 245 struct g_mirror_event *ep, *tmpep; 246 247 sc = disk->d_softc; 248 sx_assert(&sc->sc_lock, SX_XLOCKED); 249 250 mtx_lock(&sc->sc_events_mtx); 251 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 252 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) 253 continue; 254 if (ep->e_disk != disk) 255 continue; 256 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 257 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 258 g_mirror_event_free(ep); 259 else { 260 ep->e_error = ECANCELED; 261 wakeup(ep); 262 } 263 } 264 mtx_unlock(&sc->sc_events_mtx); 265 } 266 267 /* 268 * Return the number of disks in given state. 269 * If state is equal to -1, count all connected disks. 270 */ 271 u_int 272 g_mirror_ndisks(struct g_mirror_softc *sc, int state) 273 { 274 struct g_mirror_disk *disk; 275 u_int n = 0; 276 277 sx_assert(&sc->sc_lock, SX_LOCKED); 278 279 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 280 if (state == -1 || disk->d_state == state) 281 n++; 282 } 283 return (n); 284 } 285 286 /* 287 * Find a disk in mirror by its disk ID. 288 */ 289 static struct g_mirror_disk * 290 g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) 291 { 292 struct g_mirror_disk *disk; 293 294 sx_assert(&sc->sc_lock, SX_XLOCKED); 295 296 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 297 if (disk->d_id == id) 298 return (disk); 299 } 300 return (NULL); 301 } 302 303 static u_int 304 g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp) 305 { 306 struct bio *bp; 307 u_int nreqs = 0; 308 309 mtx_lock(&sc->sc_queue_mtx); 310 TAILQ_FOREACH(bp, &sc->sc_queue, bio_queue) { 311 if (bp->bio_from == cp) 312 nreqs++; 313 } 314 mtx_unlock(&sc->sc_queue_mtx); 315 return (nreqs); 316 } 317 318 static int 319 g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp) 320 { 321 322 if (cp->index > 0) { 323 G_MIRROR_DEBUG(2, 324 "I/O requests for %s exist, can't destroy it now.", 325 cp->provider->name); 326 return (1); 327 } 328 if (g_mirror_nrequests(sc, cp) > 0) { 329 G_MIRROR_DEBUG(2, 330 "I/O requests for %s in queue, can't destroy it now.", 331 cp->provider->name); 332 return (1); 333 } 334 return (0); 335 } 336 337 static void 338 g_mirror_destroy_consumer(void *arg, int flags __unused) 339 { 340 struct g_consumer *cp; 341 342 g_topology_assert(); 343 344 cp = arg; 345 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 346 g_detach(cp); 347 g_destroy_consumer(cp); 348 } 349 350 static void 351 g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 352 { 353 struct g_provider *pp; 354 int retaste_wait; 355 356 g_topology_assert(); 357 358 cp->private = NULL; 359 if (g_mirror_is_busy(sc, cp)) 360 return; 361 pp = cp->provider; 362 retaste_wait = 0; 363 if (cp->acw == 1) { 364 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 365 retaste_wait = 1; 366 } 367 G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 368 -cp->acw, -cp->ace, 0); 369 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 370 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 371 if (retaste_wait) { 372 /* 373 * After retaste event was send (inside g_access()), we can send 374 * event to detach and destroy consumer. 375 * A class, which has consumer to the given provider connected 376 * will not receive retaste event for the provider. 377 * This is the way how I ignore retaste events when I close 378 * consumers opened for write: I detach and destroy consumer 379 * after retaste event is sent. 380 */ 381 g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL); 382 return; 383 } 384 G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name); 385 g_detach(cp); 386 g_destroy_consumer(cp); 387 } 388 389 static int 390 g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) 391 { 392 struct g_consumer *cp; 393 int error; 394 395 g_topology_assert_not(); 396 KASSERT(disk->d_consumer == NULL, 397 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 398 399 g_topology_lock(); 400 cp = g_new_consumer(disk->d_softc->sc_geom); 401 cp->flags |= G_CF_DIRECT_RECEIVE; 402 error = g_attach(cp, pp); 403 if (error != 0) { 404 g_destroy_consumer(cp); 405 g_topology_unlock(); 406 return (error); 407 } 408 error = g_access(cp, 1, 1, 1); 409 if (error != 0) { 410 g_detach(cp); 411 g_destroy_consumer(cp); 412 g_topology_unlock(); 413 G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).", 414 pp->name, error); 415 return (error); 416 } 417 g_topology_unlock(); 418 disk->d_consumer = cp; 419 disk->d_consumer->private = disk; 420 disk->d_consumer->index = 0; 421 422 G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk)); 423 return (0); 424 } 425 426 static void 427 g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp) 428 { 429 430 g_topology_assert(); 431 432 if (cp == NULL) 433 return; 434 if (cp->provider != NULL) 435 g_mirror_kill_consumer(sc, cp); 436 else 437 g_destroy_consumer(cp); 438 } 439 440 /* 441 * Initialize disk. This means allocate memory, create consumer, attach it 442 * to the provider and open access (r1w1e1) to it. 443 */ 444 static struct g_mirror_disk * 445 g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp, 446 struct g_mirror_metadata *md, int *errorp) 447 { 448 struct g_mirror_disk *disk; 449 int i, error; 450 451 disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO); 452 if (disk == NULL) { 453 error = ENOMEM; 454 goto fail; 455 } 456 disk->d_softc = sc; 457 error = g_mirror_connect_disk(disk, pp); 458 if (error != 0) 459 goto fail; 460 disk->d_id = md->md_did; 461 disk->d_state = G_MIRROR_DISK_STATE_NONE; 462 disk->d_priority = md->md_priority; 463 disk->d_flags = md->md_dflags; 464 error = g_getattr("GEOM::candelete", disk->d_consumer, &i); 465 if (error == 0 && i != 0) 466 disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE; 467 if (md->md_provider[0] != '\0') 468 disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED; 469 disk->d_sync.ds_consumer = NULL; 470 disk->d_sync.ds_offset = md->md_sync_offset; 471 disk->d_sync.ds_offset_done = md->md_sync_offset; 472 disk->d_sync.ds_update_ts = time_uptime; 473 disk->d_genid = md->md_genid; 474 disk->d_sync.ds_syncid = md->md_syncid; 475 if (errorp != NULL) 476 *errorp = 0; 477 return (disk); 478 fail: 479 if (errorp != NULL) 480 *errorp = error; 481 if (disk != NULL) 482 free(disk, M_MIRROR); 483 return (NULL); 484 } 485 486 static void 487 g_mirror_destroy_disk(struct g_mirror_disk *disk) 488 { 489 struct g_mirror_softc *sc; 490 491 g_topology_assert_not(); 492 sc = disk->d_softc; 493 sx_assert(&sc->sc_lock, SX_XLOCKED); 494 495 LIST_REMOVE(disk, d_next); 496 g_mirror_event_cancel(disk); 497 if (sc->sc_hint == disk) 498 sc->sc_hint = NULL; 499 switch (disk->d_state) { 500 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 501 g_mirror_sync_stop(disk, 1); 502 /* FALLTHROUGH */ 503 case G_MIRROR_DISK_STATE_NEW: 504 case G_MIRROR_DISK_STATE_STALE: 505 case G_MIRROR_DISK_STATE_ACTIVE: 506 g_topology_lock(); 507 g_mirror_disconnect_consumer(sc, disk->d_consumer); 508 g_topology_unlock(); 509 free(disk, M_MIRROR); 510 break; 511 default: 512 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 513 g_mirror_get_diskname(disk), 514 g_mirror_disk_state2str(disk->d_state))); 515 } 516 } 517 518 static void 519 g_mirror_free_device(struct g_mirror_softc *sc) 520 { 521 522 mtx_destroy(&sc->sc_queue_mtx); 523 mtx_destroy(&sc->sc_events_mtx); 524 mtx_destroy(&sc->sc_done_mtx); 525 sx_destroy(&sc->sc_lock); 526 free(sc, M_MIRROR); 527 } 528 529 static void 530 g_mirror_providergone(struct g_provider *pp) 531 { 532 struct g_mirror_softc *sc = pp->private; 533 534 if ((--sc->sc_refcnt) == 0) 535 g_mirror_free_device(sc); 536 } 537 538 static void 539 g_mirror_destroy_device(struct g_mirror_softc *sc) 540 { 541 struct g_mirror_disk *disk; 542 struct g_mirror_event *ep; 543 struct g_geom *gp; 544 struct g_consumer *cp, *tmpcp; 545 546 g_topology_assert_not(); 547 sx_assert(&sc->sc_lock, SX_XLOCKED); 548 549 gp = sc->sc_geom; 550 if (sc->sc_provider != NULL) 551 g_mirror_destroy_provider(sc); 552 for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL; 553 disk = LIST_FIRST(&sc->sc_disks)) { 554 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 555 g_mirror_update_metadata(disk); 556 g_mirror_destroy_disk(disk); 557 } 558 while ((ep = g_mirror_event_first(sc)) != NULL) { 559 g_mirror_event_remove(sc, ep); 560 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) 561 g_mirror_event_free(ep); 562 else { 563 ep->e_error = ECANCELED; 564 ep->e_flags |= G_MIRROR_EVENT_DONE; 565 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); 566 mtx_lock(&sc->sc_events_mtx); 567 wakeup(ep); 568 mtx_unlock(&sc->sc_events_mtx); 569 } 570 } 571 callout_drain(&sc->sc_callout); 572 573 g_topology_lock(); 574 LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) { 575 g_mirror_disconnect_consumer(sc, cp); 576 } 577 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 578 G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name); 579 g_wither_geom(gp, ENXIO); 580 sx_xunlock(&sc->sc_lock); 581 if ((--sc->sc_refcnt) == 0) 582 g_mirror_free_device(sc); 583 g_topology_unlock(); 584 } 585 586 static void 587 g_mirror_orphan(struct g_consumer *cp) 588 { 589 struct g_mirror_disk *disk; 590 591 g_topology_assert(); 592 593 disk = cp->private; 594 if (disk == NULL) 595 return; 596 disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 597 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, 598 G_MIRROR_EVENT_DONTWAIT); 599 } 600 601 /* 602 * Function should return the next active disk on the list. 603 * It is possible that it will be the same disk as given. 604 * If there are no active disks on list, NULL is returned. 605 */ 606 static __inline struct g_mirror_disk * 607 g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 608 { 609 struct g_mirror_disk *dp; 610 611 for (dp = LIST_NEXT(disk, d_next); dp != disk; 612 dp = LIST_NEXT(dp, d_next)) { 613 if (dp == NULL) 614 dp = LIST_FIRST(&sc->sc_disks); 615 if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) 616 break; 617 } 618 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 619 return (NULL); 620 return (dp); 621 } 622 623 static struct g_mirror_disk * 624 g_mirror_get_disk(struct g_mirror_softc *sc) 625 { 626 struct g_mirror_disk *disk; 627 628 if (sc->sc_hint == NULL) { 629 sc->sc_hint = LIST_FIRST(&sc->sc_disks); 630 if (sc->sc_hint == NULL) 631 return (NULL); 632 } 633 disk = sc->sc_hint; 634 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { 635 disk = g_mirror_find_next(sc, disk); 636 if (disk == NULL) 637 return (NULL); 638 } 639 sc->sc_hint = g_mirror_find_next(sc, disk); 640 return (disk); 641 } 642 643 static int 644 g_mirror_write_metadata(struct g_mirror_disk *disk, 645 struct g_mirror_metadata *md) 646 { 647 struct g_mirror_softc *sc; 648 struct g_consumer *cp; 649 off_t offset, length; 650 u_char *sector; 651 int error = 0; 652 653 g_topology_assert_not(); 654 sc = disk->d_softc; 655 sx_assert(&sc->sc_lock, SX_LOCKED); 656 657 cp = disk->d_consumer; 658 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 659 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 660 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 661 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 662 cp->acw, cp->ace)); 663 length = cp->provider->sectorsize; 664 offset = cp->provider->mediasize - length; 665 sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO); 666 if (md != NULL && 667 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) { 668 /* 669 * Handle the case, when the size of parent provider reduced. 670 */ 671 if (offset < md->md_mediasize) 672 error = ENOSPC; 673 else 674 mirror_metadata_encode(md, sector); 675 } 676 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error); 677 if (error == 0) 678 error = g_write_data(cp, offset, sector, length); 679 free(sector, M_MIRROR); 680 if (error != 0) { 681 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 682 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 683 G_MIRROR_DEBUG(0, "Cannot write metadata on %s " 684 "(device=%s, error=%d).", 685 g_mirror_get_diskname(disk), sc->sc_name, error); 686 } else { 687 G_MIRROR_DEBUG(1, "Cannot write metadata on %s " 688 "(device=%s, error=%d).", 689 g_mirror_get_diskname(disk), sc->sc_name, error); 690 } 691 if (g_mirror_disconnect_on_failure && 692 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) { 693 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 694 g_mirror_event_send(disk, 695 G_MIRROR_DISK_STATE_DISCONNECTED, 696 G_MIRROR_EVENT_DONTWAIT); 697 } 698 } 699 return (error); 700 } 701 702 static int 703 g_mirror_clear_metadata(struct g_mirror_disk *disk) 704 { 705 int error; 706 707 g_topology_assert_not(); 708 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 709 710 if (disk->d_softc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 711 return (0); 712 error = g_mirror_write_metadata(disk, NULL); 713 if (error == 0) { 714 G_MIRROR_DEBUG(2, "Metadata on %s cleared.", 715 g_mirror_get_diskname(disk)); 716 } else { 717 G_MIRROR_DEBUG(0, 718 "Cannot clear metadata on disk %s (error=%d).", 719 g_mirror_get_diskname(disk), error); 720 } 721 return (error); 722 } 723 724 void 725 g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, 726 struct g_mirror_metadata *md) 727 { 728 729 strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); 730 md->md_version = G_MIRROR_VERSION; 731 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 732 md->md_mid = sc->sc_id; 733 md->md_all = sc->sc_ndisks; 734 md->md_slice = sc->sc_slice; 735 md->md_balance = sc->sc_balance; 736 md->md_genid = sc->sc_genid; 737 md->md_mediasize = sc->sc_mediasize; 738 md->md_sectorsize = sc->sc_sectorsize; 739 md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK); 740 bzero(md->md_provider, sizeof(md->md_provider)); 741 if (disk == NULL) { 742 md->md_did = arc4random(); 743 md->md_priority = 0; 744 md->md_syncid = 0; 745 md->md_dflags = 0; 746 md->md_sync_offset = 0; 747 md->md_provsize = 0; 748 } else { 749 md->md_did = disk->d_id; 750 md->md_priority = disk->d_priority; 751 md->md_syncid = disk->d_sync.ds_syncid; 752 md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK); 753 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 754 md->md_sync_offset = disk->d_sync.ds_offset_done; 755 else 756 md->md_sync_offset = 0; 757 if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) { 758 strlcpy(md->md_provider, 759 disk->d_consumer->provider->name, 760 sizeof(md->md_provider)); 761 } 762 md->md_provsize = disk->d_consumer->provider->mediasize; 763 } 764 } 765 766 void 767 g_mirror_update_metadata(struct g_mirror_disk *disk) 768 { 769 struct g_mirror_softc *sc; 770 struct g_mirror_metadata md; 771 int error; 772 773 g_topology_assert_not(); 774 sc = disk->d_softc; 775 sx_assert(&sc->sc_lock, SX_LOCKED); 776 777 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 778 return; 779 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) 780 g_mirror_fill_metadata(sc, disk, &md); 781 error = g_mirror_write_metadata(disk, &md); 782 if (error == 0) { 783 G_MIRROR_DEBUG(2, "Metadata on %s updated.", 784 g_mirror_get_diskname(disk)); 785 } else { 786 G_MIRROR_DEBUG(0, 787 "Cannot update metadata on disk %s (error=%d).", 788 g_mirror_get_diskname(disk), error); 789 } 790 } 791 792 static void 793 g_mirror_bump_syncid(struct g_mirror_softc *sc) 794 { 795 struct g_mirror_disk *disk; 796 797 g_topology_assert_not(); 798 sx_assert(&sc->sc_lock, SX_XLOCKED); 799 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 800 ("%s called with no active disks (device=%s).", __func__, 801 sc->sc_name)); 802 803 sc->sc_syncid++; 804 G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 805 sc->sc_syncid); 806 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 807 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 808 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 809 disk->d_sync.ds_syncid = sc->sc_syncid; 810 g_mirror_update_metadata(disk); 811 } 812 } 813 } 814 815 static void 816 g_mirror_bump_genid(struct g_mirror_softc *sc) 817 { 818 struct g_mirror_disk *disk; 819 820 g_topology_assert_not(); 821 sx_assert(&sc->sc_lock, SX_XLOCKED); 822 KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0, 823 ("%s called with no active disks (device=%s).", __func__, 824 sc->sc_name)); 825 826 sc->sc_genid++; 827 G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 828 sc->sc_genid); 829 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 830 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 831 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 832 disk->d_genid = sc->sc_genid; 833 g_mirror_update_metadata(disk); 834 } 835 } 836 } 837 838 static int 839 g_mirror_idle(struct g_mirror_softc *sc, int acw) 840 { 841 struct g_mirror_disk *disk; 842 int timeout; 843 844 g_topology_assert_not(); 845 sx_assert(&sc->sc_lock, SX_XLOCKED); 846 847 if (sc->sc_provider == NULL) 848 return (0); 849 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 850 return (0); 851 if (sc->sc_idle) 852 return (0); 853 if (sc->sc_writes > 0) 854 return (0); 855 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 856 timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write); 857 if (!g_mirror_shutdown && timeout > 0) 858 return (timeout); 859 } 860 sc->sc_idle = 1; 861 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 862 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 863 continue; 864 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 865 g_mirror_get_diskname(disk), sc->sc_name); 866 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 867 g_mirror_update_metadata(disk); 868 } 869 return (0); 870 } 871 872 static void 873 g_mirror_unidle(struct g_mirror_softc *sc) 874 { 875 struct g_mirror_disk *disk; 876 877 g_topology_assert_not(); 878 sx_assert(&sc->sc_lock, SX_XLOCKED); 879 880 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 881 return; 882 sc->sc_idle = 0; 883 sc->sc_last_write = time_uptime; 884 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 885 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 886 continue; 887 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 888 g_mirror_get_diskname(disk), sc->sc_name); 889 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 890 g_mirror_update_metadata(disk); 891 } 892 } 893 894 static void 895 g_mirror_flush_done(struct bio *bp) 896 { 897 struct g_mirror_softc *sc; 898 struct bio *pbp; 899 900 pbp = bp->bio_parent; 901 sc = pbp->bio_to->private; 902 mtx_lock(&sc->sc_done_mtx); 903 if (pbp->bio_error == 0) 904 pbp->bio_error = bp->bio_error; 905 pbp->bio_completed += bp->bio_completed; 906 pbp->bio_inbed++; 907 if (pbp->bio_children == pbp->bio_inbed) { 908 mtx_unlock(&sc->sc_done_mtx); 909 g_io_deliver(pbp, pbp->bio_error); 910 } else 911 mtx_unlock(&sc->sc_done_mtx); 912 g_destroy_bio(bp); 913 } 914 915 static void 916 g_mirror_done(struct bio *bp) 917 { 918 struct g_mirror_softc *sc; 919 920 sc = bp->bio_from->geom->softc; 921 bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR; 922 mtx_lock(&sc->sc_queue_mtx); 923 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 924 mtx_unlock(&sc->sc_queue_mtx); 925 wakeup(sc); 926 } 927 928 static void 929 g_mirror_regular_request(struct bio *bp) 930 { 931 struct g_mirror_softc *sc; 932 struct g_mirror_disk *disk; 933 struct bio *pbp; 934 935 g_topology_assert_not(); 936 937 pbp = bp->bio_parent; 938 sc = pbp->bio_to->private; 939 bp->bio_from->index--; 940 if (bp->bio_cmd == BIO_WRITE || bp->bio_cmd == BIO_DELETE) 941 sc->sc_writes--; 942 disk = bp->bio_from->private; 943 if (disk == NULL) { 944 g_topology_lock(); 945 g_mirror_kill_consumer(sc, bp->bio_from); 946 g_topology_unlock(); 947 } 948 949 if (bp->bio_cmd == BIO_READ) 950 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read, 951 bp->bio_error); 952 else if (bp->bio_cmd == BIO_WRITE) 953 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write, 954 bp->bio_error); 955 956 pbp->bio_inbed++; 957 KASSERT(pbp->bio_inbed <= pbp->bio_children, 958 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 959 pbp->bio_children)); 960 if (bp->bio_error == 0 && pbp->bio_error == 0) { 961 G_MIRROR_LOGREQ(3, bp, "Request delivered."); 962 g_destroy_bio(bp); 963 if (pbp->bio_children == pbp->bio_inbed) { 964 G_MIRROR_LOGREQ(3, pbp, "Request delivered."); 965 pbp->bio_completed = pbp->bio_length; 966 if (pbp->bio_cmd == BIO_WRITE || 967 pbp->bio_cmd == BIO_DELETE) { 968 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 969 /* Release delayed sync requests if possible. */ 970 g_mirror_sync_release(sc); 971 } 972 g_io_deliver(pbp, pbp->bio_error); 973 } 974 return; 975 } else if (bp->bio_error != 0) { 976 if (pbp->bio_error == 0) 977 pbp->bio_error = bp->bio_error; 978 if (disk != NULL) { 979 if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) { 980 disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN; 981 G_MIRROR_LOGREQ(0, bp, 982 "Request failed (error=%d).", 983 bp->bio_error); 984 } else { 985 G_MIRROR_LOGREQ(1, bp, 986 "Request failed (error=%d).", 987 bp->bio_error); 988 } 989 if (g_mirror_disconnect_on_failure && 990 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) 991 { 992 if (bp->bio_error == ENXIO && 993 bp->bio_cmd == BIO_READ) 994 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 995 else if (bp->bio_error == ENXIO) 996 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID_NOW; 997 else 998 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 999 g_mirror_event_send(disk, 1000 G_MIRROR_DISK_STATE_DISCONNECTED, 1001 G_MIRROR_EVENT_DONTWAIT); 1002 } 1003 } 1004 switch (pbp->bio_cmd) { 1005 case BIO_DELETE: 1006 case BIO_WRITE: 1007 pbp->bio_inbed--; 1008 pbp->bio_children--; 1009 break; 1010 } 1011 } 1012 g_destroy_bio(bp); 1013 1014 switch (pbp->bio_cmd) { 1015 case BIO_READ: 1016 if (pbp->bio_inbed < pbp->bio_children) 1017 break; 1018 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1) 1019 g_io_deliver(pbp, pbp->bio_error); 1020 else { 1021 pbp->bio_error = 0; 1022 mtx_lock(&sc->sc_queue_mtx); 1023 TAILQ_INSERT_TAIL(&sc->sc_queue, pbp, bio_queue); 1024 mtx_unlock(&sc->sc_queue_mtx); 1025 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1026 wakeup(sc); 1027 } 1028 break; 1029 case BIO_DELETE: 1030 case BIO_WRITE: 1031 if (pbp->bio_children == 0) { 1032 /* 1033 * All requests failed. 1034 */ 1035 } else if (pbp->bio_inbed < pbp->bio_children) { 1036 /* Do nothing. */ 1037 break; 1038 } else if (pbp->bio_children == pbp->bio_inbed) { 1039 /* Some requests succeeded. */ 1040 pbp->bio_error = 0; 1041 pbp->bio_completed = pbp->bio_length; 1042 } 1043 TAILQ_REMOVE(&sc->sc_inflight, pbp, bio_queue); 1044 /* Release delayed sync requests if possible. */ 1045 g_mirror_sync_release(sc); 1046 g_io_deliver(pbp, pbp->bio_error); 1047 break; 1048 default: 1049 KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd)); 1050 break; 1051 } 1052 } 1053 1054 static void 1055 g_mirror_sync_done(struct bio *bp) 1056 { 1057 struct g_mirror_softc *sc; 1058 1059 G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered."); 1060 sc = bp->bio_from->geom->softc; 1061 bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC; 1062 mtx_lock(&sc->sc_queue_mtx); 1063 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 1064 mtx_unlock(&sc->sc_queue_mtx); 1065 wakeup(sc); 1066 } 1067 1068 static void 1069 g_mirror_candelete(struct bio *bp) 1070 { 1071 struct g_mirror_softc *sc; 1072 struct g_mirror_disk *disk; 1073 int *val; 1074 1075 sc = bp->bio_to->private; 1076 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1077 if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) 1078 break; 1079 } 1080 val = (int *)bp->bio_data; 1081 *val = (disk != NULL); 1082 g_io_deliver(bp, 0); 1083 } 1084 1085 static void 1086 g_mirror_kernel_dump(struct bio *bp) 1087 { 1088 struct g_mirror_softc *sc; 1089 struct g_mirror_disk *disk; 1090 struct bio *cbp; 1091 struct g_kerneldump *gkd; 1092 1093 /* 1094 * We configure dumping to the first component, because this component 1095 * will be used for reading with 'prefer' balance algorithm. 1096 * If the component with the highest priority is currently disconnected 1097 * we will not be able to read the dump after the reboot if it will be 1098 * connected and synchronized later. Can we do something better? 1099 */ 1100 sc = bp->bio_to->private; 1101 disk = LIST_FIRST(&sc->sc_disks); 1102 1103 gkd = (struct g_kerneldump *)bp->bio_data; 1104 if (gkd->length > bp->bio_to->mediasize) 1105 gkd->length = bp->bio_to->mediasize; 1106 cbp = g_clone_bio(bp); 1107 if (cbp == NULL) { 1108 g_io_deliver(bp, ENOMEM); 1109 return; 1110 } 1111 cbp->bio_done = g_std_done; 1112 g_io_request(cbp, disk->d_consumer); 1113 G_MIRROR_DEBUG(1, "Kernel dump will go to %s.", 1114 g_mirror_get_diskname(disk)); 1115 } 1116 1117 static void 1118 g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp) 1119 { 1120 struct bio_queue queue; 1121 struct g_mirror_disk *disk; 1122 struct g_consumer *cp; 1123 struct bio *cbp; 1124 1125 TAILQ_INIT(&queue); 1126 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1127 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1128 continue; 1129 cbp = g_clone_bio(bp); 1130 if (cbp == NULL) { 1131 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1132 TAILQ_REMOVE(&queue, cbp, bio_queue); 1133 g_destroy_bio(cbp); 1134 } 1135 if (bp->bio_error == 0) 1136 bp->bio_error = ENOMEM; 1137 g_io_deliver(bp, bp->bio_error); 1138 return; 1139 } 1140 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1141 cbp->bio_done = g_mirror_flush_done; 1142 cbp->bio_caller1 = disk; 1143 cbp->bio_to = disk->d_consumer->provider; 1144 } 1145 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1146 TAILQ_REMOVE(&queue, cbp, bio_queue); 1147 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1148 disk = cbp->bio_caller1; 1149 cbp->bio_caller1 = NULL; 1150 cp = disk->d_consumer; 1151 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1152 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1153 cp->acr, cp->acw, cp->ace)); 1154 g_io_request(cbp, disk->d_consumer); 1155 } 1156 } 1157 1158 static void 1159 g_mirror_start(struct bio *bp) 1160 { 1161 struct g_mirror_softc *sc; 1162 1163 sc = bp->bio_to->private; 1164 /* 1165 * If sc == NULL or there are no valid disks, provider's error 1166 * should be set and g_mirror_start() should not be called at all. 1167 */ 1168 KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 1169 ("Provider's error should be set (error=%d)(mirror=%s).", 1170 bp->bio_to->error, bp->bio_to->name)); 1171 G_MIRROR_LOGREQ(3, bp, "Request received."); 1172 1173 switch (bp->bio_cmd) { 1174 case BIO_READ: 1175 case BIO_WRITE: 1176 case BIO_DELETE: 1177 break; 1178 case BIO_FLUSH: 1179 g_mirror_flush(sc, bp); 1180 return; 1181 case BIO_GETATTR: 1182 if (!strcmp(bp->bio_attribute, "GEOM::candelete")) { 1183 g_mirror_candelete(bp); 1184 return; 1185 } else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) { 1186 g_mirror_kernel_dump(bp); 1187 return; 1188 } 1189 /* FALLTHROUGH */ 1190 default: 1191 g_io_deliver(bp, EOPNOTSUPP); 1192 return; 1193 } 1194 mtx_lock(&sc->sc_queue_mtx); 1195 if (bp->bio_to->error != 0) { 1196 mtx_unlock(&sc->sc_queue_mtx); 1197 g_io_deliver(bp, bp->bio_to->error); 1198 return; 1199 } 1200 TAILQ_INSERT_TAIL(&sc->sc_queue, bp, bio_queue); 1201 mtx_unlock(&sc->sc_queue_mtx); 1202 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1203 wakeup(sc); 1204 } 1205 1206 /* 1207 * Return TRUE if the given request is colliding with a in-progress 1208 * synchronization request. 1209 */ 1210 static bool 1211 g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp) 1212 { 1213 struct g_mirror_disk *disk; 1214 struct bio *sbp; 1215 off_t rstart, rend, sstart, send; 1216 u_int i; 1217 1218 if (sc->sc_sync.ds_ndisks == 0) 1219 return (false); 1220 rstart = bp->bio_offset; 1221 rend = bp->bio_offset + bp->bio_length; 1222 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1223 if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) 1224 continue; 1225 for (i = 0; i < g_mirror_syncreqs; i++) { 1226 sbp = disk->d_sync.ds_bios[i]; 1227 if (sbp == NULL) 1228 continue; 1229 sstart = sbp->bio_offset; 1230 send = sbp->bio_offset + sbp->bio_length; 1231 if (rend > sstart && rstart < send) 1232 return (true); 1233 } 1234 } 1235 return (false); 1236 } 1237 1238 /* 1239 * Return TRUE if the given sync request is colliding with a in-progress regular 1240 * request. 1241 */ 1242 static bool 1243 g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp) 1244 { 1245 off_t rstart, rend, sstart, send; 1246 struct bio *bp; 1247 1248 if (sc->sc_sync.ds_ndisks == 0) 1249 return (false); 1250 sstart = sbp->bio_offset; 1251 send = sbp->bio_offset + sbp->bio_length; 1252 TAILQ_FOREACH(bp, &sc->sc_inflight, bio_queue) { 1253 rstart = bp->bio_offset; 1254 rend = bp->bio_offset + bp->bio_length; 1255 if (rend > sstart && rstart < send) 1256 return (true); 1257 } 1258 return (false); 1259 } 1260 1261 /* 1262 * Puts request onto delayed queue. 1263 */ 1264 static void 1265 g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp) 1266 { 1267 1268 G_MIRROR_LOGREQ(2, bp, "Delaying request."); 1269 TAILQ_INSERT_HEAD(&sc->sc_regular_delayed, bp, bio_queue); 1270 } 1271 1272 /* 1273 * Puts synchronization request onto delayed queue. 1274 */ 1275 static void 1276 g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp) 1277 { 1278 1279 G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request."); 1280 TAILQ_INSERT_TAIL(&sc->sc_sync_delayed, bp, bio_queue); 1281 } 1282 1283 /* 1284 * Releases delayed regular requests which don't collide anymore with sync 1285 * requests. 1286 */ 1287 static void 1288 g_mirror_regular_release(struct g_mirror_softc *sc) 1289 { 1290 struct bio *bp, *bp2; 1291 1292 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed, bio_queue, bp2) { 1293 if (g_mirror_sync_collision(sc, bp)) 1294 continue; 1295 TAILQ_REMOVE(&sc->sc_regular_delayed, bp, bio_queue); 1296 G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 1297 mtx_lock(&sc->sc_queue_mtx); 1298 TAILQ_INSERT_HEAD(&sc->sc_queue, bp, bio_queue); 1299 mtx_unlock(&sc->sc_queue_mtx); 1300 } 1301 } 1302 1303 /* 1304 * Releases delayed sync requests which don't collide anymore with regular 1305 * requests. 1306 */ 1307 static void 1308 g_mirror_sync_release(struct g_mirror_softc *sc) 1309 { 1310 struct bio *bp, *bp2; 1311 1312 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed, bio_queue, bp2) { 1313 if (g_mirror_regular_collision(sc, bp)) 1314 continue; 1315 TAILQ_REMOVE(&sc->sc_sync_delayed, bp, bio_queue); 1316 G_MIRROR_LOGREQ(2, bp, 1317 "Releasing delayed synchronization request."); 1318 g_io_request(bp, bp->bio_from); 1319 } 1320 } 1321 1322 /* 1323 * Free a synchronization request and clear its slot in the array. 1324 */ 1325 static void 1326 g_mirror_sync_request_free(struct g_mirror_disk *disk, struct bio *bp) 1327 { 1328 int idx; 1329 1330 if (disk != NULL && disk->d_sync.ds_bios != NULL) { 1331 idx = (int)(uintptr_t)bp->bio_caller1; 1332 KASSERT(disk->d_sync.ds_bios[idx] == bp, 1333 ("unexpected sync BIO at %p:%d", disk, idx)); 1334 disk->d_sync.ds_bios[idx] = NULL; 1335 } 1336 free(bp->bio_data, M_MIRROR); 1337 g_destroy_bio(bp); 1338 } 1339 1340 /* 1341 * Handle synchronization requests. 1342 * Every synchronization request is two-steps process: first, READ request is 1343 * send to active provider and then WRITE request (with read data) to the provider 1344 * being synchronized. When WRITE is finished, new synchronization request is 1345 * send. 1346 */ 1347 static void 1348 g_mirror_sync_request(struct bio *bp) 1349 { 1350 struct g_mirror_softc *sc; 1351 struct g_mirror_disk *disk; 1352 struct g_mirror_disk_sync *sync; 1353 1354 bp->bio_from->index--; 1355 sc = bp->bio_from->geom->softc; 1356 disk = bp->bio_from->private; 1357 if (disk == NULL) { 1358 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1359 g_topology_lock(); 1360 g_mirror_kill_consumer(sc, bp->bio_from); 1361 g_topology_unlock(); 1362 g_mirror_sync_request_free(NULL, bp); 1363 sx_xlock(&sc->sc_lock); 1364 return; 1365 } 1366 1367 /* 1368 * Synchronization request. 1369 */ 1370 switch (bp->bio_cmd) { 1371 case BIO_READ: 1372 { 1373 struct g_consumer *cp; 1374 1375 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read, 1376 bp->bio_error); 1377 1378 if (bp->bio_error != 0) { 1379 G_MIRROR_LOGREQ(0, bp, 1380 "Synchronization request failed (error=%d).", 1381 bp->bio_error); 1382 g_mirror_sync_request_free(disk, bp); 1383 return; 1384 } 1385 G_MIRROR_LOGREQ(3, bp, 1386 "Synchronization request half-finished."); 1387 bp->bio_cmd = BIO_WRITE; 1388 bp->bio_cflags = 0; 1389 cp = disk->d_consumer; 1390 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1391 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1392 cp->acr, cp->acw, cp->ace)); 1393 cp->index++; 1394 g_io_request(bp, cp); 1395 return; 1396 } 1397 case BIO_WRITE: 1398 { 1399 off_t offset; 1400 void *data; 1401 int i, idx; 1402 1403 KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write, 1404 bp->bio_error); 1405 1406 if (bp->bio_error != 0) { 1407 G_MIRROR_LOGREQ(0, bp, 1408 "Synchronization request failed (error=%d).", 1409 bp->bio_error); 1410 g_mirror_sync_request_free(disk, bp); 1411 sc->sc_bump_id |= G_MIRROR_BUMP_GENID; 1412 g_mirror_event_send(disk, 1413 G_MIRROR_DISK_STATE_DISCONNECTED, 1414 G_MIRROR_EVENT_DONTWAIT); 1415 return; 1416 } 1417 G_MIRROR_LOGREQ(3, bp, "Synchronization request finished."); 1418 sync = &disk->d_sync; 1419 if (sync->ds_offset >= sc->sc_mediasize || 1420 sync->ds_consumer == NULL || 1421 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1422 /* Don't send more synchronization requests. */ 1423 sync->ds_inflight--; 1424 g_mirror_sync_request_free(disk, bp); 1425 if (sync->ds_inflight > 0) 1426 return; 1427 if (sync->ds_consumer == NULL || 1428 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1429 return; 1430 } 1431 /* Disk up-to-date, activate it. */ 1432 g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE, 1433 G_MIRROR_EVENT_DONTWAIT); 1434 return; 1435 } 1436 1437 /* Send next synchronization request. */ 1438 data = bp->bio_data; 1439 idx = (int)(uintptr_t)bp->bio_caller1; 1440 g_reset_bio(bp); 1441 bp->bio_cmd = BIO_READ; 1442 bp->bio_offset = sync->ds_offset; 1443 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1444 sync->ds_offset += bp->bio_length; 1445 bp->bio_done = g_mirror_sync_done; 1446 bp->bio_data = data; 1447 bp->bio_from = sync->ds_consumer; 1448 bp->bio_to = sc->sc_provider; 1449 bp->bio_caller1 = (void *)(uintptr_t)idx; 1450 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1451 sync->ds_consumer->index++; 1452 /* 1453 * Delay the request if it is colliding with a regular request. 1454 */ 1455 if (g_mirror_regular_collision(sc, bp)) 1456 g_mirror_sync_delay(sc, bp); 1457 else 1458 g_io_request(bp, sync->ds_consumer); 1459 1460 /* Release delayed requests if possible. */ 1461 g_mirror_regular_release(sc); 1462 1463 /* Find the smallest offset */ 1464 offset = sc->sc_mediasize; 1465 for (i = 0; i < g_mirror_syncreqs; i++) { 1466 bp = sync->ds_bios[i]; 1467 if (bp != NULL && bp->bio_offset < offset) 1468 offset = bp->bio_offset; 1469 } 1470 if (g_mirror_sync_period > 0 && 1471 time_uptime - sync->ds_update_ts > g_mirror_sync_period) { 1472 sync->ds_offset_done = offset; 1473 g_mirror_update_metadata(disk); 1474 sync->ds_update_ts = time_uptime; 1475 } 1476 return; 1477 } 1478 default: 1479 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1480 bp->bio_cmd, sc->sc_name)); 1481 break; 1482 } 1483 } 1484 1485 static void 1486 g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp) 1487 { 1488 struct g_mirror_disk *disk; 1489 struct g_consumer *cp; 1490 struct bio *cbp; 1491 1492 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1493 if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE) 1494 break; 1495 } 1496 if (disk == NULL) { 1497 if (bp->bio_error == 0) 1498 bp->bio_error = ENXIO; 1499 g_io_deliver(bp, bp->bio_error); 1500 return; 1501 } 1502 cbp = g_clone_bio(bp); 1503 if (cbp == NULL) { 1504 if (bp->bio_error == 0) 1505 bp->bio_error = ENOMEM; 1506 g_io_deliver(bp, bp->bio_error); 1507 return; 1508 } 1509 /* 1510 * Fill in the component buf structure. 1511 */ 1512 cp = disk->d_consumer; 1513 cbp->bio_done = g_mirror_done; 1514 cbp->bio_to = cp->provider; 1515 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1516 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1517 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1518 cp->acw, cp->ace)); 1519 cp->index++; 1520 g_io_request(cbp, cp); 1521 } 1522 1523 static void 1524 g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp) 1525 { 1526 struct g_mirror_disk *disk; 1527 struct g_consumer *cp; 1528 struct bio *cbp; 1529 1530 disk = g_mirror_get_disk(sc); 1531 if (disk == NULL) { 1532 if (bp->bio_error == 0) 1533 bp->bio_error = ENXIO; 1534 g_io_deliver(bp, bp->bio_error); 1535 return; 1536 } 1537 cbp = g_clone_bio(bp); 1538 if (cbp == NULL) { 1539 if (bp->bio_error == 0) 1540 bp->bio_error = ENOMEM; 1541 g_io_deliver(bp, bp->bio_error); 1542 return; 1543 } 1544 /* 1545 * Fill in the component buf structure. 1546 */ 1547 cp = disk->d_consumer; 1548 cbp->bio_done = g_mirror_done; 1549 cbp->bio_to = cp->provider; 1550 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1551 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1552 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1553 cp->acw, cp->ace)); 1554 cp->index++; 1555 g_io_request(cbp, cp); 1556 } 1557 1558 #define TRACK_SIZE (1 * 1024 * 1024) 1559 #define LOAD_SCALE 256 1560 #define ABS(x) (((x) >= 0) ? (x) : (-(x))) 1561 1562 static void 1563 g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp) 1564 { 1565 struct g_mirror_disk *disk, *dp; 1566 struct g_consumer *cp; 1567 struct bio *cbp; 1568 int prio, best; 1569 1570 /* Find a disk with the smallest load. */ 1571 disk = NULL; 1572 best = INT_MAX; 1573 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1574 if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1575 continue; 1576 prio = dp->load; 1577 /* If disk head is precisely in position - highly prefer it. */ 1578 if (dp->d_last_offset == bp->bio_offset) 1579 prio -= 2 * LOAD_SCALE; 1580 else 1581 /* If disk head is close to position - prefer it. */ 1582 if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE) 1583 prio -= 1 * LOAD_SCALE; 1584 if (prio <= best) { 1585 disk = dp; 1586 best = prio; 1587 } 1588 } 1589 KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name)); 1590 cbp = g_clone_bio(bp); 1591 if (cbp == NULL) { 1592 if (bp->bio_error == 0) 1593 bp->bio_error = ENOMEM; 1594 g_io_deliver(bp, bp->bio_error); 1595 return; 1596 } 1597 /* 1598 * Fill in the component buf structure. 1599 */ 1600 cp = disk->d_consumer; 1601 cbp->bio_done = g_mirror_done; 1602 cbp->bio_to = cp->provider; 1603 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1604 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1605 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr, 1606 cp->acw, cp->ace)); 1607 cp->index++; 1608 /* Remember last head position */ 1609 disk->d_last_offset = bp->bio_offset + bp->bio_length; 1610 /* Update loads. */ 1611 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 1612 dp->load = (dp->d_consumer->index * LOAD_SCALE + 1613 dp->load * 7) / 8; 1614 } 1615 g_io_request(cbp, cp); 1616 } 1617 1618 static void 1619 g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp) 1620 { 1621 struct bio_queue queue; 1622 struct g_mirror_disk *disk; 1623 struct g_consumer *cp; 1624 struct bio *cbp; 1625 off_t left, mod, offset, slice; 1626 u_char *data; 1627 u_int ndisks; 1628 1629 if (bp->bio_length <= sc->sc_slice) { 1630 g_mirror_request_round_robin(sc, bp); 1631 return; 1632 } 1633 ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE); 1634 slice = bp->bio_length / ndisks; 1635 mod = slice % sc->sc_provider->sectorsize; 1636 if (mod != 0) 1637 slice += sc->sc_provider->sectorsize - mod; 1638 /* 1639 * Allocate all bios before sending any request, so we can 1640 * return ENOMEM in nice and clean way. 1641 */ 1642 left = bp->bio_length; 1643 offset = bp->bio_offset; 1644 data = bp->bio_data; 1645 TAILQ_INIT(&queue); 1646 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1647 if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) 1648 continue; 1649 cbp = g_clone_bio(bp); 1650 if (cbp == NULL) { 1651 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1652 TAILQ_REMOVE(&queue, cbp, bio_queue); 1653 g_destroy_bio(cbp); 1654 } 1655 if (bp->bio_error == 0) 1656 bp->bio_error = ENOMEM; 1657 g_io_deliver(bp, bp->bio_error); 1658 return; 1659 } 1660 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1661 cbp->bio_done = g_mirror_done; 1662 cbp->bio_caller1 = disk; 1663 cbp->bio_to = disk->d_consumer->provider; 1664 cbp->bio_offset = offset; 1665 cbp->bio_data = data; 1666 cbp->bio_length = MIN(left, slice); 1667 left -= cbp->bio_length; 1668 if (left == 0) 1669 break; 1670 offset += cbp->bio_length; 1671 data += cbp->bio_length; 1672 } 1673 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1674 TAILQ_REMOVE(&queue, cbp, bio_queue); 1675 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1676 disk = cbp->bio_caller1; 1677 cbp->bio_caller1 = NULL; 1678 cp = disk->d_consumer; 1679 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1680 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1681 cp->acr, cp->acw, cp->ace)); 1682 disk->d_consumer->index++; 1683 g_io_request(cbp, disk->d_consumer); 1684 } 1685 } 1686 1687 static void 1688 g_mirror_register_request(struct bio *bp) 1689 { 1690 struct g_mirror_softc *sc; 1691 1692 sc = bp->bio_to->private; 1693 switch (bp->bio_cmd) { 1694 case BIO_READ: 1695 switch (sc->sc_balance) { 1696 case G_MIRROR_BALANCE_LOAD: 1697 g_mirror_request_load(sc, bp); 1698 break; 1699 case G_MIRROR_BALANCE_PREFER: 1700 g_mirror_request_prefer(sc, bp); 1701 break; 1702 case G_MIRROR_BALANCE_ROUND_ROBIN: 1703 g_mirror_request_round_robin(sc, bp); 1704 break; 1705 case G_MIRROR_BALANCE_SPLIT: 1706 g_mirror_request_split(sc, bp); 1707 break; 1708 } 1709 return; 1710 case BIO_WRITE: 1711 case BIO_DELETE: 1712 { 1713 struct bio_queue queue; 1714 struct g_mirror_disk *disk; 1715 struct g_mirror_disk_sync *sync; 1716 struct g_consumer *cp; 1717 struct bio *cbp; 1718 1719 /* 1720 * Delay the request if it is colliding with a synchronization 1721 * request. 1722 */ 1723 if (g_mirror_sync_collision(sc, bp)) { 1724 g_mirror_regular_delay(sc, bp); 1725 return; 1726 } 1727 1728 if (sc->sc_idle) 1729 g_mirror_unidle(sc); 1730 else 1731 sc->sc_last_write = time_uptime; 1732 1733 /* 1734 * Bump syncid on first write. 1735 */ 1736 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) { 1737 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 1738 g_mirror_bump_syncid(sc); 1739 } 1740 1741 /* 1742 * Allocate all bios before sending any request, so we can 1743 * return ENOMEM in nice and clean way. 1744 */ 1745 TAILQ_INIT(&queue); 1746 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 1747 sync = &disk->d_sync; 1748 switch (disk->d_state) { 1749 case G_MIRROR_DISK_STATE_ACTIVE: 1750 break; 1751 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 1752 if (bp->bio_offset >= sync->ds_offset) 1753 continue; 1754 break; 1755 default: 1756 continue; 1757 } 1758 if (bp->bio_cmd == BIO_DELETE && 1759 (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0) 1760 continue; 1761 cbp = g_clone_bio(bp); 1762 if (cbp == NULL) { 1763 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1764 TAILQ_REMOVE(&queue, cbp, bio_queue); 1765 g_destroy_bio(cbp); 1766 } 1767 if (bp->bio_error == 0) 1768 bp->bio_error = ENOMEM; 1769 g_io_deliver(bp, bp->bio_error); 1770 return; 1771 } 1772 TAILQ_INSERT_TAIL(&queue, cbp, bio_queue); 1773 cbp->bio_done = g_mirror_done; 1774 cp = disk->d_consumer; 1775 cbp->bio_caller1 = cp; 1776 cbp->bio_to = cp->provider; 1777 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1778 ("Consumer %s not opened (r%dw%de%d).", 1779 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1780 } 1781 if (TAILQ_EMPTY(&queue)) { 1782 g_io_deliver(bp, EOPNOTSUPP); 1783 return; 1784 } 1785 while ((cbp = TAILQ_FIRST(&queue)) != NULL) { 1786 G_MIRROR_LOGREQ(3, cbp, "Sending request."); 1787 TAILQ_REMOVE(&queue, cbp, bio_queue); 1788 cp = cbp->bio_caller1; 1789 cbp->bio_caller1 = NULL; 1790 cp->index++; 1791 sc->sc_writes++; 1792 g_io_request(cbp, cp); 1793 } 1794 /* 1795 * Put request onto inflight queue, so we can check if new 1796 * synchronization requests don't collide with it. 1797 */ 1798 TAILQ_INSERT_TAIL(&sc->sc_inflight, bp, bio_queue); 1799 return; 1800 } 1801 default: 1802 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1803 bp->bio_cmd, sc->sc_name)); 1804 break; 1805 } 1806 } 1807 1808 static int 1809 g_mirror_can_destroy(struct g_mirror_softc *sc) 1810 { 1811 struct g_geom *gp; 1812 struct g_consumer *cp; 1813 1814 g_topology_assert(); 1815 gp = sc->sc_geom; 1816 if (gp->softc == NULL) 1817 return (1); 1818 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0) 1819 return (0); 1820 LIST_FOREACH(cp, &gp->consumer, consumer) { 1821 if (g_mirror_is_busy(sc, cp)) 1822 return (0); 1823 } 1824 gp = sc->sc_sync.ds_geom; 1825 LIST_FOREACH(cp, &gp->consumer, consumer) { 1826 if (g_mirror_is_busy(sc, cp)) 1827 return (0); 1828 } 1829 G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1830 sc->sc_name); 1831 return (1); 1832 } 1833 1834 static int 1835 g_mirror_try_destroy(struct g_mirror_softc *sc) 1836 { 1837 1838 if (sc->sc_rootmount != NULL) { 1839 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1840 sc->sc_rootmount); 1841 root_mount_rel(sc->sc_rootmount); 1842 sc->sc_rootmount = NULL; 1843 } 1844 g_topology_lock(); 1845 if (!g_mirror_can_destroy(sc)) { 1846 g_topology_unlock(); 1847 return (0); 1848 } 1849 sc->sc_geom->softc = NULL; 1850 sc->sc_sync.ds_geom->softc = NULL; 1851 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DRAIN) != 0) { 1852 g_topology_unlock(); 1853 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1854 &sc->sc_worker); 1855 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 1856 sx_xunlock(&sc->sc_lock); 1857 wakeup(&sc->sc_worker); 1858 sc->sc_worker = NULL; 1859 } else { 1860 g_topology_unlock(); 1861 g_mirror_destroy_device(sc); 1862 } 1863 return (1); 1864 } 1865 1866 /* 1867 * Worker thread. 1868 */ 1869 static void 1870 g_mirror_worker(void *arg) 1871 { 1872 struct g_mirror_softc *sc; 1873 struct g_mirror_event *ep; 1874 struct bio *bp; 1875 int timeout; 1876 1877 sc = arg; 1878 thread_lock(curthread); 1879 sched_prio(curthread, PRIBIO); 1880 thread_unlock(curthread); 1881 1882 sx_xlock(&sc->sc_lock); 1883 for (;;) { 1884 G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); 1885 /* 1886 * First take a look at events. 1887 * This is important to handle events before any I/O requests. 1888 */ 1889 ep = g_mirror_event_first(sc); 1890 if (ep != NULL) { 1891 g_mirror_event_remove(sc, ep); 1892 if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { 1893 /* Update only device status. */ 1894 G_MIRROR_DEBUG(3, 1895 "Running event for device %s.", 1896 sc->sc_name); 1897 ep->e_error = 0; 1898 g_mirror_update_device(sc, true); 1899 } else { 1900 /* Update disk status. */ 1901 G_MIRROR_DEBUG(3, "Running event for disk %s.", 1902 g_mirror_get_diskname(ep->e_disk)); 1903 ep->e_error = g_mirror_update_disk(ep->e_disk, 1904 ep->e_state); 1905 if (ep->e_error == 0) 1906 g_mirror_update_device(sc, false); 1907 } 1908 if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { 1909 KASSERT(ep->e_error == 0, 1910 ("Error cannot be handled.")); 1911 g_mirror_event_free(ep); 1912 } else { 1913 ep->e_flags |= G_MIRROR_EVENT_DONE; 1914 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, 1915 ep); 1916 mtx_lock(&sc->sc_events_mtx); 1917 wakeup(ep); 1918 mtx_unlock(&sc->sc_events_mtx); 1919 } 1920 if ((sc->sc_flags & 1921 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1922 if (g_mirror_try_destroy(sc)) { 1923 curthread->td_pflags &= ~TDP_GEOM; 1924 G_MIRROR_DEBUG(1, "Thread exiting."); 1925 kproc_exit(0); 1926 } 1927 } 1928 G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__); 1929 continue; 1930 } 1931 /* 1932 * Check if we can mark array as CLEAN and if we can't take 1933 * how much seconds should we wait. 1934 */ 1935 timeout = g_mirror_idle(sc, -1); 1936 /* 1937 * Now I/O requests. 1938 */ 1939 /* Get first request from the queue. */ 1940 mtx_lock(&sc->sc_queue_mtx); 1941 bp = TAILQ_FIRST(&sc->sc_queue); 1942 if (bp != NULL) 1943 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 1944 else { 1945 if ((sc->sc_flags & 1946 G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 1947 mtx_unlock(&sc->sc_queue_mtx); 1948 if (g_mirror_try_destroy(sc)) { 1949 curthread->td_pflags &= ~TDP_GEOM; 1950 G_MIRROR_DEBUG(1, "Thread exiting."); 1951 kproc_exit(0); 1952 } 1953 mtx_lock(&sc->sc_queue_mtx); 1954 if (!TAILQ_EMPTY(&sc->sc_queue)) { 1955 mtx_unlock(&sc->sc_queue_mtx); 1956 continue; 1957 } 1958 } 1959 if (g_mirror_event_first(sc) != NULL) 1960 continue; 1961 sx_xunlock(&sc->sc_lock); 1962 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1", 1963 timeout * hz); 1964 sx_xlock(&sc->sc_lock); 1965 G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__); 1966 continue; 1967 } 1968 mtx_unlock(&sc->sc_queue_mtx); 1969 1970 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 1971 (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) { 1972 g_mirror_sync_request(bp); /* READ */ 1973 } else if (bp->bio_to != sc->sc_provider) { 1974 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0) 1975 g_mirror_regular_request(bp); 1976 else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 1977 g_mirror_sync_request(bp); /* WRITE */ 1978 else { 1979 KASSERT(0, 1980 ("Invalid request cflags=0x%hx to=%s.", 1981 bp->bio_cflags, bp->bio_to->name)); 1982 } 1983 } else { 1984 g_mirror_register_request(bp); 1985 } 1986 G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__); 1987 } 1988 } 1989 1990 static void 1991 g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk) 1992 { 1993 1994 sx_assert(&sc->sc_lock, SX_LOCKED); 1995 1996 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0) 1997 return; 1998 if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) { 1999 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.", 2000 g_mirror_get_diskname(disk), sc->sc_name); 2001 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2002 } else if (sc->sc_idle && 2003 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2004 G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.", 2005 g_mirror_get_diskname(disk), sc->sc_name); 2006 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2007 } 2008 } 2009 2010 static void 2011 g_mirror_sync_start(struct g_mirror_disk *disk) 2012 { 2013 struct g_mirror_softc *sc; 2014 struct g_consumer *cp; 2015 struct bio *bp; 2016 int error, i; 2017 2018 g_topology_assert_not(); 2019 sc = disk->d_softc; 2020 sx_assert(&sc->sc_lock, SX_LOCKED); 2021 2022 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2023 ("Disk %s is not marked for synchronization.", 2024 g_mirror_get_diskname(disk))); 2025 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2026 ("Device not in RUNNING state (%s, %u).", sc->sc_name, 2027 sc->sc_state)); 2028 2029 sx_xunlock(&sc->sc_lock); 2030 g_topology_lock(); 2031 cp = g_new_consumer(sc->sc_sync.ds_geom); 2032 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 2033 error = g_attach(cp, sc->sc_provider); 2034 KASSERT(error == 0, 2035 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2036 error = g_access(cp, 1, 0, 0); 2037 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2038 g_topology_unlock(); 2039 sx_xlock(&sc->sc_lock); 2040 2041 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2042 g_mirror_get_diskname(disk)); 2043 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0) 2044 disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY; 2045 KASSERT(disk->d_sync.ds_consumer == NULL, 2046 ("Sync consumer already exists (device=%s, disk=%s).", 2047 sc->sc_name, g_mirror_get_diskname(disk))); 2048 2049 disk->d_sync.ds_consumer = cp; 2050 disk->d_sync.ds_consumer->private = disk; 2051 disk->d_sync.ds_consumer->index = 0; 2052 2053 /* 2054 * Allocate memory for synchronization bios and initialize them. 2055 */ 2056 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 2057 M_MIRROR, M_WAITOK); 2058 for (i = 0; i < g_mirror_syncreqs; i++) { 2059 bp = g_alloc_bio(); 2060 disk->d_sync.ds_bios[i] = bp; 2061 bp->bio_parent = NULL; 2062 bp->bio_cmd = BIO_READ; 2063 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 2064 bp->bio_cflags = 0; 2065 bp->bio_offset = disk->d_sync.ds_offset; 2066 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 2067 disk->d_sync.ds_offset += bp->bio_length; 2068 bp->bio_done = g_mirror_sync_done; 2069 bp->bio_from = disk->d_sync.ds_consumer; 2070 bp->bio_to = sc->sc_provider; 2071 bp->bio_caller1 = (void *)(uintptr_t)i; 2072 } 2073 2074 /* Increase the number of disks in SYNCHRONIZING state. */ 2075 sc->sc_sync.ds_ndisks++; 2076 /* Set the number of in-flight synchronization requests. */ 2077 disk->d_sync.ds_inflight = g_mirror_syncreqs; 2078 2079 /* 2080 * Fire off first synchronization requests. 2081 */ 2082 for (i = 0; i < g_mirror_syncreqs; i++) { 2083 bp = disk->d_sync.ds_bios[i]; 2084 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 2085 disk->d_sync.ds_consumer->index++; 2086 /* 2087 * Delay the request if it is colliding with a regular request. 2088 */ 2089 if (g_mirror_regular_collision(sc, bp)) 2090 g_mirror_sync_delay(sc, bp); 2091 else 2092 g_io_request(bp, disk->d_sync.ds_consumer); 2093 } 2094 } 2095 2096 /* 2097 * Stop synchronization process. 2098 * type: 0 - synchronization finished 2099 * 1 - synchronization stopped 2100 */ 2101 static void 2102 g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2103 { 2104 struct g_mirror_softc *sc; 2105 struct g_consumer *cp; 2106 2107 g_topology_assert_not(); 2108 sc = disk->d_softc; 2109 sx_assert(&sc->sc_lock, SX_LOCKED); 2110 2111 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2112 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2113 g_mirror_disk_state2str(disk->d_state))); 2114 if (disk->d_sync.ds_consumer == NULL) 2115 return; 2116 2117 if (type == 0) { 2118 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2119 sc->sc_name, g_mirror_get_diskname(disk)); 2120 } else /* if (type == 1) */ { 2121 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2122 sc->sc_name, g_mirror_get_diskname(disk)); 2123 } 2124 g_mirror_regular_release(sc); 2125 free(disk->d_sync.ds_bios, M_MIRROR); 2126 disk->d_sync.ds_bios = NULL; 2127 cp = disk->d_sync.ds_consumer; 2128 disk->d_sync.ds_consumer = NULL; 2129 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2130 sc->sc_sync.ds_ndisks--; 2131 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 2132 g_topology_lock(); 2133 g_mirror_kill_consumer(sc, cp); 2134 g_topology_unlock(); 2135 sx_xlock(&sc->sc_lock); 2136 } 2137 2138 static void 2139 g_mirror_launch_provider(struct g_mirror_softc *sc) 2140 { 2141 struct g_mirror_disk *disk; 2142 struct g_provider *pp, *dp; 2143 2144 sx_assert(&sc->sc_lock, SX_LOCKED); 2145 2146 g_topology_lock(); 2147 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2148 pp->flags |= G_PF_DIRECT_RECEIVE; 2149 pp->mediasize = sc->sc_mediasize; 2150 pp->sectorsize = sc->sc_sectorsize; 2151 pp->stripesize = 0; 2152 pp->stripeoffset = 0; 2153 2154 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2155 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2156 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2157 2158 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2159 if (disk->d_consumer && disk->d_consumer->provider) { 2160 dp = disk->d_consumer->provider; 2161 if (dp->stripesize > pp->stripesize) { 2162 pp->stripesize = dp->stripesize; 2163 pp->stripeoffset = dp->stripeoffset; 2164 } 2165 /* A provider underneath us doesn't support unmapped */ 2166 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2167 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2168 "because of %s.", dp->name); 2169 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2170 } 2171 } 2172 } 2173 pp->private = sc; 2174 sc->sc_refcnt++; 2175 sc->sc_provider = pp; 2176 g_error_provider(pp, 0); 2177 g_topology_unlock(); 2178 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2179 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2180 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2181 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2182 g_mirror_sync_start(disk); 2183 } 2184 } 2185 2186 static void 2187 g_mirror_destroy_provider(struct g_mirror_softc *sc) 2188 { 2189 struct g_mirror_disk *disk; 2190 struct bio *bp; 2191 2192 g_topology_assert_not(); 2193 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2194 sc->sc_name)); 2195 2196 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2197 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2198 g_mirror_sync_stop(disk, 1); 2199 } 2200 2201 g_topology_lock(); 2202 g_error_provider(sc->sc_provider, ENXIO); 2203 mtx_lock(&sc->sc_queue_mtx); 2204 while ((bp = TAILQ_FIRST(&sc->sc_queue)) != NULL) { 2205 TAILQ_REMOVE(&sc->sc_queue, bp, bio_queue); 2206 /* 2207 * Abort any pending I/O that wasn't generated by us. 2208 * Synchronization requests and requests destined for individual 2209 * mirror components can be destroyed immediately. 2210 */ 2211 if (bp->bio_to == sc->sc_provider && 2212 bp->bio_from->geom != sc->sc_sync.ds_geom) { 2213 g_io_deliver(bp, ENXIO); 2214 } else { 2215 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 2216 free(bp->bio_data, M_MIRROR); 2217 g_destroy_bio(bp); 2218 } 2219 } 2220 mtx_unlock(&sc->sc_queue_mtx); 2221 g_wither_provider(sc->sc_provider, ENXIO); 2222 sc->sc_provider = NULL; 2223 G_MIRROR_DEBUG(0, "Device %s: provider destroyed.", sc->sc_name); 2224 g_topology_unlock(); 2225 } 2226 2227 static void 2228 g_mirror_go(void *arg) 2229 { 2230 struct g_mirror_softc *sc; 2231 2232 sc = arg; 2233 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2234 g_mirror_event_send(sc, 0, 2235 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2236 } 2237 2238 static u_int 2239 g_mirror_determine_state(struct g_mirror_disk *disk) 2240 { 2241 struct g_mirror_softc *sc; 2242 u_int state; 2243 2244 sc = disk->d_softc; 2245 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2246 if ((disk->d_flags & 2247 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0 && 2248 (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 || 2249 (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0)) { 2250 /* Disk does not need synchronization. */ 2251 state = G_MIRROR_DISK_STATE_ACTIVE; 2252 } else { 2253 if ((sc->sc_flags & 2254 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2255 (disk->d_flags & 2256 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2257 /* 2258 * We can start synchronization from 2259 * the stored offset. 2260 */ 2261 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2262 } else { 2263 state = G_MIRROR_DISK_STATE_STALE; 2264 } 2265 } 2266 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2267 /* 2268 * Reset all synchronization data for this disk, 2269 * because if it even was synchronized, it was 2270 * synchronized to disks with different syncid. 2271 */ 2272 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2273 disk->d_sync.ds_offset = 0; 2274 disk->d_sync.ds_offset_done = 0; 2275 disk->d_sync.ds_syncid = sc->sc_syncid; 2276 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2277 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2278 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2279 } else { 2280 state = G_MIRROR_DISK_STATE_STALE; 2281 } 2282 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2283 /* 2284 * Not good, NOT GOOD! 2285 * It means that mirror was started on stale disks 2286 * and more fresh disk just arrive. 2287 * If there were writes, mirror is broken, sorry. 2288 * I think the best choice here is don't touch 2289 * this disk and inform the user loudly. 2290 */ 2291 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2292 "disk (%s) arrives!! It will not be connected to the " 2293 "running device.", sc->sc_name, 2294 g_mirror_get_diskname(disk)); 2295 g_mirror_destroy_disk(disk); 2296 state = G_MIRROR_DISK_STATE_NONE; 2297 /* Return immediately, because disk was destroyed. */ 2298 return (state); 2299 } 2300 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2301 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2302 return (state); 2303 } 2304 2305 /* 2306 * Update device state. 2307 */ 2308 static void 2309 g_mirror_update_device(struct g_mirror_softc *sc, bool force) 2310 { 2311 struct g_mirror_disk *disk; 2312 u_int state; 2313 2314 sx_assert(&sc->sc_lock, SX_XLOCKED); 2315 2316 switch (sc->sc_state) { 2317 case G_MIRROR_DEVICE_STATE_STARTING: 2318 { 2319 struct g_mirror_disk *pdisk, *tdisk; 2320 u_int dirty, ndisks, genid, syncid; 2321 bool broken; 2322 2323 KASSERT(sc->sc_provider == NULL, 2324 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2325 /* 2326 * Are we ready? We are, if all disks are connected or 2327 * if we have any disks and 'force' is true. 2328 */ 2329 ndisks = g_mirror_ndisks(sc, -1); 2330 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2331 ; 2332 } else if (ndisks == 0) { 2333 /* 2334 * Disks went down in starting phase, so destroy 2335 * device. 2336 */ 2337 callout_drain(&sc->sc_callout); 2338 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2339 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2340 sc->sc_rootmount); 2341 root_mount_rel(sc->sc_rootmount); 2342 sc->sc_rootmount = NULL; 2343 return; 2344 } else { 2345 return; 2346 } 2347 2348 /* 2349 * Activate all disks with the biggest syncid. 2350 */ 2351 if (force) { 2352 /* 2353 * If 'force' is true, we have been called due to 2354 * timeout, so don't bother canceling timeout. 2355 */ 2356 ndisks = 0; 2357 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2358 if ((disk->d_flags & 2359 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2360 ndisks++; 2361 } 2362 } 2363 if (ndisks == 0) { 2364 /* No valid disks found, destroy device. */ 2365 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2366 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2367 __LINE__, sc->sc_rootmount); 2368 root_mount_rel(sc->sc_rootmount); 2369 sc->sc_rootmount = NULL; 2370 return; 2371 } 2372 } else { 2373 /* Cancel timeout. */ 2374 callout_drain(&sc->sc_callout); 2375 } 2376 2377 /* 2378 * Find the biggest genid. 2379 */ 2380 genid = 0; 2381 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2382 if (disk->d_genid > genid) 2383 genid = disk->d_genid; 2384 } 2385 sc->sc_genid = genid; 2386 /* 2387 * Remove all disks without the biggest genid. 2388 */ 2389 broken = false; 2390 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2391 if (disk->d_genid < genid) { 2392 G_MIRROR_DEBUG(0, 2393 "Component %s (device %s) broken, skipping.", 2394 g_mirror_get_diskname(disk), sc->sc_name); 2395 g_mirror_destroy_disk(disk); 2396 /* 2397 * Bump the syncid in case we discover a healthy 2398 * replacement disk after starting the mirror. 2399 */ 2400 broken = true; 2401 } 2402 } 2403 2404 /* 2405 * Find the biggest syncid. 2406 */ 2407 syncid = 0; 2408 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2409 if (disk->d_sync.ds_syncid > syncid) 2410 syncid = disk->d_sync.ds_syncid; 2411 } 2412 2413 /* 2414 * Here we need to look for dirty disks and if all disks 2415 * with the biggest syncid are dirty, we have to choose 2416 * one with the biggest priority and rebuild the rest. 2417 */ 2418 /* 2419 * Find the number of dirty disks with the biggest syncid. 2420 * Find the number of disks with the biggest syncid. 2421 * While here, find a disk with the biggest priority. 2422 */ 2423 dirty = ndisks = 0; 2424 pdisk = NULL; 2425 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2426 if (disk->d_sync.ds_syncid != syncid) 2427 continue; 2428 if ((disk->d_flags & 2429 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2430 continue; 2431 } 2432 ndisks++; 2433 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2434 dirty++; 2435 if (pdisk == NULL || 2436 pdisk->d_priority < disk->d_priority) { 2437 pdisk = disk; 2438 } 2439 } 2440 } 2441 if (dirty == 0) { 2442 /* No dirty disks at all, great. */ 2443 } else if (dirty == ndisks) { 2444 /* 2445 * Force synchronization for all dirty disks except one 2446 * with the biggest priority. 2447 */ 2448 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2449 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2450 "master disk for synchronization.", 2451 g_mirror_get_diskname(pdisk), sc->sc_name); 2452 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2453 if (disk->d_sync.ds_syncid != syncid) 2454 continue; 2455 if ((disk->d_flags & 2456 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2457 continue; 2458 } 2459 KASSERT((disk->d_flags & 2460 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2461 ("Disk %s isn't marked as dirty.", 2462 g_mirror_get_diskname(disk))); 2463 /* Skip the disk with the biggest priority. */ 2464 if (disk == pdisk) 2465 continue; 2466 disk->d_sync.ds_syncid = 0; 2467 } 2468 } else if (dirty < ndisks) { 2469 /* 2470 * Force synchronization for all dirty disks. 2471 * We have some non-dirty disks. 2472 */ 2473 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2474 if (disk->d_sync.ds_syncid != syncid) 2475 continue; 2476 if ((disk->d_flags & 2477 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2478 continue; 2479 } 2480 if ((disk->d_flags & 2481 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2482 continue; 2483 } 2484 disk->d_sync.ds_syncid = 0; 2485 } 2486 } 2487 2488 /* Reset hint. */ 2489 sc->sc_hint = NULL; 2490 sc->sc_syncid = syncid; 2491 if (force || broken) { 2492 /* Remember to bump syncid on first write. */ 2493 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2494 } 2495 state = G_MIRROR_DEVICE_STATE_RUNNING; 2496 G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.", 2497 sc->sc_name, g_mirror_device_state2str(sc->sc_state), 2498 g_mirror_device_state2str(state)); 2499 sc->sc_state = state; 2500 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2501 state = g_mirror_determine_state(disk); 2502 g_mirror_event_send(disk, state, 2503 G_MIRROR_EVENT_DONTWAIT); 2504 if (state == G_MIRROR_DISK_STATE_STALE) 2505 sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID; 2506 } 2507 break; 2508 } 2509 case G_MIRROR_DEVICE_STATE_RUNNING: 2510 if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 && 2511 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2512 /* 2513 * No usable disks, so destroy the device. 2514 */ 2515 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2516 break; 2517 } else if (g_mirror_ndisks(sc, 2518 G_MIRROR_DISK_STATE_ACTIVE) > 0 && 2519 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) { 2520 /* 2521 * We have active disks, launch provider if it doesn't 2522 * exist. 2523 */ 2524 if (sc->sc_provider == NULL) 2525 g_mirror_launch_provider(sc); 2526 if (sc->sc_rootmount != NULL) { 2527 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2528 __LINE__, sc->sc_rootmount); 2529 root_mount_rel(sc->sc_rootmount); 2530 sc->sc_rootmount = NULL; 2531 } 2532 } 2533 /* 2534 * Genid should be bumped immediately, so do it here. 2535 */ 2536 if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) { 2537 sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID; 2538 g_mirror_bump_genid(sc); 2539 } 2540 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID_NOW) != 0) { 2541 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID_NOW; 2542 g_mirror_bump_syncid(sc); 2543 } 2544 break; 2545 default: 2546 KASSERT(1 == 0, ("Wrong device state (%s, %s).", 2547 sc->sc_name, g_mirror_device_state2str(sc->sc_state))); 2548 break; 2549 } 2550 } 2551 2552 /* 2553 * Update disk state and device state if needed. 2554 */ 2555 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2556 "Disk %s state changed from %s to %s (device %s).", \ 2557 g_mirror_get_diskname(disk), \ 2558 g_mirror_disk_state2str(disk->d_state), \ 2559 g_mirror_disk_state2str(state), sc->sc_name) 2560 static int 2561 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2562 { 2563 struct g_mirror_softc *sc; 2564 2565 sc = disk->d_softc; 2566 sx_assert(&sc->sc_lock, SX_XLOCKED); 2567 2568 again: 2569 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2570 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2571 g_mirror_disk_state2str(state)); 2572 switch (state) { 2573 case G_MIRROR_DISK_STATE_NEW: 2574 /* 2575 * Possible scenarios: 2576 * 1. New disk arrive. 2577 */ 2578 /* Previous state should be NONE. */ 2579 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2580 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2581 g_mirror_disk_state2str(disk->d_state))); 2582 DISK_STATE_CHANGED(); 2583 2584 disk->d_state = state; 2585 if (LIST_EMPTY(&sc->sc_disks)) 2586 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2587 else { 2588 struct g_mirror_disk *dp; 2589 2590 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2591 if (disk->d_priority >= dp->d_priority) { 2592 LIST_INSERT_BEFORE(dp, disk, d_next); 2593 dp = NULL; 2594 break; 2595 } 2596 if (LIST_NEXT(dp, d_next) == NULL) 2597 break; 2598 } 2599 if (dp != NULL) 2600 LIST_INSERT_AFTER(dp, disk, d_next); 2601 } 2602 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2603 sc->sc_name, g_mirror_get_diskname(disk)); 2604 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2605 break; 2606 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2607 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2608 g_mirror_device_state2str(sc->sc_state), 2609 g_mirror_get_diskname(disk), 2610 g_mirror_disk_state2str(disk->d_state))); 2611 state = g_mirror_determine_state(disk); 2612 if (state != G_MIRROR_DISK_STATE_NONE) 2613 goto again; 2614 break; 2615 case G_MIRROR_DISK_STATE_ACTIVE: 2616 /* 2617 * Possible scenarios: 2618 * 1. New disk does not need synchronization. 2619 * 2. Synchronization process finished successfully. 2620 */ 2621 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2622 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2623 g_mirror_device_state2str(sc->sc_state), 2624 g_mirror_get_diskname(disk), 2625 g_mirror_disk_state2str(disk->d_state))); 2626 /* Previous state should be NEW or SYNCHRONIZING. */ 2627 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2628 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2629 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2630 g_mirror_disk_state2str(disk->d_state))); 2631 DISK_STATE_CHANGED(); 2632 2633 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2634 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2635 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2636 g_mirror_sync_stop(disk, 0); 2637 } 2638 disk->d_state = state; 2639 disk->d_sync.ds_offset = 0; 2640 disk->d_sync.ds_offset_done = 0; 2641 g_mirror_update_idle(sc, disk); 2642 g_mirror_update_metadata(disk); 2643 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2644 sc->sc_name, g_mirror_get_diskname(disk)); 2645 break; 2646 case G_MIRROR_DISK_STATE_STALE: 2647 /* 2648 * Possible scenarios: 2649 * 1. Stale disk was connected. 2650 */ 2651 /* Previous state should be NEW. */ 2652 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2653 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2654 g_mirror_disk_state2str(disk->d_state))); 2655 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2656 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2657 g_mirror_device_state2str(sc->sc_state), 2658 g_mirror_get_diskname(disk), 2659 g_mirror_disk_state2str(disk->d_state))); 2660 /* 2661 * STALE state is only possible if device is marked 2662 * NOAUTOSYNC. 2663 */ 2664 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0, 2665 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2666 g_mirror_device_state2str(sc->sc_state), 2667 g_mirror_get_diskname(disk), 2668 g_mirror_disk_state2str(disk->d_state))); 2669 DISK_STATE_CHANGED(); 2670 2671 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2672 disk->d_state = state; 2673 g_mirror_update_metadata(disk); 2674 G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.", 2675 sc->sc_name, g_mirror_get_diskname(disk)); 2676 break; 2677 case G_MIRROR_DISK_STATE_SYNCHRONIZING: 2678 /* 2679 * Possible scenarios: 2680 * 1. Disk which needs synchronization was connected. 2681 */ 2682 /* Previous state should be NEW. */ 2683 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2684 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2685 g_mirror_disk_state2str(disk->d_state))); 2686 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2687 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2688 g_mirror_device_state2str(sc->sc_state), 2689 g_mirror_get_diskname(disk), 2690 g_mirror_disk_state2str(disk->d_state))); 2691 DISK_STATE_CHANGED(); 2692 2693 if (disk->d_state == G_MIRROR_DISK_STATE_NEW) 2694 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2695 disk->d_state = state; 2696 if (sc->sc_provider != NULL) { 2697 g_mirror_sync_start(disk); 2698 g_mirror_update_metadata(disk); 2699 } 2700 break; 2701 case G_MIRROR_DISK_STATE_DISCONNECTED: 2702 /* 2703 * Possible scenarios: 2704 * 1. Device wasn't running yet, but disk disappear. 2705 * 2. Disk was active and disapppear. 2706 * 3. Disk disappear during synchronization process. 2707 */ 2708 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { 2709 /* 2710 * Previous state should be ACTIVE, STALE or 2711 * SYNCHRONIZING. 2712 */ 2713 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || 2714 disk->d_state == G_MIRROR_DISK_STATE_STALE || 2715 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2716 ("Wrong disk state (%s, %s).", 2717 g_mirror_get_diskname(disk), 2718 g_mirror_disk_state2str(disk->d_state))); 2719 } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { 2720 /* Previous state should be NEW. */ 2721 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2722 ("Wrong disk state (%s, %s).", 2723 g_mirror_get_diskname(disk), 2724 g_mirror_disk_state2str(disk->d_state))); 2725 /* 2726 * Reset bumping syncid if disk disappeared in STARTING 2727 * state. 2728 */ 2729 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2730 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2731 #ifdef INVARIANTS 2732 } else { 2733 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2734 sc->sc_name, 2735 g_mirror_device_state2str(sc->sc_state), 2736 g_mirror_get_diskname(disk), 2737 g_mirror_disk_state2str(disk->d_state))); 2738 #endif 2739 } 2740 DISK_STATE_CHANGED(); 2741 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2742 sc->sc_name, g_mirror_get_diskname(disk)); 2743 2744 g_mirror_destroy_disk(disk); 2745 break; 2746 case G_MIRROR_DISK_STATE_DESTROY: 2747 { 2748 int error; 2749 2750 error = g_mirror_clear_metadata(disk); 2751 if (error != 0) { 2752 G_MIRROR_DEBUG(0, 2753 "Device %s: failed to clear metadata on %s: %d.", 2754 sc->sc_name, g_mirror_get_diskname(disk), error); 2755 break; 2756 } 2757 DISK_STATE_CHANGED(); 2758 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2759 sc->sc_name, g_mirror_get_diskname(disk)); 2760 2761 g_mirror_destroy_disk(disk); 2762 sc->sc_ndisks--; 2763 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2764 g_mirror_update_metadata(disk); 2765 } 2766 break; 2767 } 2768 default: 2769 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2770 break; 2771 } 2772 return (0); 2773 } 2774 #undef DISK_STATE_CHANGED 2775 2776 int 2777 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2778 { 2779 struct g_provider *pp; 2780 u_char *buf; 2781 int error; 2782 2783 g_topology_assert(); 2784 2785 error = g_access(cp, 1, 0, 0); 2786 if (error != 0) 2787 return (error); 2788 pp = cp->provider; 2789 g_topology_unlock(); 2790 /* Metadata are stored on last sector. */ 2791 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2792 &error); 2793 g_topology_lock(); 2794 g_access(cp, -1, 0, 0); 2795 if (buf == NULL) { 2796 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2797 cp->provider->name, error); 2798 return (error); 2799 } 2800 2801 /* Decode metadata. */ 2802 error = mirror_metadata_decode(buf, md); 2803 g_free(buf); 2804 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2805 return (EINVAL); 2806 if (md->md_version > G_MIRROR_VERSION) { 2807 G_MIRROR_DEBUG(0, 2808 "Kernel module is too old to handle metadata from %s.", 2809 cp->provider->name); 2810 return (EINVAL); 2811 } 2812 if (error != 0) { 2813 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2814 cp->provider->name); 2815 return (error); 2816 } 2817 2818 return (0); 2819 } 2820 2821 static int 2822 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2823 struct g_mirror_metadata *md) 2824 { 2825 2826 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2827 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2828 pp->name, md->md_did); 2829 return (EEXIST); 2830 } 2831 if (md->md_all != sc->sc_ndisks) { 2832 G_MIRROR_DEBUG(1, 2833 "Invalid '%s' field on disk %s (device %s), skipping.", 2834 "md_all", pp->name, sc->sc_name); 2835 return (EINVAL); 2836 } 2837 if (md->md_slice != sc->sc_slice) { 2838 G_MIRROR_DEBUG(1, 2839 "Invalid '%s' field on disk %s (device %s), skipping.", 2840 "md_slice", pp->name, sc->sc_name); 2841 return (EINVAL); 2842 } 2843 if (md->md_balance != sc->sc_balance) { 2844 G_MIRROR_DEBUG(1, 2845 "Invalid '%s' field on disk %s (device %s), skipping.", 2846 "md_balance", pp->name, sc->sc_name); 2847 return (EINVAL); 2848 } 2849 #if 0 2850 if (md->md_mediasize != sc->sc_mediasize) { 2851 G_MIRROR_DEBUG(1, 2852 "Invalid '%s' field on disk %s (device %s), skipping.", 2853 "md_mediasize", pp->name, sc->sc_name); 2854 return (EINVAL); 2855 } 2856 #endif 2857 if (sc->sc_mediasize > pp->mediasize) { 2858 G_MIRROR_DEBUG(1, 2859 "Invalid size of disk %s (device %s), skipping.", pp->name, 2860 sc->sc_name); 2861 return (EINVAL); 2862 } 2863 if (md->md_sectorsize != sc->sc_sectorsize) { 2864 G_MIRROR_DEBUG(1, 2865 "Invalid '%s' field on disk %s (device %s), skipping.", 2866 "md_sectorsize", pp->name, sc->sc_name); 2867 return (EINVAL); 2868 } 2869 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2870 G_MIRROR_DEBUG(1, 2871 "Invalid sector size of disk %s (device %s), skipping.", 2872 pp->name, sc->sc_name); 2873 return (EINVAL); 2874 } 2875 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2876 G_MIRROR_DEBUG(1, 2877 "Invalid device flags on disk %s (device %s), skipping.", 2878 pp->name, sc->sc_name); 2879 return (EINVAL); 2880 } 2881 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2882 G_MIRROR_DEBUG(1, 2883 "Invalid disk flags on disk %s (device %s), skipping.", 2884 pp->name, sc->sc_name); 2885 return (EINVAL); 2886 } 2887 return (0); 2888 } 2889 2890 int 2891 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2892 struct g_mirror_metadata *md) 2893 { 2894 struct g_mirror_disk *disk; 2895 int error; 2896 2897 g_topology_assert_not(); 2898 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2899 2900 error = g_mirror_check_metadata(sc, pp, md); 2901 if (error != 0) 2902 return (error); 2903 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2904 md->md_genid < sc->sc_genid) { 2905 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2906 pp->name, sc->sc_name); 2907 return (EINVAL); 2908 } 2909 disk = g_mirror_init_disk(sc, pp, md, &error); 2910 if (disk == NULL) 2911 return (error); 2912 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2913 G_MIRROR_EVENT_WAIT); 2914 if (error != 0) 2915 return (error); 2916 if (md->md_version < G_MIRROR_VERSION) { 2917 G_MIRROR_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 2918 pp->name, md->md_version, G_MIRROR_VERSION); 2919 g_mirror_update_metadata(disk); 2920 } 2921 return (0); 2922 } 2923 2924 static void 2925 g_mirror_destroy_delayed(void *arg, int flag) 2926 { 2927 struct g_mirror_softc *sc; 2928 int error; 2929 2930 if (flag == EV_CANCEL) { 2931 G_MIRROR_DEBUG(1, "Destroying canceled."); 2932 return; 2933 } 2934 sc = arg; 2935 g_topology_unlock(); 2936 sx_xlock(&sc->sc_lock); 2937 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2938 ("DESTROY flag set on %s.", sc->sc_name)); 2939 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0, 2940 ("CLOSEWAIT flag not set on %s.", sc->sc_name)); 2941 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2942 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2943 if (error != 0) { 2944 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2945 sc->sc_name, error); 2946 sx_xunlock(&sc->sc_lock); 2947 } 2948 g_topology_lock(); 2949 } 2950 2951 static int 2952 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2953 { 2954 struct g_mirror_softc *sc; 2955 int error = 0; 2956 2957 g_topology_assert(); 2958 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2959 acw, ace); 2960 2961 sc = pp->private; 2962 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2963 2964 g_topology_unlock(); 2965 sx_xlock(&sc->sc_lock); 2966 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2967 (sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 || 2968 LIST_EMPTY(&sc->sc_disks)) { 2969 if (acr > 0 || acw > 0 || ace > 0) 2970 error = ENXIO; 2971 goto end; 2972 } 2973 sc->sc_provider_open += acr + acw + ace; 2974 if (pp->acw + acw == 0) 2975 g_mirror_idle(sc, 0); 2976 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_CLOSEWAIT) != 0 && 2977 sc->sc_provider_open == 0) 2978 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, sc, NULL); 2979 end: 2980 sx_xunlock(&sc->sc_lock); 2981 g_topology_lock(); 2982 return (error); 2983 } 2984 2985 struct g_geom * 2986 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md, 2987 u_int type) 2988 { 2989 struct g_mirror_softc *sc; 2990 struct g_geom *gp; 2991 int error, timeout; 2992 2993 g_topology_assert(); 2994 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2995 md->md_mid); 2996 2997 /* One disk is minimum. */ 2998 if (md->md_all < 1) 2999 return (NULL); 3000 /* 3001 * Action geom. 3002 */ 3003 gp = g_new_geomf(mp, "%s", md->md_name); 3004 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 3005 gp->start = g_mirror_start; 3006 gp->orphan = g_mirror_orphan; 3007 gp->access = g_mirror_access; 3008 gp->dumpconf = g_mirror_dumpconf; 3009 3010 sc->sc_type = type; 3011 sc->sc_id = md->md_mid; 3012 sc->sc_slice = md->md_slice; 3013 sc->sc_balance = md->md_balance; 3014 sc->sc_mediasize = md->md_mediasize; 3015 sc->sc_sectorsize = md->md_sectorsize; 3016 sc->sc_ndisks = md->md_all; 3017 sc->sc_flags = md->md_mflags; 3018 sc->sc_bump_id = 0; 3019 sc->sc_idle = 1; 3020 sc->sc_last_write = time_uptime; 3021 sc->sc_writes = 0; 3022 sc->sc_refcnt = 1; 3023 sx_init(&sc->sc_lock, "gmirror:lock"); 3024 TAILQ_INIT(&sc->sc_queue); 3025 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 3026 TAILQ_INIT(&sc->sc_regular_delayed); 3027 TAILQ_INIT(&sc->sc_inflight); 3028 TAILQ_INIT(&sc->sc_sync_delayed); 3029 LIST_INIT(&sc->sc_disks); 3030 TAILQ_INIT(&sc->sc_events); 3031 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 3032 callout_init(&sc->sc_callout, 1); 3033 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 3034 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 3035 gp->softc = sc; 3036 sc->sc_geom = gp; 3037 sc->sc_provider = NULL; 3038 sc->sc_provider_open = 0; 3039 /* 3040 * Synchronization geom. 3041 */ 3042 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3043 gp->softc = sc; 3044 gp->orphan = g_mirror_orphan; 3045 sc->sc_sync.ds_geom = gp; 3046 sc->sc_sync.ds_ndisks = 0; 3047 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 3048 "g_mirror %s", md->md_name); 3049 if (error != 0) { 3050 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 3051 sc->sc_name); 3052 g_destroy_geom(sc->sc_sync.ds_geom); 3053 g_destroy_geom(sc->sc_geom); 3054 g_mirror_free_device(sc); 3055 return (NULL); 3056 } 3057 3058 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 3059 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3060 3061 sc->sc_rootmount = root_mount_hold("GMIRROR"); 3062 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3063 /* 3064 * Run timeout. 3065 */ 3066 timeout = g_mirror_timeout * hz; 3067 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 3068 return (sc->sc_geom); 3069 } 3070 3071 int 3072 g_mirror_destroy(struct g_mirror_softc *sc, int how) 3073 { 3074 struct g_mirror_disk *disk; 3075 3076 g_topology_assert_not(); 3077 sx_assert(&sc->sc_lock, SX_XLOCKED); 3078 3079 if (sc->sc_provider_open != 0) { 3080 switch (how) { 3081 case G_MIRROR_DESTROY_SOFT: 3082 G_MIRROR_DEBUG(1, 3083 "Device %s is still open (%d).", sc->sc_name, 3084 sc->sc_provider_open); 3085 return (EBUSY); 3086 case G_MIRROR_DESTROY_DELAYED: 3087 G_MIRROR_DEBUG(1, 3088 "Device %s will be destroyed on last close.", 3089 sc->sc_name); 3090 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 3091 if (disk->d_state == 3092 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3093 g_mirror_sync_stop(disk, 1); 3094 } 3095 } 3096 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_CLOSEWAIT; 3097 return (EBUSY); 3098 case G_MIRROR_DESTROY_HARD: 3099 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 3100 "can't be definitely removed.", sc->sc_name); 3101 } 3102 } 3103 3104 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3105 sx_xunlock(&sc->sc_lock); 3106 return (0); 3107 } 3108 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3109 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DRAIN; 3110 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3111 sx_xunlock(&sc->sc_lock); 3112 mtx_lock(&sc->sc_queue_mtx); 3113 wakeup(sc); 3114 mtx_unlock(&sc->sc_queue_mtx); 3115 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3116 while (sc->sc_worker != NULL) 3117 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3118 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3119 sx_xlock(&sc->sc_lock); 3120 g_mirror_destroy_device(sc); 3121 return (0); 3122 } 3123 3124 static void 3125 g_mirror_taste_orphan(struct g_consumer *cp) 3126 { 3127 3128 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3129 cp->provider->name)); 3130 } 3131 3132 static struct g_geom * 3133 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3134 { 3135 struct g_mirror_metadata md; 3136 struct g_mirror_softc *sc; 3137 struct g_consumer *cp; 3138 struct g_geom *gp; 3139 int error; 3140 3141 g_topology_assert(); 3142 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3143 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3144 3145 gp = g_new_geomf(mp, "mirror:taste"); 3146 /* 3147 * This orphan function should be never called. 3148 */ 3149 gp->orphan = g_mirror_taste_orphan; 3150 cp = g_new_consumer(gp); 3151 g_attach(cp, pp); 3152 error = g_mirror_read_metadata(cp, &md); 3153 g_detach(cp); 3154 g_destroy_consumer(cp); 3155 g_destroy_geom(gp); 3156 if (error != 0) 3157 return (NULL); 3158 gp = NULL; 3159 3160 if (md.md_provider[0] != '\0' && 3161 !g_compare_names(md.md_provider, pp->name)) 3162 return (NULL); 3163 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3164 return (NULL); 3165 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3166 G_MIRROR_DEBUG(0, 3167 "Device %s: provider %s marked as inactive, skipping.", 3168 md.md_name, pp->name); 3169 return (NULL); 3170 } 3171 if (g_mirror_debug >= 2) 3172 mirror_metadata_dump(&md); 3173 3174 /* 3175 * Let's check if device already exists. 3176 */ 3177 sc = NULL; 3178 LIST_FOREACH(gp, &mp->geom, geom) { 3179 sc = gp->softc; 3180 if (sc == NULL) 3181 continue; 3182 if (sc->sc_type != G_MIRROR_TYPE_AUTOMATIC) 3183 continue; 3184 if (sc->sc_sync.ds_geom == gp) 3185 continue; 3186 if (strcmp(md.md_name, sc->sc_name) != 0) 3187 continue; 3188 if (md.md_mid != sc->sc_id) { 3189 G_MIRROR_DEBUG(0, "Device %s already configured.", 3190 sc->sc_name); 3191 return (NULL); 3192 } 3193 break; 3194 } 3195 if (gp == NULL) { 3196 gp = g_mirror_create(mp, &md, G_MIRROR_TYPE_AUTOMATIC); 3197 if (gp == NULL) { 3198 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3199 md.md_name); 3200 return (NULL); 3201 } 3202 sc = gp->softc; 3203 } 3204 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3205 g_topology_unlock(); 3206 sx_xlock(&sc->sc_lock); 3207 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3208 error = g_mirror_add_disk(sc, pp, &md); 3209 if (error != 0) { 3210 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3211 pp->name, gp->name, error); 3212 if (LIST_EMPTY(&sc->sc_disks)) { 3213 g_cancel_event(sc); 3214 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3215 g_topology_lock(); 3216 return (NULL); 3217 } 3218 gp = NULL; 3219 } 3220 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3221 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3222 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3223 g_topology_lock(); 3224 return (NULL); 3225 } 3226 sx_xunlock(&sc->sc_lock); 3227 g_topology_lock(); 3228 return (gp); 3229 } 3230 3231 static void 3232 g_mirror_resize(struct g_consumer *cp) 3233 { 3234 struct g_mirror_disk *disk; 3235 3236 g_topology_assert(); 3237 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3238 3239 disk = cp->private; 3240 if (disk == NULL) 3241 return; 3242 g_topology_unlock(); 3243 g_mirror_update_metadata(disk); 3244 g_topology_lock(); 3245 } 3246 3247 static int 3248 g_mirror_destroy_geom(struct gctl_req *req __unused, 3249 struct g_class *mp __unused, struct g_geom *gp) 3250 { 3251 struct g_mirror_softc *sc; 3252 int error; 3253 3254 g_topology_unlock(); 3255 sc = gp->softc; 3256 sx_xlock(&sc->sc_lock); 3257 g_cancel_event(sc); 3258 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3259 if (error != 0) 3260 sx_xunlock(&sc->sc_lock); 3261 g_topology_lock(); 3262 return (error); 3263 } 3264 3265 static void 3266 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3267 struct g_consumer *cp, struct g_provider *pp) 3268 { 3269 struct g_mirror_softc *sc; 3270 3271 g_topology_assert(); 3272 3273 sc = gp->softc; 3274 if (sc == NULL) 3275 return; 3276 /* Skip synchronization geom. */ 3277 if (gp == sc->sc_sync.ds_geom) 3278 return; 3279 if (pp != NULL) { 3280 /* Nothing here. */ 3281 } else if (cp != NULL) { 3282 struct g_mirror_disk *disk; 3283 3284 disk = cp->private; 3285 if (disk == NULL) 3286 return; 3287 g_topology_unlock(); 3288 sx_xlock(&sc->sc_lock); 3289 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3290 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3291 sbuf_printf(sb, "%s<Synchronized>", indent); 3292 if (disk->d_sync.ds_offset == 0) 3293 sbuf_printf(sb, "0%%"); 3294 else { 3295 sbuf_printf(sb, "%u%%", 3296 (u_int)((disk->d_sync.ds_offset * 100) / 3297 sc->sc_provider->mediasize)); 3298 } 3299 sbuf_printf(sb, "</Synchronized>\n"); 3300 if (disk->d_sync.ds_offset > 0) { 3301 sbuf_printf(sb, "%s<BytesSynced>%jd" 3302 "</BytesSynced>\n", indent, 3303 (intmax_t)disk->d_sync.ds_offset); 3304 } 3305 } 3306 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3307 disk->d_sync.ds_syncid); 3308 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3309 disk->d_genid); 3310 sbuf_printf(sb, "%s<Flags>", indent); 3311 if (disk->d_flags == 0) 3312 sbuf_printf(sb, "NONE"); 3313 else { 3314 int first = 1; 3315 3316 #define ADD_FLAG(flag, name) do { \ 3317 if ((disk->d_flags & (flag)) != 0) { \ 3318 if (!first) \ 3319 sbuf_printf(sb, ", "); \ 3320 else \ 3321 first = 0; \ 3322 sbuf_printf(sb, name); \ 3323 } \ 3324 } while (0) 3325 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3326 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3327 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3328 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3329 "SYNCHRONIZING"); 3330 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3331 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3332 #undef ADD_FLAG 3333 } 3334 sbuf_printf(sb, "</Flags>\n"); 3335 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3336 disk->d_priority); 3337 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3338 g_mirror_disk_state2str(disk->d_state)); 3339 sx_xunlock(&sc->sc_lock); 3340 g_topology_lock(); 3341 } else { 3342 g_topology_unlock(); 3343 sx_xlock(&sc->sc_lock); 3344 sbuf_printf(sb, "%s<Type>", indent); 3345 switch (sc->sc_type) { 3346 case G_MIRROR_TYPE_AUTOMATIC: 3347 sbuf_printf(sb, "AUTOMATIC"); 3348 break; 3349 case G_MIRROR_TYPE_MANUAL: 3350 sbuf_printf(sb, "MANUAL"); 3351 break; 3352 default: 3353 sbuf_printf(sb, "UNKNOWN"); 3354 break; 3355 } 3356 sbuf_printf(sb, "</Type>\n"); 3357 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3358 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3359 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3360 sbuf_printf(sb, "%s<Flags>", indent); 3361 if (sc->sc_flags == 0) 3362 sbuf_printf(sb, "NONE"); 3363 else { 3364 int first = 1; 3365 3366 #define ADD_FLAG(flag, name) do { \ 3367 if ((sc->sc_flags & (flag)) != 0) { \ 3368 if (!first) \ 3369 sbuf_printf(sb, ", "); \ 3370 else \ 3371 first = 0; \ 3372 sbuf_printf(sb, name); \ 3373 } \ 3374 } while (0) 3375 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3376 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3377 #undef ADD_FLAG 3378 } 3379 sbuf_printf(sb, "</Flags>\n"); 3380 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3381 (u_int)sc->sc_slice); 3382 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3383 balance_name(sc->sc_balance)); 3384 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3385 sc->sc_ndisks); 3386 sbuf_printf(sb, "%s<State>", indent); 3387 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3388 sbuf_printf(sb, "%s", "STARTING"); 3389 else if (sc->sc_ndisks == 3390 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3391 sbuf_printf(sb, "%s", "COMPLETE"); 3392 else 3393 sbuf_printf(sb, "%s", "DEGRADED"); 3394 sbuf_printf(sb, "</State>\n"); 3395 sx_xunlock(&sc->sc_lock); 3396 g_topology_lock(); 3397 } 3398 } 3399 3400 static void 3401 g_mirror_shutdown_post_sync(void *arg, int howto) 3402 { 3403 struct g_class *mp; 3404 struct g_geom *gp, *gp2; 3405 struct g_mirror_softc *sc; 3406 int error; 3407 3408 if (panicstr != NULL) 3409 return; 3410 3411 mp = arg; 3412 g_topology_lock(); 3413 g_mirror_shutdown = 1; 3414 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3415 if ((sc = gp->softc) == NULL) 3416 continue; 3417 /* Skip synchronization geom. */ 3418 if (gp == sc->sc_sync.ds_geom) 3419 continue; 3420 g_topology_unlock(); 3421 sx_xlock(&sc->sc_lock); 3422 g_mirror_idle(sc, -1); 3423 g_cancel_event(sc); 3424 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3425 if (error != 0) 3426 sx_xunlock(&sc->sc_lock); 3427 g_topology_lock(); 3428 } 3429 g_topology_unlock(); 3430 } 3431 3432 static void 3433 g_mirror_init(struct g_class *mp) 3434 { 3435 3436 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3437 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3438 if (g_mirror_post_sync == NULL) 3439 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3440 } 3441 3442 static void 3443 g_mirror_fini(struct g_class *mp) 3444 { 3445 3446 if (g_mirror_post_sync != NULL) 3447 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3448 } 3449 3450 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3451