/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fail.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define MSLEEP(ident, mtx, priority, wmesg, timeout) do {              \
    G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));          \
    msleep((ident), (mtx), (priority), (wmesg), (timeout));            \
    G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));          \
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;
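/*
 * Usage note (editor's illustration, not from the original source): the
 * CTLFLAG_RWTUN knobs above can be changed at runtime, e.g.
 * `sysctl kern.geom.mirror.debug=2`, or set from loader.conf as tunables,
 * e.g. kern.geom.mirror.debug="2".  sync_requests is CTLFLAG_RDTUN and is
 * therefore only settable at boot.
 */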
static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;
static g_resize_t g_mirror_resize;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

struct g_class g_mirror_class = {
    .name = G_MIRROR_CLASS_NAME,
    .version = G_VERSION,
    .ctlreq = g_mirror_config,
    .taste = g_mirror_taste,
    .destroy_geom = g_mirror_destroy_geom,
    .init = g_mirror_init,
    .fini = g_mirror_fini,
    .resize = g_mirror_resize
};

static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);

static const char *
g_mirror_disk_state2str(int state)
{

    switch (state) {
    case G_MIRROR_DISK_STATE_NONE:
        return ("NONE");
    case G_MIRROR_DISK_STATE_NEW:
        return ("NEW");
    case G_MIRROR_DISK_STATE_ACTIVE:
        return ("ACTIVE");
    case G_MIRROR_DISK_STATE_STALE:
        return ("STALE");
    case G_MIRROR_DISK_STATE_SYNCHRONIZING:
        return ("SYNCHRONIZING");
    case G_MIRROR_DISK_STATE_DISCONNECTED:
        return ("DISCONNECTED");
    case G_MIRROR_DISK_STATE_DESTROY:
        return ("DESTROY");
    default:
        return ("INVALID");
    }
}

static const char *
g_mirror_device_state2str(int state)
{

    switch (state) {
    case G_MIRROR_DEVICE_STATE_STARTING:
        return ("STARTING");
    case G_MIRROR_DEVICE_STATE_RUNNING:
        return ("RUNNING");
    default:
        return ("INVALID");
    }
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

    if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
        return ("[unknown]");
    return (disk->d_name);
}
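/*
 * Rough disk life cycle (illustrative summary, not exhaustive):
 * NEW -> SYNCHRONIZING -> ACTIVE on the happy path, with STALE,
 * DISCONNECTED and DESTROY reached on errors or administrative action.
 * See g_mirror_update_disk() for the authoritative transitions.
 */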
/*
 * --- Events handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from a single thread, which simplifies locking.
 */
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

    free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
    struct g_mirror_softc *sc;
    struct g_mirror_disk *disk;
    struct g_mirror_event *ep;
    int error;

    ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
    G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
    if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
        disk = NULL;
        sc = arg;
    } else {
        disk = arg;
        sc = disk->d_softc;
    }
    ep->e_disk = disk;
    ep->e_state = state;
    ep->e_flags = flags;
    ep->e_error = 0;
    mtx_lock(&sc->sc_events_mtx);
    TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
    mtx_unlock(&sc->sc_events_mtx);
    G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
    mtx_lock(&sc->sc_queue_mtx);
    wakeup(sc);
    mtx_unlock(&sc->sc_queue_mtx);
    if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
        return (0);
    sx_assert(&sc->sc_lock, SX_XLOCKED);
    G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
    sx_xunlock(&sc->sc_lock);
    while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
        mtx_lock(&sc->sc_events_mtx);
        MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
            hz * 5);
    }
    error = ep->e_error;
    g_mirror_event_free(ep);
    sx_xlock(&sc->sc_lock);
    return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
    struct g_mirror_event *ep;

    mtx_lock(&sc->sc_events_mtx);
    ep = TAILQ_FIRST(&sc->sc_events);
    mtx_unlock(&sc->sc_events_mtx);
    return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

    mtx_lock(&sc->sc_events_mtx);
    TAILQ_REMOVE(&sc->sc_events, ep, e_next);
    mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
    struct g_mirror_softc *sc;
    struct g_mirror_event *ep, *tmpep;

    sc = disk->d_softc;
    sx_assert(&sc->sc_lock, SX_XLOCKED);

    mtx_lock(&sc->sc_events_mtx);
    TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
        if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
            continue;
        if (ep->e_disk != disk)
            continue;
        TAILQ_REMOVE(&sc->sc_events, ep, e_next);
        if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
            g_mirror_event_free(ep);
        else {
            ep->e_error = ECANCELED;
            wakeup(ep);
        }
    }
    mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
    struct g_mirror_disk *disk;
    u_int n = 0;

    sx_assert(&sc->sc_lock, SX_LOCKED);

    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (state == -1 || disk->d_state == state)
            n++;
    }
    return (n);
}
/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
    struct g_mirror_disk *disk;

    sx_assert(&sc->sc_lock, SX_XLOCKED);

    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_id == id)
            return (disk);
    }
    return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
    struct bio *bp;
    u_int nreqs = 0;

    mtx_lock(&sc->sc_queue_mtx);
    TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
        if (bp->bio_from == cp)
            nreqs++;
    }
    mtx_unlock(&sc->sc_queue_mtx);
    return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

    if (cp->index > 0) {
        G_MIRROR_DEBUG(2,
            "I/O requests for %s exist, can't destroy it now.",
            cp->provider->name);
        return (1);
    }
    if (g_mirror_nrequests(sc, cp) > 0) {
        G_MIRROR_DEBUG(2,
            "I/O requests for %s in queue, can't destroy it now.",
            cp->provider->name);
        return (1);
    }
    return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
    struct g_consumer *cp;

    g_topology_assert();

    cp = arg;
    G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
    g_detach(cp);
    g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
    struct g_provider *pp;
    int retaste_wait;

    g_topology_assert();

    cp->private = NULL;
    if (g_mirror_is_busy(sc, cp))
        return;
    pp = cp->provider;
    retaste_wait = 0;
    if (cp->acw == 1) {
        if ((pp->geom->flags & G_GEOM_WITHER) == 0)
            retaste_wait = 1;
    }
    G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
        -cp->acw, -cp->ace, 0);
    if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
        g_access(cp, -cp->acr, -cp->acw, -cp->ace);
    if (retaste_wait) {
        /*
         * After the retaste event has been sent (inside g_access()),
         * we can post an event to detach and destroy the consumer.
         * A class which has a consumer attached to the given provider
         * will not receive a retaste event for that provider.
         * This is how we ignore retaste events for consumers opened
         * for writing: the consumer is detached and destroyed only
         * after the retaste event is sent.
         */
        g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
        return;
    }
    G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
    g_detach(cp);
    g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
    struct g_consumer *cp;
    int error;

    g_topology_assert_not();
    KASSERT(disk->d_consumer == NULL,
        ("Disk already connected (device %s).", disk->d_softc->sc_name));

    g_topology_lock();
    cp = g_new_consumer(disk->d_softc->sc_geom);
    cp->flags |= G_CF_DIRECT_RECEIVE;
    error = g_attach(cp, pp);
    if (error != 0) {
        g_destroy_consumer(cp);
        g_topology_unlock();
        return (error);
    }
    error = g_access(cp, 1, 1, 1);
    if (error != 0) {
        g_detach(cp);
        g_destroy_consumer(cp);
        g_topology_unlock();
        G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
            pp->name, error);
        return (error);
    }
    g_topology_unlock();
    disk->d_consumer = cp;
    disk->d_consumer->private = disk;
    disk->d_consumer->index = 0;

    G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
    return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

    g_topology_assert();

    if (cp == NULL)
        return;
    if (cp->provider != NULL)
        g_mirror_kill_consumer(sc, cp);
    else
        g_destroy_consumer(cp);
}

/*
 * Initialize a disk. This means allocating memory, creating a consumer,
 * attaching it to the provider and opening access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
    struct g_mirror_disk *disk;
    int i, error;

    disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
    if (disk == NULL) {
        error = ENOMEM;
        goto fail;
    }
    disk->d_softc = sc;
    error = g_mirror_connect_disk(disk, pp);
    if (error != 0)
        goto fail;
    disk->d_id = md->md_did;
    disk->d_state = G_MIRROR_DISK_STATE_NONE;
    disk->d_priority = md->md_priority;
    disk->d_flags = md->md_dflags;
    error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
    if (error == 0 && i != 0)
        disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
    if (md->md_provider[0] != '\0')
        disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
    disk->d_sync.ds_consumer = NULL;
    disk->d_sync.ds_offset = md->md_sync_offset;
    disk->d_sync.ds_offset_done = md->md_sync_offset;
    disk->d_genid = md->md_genid;
    disk->d_sync.ds_syncid = md->md_syncid;
    if (errorp != NULL)
        *errorp = 0;
    return (disk);
fail:
    if (errorp != NULL)
        *errorp = error;
    if (disk != NULL)
        free(disk, M_MIRROR);
    return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
    struct g_mirror_softc *sc;

    g_topology_assert_not();
    sc = disk->d_softc;
    sx_assert(&sc->sc_lock, SX_XLOCKED);

    LIST_REMOVE(disk, d_next);
    g_mirror_event_cancel(disk);
    if (sc->sc_hint == disk)
        sc->sc_hint = NULL;
    switch (disk->d_state) {
    case G_MIRROR_DISK_STATE_SYNCHRONIZING:
        g_mirror_sync_stop(disk, 1);
        /* FALLTHROUGH */
    case G_MIRROR_DISK_STATE_NEW:
    case G_MIRROR_DISK_STATE_STALE:
    case G_MIRROR_DISK_STATE_ACTIVE:
        g_topology_lock();
        g_mirror_disconnect_consumer(sc, disk->d_consumer);
        g_topology_unlock();
        free(disk, M_MIRROR);
        break;
    default:
        KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
            g_mirror_get_diskname(disk),
            g_mirror_disk_state2str(disk->d_state)));
    }
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
    struct g_mirror_disk *disk;
    struct g_mirror_event *ep;
    struct g_geom *gp;
    struct g_consumer *cp, *tmpcp;

    g_topology_assert_not();
    sx_assert(&sc->sc_lock, SX_XLOCKED);

    gp = sc->sc_geom;
    if (sc->sc_provider != NULL)
        g_mirror_destroy_provider(sc);
    for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
        disk = LIST_FIRST(&sc->sc_disks)) {
        disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
        g_mirror_update_metadata(disk);
        g_mirror_destroy_disk(disk);
    }
    while ((ep = g_mirror_event_get(sc)) != NULL) {
        g_mirror_event_remove(sc, ep);
        if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
            g_mirror_event_free(ep);
        else {
            ep->e_error = ECANCELED;
            ep->e_flags |= G_MIRROR_EVENT_DONE;
            G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
            mtx_lock(&sc->sc_events_mtx);
            wakeup(ep);
            mtx_unlock(&sc->sc_events_mtx);
        }
    }
    callout_drain(&sc->sc_callout);

    g_topology_lock();
    LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
        g_mirror_disconnect_consumer(sc, cp);
    }
    g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
    G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
    g_wither_geom(gp, ENXIO);
    g_topology_unlock();
    mtx_destroy(&sc->sc_queue_mtx);
    mtx_destroy(&sc->sc_events_mtx);
    mtx_destroy(&sc->sc_done_mtx);
    sx_xunlock(&sc->sc_lock);
    sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
    struct g_mirror_disk *disk;

    g_topology_assert();

    disk = cp->private;
    if (disk == NULL)
        return;
    disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
    g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
        G_MIRROR_EVENT_DONTWAIT);
}
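/*
 * Note: the orphan handler above only queues a DISCONNECTED event (with
 * G_MIRROR_EVENT_DONTWAIT, so it never sleeps); the actual teardown
 * happens later in the worker thread, which keeps this topology callback
 * short.
 */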
/*
 * Return the next active disk on the list.
 * It is possible that the same disk as the given one will be returned.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
    struct g_mirror_disk *dp;

    for (dp = LIST_NEXT(disk, d_next); dp != disk;
        dp = LIST_NEXT(dp, d_next)) {
        if (dp == NULL)
            dp = LIST_FIRST(&sc->sc_disks);
        if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
            break;
    }
    if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
        return (NULL);
    return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
    struct g_mirror_disk *disk;

    if (sc->sc_hint == NULL) {
        sc->sc_hint = LIST_FIRST(&sc->sc_disks);
        if (sc->sc_hint == NULL)
            return (NULL);
    }
    disk = sc->sc_hint;
    if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
        disk = g_mirror_find_next(sc, disk);
        if (disk == NULL)
            return (NULL);
    }
    sc->sc_hint = g_mirror_find_next(sc, disk);
    return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
    struct g_mirror_softc *sc;
    struct g_consumer *cp;
    off_t offset, length;
    u_char *sector;
    int error = 0;

    g_topology_assert_not();
    sc = disk->d_softc;
    sx_assert(&sc->sc_lock, SX_LOCKED);

    cp = disk->d_consumer;
    KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
    KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
    KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
        ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
        cp->acw, cp->ace));
    length = cp->provider->sectorsize;
    offset = cp->provider->mediasize - length;
    sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
    if (md != NULL &&
        (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
        /*
         * Handle the case when the size of the parent provider
         * was reduced.
         */
        if (offset < md->md_mediasize)
            error = ENOSPC;
        else
            mirror_metadata_encode(md, sector);
    }
    KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_metadata_write, error);
    if (error == 0)
        error = g_write_data(cp, offset, sector, length);
    free(sector, M_MIRROR);
    if (error != 0) {
        if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
            disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
            G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
                "(device=%s, error=%d).",
                g_mirror_get_diskname(disk), sc->sc_name, error);
        } else {
            G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
                "(device=%s, error=%d).",
                g_mirror_get_diskname(disk), sc->sc_name, error);
        }
        if (g_mirror_disconnect_on_failure &&
            g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
            sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
            g_mirror_event_send(disk,
                G_MIRROR_DISK_STATE_DISCONNECTED,
                G_MIRROR_EVENT_DONTWAIT);
        }
    }
    return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
    int error;

    g_topology_assert_not();
    sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

    error = g_mirror_write_metadata(disk, NULL);
    if (error == 0) {
        G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
            g_mirror_get_diskname(disk));
    } else {
        G_MIRROR_DEBUG(0,
            "Cannot clear metadata on disk %s (error=%d).",
            g_mirror_get_diskname(disk), error);
    }
    return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

    strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
    md->md_version = G_MIRROR_VERSION;
    strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
    md->md_mid = sc->sc_id;
    md->md_all = sc->sc_ndisks;
    md->md_slice = sc->sc_slice;
    md->md_balance = sc->sc_balance;
    md->md_genid = sc->sc_genid;
    md->md_mediasize = sc->sc_mediasize;
    md->md_sectorsize = sc->sc_sectorsize;
    md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
    bzero(md->md_provider, sizeof(md->md_provider));
    if (disk == NULL) {
        md->md_did = arc4random();
        md->md_priority = 0;
        md->md_syncid = 0;
        md->md_dflags = 0;
        md->md_sync_offset = 0;
        md->md_provsize = 0;
    } else {
        md->md_did = disk->d_id;
        md->md_priority = disk->d_priority;
        md->md_syncid = disk->d_sync.ds_syncid;
        md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
        if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
            md->md_sync_offset = disk->d_sync.ds_offset_done;
        else
            md->md_sync_offset = 0;
        if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
            strlcpy(md->md_provider,
                disk->d_consumer->provider->name,
                sizeof(md->md_provider));
        }
        md->md_provsize = disk->d_consumer->provider->mediasize;
    }
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
    struct g_mirror_softc *sc;
    struct g_mirror_metadata md;
    int error;

    g_topology_assert_not();
    sc = disk->d_softc;
    sx_assert(&sc->sc_lock, SX_LOCKED);

    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
        g_mirror_fill_metadata(sc, disk, &md);
    error = g_mirror_write_metadata(disk, &md);
    if (error == 0) {
        G_MIRROR_DEBUG(2, "Metadata on %s updated.",
            g_mirror_get_diskname(disk));
    } else {
        G_MIRROR_DEBUG(0,
            "Cannot update metadata on disk %s (error=%d).",
            g_mirror_get_diskname(disk), error);
    }
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
    struct g_mirror_disk *disk;

    g_topology_assert_not();
    sx_assert(&sc->sc_lock, SX_XLOCKED);
    KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
        ("%s called with no active disks (device=%s).", __func__,
        sc->sc_name));

    sc->sc_syncid++;
    G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
        sc->sc_syncid);
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
            disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
            disk->d_sync.ds_syncid = sc->sc_syncid;
            g_mirror_update_metadata(disk);
        }
    }
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
    struct g_mirror_disk *disk;

    g_topology_assert_not();
    sx_assert(&sc->sc_lock, SX_XLOCKED);
    KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
        ("%s called with no active disks (device=%s).", __func__,
        sc->sc_name));

    sc->sc_genid++;
    G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
        sc->sc_genid);
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
            disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
            disk->d_genid = sc->sc_genid;
            g_mirror_update_metadata(disk);
        }
    }
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
    struct g_mirror_disk *disk;
    int timeout;

    g_topology_assert_not();
    sx_assert(&sc->sc_lock, SX_XLOCKED);

    if (sc->sc_provider == NULL)
        return (0);
    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
        return (0);
    if (sc->sc_idle)
        return (0);
    if (sc->sc_writes > 0)
        return (0);
    if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
        timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
        if (!g_mirror_shutdown && timeout > 0)
            return (timeout);
    }
    sc->sc_idle = 1;
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
            continue;
        G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
            g_mirror_get_diskname(disk), sc->sc_name);
        disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
        g_mirror_update_metadata(disk);
    }
    return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
    struct g_mirror_disk *disk;

    g_topology_assert_not();
    sx_assert(&sc->sc_lock, SX_XLOCKED);

    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
        return;
    sc->sc_idle = 0;
    sc->sc_last_write = time_uptime;
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
            continue;
        G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
            g_mirror_get_diskname(disk), sc->sc_name);
        disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
        g_mirror_update_metadata(disk);
    }
}

static void
g_mirror_flush_done(struct bio *bp)
{
    struct g_mirror_softc *sc;
    struct bio *pbp;

    pbp = bp->bio_parent;
    sc = pbp->bio_to->geom->softc;
    mtx_lock(&sc->sc_done_mtx);
    if (pbp->bio_error == 0)
        pbp->bio_error = bp->bio_error;
    pbp->bio_completed += bp->bio_completed;
    pbp->bio_inbed++;
    if (pbp->bio_children == pbp->bio_inbed) {
        mtx_unlock(&sc->sc_done_mtx);
        g_io_deliver(pbp, pbp->bio_error);
    } else
        mtx_unlock(&sc->sc_done_mtx);
    g_destroy_bio(bp);
}
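/*
 * The completion handlers below aggregate child bios back into their
 * parent: bio_inbed counts completed children and, once it reaches
 * bio_children, the parent bio is delivered to the upper layer.
 */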
static void
g_mirror_done(struct bio *bp)
{
    struct g_mirror_softc *sc;

    sc = bp->bio_from->geom->softc;
    bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
    mtx_lock(&sc->sc_queue_mtx);
    bioq_insert_tail(&sc->sc_queue, bp);
    mtx_unlock(&sc->sc_queue_mtx);
    wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
    struct g_mirror_softc *sc;
    struct g_mirror_disk *disk;
    struct bio *pbp;

    g_topology_assert_not();

    pbp = bp->bio_parent;
    sc = pbp->bio_to->geom->softc;
    bp->bio_from->index--;
    if (bp->bio_cmd == BIO_WRITE)
        sc->sc_writes--;
    disk = bp->bio_from->private;
    if (disk == NULL) {
        g_topology_lock();
        g_mirror_kill_consumer(sc, bp->bio_from);
        g_topology_unlock();
    }

    if (bp->bio_cmd == BIO_READ)
        KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_read,
            bp->bio_error);
    else if (bp->bio_cmd == BIO_WRITE)
        KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_regular_request_write,
            bp->bio_error);

    pbp->bio_inbed++;
    KASSERT(pbp->bio_inbed <= pbp->bio_children,
        ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
        pbp->bio_children));
    if (bp->bio_error == 0 && pbp->bio_error == 0) {
        G_MIRROR_LOGREQ(3, bp, "Request delivered.");
        g_destroy_bio(bp);
        if (pbp->bio_children == pbp->bio_inbed) {
            G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
            pbp->bio_completed = pbp->bio_length;
            if (pbp->bio_cmd == BIO_WRITE ||
                pbp->bio_cmd == BIO_DELETE) {
                bioq_remove(&sc->sc_inflight, pbp);
                /* Release delayed sync requests if possible. */
                g_mirror_sync_release(sc);
            }
            g_io_deliver(pbp, pbp->bio_error);
        }
        return;
    } else if (bp->bio_error != 0) {
        if (pbp->bio_error == 0)
            pbp->bio_error = bp->bio_error;
        if (disk != NULL) {
            if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
                disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
                G_MIRROR_LOGREQ(0, bp,
                    "Request failed (error=%d).",
                    bp->bio_error);
            } else {
                G_MIRROR_LOGREQ(1, bp,
                    "Request failed (error=%d).",
                    bp->bio_error);
            }
            if (g_mirror_disconnect_on_failure &&
                g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
                sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
                g_mirror_event_send(disk,
                    G_MIRROR_DISK_STATE_DISCONNECTED,
                    G_MIRROR_EVENT_DONTWAIT);
            }
        }
        switch (pbp->bio_cmd) {
        case BIO_DELETE:
        case BIO_WRITE:
            pbp->bio_inbed--;
            pbp->bio_children--;
            break;
        }
    }
    g_destroy_bio(bp);

    switch (pbp->bio_cmd) {
    case BIO_READ:
        if (pbp->bio_inbed < pbp->bio_children)
            break;
        if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
            g_io_deliver(pbp, pbp->bio_error);
        else {
            pbp->bio_error = 0;
            mtx_lock(&sc->sc_queue_mtx);
            bioq_insert_tail(&sc->sc_queue, pbp);
            mtx_unlock(&sc->sc_queue_mtx);
            G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
            wakeup(sc);
        }
        break;
    case BIO_DELETE:
    case BIO_WRITE:
        if (pbp->bio_children == 0) {
            /*
             * All requests failed.
             */
        } else if (pbp->bio_inbed < pbp->bio_children) {
            /* Do nothing. */
            break;
        } else if (pbp->bio_children == pbp->bio_inbed) {
            /* Some requests succeeded. */
            pbp->bio_error = 0;
            pbp->bio_completed = pbp->bio_length;
        }
        bioq_remove(&sc->sc_inflight, pbp);
        /* Release delayed sync requests if possible. */
        g_mirror_sync_release(sc);
        g_io_deliver(pbp, pbp->bio_error);
        break;
    default:
        KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
        break;
    }
}

static void
g_mirror_sync_done(struct bio *bp)
{
    struct g_mirror_softc *sc;

    G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
    sc = bp->bio_from->geom->softc;
    bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
    mtx_lock(&sc->sc_queue_mtx);
    bioq_insert_tail(&sc->sc_queue, bp);
    mtx_unlock(&sc->sc_queue_mtx);
    wakeup(sc);
}

static void
g_mirror_candelete(struct bio *bp)
{
    struct g_mirror_softc *sc;
    struct g_mirror_disk *disk;
    int *val;

    sc = bp->bio_to->geom->softc;
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE)
            break;
    }
    val = (int *)bp->bio_data;
    *val = (disk != NULL);
    g_io_deliver(bp, 0);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
    struct g_mirror_softc *sc;
    struct g_mirror_disk *disk;
    struct bio *cbp;
    struct g_kerneldump *gkd;

    /*
     * We configure dumping to the first component, because this
     * component will be used for reading with the 'prefer' balance
     * algorithm. If the component with the highest priority is
     * currently disconnected, we will not be able to read the dump
     * after the reboot if that component is connected and
     * synchronized later. Can we do something better?
     */
    sc = bp->bio_to->geom->softc;
    disk = LIST_FIRST(&sc->sc_disks);

    gkd = (struct g_kerneldump *)bp->bio_data;
    if (gkd->length > bp->bio_to->mediasize)
        gkd->length = bp->bio_to->mediasize;
    cbp = g_clone_bio(bp);
    if (cbp == NULL) {
        g_io_deliver(bp, ENOMEM);
        return;
    }
    cbp->bio_done = g_std_done;
    g_io_request(cbp, disk->d_consumer);
    G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
        g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
    struct bio_queue_head queue;
    struct g_mirror_disk *disk;
    struct g_consumer *cp;
    struct bio *cbp;

    bioq_init(&queue);
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
            continue;
        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
            while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
            if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
            g_io_deliver(bp, bp->bio_error);
            return;
        }
        bioq_insert_tail(&queue, cbp);
        cbp->bio_done = g_mirror_flush_done;
        cbp->bio_caller1 = disk;
        cbp->bio_to = disk->d_consumer->provider;
    }
    while ((cbp = bioq_takefirst(&queue)) != NULL) {
        G_MIRROR_LOGREQ(3, cbp, "Sending request.");
        disk = cbp->bio_caller1;
        cbp->bio_caller1 = NULL;
        cp = disk->d_consumer;
        KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
            ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
            cp->acr, cp->acw, cp->ace));
        g_io_request(cbp, disk->d_consumer);
    }
}

static void
g_mirror_start(struct bio *bp)
{
    struct g_mirror_softc *sc;

    sc = bp->bio_to->geom->softc;
    /*
     * If sc == NULL or there are no valid disks, the provider's error
     * should already be set and g_mirror_start() should not be called
     * at all.
     */
    KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
        ("Provider's error should be set (error=%d)(mirror=%s).",
        bp->bio_to->error, bp->bio_to->name));
    G_MIRROR_LOGREQ(3, bp, "Request received.");

    switch (bp->bio_cmd) {
    case BIO_READ:
    case BIO_WRITE:
    case BIO_DELETE:
        break;
    case BIO_FLUSH:
        g_mirror_flush(sc, bp);
        return;
    case BIO_GETATTR:
        if (!strcmp(bp->bio_attribute, "GEOM::candelete")) {
            g_mirror_candelete(bp);
            return;
        } else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
            g_mirror_kernel_dump(bp);
            return;
        }
        /* FALLTHROUGH */
    default:
        g_io_deliver(bp, EOPNOTSUPP);
        return;
    }
    mtx_lock(&sc->sc_queue_mtx);
    bioq_insert_tail(&sc->sc_queue, bp);
    mtx_unlock(&sc->sc_queue_mtx);
    G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
    wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
    struct g_mirror_disk *disk;
    struct bio *sbp;
    off_t rstart, rend, sstart, send;
    u_int i;

    if (sc->sc_sync.ds_ndisks == 0)
        return (0);
    rstart = bp->bio_offset;
    rend = bp->bio_offset + bp->bio_length;
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
            continue;
        for (i = 0; i < g_mirror_syncreqs; i++) {
            sbp = disk->d_sync.ds_bios[i];
            if (sbp == NULL)
                continue;
            sstart = sbp->bio_offset;
            send = sbp->bio_offset + sbp->bio_length;
            if (rend > sstart && rstart < send)
                return (1);
        }
    }
    return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
    off_t rstart, rend, sstart, send;
    struct bio *bp;

    if (sc->sc_sync.ds_ndisks == 0)
        return (0);
    sstart = sbp->bio_offset;
    send = sbp->bio_offset + sbp->bio_length;
    TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
        rstart = bp->bio_offset;
        rend = bp->bio_offset + bp->bio_length;
        if (rend > sstart && rstart < send)
            return (1);
    }
    return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

    G_MIRROR_LOGREQ(2, bp, "Delaying request.");
    bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

    G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
    bioq_insert_tail(&sc->sc_sync_delayed, bp);
}
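/*
 * The collision tests above treat each request as the half-open byte
 * interval [bio_offset, bio_offset + bio_length); two requests collide
 * exactly when the intervals overlap, i.e. rend > sstart && rstart < send.
 * Worked example: a regular write covering [0, 65536) collides with a
 * sync bio covering [65536, 196608) only if the write extends past
 * byte 65536.
 */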
/*
 * Release delayed regular requests which no longer collide with
 * synchronization requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
    struct bio *bp, *bp2;

    TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
        if (g_mirror_sync_collision(sc, bp))
            continue;
        bioq_remove(&sc->sc_regular_delayed, bp);
        G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
        mtx_lock(&sc->sc_queue_mtx);
        bioq_insert_head(&sc->sc_queue, bp);
#if 0
        /*
         * wakeup() is not needed, because this function is called from
         * the worker thread.
         */
        wakeup(&sc->sc_queue);
#endif
        mtx_unlock(&sc->sc_queue_mtx);
    }
}

/*
 * Release delayed sync requests which no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
    struct bio *bp, *bp2;

    TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
        if (g_mirror_regular_collision(sc, bp))
            continue;
        bioq_remove(&sc->sc_sync_delayed, bp);
        G_MIRROR_LOGREQ(2, bp,
            "Releasing delayed synchronization request.");
        g_io_request(bp, bp->bio_from);
    }
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to an active provider, and then a WRITE request (with
 * the data that was read) is sent to the provider being synchronized.
 * When the WRITE is finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
    struct g_mirror_softc *sc;
    struct g_mirror_disk *disk;

    bp->bio_from->index--;
    sc = bp->bio_from->geom->softc;
    disk = bp->bio_from->private;
    if (disk == NULL) {
        sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
        g_topology_lock();
        g_mirror_kill_consumer(sc, bp->bio_from);
        g_topology_unlock();
        free(bp->bio_data, M_MIRROR);
        g_destroy_bio(bp);
        sx_xlock(&sc->sc_lock);
        return;
    }
    /*
     * Synchronization request.
     */
    switch (bp->bio_cmd) {
    case BIO_READ:
    {
        struct g_consumer *cp;

        KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_read,
            bp->bio_error);

        if (bp->bio_error != 0) {
            G_MIRROR_LOGREQ(0, bp,
                "Synchronization request failed (error=%d).",
                bp->bio_error);
            g_destroy_bio(bp);
            return;
        }
        G_MIRROR_LOGREQ(3, bp,
            "Synchronization request half-finished.");
        bp->bio_cmd = BIO_WRITE;
        bp->bio_cflags = 0;
        cp = disk->d_consumer;
        KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
            ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
            cp->acr, cp->acw, cp->ace));
        cp->index++;
        g_io_request(bp, cp);
        return;
    }
    case BIO_WRITE:
    {
        struct g_mirror_disk_sync *sync;
        off_t offset;
        void *data;
        int i;

        KFAIL_POINT_ERROR(DEBUG_FP, g_mirror_sync_request_write,
            bp->bio_error);

        if (bp->bio_error != 0) {
            G_MIRROR_LOGREQ(0, bp,
                "Synchronization request failed (error=%d).",
                bp->bio_error);
            g_destroy_bio(bp);
            sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
            g_mirror_event_send(disk,
                G_MIRROR_DISK_STATE_DISCONNECTED,
                G_MIRROR_EVENT_DONTWAIT);
            return;
        }
        G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
        sync = &disk->d_sync;
        if (sync->ds_offset >= sc->sc_mediasize ||
            sync->ds_consumer == NULL ||
            (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
            /* Don't send more synchronization requests. */
            sync->ds_inflight--;
            if (sync->ds_bios != NULL) {
                i = (int)(uintptr_t)bp->bio_caller1;
                sync->ds_bios[i] = NULL;
            }
            free(bp->bio_data, M_MIRROR);
            g_destroy_bio(bp);
            if (sync->ds_inflight > 0)
                return;
            if (sync->ds_consumer == NULL ||
                (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
                return;
            }
            /* Disk up-to-date, activate it. */
            g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
                G_MIRROR_EVENT_DONTWAIT);
            return;
        }

        /* Send next synchronization request. */
        data = bp->bio_data;
        g_reset_bio(bp);
        bp->bio_cmd = BIO_READ;
        bp->bio_offset = sync->ds_offset;
        bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
        sync->ds_offset += bp->bio_length;
        bp->bio_done = g_mirror_sync_done;
        bp->bio_data = data;
        bp->bio_from = sync->ds_consumer;
        bp->bio_to = sc->sc_provider;
        G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
        sync->ds_consumer->index++;
        /*
         * Delay the request if it is colliding with a regular request.
         */
        if (g_mirror_regular_collision(sc, bp))
            g_mirror_sync_delay(sc, bp);
        else
            g_io_request(bp, sync->ds_consumer);

        /* Release delayed requests if possible. */
        g_mirror_regular_release(sc);

        /* Find the smallest offset. */
        offset = sc->sc_mediasize;
        for (i = 0; i < g_mirror_syncreqs; i++) {
            bp = sync->ds_bios[i];
            if (bp->bio_offset < offset)
                offset = bp->bio_offset;
        }
        if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
            /* Update offset_done on every 100 blocks. */
            sync->ds_offset_done = offset;
            g_mirror_update_metadata(disk);
        }
        return;
    }
    default:
        KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
            bp->bio_cmd, sc->sc_name));
        break;
    }
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
    struct g_mirror_disk *disk;
    struct g_consumer *cp;
    struct bio *cbp;

    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
            break;
    }
    if (disk == NULL) {
        if (bp->bio_error == 0)
            bp->bio_error = ENXIO;
        g_io_deliver(bp, bp->bio_error);
        return;
    }
    cbp = g_clone_bio(bp);
    if (cbp == NULL) {
        if (bp->bio_error == 0)
            bp->bio_error = ENOMEM;
        g_io_deliver(bp, bp->bio_error);
        return;
    }
    /*
     * Fill in the component buf structure.
     */
    cp = disk->d_consumer;
    cbp->bio_done = g_mirror_done;
    cbp->bio_to = cp->provider;
    G_MIRROR_LOGREQ(3, cbp, "Sending request.");
    KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
        ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
        cp->acw, cp->ace));
    cp->index++;
    g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
    struct g_mirror_disk *disk;
    struct g_consumer *cp;
    struct bio *cbp;

    disk = g_mirror_get_disk(sc);
    if (disk == NULL) {
        if (bp->bio_error == 0)
            bp->bio_error = ENXIO;
        g_io_deliver(bp, bp->bio_error);
        return;
    }
    cbp = g_clone_bio(bp);
    if (cbp == NULL) {
        if (bp->bio_error == 0)
            bp->bio_error = ENOMEM;
        g_io_deliver(bp, bp->bio_error);
        return;
    }
    /*
     * Fill in the component buf structure.
     */
    cp = disk->d_consumer;
    cbp->bio_done = g_mirror_done;
    cbp->bio_to = cp->provider;
    G_MIRROR_LOGREQ(3, cbp, "Sending request.");
    KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
        ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
        cp->acw, cp->ace));
    cp->index++;
    g_io_request(cbp, cp);
}

#define TRACK_SIZE (1 * 1024 * 1024)
#define LOAD_SCALE 256
#define ABS(x) (((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
    struct g_mirror_disk *disk, *dp;
    struct g_consumer *cp;
    struct bio *cbp;
    int prio, best;

    /* Find a disk with the smallest load. */
    disk = NULL;
    best = INT_MAX;
    LIST_FOREACH(dp, &sc->sc_disks, d_next) {
        if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
            continue;
        prio = dp->load;
        /* If the disk head is precisely in position, highly prefer it. */
        if (dp->d_last_offset == bp->bio_offset)
            prio -= 2 * LOAD_SCALE;
        else
        /* If the disk head is close to the position, prefer it. */
        if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
            prio -= 1 * LOAD_SCALE;
        if (prio <= best) {
            disk = dp;
            best = prio;
        }
    }
    KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
    cbp = g_clone_bio(bp);
    if (cbp == NULL) {
        if (bp->bio_error == 0)
            bp->bio_error = ENOMEM;
        g_io_deliver(bp, bp->bio_error);
        return;
    }
    /*
     * Fill in the component buf structure.
     */
    cp = disk->d_consumer;
    cbp->bio_done = g_mirror_done;
    cbp->bio_to = cp->provider;
    G_MIRROR_LOGREQ(3, cbp, "Sending request.");
    KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
        ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
        cp->acw, cp->ace));
    cp->index++;
    /* Remember the last head position. */
    disk->d_last_offset = bp->bio_offset + bp->bio_length;
    /* Update loads. */
    LIST_FOREACH(dp, &sc->sc_disks, d_next) {
        dp->load = (dp->d_consumer->index * LOAD_SCALE +
            dp->load * 7) / 8;
    }
    g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
    struct bio_queue_head queue;
    struct g_mirror_disk *disk;
    struct g_consumer *cp;
    struct bio *cbp;
    off_t left, mod, offset, slice;
    u_char *data;
    u_int ndisks;

    if (bp->bio_length <= sc->sc_slice) {
        g_mirror_request_round_robin(sc, bp);
        return;
    }
    ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
    slice = bp->bio_length / ndisks;
    mod = slice % sc->sc_provider->sectorsize;
    if (mod != 0)
        slice += sc->sc_provider->sectorsize - mod;
    /*
     * Allocate all bios before sending any request, so we can
     * return ENOMEM in a nice and clean way.
     */
    left = bp->bio_length;
    offset = bp->bio_offset;
    data = bp->bio_data;
    bioq_init(&queue);
    LIST_FOREACH(disk, &sc->sc_disks, d_next) {
        if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
            continue;
        cbp = g_clone_bio(bp);
        if (cbp == NULL) {
            while ((cbp = bioq_takefirst(&queue)) != NULL)
                g_destroy_bio(cbp);
            if (bp->bio_error == 0)
                bp->bio_error = ENOMEM;
            g_io_deliver(bp, bp->bio_error);
            return;
        }
        bioq_insert_tail(&queue, cbp);
        cbp->bio_done = g_mirror_done;
        cbp->bio_caller1 = disk;
        cbp->bio_to = disk->d_consumer->provider;
        cbp->bio_offset = offset;
        cbp->bio_data = data;
        cbp->bio_length = MIN(left, slice);
        left -= cbp->bio_length;
        if (left == 0)
            break;
        offset += cbp->bio_length;
        data += cbp->bio_length;
    }
    while ((cbp = bioq_takefirst(&queue)) != NULL) {
        G_MIRROR_LOGREQ(3, cbp, "Sending request.");
        disk = cbp->bio_caller1;
        cbp->bio_caller1 = NULL;
        cp = disk->d_consumer;
        KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
            ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
            cp->acr, cp->acw, cp->ace));
        disk->d_consumer->index++;
        g_io_request(cbp, disk->d_consumer);
    }
}

static void
g_mirror_register_request(struct bio *bp)
{
    struct g_mirror_softc *sc;

    sc = bp->bio_to->geom->softc;
    switch (bp->bio_cmd) {
    case BIO_READ:
        switch (sc->sc_balance) {
        case G_MIRROR_BALANCE_LOAD:
            g_mirror_request_load(sc, bp);
            break;
        case G_MIRROR_BALANCE_PREFER:
            g_mirror_request_prefer(sc, bp);
            break;
        case G_MIRROR_BALANCE_ROUND_ROBIN:
            g_mirror_request_round_robin(sc, bp);
            break;
        case G_MIRROR_BALANCE_SPLIT:
            g_mirror_request_split(sc, bp);
            break;
        }
        return;
    case BIO_WRITE:
    case BIO_DELETE:
    {
        struct g_mirror_disk *disk;
        struct g_mirror_disk_sync *sync;
        struct bio_queue_head queue;
        struct g_consumer *cp;
        struct bio *cbp;

        /*
         * Delay the request if it is colliding with a synchronization
         * request.
         */
        if (g_mirror_sync_collision(sc, bp)) {
            g_mirror_regular_delay(sc, bp);
            return;
        }

        if (sc->sc_idle)
            g_mirror_unidle(sc);
        else
            sc->sc_last_write = time_uptime;

        /*
         * Allocate all bios before sending any request, so we can
         * return ENOMEM in a nice and clean way.
         */
        bioq_init(&queue);
        LIST_FOREACH(disk, &sc->sc_disks, d_next) {
            sync = &disk->d_sync;
            switch (disk->d_state) {
            case G_MIRROR_DISK_STATE_ACTIVE:
                break;
            case G_MIRROR_DISK_STATE_SYNCHRONIZING:
                if (bp->bio_offset >= sync->ds_offset)
                    continue;
                break;
            default:
                continue;
            }
            if (bp->bio_cmd == BIO_DELETE &&
                (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
                continue;
            cbp = g_clone_bio(bp);
            if (cbp == NULL) {
                while ((cbp = bioq_takefirst(&queue)) != NULL)
                    g_destroy_bio(cbp);
                if (bp->bio_error == 0)
                    bp->bio_error = ENOMEM;
                g_io_deliver(bp, bp->bio_error);
                return;
            }
            bioq_insert_tail(&queue, cbp);
            cbp->bio_done = g_mirror_done;
            cp = disk->d_consumer;
            cbp->bio_caller1 = cp;
            cbp->bio_to = cp->provider;
            KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
                ("Consumer %s not opened (r%dw%de%d).",
                cp->provider->name, cp->acr, cp->acw, cp->ace));
        }
        if (bioq_first(&queue) == NULL) {
            g_io_deliver(bp, EOPNOTSUPP);
            return;
        }
        while ((cbp = bioq_takefirst(&queue)) != NULL) {
            G_MIRROR_LOGREQ(3, cbp, "Sending request.");
            cp = cbp->bio_caller1;
            cbp->bio_caller1 = NULL;
            cp->index++;
            sc->sc_writes++;
            g_io_request(cbp, cp);
        }
        /*
         * Put the request onto the inflight queue, so we can check if
         * new synchronization requests don't collide with it.
         */
        bioq_insert_tail(&sc->sc_inflight, bp);
        /*
         * Bump syncid on first write.
         */
        if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
            sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
            g_mirror_bump_syncid(sc);
        }
        return;
    }
    default:
        KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
            bp->bio_cmd, sc->sc_name));
        break;
    }
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
    struct g_geom *gp;
    struct g_consumer *cp;

    g_topology_assert();
    gp = sc->sc_geom;
    if (gp->softc == NULL)
        return (1);
    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
        return (0);
    LIST_FOREACH(cp, &gp->consumer, consumer) {
        if (g_mirror_is_busy(sc, cp))
            return (0);
    }
    gp = sc->sc_sync.ds_geom;
    LIST_FOREACH(cp, &gp->consumer, consumer) {
        if (g_mirror_is_busy(sc, cp))
            return (0);
    }
    G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
        sc->sc_name);
    return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

    if (sc->sc_rootmount != NULL) {
        G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
            sc->sc_rootmount);
        root_mount_rel(sc->sc_rootmount);
        sc->sc_rootmount = NULL;
    }
    g_topology_lock();
    if (!g_mirror_can_destroy(sc)) {
        g_topology_unlock();
        return (0);
    }
    sc->sc_geom->softc = NULL;
    sc->sc_sync.ds_geom->softc = NULL;
    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
        g_topology_unlock();
        G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
            &sc->sc_worker);
        /* Unlock sc_lock here, as it can be destroyed after wakeup. */
        sx_xunlock(&sc->sc_lock);
        wakeup(&sc->sc_worker);
        sc->sc_worker = NULL;
    } else {
        g_topology_unlock();
        g_mirror_destroy_device(sc);
        free(sc, M_MIRROR);
    }
    return (1);
}

/*
 * Worker thread.
 */
static void
g_mirror_worker(void *arg)
{
    struct g_mirror_softc *sc;
    struct g_mirror_event *ep;
    struct bio *bp;
    int timeout;

    sc = arg;
    thread_lock(curthread);
    sched_prio(curthread, PRIBIO);
    thread_unlock(curthread);

    sx_xlock(&sc->sc_lock);
    for (;;) {
        G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
        /*
         * First take a look at events.
         * This is important to handle events before any I/O requests.
         */
        ep = g_mirror_event_get(sc);
        if (ep != NULL) {
            g_mirror_event_remove(sc, ep);
            if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
                /* Update only device status. */
                G_MIRROR_DEBUG(3,
                    "Running event for device %s.",
                    sc->sc_name);
                ep->e_error = 0;
                g_mirror_update_device(sc, 1);
            } else {
                /* Update disk status. */
                G_MIRROR_DEBUG(3, "Running event for disk %s.",
                    g_mirror_get_diskname(ep->e_disk));
                ep->e_error = g_mirror_update_disk(ep->e_disk,
                    ep->e_state);
                if (ep->e_error == 0)
                    g_mirror_update_device(sc, 0);
            }
            if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
                KASSERT(ep->e_error == 0,
                    ("Error cannot be handled."));
                g_mirror_event_free(ep);
            } else {
                ep->e_flags |= G_MIRROR_EVENT_DONE;
                G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
                    ep);
                mtx_lock(&sc->sc_events_mtx);
                wakeup(ep);
                mtx_unlock(&sc->sc_events_mtx);
            }
            if ((sc->sc_flags &
                G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
                if (g_mirror_try_destroy(sc)) {
                    curthread->td_pflags &= ~TDP_GEOM;
                    G_MIRROR_DEBUG(1, "Thread exiting.");
                    kproc_exit(0);
                }
            }
            G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
            continue;
        }
        /*
         * Check if we can mark the array as CLEAN and, if we can't,
         * how many seconds we should wait before trying again.
         */
        timeout = g_mirror_idle(sc, -1);
        /*
         * Now I/O requests.
         */
        /* Get the first request from the queue. */
        mtx_lock(&sc->sc_queue_mtx);
        bp = bioq_takefirst(&sc->sc_queue);
        if (bp == NULL) {
            if ((sc->sc_flags &
                G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
                mtx_unlock(&sc->sc_queue_mtx);
                if (g_mirror_try_destroy(sc)) {
                    curthread->td_pflags &= ~TDP_GEOM;
                    G_MIRROR_DEBUG(1, "Thread exiting.");
                    kproc_exit(0);
                }
                mtx_lock(&sc->sc_queue_mtx);
            }
            sx_xunlock(&sc->sc_lock);
            /*
             * XXX: We can miss an event here, because an event
             * can be added without the sx-device-lock and without
             * the mtx-queue-lock. Maybe I should just stop using
             * a dedicated mutex for event synchronization and
             * stick with the queue lock?
             * The event will wait here until the next I/O request
             * or the next event is received.
             */
            MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
                timeout * hz);
            sx_xlock(&sc->sc_lock);
            G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
            continue;
        }
        mtx_unlock(&sc->sc_queue_mtx);

        if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
            (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
            g_mirror_sync_request(bp); /* READ */
        } else if (bp->bio_to != sc->sc_provider) {
            if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
                g_mirror_regular_request(bp);
            else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
                g_mirror_sync_request(bp); /* WRITE */
            else {
                KASSERT(0,
                    ("Invalid request cflags=0x%hx to=%s.",
                    bp->bio_cflags, bp->bio_to->name));
            }
        } else {
            g_mirror_register_request(bp);
        }
        G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
    }
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

    sx_assert(&sc->sc_lock, SX_LOCKED);

    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
        return;
    if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
        G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as dirty.",
            g_mirror_get_diskname(disk), sc->sc_name);
        disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
    } else if (sc->sc_idle &&
        (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
        G_MIRROR_DEBUG(2, "Disk %s (device %s) marked as clean.",
            g_mirror_get_diskname(disk), sc->sc_name);
        disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
    }
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
    struct g_mirror_softc *sc;
    struct g_consumer *cp;
    struct bio *bp;
    int error, i;

    g_topology_assert_not();
    sc = disk->d_softc;
    sx_assert(&sc->sc_lock, SX_LOCKED);

    KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
        ("Disk %s is not marked for synchronization.",
        g_mirror_get_diskname(disk)));
    KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
        ("Device not in RUNNING state (%s, %u).", sc->sc_name,
        sc->sc_state));

    sx_xunlock(&sc->sc_lock);
    g_topology_lock();
    cp = g_new_consumer(sc->sc_sync.ds_geom);
    cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
    error = g_attach(cp, sc->sc_provider);
    KASSERT(error == 0,
        ("Cannot attach to %s (error=%d).", sc->sc_name, error));
    error = g_access(cp, 1, 0, 0);
    KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
    g_topology_unlock();
    sx_xlock(&sc->sc_lock);

    G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
        g_mirror_get_diskname(disk));
    if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
        disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
    KASSERT(disk->d_sync.ds_consumer == NULL,
        ("Sync consumer already exists (device=%s, disk=%s).",
        sc->sc_name, g_mirror_get_diskname(disk)));

    disk->d_sync.ds_consumer = cp;
    disk->d_sync.ds_consumer->private = disk;
    disk->d_sync.ds_consumer->index = 0;
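    /*
     * Note: with g_mirror_syncreqs requests in flight, each up to
     * MAXPHYS bytes, the synchronization window covers at most
     * g_mirror_syncreqs * MAXPHYS bytes of the mirror at a time.
     */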
	disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs,
	    M_MIRROR, M_WAITOK);
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = g_alloc_bio();
		disk->d_sync.ds_bios[i] = bp;
		bp->bio_parent = NULL;
		bp->bio_cmd = BIO_READ;
		bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK);
		bp->bio_cflags = 0;
		bp->bio_offset = disk->d_sync.ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		disk->d_sync.ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_from = disk->d_sync.ds_consumer;
		bp->bio_to = sc->sc_provider;
		bp->bio_caller1 = (void *)(uintptr_t)i;
	}

	/* Increase the number of disks in SYNCHRONIZING state. */
	sc->sc_sync.ds_ndisks++;
	/* Set the number of in-flight synchronization requests. */
	disk->d_sync.ds_inflight = g_mirror_syncreqs;

	/*
	 * Fire off the first synchronization requests.
	 */
	for (i = 0; i < g_mirror_syncreqs; i++) {
		bp = disk->d_sync.ds_bios[i];
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		disk->d_sync.ds_consumer->index++;
		/*
		 * Delay the request if it collides with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, disk->d_sync.ds_consumer);
	}
}

/*
 * Stop the synchronization process.
 * type: 0 - synchronization finished
 *       1 - synchronization stopped
 */
static void
g_mirror_sync_stop(struct g_mirror_disk *disk, int type)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
	    g_mirror_disk_state2str(disk->d_state)));
	if (disk->d_sync.ds_consumer == NULL)
		return;

	if (type == 0) {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	} else /* if (type == 1) */ {
		G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.",
		    sc->sc_name, g_mirror_get_diskname(disk));
	}
	free(disk->d_sync.ds_bios, M_MIRROR);
	disk->d_sync.ds_bios = NULL;
	cp = disk->d_sync.ds_consumer;
	disk->d_sync.ds_consumer = NULL;
	disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	sc->sc_sync.ds_ndisks--;
	sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
	g_topology_lock();
	g_mirror_kill_consumer(sc, cp);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
}

static void
g_mirror_launch_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp, *dp;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_topology_lock();
	pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name);
	pp->flags |= G_PF_DIRECT_RECEIVE;
	pp->mediasize = sc->sc_mediasize;
	pp->sectorsize = sc->sc_sectorsize;
	pp->stripesize = 0;
	pp->stripeoffset = 0;

	/* Splitting of unmapped BIOs could work, but is not implemented yet. */
	if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT)
		pp->flags |= G_PF_ACCEPT_UNMAPPED;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_consumer && disk->d_consumer->provider) {
			dp = disk->d_consumer->provider;
			if (dp->stripesize > pp->stripesize) {
				pp->stripesize = dp->stripesize;
				pp->stripeoffset = dp->stripeoffset;
			}
			/* A provider underneath us doesn't support unmapped. */
			if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) {
				G_MIRROR_DEBUG(0, "Cancelling unmapped "
				    "because of %s.", dp->name);
				pp->flags &= ~G_PF_ACCEPT_UNMAPPED;
			}
		}
	}
	sc->sc_provider = pp;
	g_error_provider(pp, 0);
	g_topology_unlock();
	G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name,
	    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			g_mirror_sync_start(disk);
	}
}

static void
g_mirror_destroy_provider(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct bio *bp;

	g_topology_assert_not();
	KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).",
	    sc->sc_name));

	g_topology_lock();
	g_error_provider(sc->sc_provider, ENXIO);
	mtx_lock(&sc->sc_queue_mtx);
	while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) {
		/*
		 * Abort any pending I/O that wasn't generated by us.
		 * Synchronization requests and requests destined for
		 * individual mirror components can be destroyed immediately.
		 */
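		/*
		 * Note: the data buffers of synchronization requests were
		 * allocated by the synchronization code itself (see
		 * g_mirror_sync_start()), which is why they are returned
		 * to M_MIRROR here rather than delivered back.
		 */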
2144 */ 2145 if (bp->bio_to == sc->sc_provider && 2146 bp->bio_from->geom != sc->sc_sync.ds_geom) { 2147 g_io_deliver(bp, ENXIO); 2148 } else { 2149 if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) 2150 free(bp->bio_data, M_MIRROR); 2151 g_destroy_bio(bp); 2152 } 2153 } 2154 mtx_unlock(&sc->sc_queue_mtx); 2155 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2156 sc->sc_provider->name); 2157 sc->sc_provider->flags |= G_PF_WITHER; 2158 g_orphan_provider(sc->sc_provider, ENXIO); 2159 g_topology_unlock(); 2160 sc->sc_provider = NULL; 2161 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2162 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2163 g_mirror_sync_stop(disk, 1); 2164 } 2165 } 2166 2167 static void 2168 g_mirror_go(void *arg) 2169 { 2170 struct g_mirror_softc *sc; 2171 2172 sc = arg; 2173 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2174 g_mirror_event_send(sc, 0, 2175 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2176 } 2177 2178 static u_int 2179 g_mirror_determine_state(struct g_mirror_disk *disk) 2180 { 2181 struct g_mirror_softc *sc; 2182 u_int state; 2183 2184 sc = disk->d_softc; 2185 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2186 if ((disk->d_flags & 2187 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2188 /* Disk does not need synchronization. */ 2189 state = G_MIRROR_DISK_STATE_ACTIVE; 2190 } else { 2191 if ((sc->sc_flags & 2192 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2193 (disk->d_flags & 2194 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2195 /* 2196 * We can start synchronization from 2197 * the stored offset. 2198 */ 2199 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2200 } else { 2201 state = G_MIRROR_DISK_STATE_STALE; 2202 } 2203 } 2204 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2205 /* 2206 * Reset all synchronization data for this disk, 2207 * because if it even was synchronized, it was 2208 * synchronized to disks with different syncid. 2209 */ 2210 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2211 disk->d_sync.ds_offset = 0; 2212 disk->d_sync.ds_offset_done = 0; 2213 disk->d_sync.ds_syncid = sc->sc_syncid; 2214 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2215 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2216 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2217 } else { 2218 state = G_MIRROR_DISK_STATE_STALE; 2219 } 2220 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2221 /* 2222 * Not good, NOT GOOD! 2223 * It means that mirror was started on stale disks 2224 * and more fresh disk just arrive. 2225 * If there were writes, mirror is broken, sorry. 2226 * I think the best choice here is don't touch 2227 * this disk and inform the user loudly. 2228 */ 2229 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2230 "disk (%s) arrives!! It will not be connected to the " 2231 "running device.", sc->sc_name, 2232 g_mirror_get_diskname(disk)); 2233 g_mirror_destroy_disk(disk); 2234 state = G_MIRROR_DISK_STATE_NONE; 2235 /* Return immediately, because disk was destroyed. */ 2236 return (state); 2237 } 2238 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2239 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2240 return (state); 2241 } 2242 2243 /* 2244 * Update device state. 
2245 */ 2246 static void 2247 g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force) 2248 { 2249 struct g_mirror_disk *disk; 2250 u_int state; 2251 2252 sx_assert(&sc->sc_lock, SX_XLOCKED); 2253 2254 switch (sc->sc_state) { 2255 case G_MIRROR_DEVICE_STATE_STARTING: 2256 { 2257 struct g_mirror_disk *pdisk, *tdisk; 2258 u_int dirty, ndisks, genid, syncid; 2259 2260 KASSERT(sc->sc_provider == NULL, 2261 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2262 /* 2263 * Are we ready? We are, if all disks are connected or 2264 * if we have any disks and 'force' is true. 2265 */ 2266 ndisks = g_mirror_ndisks(sc, -1); 2267 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2268 ; 2269 } else if (ndisks == 0) { 2270 /* 2271 * Disks went down in starting phase, so destroy 2272 * device. 2273 */ 2274 callout_drain(&sc->sc_callout); 2275 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2276 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2277 sc->sc_rootmount); 2278 root_mount_rel(sc->sc_rootmount); 2279 sc->sc_rootmount = NULL; 2280 return; 2281 } else { 2282 return; 2283 } 2284 2285 /* 2286 * Activate all disks with the biggest syncid. 2287 */ 2288 if (force) { 2289 /* 2290 * If 'force' is true, we have been called due to 2291 * timeout, so don't bother canceling timeout. 2292 */ 2293 ndisks = 0; 2294 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2295 if ((disk->d_flags & 2296 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2297 ndisks++; 2298 } 2299 } 2300 if (ndisks == 0) { 2301 /* No valid disks found, destroy device. */ 2302 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2303 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2304 __LINE__, sc->sc_rootmount); 2305 root_mount_rel(sc->sc_rootmount); 2306 sc->sc_rootmount = NULL; 2307 return; 2308 } 2309 } else { 2310 /* Cancel timeout. */ 2311 callout_drain(&sc->sc_callout); 2312 } 2313 2314 /* 2315 * Find the biggest genid. 2316 */ 2317 genid = 0; 2318 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2319 if (disk->d_genid > genid) 2320 genid = disk->d_genid; 2321 } 2322 sc->sc_genid = genid; 2323 /* 2324 * Remove all disks without the biggest genid. 2325 */ 2326 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2327 if (disk->d_genid < genid) { 2328 G_MIRROR_DEBUG(0, 2329 "Component %s (device %s) broken, skipping.", 2330 g_mirror_get_diskname(disk), sc->sc_name); 2331 g_mirror_destroy_disk(disk); 2332 } 2333 } 2334 2335 /* 2336 * Find the biggest syncid. 2337 */ 2338 syncid = 0; 2339 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2340 if (disk->d_sync.ds_syncid > syncid) 2341 syncid = disk->d_sync.ds_syncid; 2342 } 2343 2344 /* 2345 * Here we need to look for dirty disks and if all disks 2346 * with the biggest syncid are dirty, we have to choose 2347 * one with the biggest priority and rebuild the rest. 2348 */ 2349 /* 2350 * Find the number of dirty disks with the biggest syncid. 2351 * Find the number of disks with the biggest syncid. 2352 * While here, find a disk with the biggest priority. 2353 */ 2354 dirty = ndisks = 0; 2355 pdisk = NULL; 2356 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2357 if (disk->d_sync.ds_syncid != syncid) 2358 continue; 2359 if ((disk->d_flags & 2360 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2361 continue; 2362 } 2363 ndisks++; 2364 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2365 dirty++; 2366 if (pdisk == NULL || 2367 pdisk->d_priority < disk->d_priority) { 2368 pdisk = disk; 2369 } 2370 } 2371 } 2372 if (dirty == 0) { 2373 /* No dirty disks at all, great. 
		} else if (dirty == ndisks) {
			/*
			 * Force synchronization for all dirty disks except
			 * the one with the biggest priority.
			 */
			KASSERT(pdisk != NULL, ("pdisk == NULL"));
			G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a "
			    "master disk for synchronization.",
			    g_mirror_get_diskname(pdisk), sc->sc_name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				KASSERT((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) != 0,
				    ("Disk %s isn't marked as dirty.",
				    g_mirror_get_diskname(disk)));
				/* Skip the disk with the biggest priority. */
				if (disk == pdisk)
					continue;
				disk->d_sync.ds_syncid = 0;
			}
		} else if (dirty < ndisks) {
			/*
			 * Force synchronization for all dirty disks.
			 * We have some non-dirty disks.
			 */
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_sync.ds_syncid != syncid)
					continue;
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) {
					continue;
				}
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_DIRTY) == 0) {
					continue;
				}
				disk->d_sync.ds_syncid = 0;
			}
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
			if (state == G_MIRROR_DISK_STATE_STALE)
				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * No active disks or no disks at all,
			 * so destroy the device.
			 */
			if (sc->sc_provider != NULL)
				g_mirror_destroy_provider(sc);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			break;
		} else if (g_mirror_ndisks(sc,
		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
			/*
			 * We have active disks, so launch the provider if
			 * it doesn't exist yet.
			 */
			if (sc->sc_provider == NULL)
				g_mirror_launch_provider(sc);
			if (sc->sc_rootmount != NULL) {
				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
				    __LINE__, sc->sc_rootmount);
				root_mount_rel(sc->sc_rootmount);
				sc->sc_rootmount = NULL;
			}
		}
		/*
		 * Genid should be bumped immediately, so do it here.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
			g_mirror_bump_genid(sc);
		}
		break;
	default:
		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
		break;
	}
}
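
/*
 * A worked example of the dirty-disk election above (hypothetical): if
 * a three-way mirror loses power in the middle of writes, all
 * components may come back dirty with the same syncid.  The component
 * with the highest d_priority is kept as the master, while the others
 * have ds_syncid reset to 0, forcing a full rebuild from the master.
 */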
2484 */ 2485 #define DISK_STATE_CHANGED() G_MIRROR_DEBUG(1, \ 2486 "Disk %s state changed from %s to %s (device %s).", \ 2487 g_mirror_get_diskname(disk), \ 2488 g_mirror_disk_state2str(disk->d_state), \ 2489 g_mirror_disk_state2str(state), sc->sc_name) 2490 static int 2491 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) 2492 { 2493 struct g_mirror_softc *sc; 2494 2495 sc = disk->d_softc; 2496 sx_assert(&sc->sc_lock, SX_XLOCKED); 2497 2498 again: 2499 G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", 2500 g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), 2501 g_mirror_disk_state2str(state)); 2502 switch (state) { 2503 case G_MIRROR_DISK_STATE_NEW: 2504 /* 2505 * Possible scenarios: 2506 * 1. New disk arrive. 2507 */ 2508 /* Previous state should be NONE. */ 2509 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2510 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2511 g_mirror_disk_state2str(disk->d_state))); 2512 DISK_STATE_CHANGED(); 2513 2514 disk->d_state = state; 2515 if (LIST_EMPTY(&sc->sc_disks)) 2516 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2517 else { 2518 struct g_mirror_disk *dp; 2519 2520 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2521 if (disk->d_priority >= dp->d_priority) { 2522 LIST_INSERT_BEFORE(dp, disk, d_next); 2523 dp = NULL; 2524 break; 2525 } 2526 if (LIST_NEXT(dp, d_next) == NULL) 2527 break; 2528 } 2529 if (dp != NULL) 2530 LIST_INSERT_AFTER(dp, disk, d_next); 2531 } 2532 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2533 sc->sc_name, g_mirror_get_diskname(disk)); 2534 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2535 break; 2536 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2537 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2538 g_mirror_device_state2str(sc->sc_state), 2539 g_mirror_get_diskname(disk), 2540 g_mirror_disk_state2str(disk->d_state))); 2541 state = g_mirror_determine_state(disk); 2542 if (state != G_MIRROR_DISK_STATE_NONE) 2543 goto again; 2544 break; 2545 case G_MIRROR_DISK_STATE_ACTIVE: 2546 /* 2547 * Possible scenarios: 2548 * 1. New disk does not need synchronization. 2549 * 2. Synchronization process finished successfully. 2550 */ 2551 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2552 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2553 g_mirror_device_state2str(sc->sc_state), 2554 g_mirror_get_diskname(disk), 2555 g_mirror_disk_state2str(disk->d_state))); 2556 /* Previous state should be NEW or SYNCHRONIZING. */ 2557 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2558 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2559 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2560 g_mirror_disk_state2str(disk->d_state))); 2561 DISK_STATE_CHANGED(); 2562 2563 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2564 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2565 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2566 g_mirror_sync_stop(disk, 0); 2567 } 2568 disk->d_state = state; 2569 disk->d_sync.ds_offset = 0; 2570 disk->d_sync.ds_offset_done = 0; 2571 g_mirror_update_idle(sc, disk); 2572 g_mirror_update_metadata(disk); 2573 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2574 sc->sc_name, g_mirror_get_diskname(disk)); 2575 break; 2576 case G_MIRROR_DISK_STATE_STALE: 2577 /* 2578 * Possible scenarios: 2579 * 1. Stale disk was connected. 2580 */ 2581 /* Previous state should be NEW. 
	case G_MIRROR_DISK_STATE_STALE:
		/*
		 * Possible scenarios:
		 * 1. A stale disk was connected.
		 */
		/* The previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		/*
		 * The STALE state is only possible if the device is marked
		 * NOAUTOSYNC.
		 */
		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		g_mirror_update_metadata(disk);
		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
		    sc->sc_name, g_mirror_get_diskname(disk));
		break;
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		/*
		 * Possible scenarios:
		 * 1. A disk that needs synchronization was connected.
		 */
		/* The previous state should be NEW. */
		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
		DISK_STATE_CHANGED();

		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		disk->d_state = state;
		if (sc->sc_provider != NULL) {
			g_mirror_sync_start(disk);
			g_mirror_update_metadata(disk);
		}
		break;
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		/*
		 * Possible scenarios:
		 * 1. The device wasn't running yet, but a disk disappeared.
		 * 2. A disk was active and disappeared.
		 * 3. A disk disappeared during the synchronization process.
		 */
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
			/*
			 * The previous state should be ACTIVE, STALE or
			 * SYNCHRONIZING.
			 */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
			/* The previous state should be NEW. */
			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
			    ("Wrong disk state (%s, %s).",
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
			/*
			 * Reset the syncid bump if the disk disappeared in
			 * the STARTING state.
			 */
			if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0)
				sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
#ifdef	INVARIANTS
		} else {
			KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).",
			    sc->sc_name,
			    g_mirror_device_state2str(sc->sc_state),
			    g_mirror_get_diskname(disk),
			    g_mirror_disk_state2str(disk->d_state)));
#endif
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		break;
	case G_MIRROR_DISK_STATE_DESTROY:
	    {
		int error;

		error = g_mirror_clear_metadata(disk);
		if (error != 0) {
			G_MIRROR_DEBUG(0,
			    "Device %s: failed to clear metadata on %s: %d.",
			    sc->sc_name, g_mirror_get_diskname(disk), error);
			break;
		}
		DISK_STATE_CHANGED();
		G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.",
		    sc->sc_name, g_mirror_get_diskname(disk));

		g_mirror_destroy_disk(disk);
		sc->sc_ndisks--;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			g_mirror_update_metadata(disk);
		}
		break;
	    }
	default:
		KASSERT(1 == 0, ("Unknown state (%u).", state));
		break;
	}
	return (0);
}
#undef	DISK_STATE_CHANGED

int
g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md)
{
	struct g_provider *pp;
	u_char *buf;
	int error;

	g_topology_assert();

	error = g_access(cp, 1, 0, 0);
	if (error != 0)
		return (error);
	pp = cp->provider;
	g_topology_unlock();
	/* Metadata is stored in the last sector. */
	buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize,
	    &error);
	g_topology_lock();
	g_access(cp, -1, 0, 0);
	if (buf == NULL) {
		G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).",
		    cp->provider->name, error);
		return (error);
	}

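	/*
	 * For illustration: the read above targets the provider's last
	 * sector (byte offset mediasize - sectorsize), so on a disk with
	 * 512-byte sectors the metadata occupies the final 512 bytes.
	 */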
	/* Decode metadata. */
	error = mirror_metadata_decode(buf, md);
	g_free(buf);
	if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0)
		return (EINVAL);
	if (md->md_version > G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0,
		    "Kernel module is too old to handle metadata from %s.",
		    cp->provider->name);
		return (EINVAL);
	}
	if (error != 0) {
		G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.",
		    cp->provider->name);
		return (error);
	}

	return (0);
}

static int
g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{

	if (g_mirror_id2disk(sc, md->md_did) != NULL) {
		G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.",
		    pp->name, md->md_did);
		return (EEXIST);
	}
	if (md->md_all != sc->sc_ndisks) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_all", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_slice != sc->sc_slice) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_slice", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if (md->md_balance != sc->sc_balance) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_balance", pp->name, sc->sc_name);
		return (EINVAL);
	}
#if 0
	if (md->md_mediasize != sc->sc_mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_mediasize", pp->name, sc->sc_name);
		return (EINVAL);
	}
#endif
	if (sc->sc_mediasize > pp->mediasize) {
		G_MIRROR_DEBUG(1,
		    "Invalid size of disk %s (device %s), skipping.", pp->name,
		    sc->sc_name);
		return (EINVAL);
	}
	if (md->md_sectorsize != sc->sc_sectorsize) {
		G_MIRROR_DEBUG(1,
		    "Invalid '%s' field on disk %s (device %s), skipping.",
		    "md_sectorsize", pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((sc->sc_sectorsize % pp->sectorsize) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid sector size of disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid device flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) {
		G_MIRROR_DEBUG(1,
		    "Invalid disk flags on disk %s (device %s), skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	return (0);
}

int
g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md)
{
	struct g_mirror_disk *disk;
	int error;

	g_topology_assert_not();
	G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name);

	error = g_mirror_check_metadata(sc, pp, md);
	if (error != 0)
		return (error);
	if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING &&
	    md->md_genid < sc->sc_genid) {
		G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.",
		    pp->name, sc->sc_name);
		return (EINVAL);
	}
	disk = g_mirror_init_disk(sc, pp, md, &error);
	if (disk == NULL)
		return (error);
	error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW,
	    G_MIRROR_EVENT_WAIT);
	if (error != 0)
		return (error);
	if (md->md_version < G_MIRROR_VERSION) {
		G_MIRROR_DEBUG(0,
"Upgrading metadata on %s (v%d->v%d).", 2848 pp->name, md->md_version, G_MIRROR_VERSION); 2849 g_mirror_update_metadata(disk); 2850 } 2851 return (0); 2852 } 2853 2854 static void 2855 g_mirror_destroy_delayed(void *arg, int flag) 2856 { 2857 struct g_mirror_softc *sc; 2858 int error; 2859 2860 if (flag == EV_CANCEL) { 2861 G_MIRROR_DEBUG(1, "Destroying canceled."); 2862 return; 2863 } 2864 sc = arg; 2865 g_topology_unlock(); 2866 sx_xlock(&sc->sc_lock); 2867 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2868 ("DESTROY flag set on %s.", sc->sc_name)); 2869 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2870 ("DESTROYING flag not set on %s.", sc->sc_name)); 2871 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2872 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2873 if (error != 0) { 2874 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2875 sc->sc_name, error); 2876 sx_xunlock(&sc->sc_lock); 2877 } 2878 g_topology_lock(); 2879 } 2880 2881 static int 2882 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2883 { 2884 struct g_mirror_softc *sc; 2885 int dcr, dcw, dce, error = 0; 2886 2887 g_topology_assert(); 2888 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2889 acw, ace); 2890 2891 sc = pp->geom->softc; 2892 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2893 return (0); 2894 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2895 2896 dcr = pp->acr + acr; 2897 dcw = pp->acw + acw; 2898 dce = pp->ace + ace; 2899 2900 g_topology_unlock(); 2901 sx_xlock(&sc->sc_lock); 2902 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2903 LIST_EMPTY(&sc->sc_disks)) { 2904 if (acr > 0 || acw > 0 || ace > 0) 2905 error = ENXIO; 2906 goto end; 2907 } 2908 if (dcw == 0) 2909 g_mirror_idle(sc, dcw); 2910 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2911 if (acr > 0 || acw > 0 || ace > 0) { 2912 error = ENXIO; 2913 goto end; 2914 } 2915 if (dcr == 0 && dcw == 0 && dce == 0) { 2916 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2917 sc, NULL); 2918 } 2919 } 2920 end: 2921 sx_xunlock(&sc->sc_lock); 2922 g_topology_lock(); 2923 return (error); 2924 } 2925 2926 static struct g_geom * 2927 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2928 { 2929 struct g_mirror_softc *sc; 2930 struct g_geom *gp; 2931 int error, timeout; 2932 2933 g_topology_assert(); 2934 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2935 md->md_mid); 2936 2937 /* One disk is minimum. */ 2938 if (md->md_all < 1) 2939 return (NULL); 2940 /* 2941 * Action geom. 
2942 */ 2943 gp = g_new_geomf(mp, "%s", md->md_name); 2944 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2945 gp->start = g_mirror_start; 2946 gp->orphan = g_mirror_orphan; 2947 gp->access = g_mirror_access; 2948 gp->dumpconf = g_mirror_dumpconf; 2949 2950 sc->sc_id = md->md_mid; 2951 sc->sc_slice = md->md_slice; 2952 sc->sc_balance = md->md_balance; 2953 sc->sc_mediasize = md->md_mediasize; 2954 sc->sc_sectorsize = md->md_sectorsize; 2955 sc->sc_ndisks = md->md_all; 2956 sc->sc_flags = md->md_mflags; 2957 sc->sc_bump_id = 0; 2958 sc->sc_idle = 1; 2959 sc->sc_last_write = time_uptime; 2960 sc->sc_writes = 0; 2961 sx_init(&sc->sc_lock, "gmirror:lock"); 2962 bioq_init(&sc->sc_queue); 2963 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2964 bioq_init(&sc->sc_regular_delayed); 2965 bioq_init(&sc->sc_inflight); 2966 bioq_init(&sc->sc_sync_delayed); 2967 LIST_INIT(&sc->sc_disks); 2968 TAILQ_INIT(&sc->sc_events); 2969 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2970 callout_init(&sc->sc_callout, 1); 2971 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 2972 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2973 gp->softc = sc; 2974 sc->sc_geom = gp; 2975 sc->sc_provider = NULL; 2976 /* 2977 * Synchronization geom. 2978 */ 2979 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2980 gp->softc = sc; 2981 gp->orphan = g_mirror_orphan; 2982 sc->sc_sync.ds_geom = gp; 2983 sc->sc_sync.ds_ndisks = 0; 2984 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2985 "g_mirror %s", md->md_name); 2986 if (error != 0) { 2987 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2988 sc->sc_name); 2989 g_destroy_geom(sc->sc_sync.ds_geom); 2990 mtx_destroy(&sc->sc_done_mtx); 2991 mtx_destroy(&sc->sc_events_mtx); 2992 mtx_destroy(&sc->sc_queue_mtx); 2993 sx_destroy(&sc->sc_lock); 2994 g_destroy_geom(sc->sc_geom); 2995 free(sc, M_MIRROR); 2996 return (NULL); 2997 } 2998 2999 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 3000 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3001 3002 sc->sc_rootmount = root_mount_hold("GMIRROR"); 3003 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3004 /* 3005 * Run timeout. 
	/*
	 * Run the timeout.
	 */
	timeout = g_mirror_timeout * hz;
	callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc);
	return (sc->sc_geom);
}

int
g_mirror_destroy(struct g_mirror_softc *sc, int how)
{
	struct g_mirror_disk *disk;
	struct g_provider *pp;

	g_topology_assert_not();
	if (sc == NULL)
		return (ENXIO);
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	pp = sc->sc_provider;
	if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0 ||
	    SCHEDULER_STOPPED())) {
		switch (how) {
		case G_MIRROR_DESTROY_SOFT:
			G_MIRROR_DEBUG(1,
			    "Device %s is still open (r%dw%de%d).", pp->name,
			    pp->acr, pp->acw, pp->ace);
			return (EBUSY);
		case G_MIRROR_DESTROY_DELAYED:
			G_MIRROR_DEBUG(1,
			    "Device %s will be destroyed on last close.",
			    pp->name);
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if (disk->d_state ==
				    G_MIRROR_DISK_STATE_SYNCHRONIZING) {
					g_mirror_sync_stop(disk, 1);
				}
			}
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING;
			return (EBUSY);
		case G_MIRROR_DESTROY_HARD:
			G_MIRROR_DEBUG(1, "Device %s is still open, so it "
			    "can't be definitely removed.", pp->name);
		}
	}

	g_topology_lock();
	if (sc->sc_geom->softc == NULL) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	g_topology_unlock();

	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT;
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	sx_xunlock(&sc->sc_lock);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker);
	while (sc->sc_worker != NULL)
		tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5);
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
	sx_xlock(&sc->sc_lock);
	g_mirror_destroy_device(sc);
	free(sc, M_MIRROR);
	return (0);
}

static void
g_mirror_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
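
/*
 * Tasting: GEOM invokes the method below for every provider in the
 * system.  A throw-away geom and consumer are created just long enough
 * to read the metadata sector; if valid gmirror metadata is found, the
 * provider is attached to an existing device or a new device is created
 * for it.
 */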
static struct g_geom *
g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
{
	struct g_mirror_metadata md;
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct g_geom *gp;
	int error;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name);
	G_MIRROR_DEBUG(2, "Tasting %s.", pp->name);

	gp = g_new_geomf(mp, "mirror:taste");
	/*
	 * This orphan function should never be called.
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (md.md_provider[0] != '\0' &&
	    !g_compare_names(md.md_provider, pp->name))
		return (NULL);
	if (md.md_provsize != 0 && md.md_provsize != pp->mediasize)
		return (NULL);
	if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(0,
		    "Device %s: provider %s marked as inactive, skipping.",
		    md.md_name, pp->name);
		return (NULL);
	}
	if (g_mirror_debug >= 2)
		mirror_metadata_dump(&md);

	/*
	 * Check whether the device already exists.
	 */
	sc = NULL;
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    sc->sc_name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);
	sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING;
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		if (LIST_EMPTY(&sc->sc_disks)) {
			g_cancel_event(sc);
			g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
			g_topology_lock();
			return (NULL);
		}
		gp = NULL;
	}
	sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
		g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD);
		g_topology_lock();
		return (NULL);
	}
	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (gp);
}

static void
g_mirror_resize(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();
	g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name);

	disk = cp->private;
	if (disk == NULL)
		return;
	g_topology_unlock();
	g_mirror_update_metadata(disk);
	g_topology_lock();
}

static int
g_mirror_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{
	struct g_mirror_softc *sc;
	int error;

	g_topology_unlock();
	sc = gp->softc;
	sx_xlock(&sc->sc_lock);
	g_cancel_event(sc);
	error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT);
	if (error != 0)
		sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	return (error);
}
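
/*
 * g_mirror_dumpconf() below produces the XML fragments visible in
 * kern.geom.confxml and consumed by gmirror(8).  A synchronizing
 * consumer might render roughly as follows (values hypothetical):
 *
 *	<ID>123456</ID>
 *	<Synchronized>42%</Synchronized>
 *	<BytesSynced>1073741824</BytesSynced>
 *	<SyncID>5</SyncID>
 *	<GenID>2</GenID>
 *	<Flags>DIRTY, SYNCHRONIZING</Flags>
 *	<Priority>0</Priority>
 *	<State>SYNCHRONIZING</State>
 */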
static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	sc = gp->softc;
	if (sc == NULL)
		return;
	/* Skip the synchronization geom. */
	if (gp == sc->sc_sync.ds_geom)
		return;
	if (pp != NULL) {
		/* Nothing here. */
	} else if (cp != NULL) {
		struct g_mirror_disk *disk;

		disk = cp->private;
		if (disk == NULL)
			return;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, "%s<Synchronized>", indent);
			if (disk->d_sync.ds_offset == 0)
				sbuf_printf(sb, "0%%");
			else {
				sbuf_printf(sb, "%u%%",
				    (u_int)((disk->d_sync.ds_offset * 100) /
				    sc->sc_provider->mediasize));
			}
			sbuf_printf(sb, "</Synchronized>\n");
			if (disk->d_sync.ds_offset > 0) {
				sbuf_printf(sb, "%s<BytesSynced>%jd"
				    "</BytesSynced>\n", indent,
				    (intmax_t)disk->d_sync.ds_offset);
			}
		}
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent,
		    disk->d_sync.ds_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent,
		    disk->d_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (disk->d_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((disk->d_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY");
			ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED");
			ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE");
			ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING,
			    "SYNCHRONIZING");
			ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC");
			ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent,
		    disk->d_priority);
		sbuf_printf(sb, "%s<State>%s</State>\n", indent,
		    g_mirror_disk_state2str(disk->d_state));
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	} else {
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id);
		sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid);
		sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid);
		sbuf_printf(sb, "%s<Flags>", indent);
		if (sc->sc_flags == 0)
			sbuf_printf(sb, "NONE");
		else {
			int first = 1;

#define	ADD_FLAG(flag, name)	do {					\
	if ((sc->sc_flags & (flag)) != 0) {				\
		if (!first)						\
			sbuf_printf(sb, ", ");				\
		else							\
			first = 0;					\
		sbuf_printf(sb, name);					\
	}								\
} while (0)
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC");
			ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC");
#undef	ADD_FLAG
		}
		sbuf_printf(sb, "</Flags>\n");
		sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent,
		    (u_int)sc->sc_slice);
		sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent,
		    balance_name(sc->sc_balance));
		sbuf_printf(sb, "%s<Components>%u</Components>\n", indent,
		    sc->sc_ndisks);
		sbuf_printf(sb, "%s<State>", indent);
		if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING)
			sbuf_printf(sb, "%s", "STARTING");
		else if (sc->sc_ndisks ==
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE))
			sbuf_printf(sb, "%s", "COMPLETE");
		else
			sbuf_printf(sb, "%s", "DEGRADED");
		sbuf_printf(sb, "</State>\n");
		sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
}
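
/*
 * The shutdown handler below is registered at SHUTDOWN_PRI_FIRST (see
 * g_mirror_init()), so mirrors are marked idle and queued for delayed
 * destruction right after filesystems have been synced.
 */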
static void
g_mirror_shutdown_post_sync(void *arg, int howto)
{
	struct g_class *mp;
	struct g_geom *gp, *gp2;
	struct g_mirror_softc *sc;
	int error;

	mp = arg;
	g_topology_lock();
	g_mirror_shutdown = 1;
	LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) {
		if ((sc = gp->softc) == NULL)
			continue;
		/* Skip the synchronization geom. */
		if (gp == sc->sc_sync.ds_geom)
			continue;
		g_topology_unlock();
		sx_xlock(&sc->sc_lock);
		g_mirror_idle(sc, -1);
		g_cancel_event(sc);
		error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED);
		if (error != 0)
			sx_xunlock(&sc->sc_lock);
		g_topology_lock();
	}
	g_topology_unlock();
}

static void
g_mirror_init(struct g_class *mp)
{

	g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync,
	    g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST);
	if (g_mirror_post_sync == NULL)
		G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event.");
}

static void
g_mirror_fini(struct g_class *mp)
{

	if (g_mirror_post_sync != NULL)
		EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync);
}

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);
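
/*
 * Userland reference (informational): mirrors handled by this class are
 * typically created with "gmirror label -v -b round-robin <name> <prov> ..."
 * and inspected with "gmirror status"; the module's verbosity is tuned
 * via the kern.geom.mirror.debug sysctl.
 */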