/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/mirror/g_mirror.h>

FEATURE(geom_mirror, "GEOM mirroring support");

static MALLOC_DEFINE(M_MIRROR, "mirror_data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
static SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0,
    "GEOM_MIRROR stuff");
u_int g_mirror_debug = 0;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RWTUN, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_timeout = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_mirror_timeout,
    0, "Time to wait on all mirror components");
static u_int g_mirror_idletime = 5;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, idletime, CTLFLAG_RWTUN,
    &g_mirror_idletime, 0, "Mark components as clean when idling");
static u_int g_mirror_disconnect_on_failure = 1;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN,
    &g_mirror_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_mirror_syncreqs = 2;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_mirror_syncreqs, 0, "Parallel synchronization I/O requests.");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_mirror_post_sync = NULL;
static int g_mirror_shutdown = 0;

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);

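/*
 * GEOM class methods: g_mirror_taste probes providers for mirror
 * metadata, g_mirror_resize reacts to provider size changes and
 * g_mirror_init/g_mirror_fini handle class setup and teardown.  All
 * of them are wired into g_mirror_class below.
 */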
static g_taste_t g_mirror_taste;
static g_resize_t g_mirror_resize;
static void g_mirror_init(struct g_class *mp);
static void g_mirror_fini(struct g_class *mp);

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom,
	.init = g_mirror_init,
	.fini = g_mirror_fini,
	.resize = g_mirror_resize
};


static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk, int type);
static void g_mirror_register_request(struct bio *bp);
static void g_mirror_sync_release(struct g_mirror_softc *sc);


static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_STALE:
		return ("STALE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_get_diskname(struct g_mirror_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

/*
 * --- Event handling functions ---
 * Events in geom_mirror are used to maintain disk and device status
 * from one thread, to simplify locking.
 */
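/*
 * For example, g_mirror_orphan() below reports the loss of a component
 * with
 *
 *	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
 *	    G_MIRROR_EVENT_DONTWAIT);
 *
 * which queues the state change for the worker thread instead of
 * applying it in the caller's context.
 */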
static void
g_mirror_event_free(struct g_mirror_event *ep)
{

	free(ep, M_MIRROR);
}

int
g_mirror_event_send(void *arg, int state, int flags)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK);
	G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_MIRROR_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "m:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_mirror_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_mirror_event *
g_mirror_event_get(struct g_mirror_softc *sc)
{
	struct g_mirror_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_mirror_event_remove(struct g_mirror_softc *sc, struct g_mirror_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_mirror_event_cancel(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_mirror_ndisks(struct g_mirror_softc *sc, int state)
{
	struct g_mirror_disk *disk;
	u_int n = 0;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (state == -1 || disk->d_state == state)
			n++;
	}
	return (n);
}

/*
 * Find a disk in the mirror by its disk ID.
 */
static struct g_mirror_disk *
g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id)
{
	struct g_mirror_disk *disk;

	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_id == id)
			return (disk);
	}
	return (NULL);
}

static u_int
g_mirror_nrequests(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_mirror_is_busy(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_mirror_nrequests(sc, cp) > 0) {
		G_MIRROR_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_mirror_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_mirror_kill_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_mirror_is_busy(sc, cp))
		return;
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_MIRROR_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event is sent (inside g_access()), we
		 * can post an event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given
		 * provider will not receive a retaste event for it.
		 * This is how we ignore retaste events for consumers opened
		 * for writing: the consumer is detached and destroyed only
		 * after the retaste event has been sent.
		 */
		g_post_event(g_mirror_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_MIRROR_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	cp->flags |= G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_topology_unlock();
		G_MIRROR_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	g_topology_unlock();
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;

	G_MIRROR_DEBUG(2, "Disk %s connected.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_consumer(struct g_mirror_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_mirror_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int i, error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_priority = md->md_priority;
	disk->d_flags = md->md_dflags;
	error = g_getattr("GEOM::candelete", disk->d_consumer, &i);
	if (error == 0 && i != 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_CANDELETE;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_MIRROR_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL)
		free(disk, M_MIRROR);
	return (NULL);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk, 1);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_STALE:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_mirror_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		free(disk, M_MIRROR);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;
	struct g_consumer *cp, *tmpcp;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		g_mirror_event_remove(sc, ep);
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);

	g_topology_lock();
	LIST_FOREACH_SAFE(cp, &sc->sc_sync.ds_geom->consumer, consumer, tmpcp) {
		g_mirror_disconnect_consumer(sc, cp);
	}
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	mtx_destroy(&sc->sc_done_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Return the next active disk on the list.
 * It is possible that it will be the same disk as the given one.
 * If there are no active disks on the list, NULL is returned.
 */
static __inline struct g_mirror_disk *
g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{
	struct g_mirror_disk *dp;

	for (dp = LIST_NEXT(disk, d_next); dp != disk;
	    dp = LIST_NEXT(dp, d_next)) {
		if (dp == NULL)
			dp = LIST_FIRST(&sc->sc_disks);
		if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
		return (NULL);
	return (dp);
}

static struct g_mirror_disk *
g_mirror_get_disk(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	if (sc->sc_hint == NULL) {
		sc->sc_hint = LIST_FIRST(&sc->sc_disks);
		if (sc->sc_hint == NULL)
			return (NULL);
	}
	disk = sc->sc_hint;
	if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) {
		disk = g_mirror_find_next(sc, disk);
		if (disk == NULL)
			return (NULL);
	}
	sc->sc_hint = g_mirror_find_next(sc, disk);
	return (disk);
}

static int
g_mirror_write_metadata(struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
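	/* The metadata occupies the last sector of the component provider. */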
	sector = malloc((size_t)length, M_MIRROR, M_WAITOK | M_ZERO);
	if (md != NULL &&
	    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0) {
		/*
		 * Handle the case when the size of the parent provider
		 * was reduced.
		 */
		if (offset < md->md_mediasize)
			error = ENOSPC;
		else
			mirror_metadata_encode(md, sector);
	}
	if (error == 0)
		error = g_write_data(cp, offset, sector, length);
	free(sector, M_MIRROR);
	if (error != 0) {
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
			G_MIRROR_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		} else {
			G_MIRROR_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_mirror_get_diskname(disk), sc->sc_name, error);
		}
		if (g_mirror_disconnect_on_failure &&
		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
		}
	}
	return (error);
}

static int
g_mirror_clear_metadata(struct g_mirror_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_mirror_write_metadata(disk, NULL);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s cleared.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
	return (error);
}

void
g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk,
    struct g_mirror_metadata *md)
{

	strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic));
	md->md_version = G_MIRROR_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_mid = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_slice = sc->sc_slice;
	md->md_balance = sc->sc_balance;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_MIRROR_DEVICE_FLAG_MASK);
	bzero(md->md_provider, sizeof(md->md_provider));
	if (disk == NULL) {
		md->md_did = arc4random();
		md->md_priority = 0;
		md->md_syncid = 0;
		md->md_dflags = 0;
		md->md_sync_offset = 0;
		md->md_provsize = 0;
	} else {
		md->md_did = disk->d_id;
		md->md_priority = disk->d_priority;
		md->md_syncid = disk->d_sync.ds_syncid;
		md->md_dflags = (disk->d_flags & G_MIRROR_DISK_FLAG_MASK);
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING)
			md->md_sync_offset = disk->d_sync.ds_offset_done;
		else
			md->md_sync_offset = 0;
		if ((disk->d_flags & G_MIRROR_DISK_FLAG_HARDCODED) != 0) {
			strlcpy(md->md_provider,
			    disk->d_consumer->provider->name,
			    sizeof(md->md_provider));
		}
		md->md_provsize = disk->d_consumer->provider->mediasize;
	}
}

void
g_mirror_update_metadata(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_mirror_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WIPE) == 0)
		g_mirror_fill_metadata(sc, disk, &md);
	error = g_mirror_write_metadata(disk, &md);
	if (error == 0) {
		G_MIRROR_DEBUG(2, "Metadata on %s updated.",
		    g_mirror_get_diskname(disk));
	} else {
		G_MIRROR_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_mirror_get_diskname(disk), error);
	}
}

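/*
 * Bump the synchronization ID and record it in the metadata of all
 * ACTIVE and SYNCHRONIZING components, so that components which missed
 * subsequent writes can later be recognized as stale.
 */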
static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_MIRROR_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_bump_genid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_MIRROR_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_mirror_update_metadata(disk);
		}
	}
}

static int
g_mirror_idle(struct g_mirror_softc *sc, int acw)
{
	struct g_mirror_disk *disk;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_mirror_idletime - (time_uptime - sc->sc_last_write);
		if (!g_mirror_shutdown && timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
	return (0);
}

static void
g_mirror_unidle(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
		g_mirror_update_metadata(disk);
	}
}

static void
g_mirror_flush_done(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct bio *pbp;

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	mtx_lock(&sc->sc_done_mtx);
	if (pbp->bio_error == 0)
		pbp->bio_error = bp->bio_error;
	pbp->bio_completed += bp->bio_completed;
	pbp->bio_inbed++;
	if (pbp->bio_children == pbp->bio_inbed) {
		mtx_unlock(&sc->sc_done_mtx);
		g_io_deliver(pbp, pbp->bio_error);
	} else
		mtx_unlock(&sc->sc_done_mtx);
	g_destroy_bio(bp);
}

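/*
 * Completion routine for regular requests: mark the bio as a finished
 * regular request and queue it for the worker thread.
 */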
static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_REGULAR;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_regular_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	bp->bio_from->index--;
	if (bp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
	}

	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (bp->bio_error == 0 && pbp->bio_error == 0) {
		G_MIRROR_LOGREQ(3, bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			G_MIRROR_LOGREQ(3, pbp, "Request delivered.");
			pbp->bio_completed = pbp->bio_length;
			if (pbp->bio_cmd == BIO_WRITE ||
			    pbp->bio_cmd == BIO_DELETE) {
				bioq_remove(&sc->sc_inflight, pbp);
				/* Release delayed sync requests if possible. */
				g_mirror_sync_release(sc);
			}
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	} else if (bp->bio_error != 0) {
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		if (disk != NULL) {
			if ((disk->d_flags & G_MIRROR_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_MIRROR_DISK_FLAG_BROKEN;
				G_MIRROR_LOGREQ(0, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			} else {
				G_MIRROR_LOGREQ(1, bp,
				    "Request failed (error=%d).",
				    bp->bio_error);
			}
			if (g_mirror_disconnect_on_failure &&
			    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 1) {
				sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
				g_mirror_event_send(disk,
				    G_MIRROR_DISK_STATE_DISCONNECTED,
				    G_MIRROR_EVENT_DONTWAIT);
			}
		}
		switch (pbp->bio_cmd) {
		case BIO_DELETE:
		case BIO_WRITE:
			pbp->bio_inbed--;
			pbp->bio_children--;
			break;
		}
	}
	g_destroy_bio(bp);

	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (pbp->bio_inbed < pbp->bio_children)
			break;
		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 1)
			g_io_deliver(pbp, pbp->bio_error);
		else {
			pbp->bio_error = 0;
			mtx_lock(&sc->sc_queue_mtx);
			bioq_disksort(&sc->sc_queue, pbp);
			mtx_unlock(&sc->sc_queue_mtx);
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
			wakeup(sc);
		}
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_inbed < pbp->bio_children) {
			/* Do nothing. */
			break;
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* Some requests succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		}
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_mirror_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
		break;
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags = G_MIRROR_BIO_FLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
}

static void
g_mirror_kernel_dump(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;
	struct g_kerneldump *gkd;

	/*
	 * We configure dumping to the first component, because this
	 * component will be used for reading with the 'prefer' balance
	 * algorithm.
	 * If the component with the highest priority is currently
	 * disconnected, we will not be able to read the dump after the
	 * reboot even if it is connected and synchronized later.
	 * Can we do something better?
	 */
	sc = bp->bio_to->geom->softc;
	disk = LIST_FIRST(&sc->sc_disks);

	gkd = (struct g_kerneldump *)bp->bio_data;
	if (gkd->length > bp->bio_to->mediasize)
		gkd->length = bp->bio_to->mediasize;
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	cbp->bio_done = g_std_done;
	g_io_request(cbp, disk->d_consumer);
	G_MIRROR_DEBUG(1, "Kernel dump will go to %s.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_flush(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_flush_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, the provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_MIRROR_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_mirror_flush(sc, bp);
		return;
	case BIO_GETATTR:
		if (g_handleattr_int(bp, "GEOM::candelete", 1))
			return;
		else if (strcmp("GEOM::kerneldump", bp->bio_attribute) == 0) {
			g_mirror_kernel_dump(bp);
			return;
		}
		/* FALLTHROUGH */
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_mirror_sync_collision(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING)
			continue;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			sbp = disk->d_sync.ds_bios[i];
			if (sbp == NULL)
				continue;
			sstart = sbp->bio_offset;
			send = sbp->bio_offset + sbp->bio_length;
			if (rend > sstart && rstart < send)
				return (1);
		}
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_mirror_regular_collision(struct g_mirror_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_sync.ds_ndisks == 0)
		return (0);
	sstart = sbp->bio_offset;
	send = sbp->bio_offset + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Put the request onto the delayed queue.
 */
static void
g_mirror_regular_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Put the synchronization request onto the delayed queue.
 */
static void
g_mirror_sync_delay(struct g_mirror_softc *sc, struct bio *bp)
{

	G_MIRROR_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

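/*
 * Two requests collide when their [offset, offset + length) ranges
 * overlap, as tested by g_mirror_sync_collision() and
 * g_mirror_regular_collision() above.
 */
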
/*
 * Release delayed regular requests which no longer collide with sync
 * requests.
 */
static void
g_mirror_regular_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_mirror_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_MIRROR_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Release delayed sync requests which no longer collide with regular
 * requests.
 */
static void
g_mirror_sync_release(struct g_mirror_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_mirror_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_MIRROR_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ
 * request is sent to the active provider, then a WRITE request (with the
 * data just read) to the provider being synchronized. When the WRITE is
 * finished, a new synchronization request is sent.
 */
static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_mirror_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_MIRROR);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_MIRROR_LOGREQ(3, bp,
		    "Synchronization request half-finished.");
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_mirror_disk_sync *sync;
		off_t offset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_MIRROR_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_MIRROR_BUMP_GENID;
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset >= sc->sc_mediasize ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_MIRROR);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/* Disk up-to-date, activate it. */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset;
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length;
		bp->bio_done = g_mirror_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_MIRROR_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_mirror_regular_collision(sc, bp))
			g_mirror_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_mirror_regular_release(sc);

		/* Find the smallest offset */
		offset = sc->sc_mediasize;
		for (i = 0; i < g_mirror_syncreqs; i++) {
			bp = sync->ds_bios[i];
			if (bp->bio_offset < offset)
				offset = bp->bio_offset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < offset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = offset;
			g_mirror_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static void
g_mirror_request_prefer(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE)
			break;
	}
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

static void
g_mirror_request_round_robin(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;

	disk = g_mirror_get_disk(sc);
	if (disk == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENXIO;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	g_io_request(cbp, cp);
}

#define	TRACK_SIZE	(1 * 1024 * 1024)
#define	LOAD_SCALE	256
#define	ABS(x)		(((x) >= 0) ? (x) : (-(x)))

static void
g_mirror_request_load(struct g_mirror_softc *sc, struct bio *bp)
{
	struct g_mirror_disk *disk, *dp;
	struct g_consumer *cp;
	struct bio *cbp;
	int prio, best;

	/* Find a disk with the smallest load. */
	disk = NULL;
	best = INT_MAX;
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		prio = dp->load;
		/* If disk head is precisely in position - highly prefer it. */
		if (dp->d_last_offset == bp->bio_offset)
			prio -= 2 * LOAD_SCALE;
		else
		/* If disk head is close to position - prefer it. */
		if (ABS(dp->d_last_offset - bp->bio_offset) < TRACK_SIZE)
			prio -= 1 * LOAD_SCALE;
		if (prio <= best) {
			disk = dp;
			best = prio;
		}
	}
	KASSERT(disk != NULL, ("NULL disk for %s.", sc->sc_name));
	cbp = g_clone_bio(bp);
	if (cbp == NULL) {
		if (bp->bio_error == 0)
			bp->bio_error = ENOMEM;
		g_io_deliver(bp, bp->bio_error);
		return;
	}
	/*
	 * Fill in the component buf structure.
	 */
	cp = disk->d_consumer;
	cbp->bio_done = g_mirror_done;
	cbp->bio_to = cp->provider;
	G_MIRROR_LOGREQ(3, cbp, "Sending request.");
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	cp->index++;
	/* Remember last head position */
	disk->d_last_offset = bp->bio_offset + bp->bio_length;
	/* Update loads. */
	LIST_FOREACH(dp, &sc->sc_disks, d_next) {
		dp->load = (dp->d_consumer->index * LOAD_SCALE +
		    dp->load * 7) / 8;
	}
	g_io_request(cbp, cp);
}

static void
g_mirror_request_split(struct g_mirror_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_mirror_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	off_t left, mod, offset, slice;
	u_char *data;
	u_int ndisks;

	if (bp->bio_length <= sc->sc_slice) {
		g_mirror_request_round_robin(sc, bp);
		return;
	}
	ndisks = g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE);
	slice = bp->bio_length / ndisks;
	mod = slice % sc->sc_provider->sectorsize;
	if (mod != 0)
		slice += sc->sc_provider->sectorsize - mod;
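	/*
	 * Each active disk thus receives roughly bio_length / ndisks of
	 * the request, rounded up to a full sector.
	 */
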
	/*
	 * Allocate all bios before sending any request, so we can
	 * return ENOMEM in a nice and clean way.
	 */
	left = bp->bio_length;
	offset = bp->bio_offset;
	data = bp->bio_data;
	bioq_init(&queue);
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			while ((cbp = bioq_takefirst(&queue)) != NULL)
				g_destroy_bio(cbp);
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		cbp->bio_offset = offset;
		cbp->bio_data = data;
		cbp->bio_length = MIN(left, slice);
		left -= cbp->bio_length;
		if (left == 0)
			break;
		offset += cbp->bio_length;
		data += cbp->bio_length;
	}
	while ((cbp = bioq_takefirst(&queue)) != NULL) {
		G_MIRROR_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		disk->d_consumer->index++;
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		switch (sc->sc_balance) {
		case G_MIRROR_BALANCE_LOAD:
			g_mirror_request_load(sc, bp);
			break;
		case G_MIRROR_BALANCE_PREFER:
			g_mirror_request_prefer(sc, bp);
			break;
		case G_MIRROR_BALANCE_ROUND_ROBIN:
			g_mirror_request_round_robin(sc, bp);
			break;
		case G_MIRROR_BALANCE_SPLIT:
			g_mirror_request_split(sc, bp);
			break;
		}
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct g_mirror_disk *disk;
		struct g_mirror_disk_sync *sync;
		struct bio_queue_head queue;
		struct g_consumer *cp;
		struct bio *cbp;

		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_mirror_sync_collision(sc, bp)) {
			g_mirror_regular_delay(sc, bp);
			return;
		}

		if (sc->sc_idle)
			g_mirror_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
		 */
		bioq_init(&queue);
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			sync = &disk->d_sync;
			switch (disk->d_state) {
			case G_MIRROR_DISK_STATE_ACTIVE:
				break;
			case G_MIRROR_DISK_STATE_SYNCHRONIZING:
				if (bp->bio_offset >= sync->ds_offset)
					continue;
				break;
			default:
				continue;
			}
			if (bp->bio_cmd == BIO_DELETE &&
			    (disk->d_flags & G_MIRROR_DISK_FLAG_CANDELETE) == 0)
				continue;
			cbp = g_clone_bio(bp);
			if (cbp == NULL) {
				while ((cbp = bioq_takefirst(&queue)) != NULL)
					g_destroy_bio(cbp);
				if (bp->bio_error == 0)
					bp->bio_error = ENOMEM;
				g_io_deliver(bp, bp->bio_error);
				return;
			}
			bioq_insert_tail(&queue, cbp);
			cbp->bio_done = g_mirror_done;
			cp = disk->d_consumer;
			cbp->bio_caller1 = cp;
			cbp->bio_to = cp->provider;
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
		}
		while ((cbp = bioq_takefirst(&queue)) != NULL) {
			G_MIRROR_LOGREQ(3, cbp, "Sending request.");
			cp = cbp->bio_caller1;
			cbp->bio_caller1 = NULL;
			cp->index++;
			sc->sc_writes++;
			g_io_request(cbp, cp);
		}
		/*
		 * Put the request onto the inflight queue, so we can check
		 * whether new synchronization requests collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, bp);
		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID;
			g_mirror_bump_syncid(sc);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_mirror_can_destroy(struct g_mirror_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_TASTING) != 0)
		return (0);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_mirror_is_busy(sc, cp))
			return (0);
	}
	G_MIRROR_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_mirror_try_destroy(struct g_mirror_softc *sc)
{

	if (sc->sc_rootmount != NULL) {
		G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}
	g_topology_lock();
	if (!g_mirror_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_mirror_destroy_device(sc);
		free(sc, M_MIRROR);
	}
	return (1);
}

/*
 * Worker thread: handle events first, then incoming regular and
 * synchronization I/O requests.
 */
static void
g_mirror_worker(void *arg)
{
	struct g_mirror_softc *sc;
	struct g_mirror_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_MIRROR_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_mirror_event_get(sc);
		if (ep != NULL) {
			g_mirror_event_remove(sc, ep);
			if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_MIRROR_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_mirror_update_device(sc, 1);
			} else {
				/* Update disk status. */
				G_MIRROR_DEBUG(3, "Running event for disk %s.",
				    g_mirror_get_diskname(ep->e_disk));
				ep->e_error = g_mirror_update_disk(ep->e_disk,
				    ep->e_state);
				if (ep->e_error == 0)
					g_mirror_update_device(sc, 0);
			}
			if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) {
				KASSERT(ep->e_error == 0,
				    ("Error cannot be handled."));
				g_mirror_event_free(ep);
			} else {
				ep->e_flags |= G_MIRROR_EVENT_DONE;
				G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__,
				    ep);
				mtx_lock(&sc->sc_events_mtx);
				wakeup(ep);
				mtx_unlock(&sc->sc_events_mtx);
			}
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
			}
			G_MIRROR_DEBUG(5, "%s: I'm here 1.", __func__);
			continue;
		}
		/*
		 * Check whether we can mark the array as CLEAN and, if we
		 * can't, how many seconds we should wait.
		 */
		timeout = g_mirror_idle(sc, -1);
		/*
		 * Now I/O requests.
		 */
		/* Get first request from the queue. */
		mtx_lock(&sc->sc_queue_mtx);
		bp = bioq_first(&sc->sc_queue);
		if (bp == NULL) {
			if ((sc->sc_flags &
			    G_MIRROR_DEVICE_FLAG_DESTROY) != 0) {
				mtx_unlock(&sc->sc_queue_mtx);
				if (g_mirror_try_destroy(sc)) {
					curthread->td_pflags &= ~TDP_GEOM;
					G_MIRROR_DEBUG(1, "Thread exiting.");
					kproc_exit(0);
				}
				mtx_lock(&sc->sc_queue_mtx);
			}
			sx_xunlock(&sc->sc_lock);
			/*
			 * XXX: We can miss an event here, because an event
			 * can be added without the sx-device-lock and without
			 * the mtx-queue-lock. Maybe I should just stop using
			 * a dedicated mutex for event synchronization and
			 * stick with the queue lock?
			 * The event will hang here until the next I/O request
			 * or the next event arrives.
			 */
			MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "m:w1",
			    timeout * hz);
			sx_xlock(&sc->sc_lock);
			G_MIRROR_DEBUG(5, "%s: I'm here 4.", __func__);
			continue;
		}
		bioq_remove(&sc->sc_queue, bp);
		mtx_unlock(&sc->sc_queue_mtx);

		if (bp->bio_from->geom == sc->sc_sync.ds_geom &&
		    (bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0) {
			g_mirror_sync_request(bp);	/* READ */
		} else if (bp->bio_to != sc->sc_provider) {
			if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_REGULAR) != 0)
				g_mirror_regular_request(bp);
			else if ((bp->bio_cflags & G_MIRROR_BIO_FLAG_SYNC) != 0)
				g_mirror_sync_request(bp);	/* WRITE */
			else {
				KASSERT(0,
				    ("Invalid request cflags=0x%hhx to=%s.",
				    bp->bio_cflags, bp->bio_to->name));
			}
		} else {
			g_mirror_register_request(bp);
		}
		G_MIRROR_DEBUG(5, "%s: I'm here 9.", __func__);
	}
}

static void
g_mirror_update_idle(struct g_mirror_softc *sc, struct g_mirror_disk *disk)
{

	sx_assert(&sc->sc_lock, SX_LOCKED);

	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	if (!sc->sc_idle && (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) == 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	} else if (sc->sc_idle &&
	    (disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_mirror_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
	}
}

static void
g_mirror_sync_start(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct g_consumer *cp;
	struct bio *bp;
	int error, i;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));
	KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Device not in RUNNING state (%s, %u).", sc->sc_name,
	    sc->sc_state));

	sx_xunlock(&sc->sc_lock);
	g_topology_lock();
	cp = g_new_consumer(sc->sc_sync.ds_geom);
	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	error = g_attach(cp, sc->sc_provider);
	KASSERT(error == 0,
	    ("Cannot attach to %s (error=%d).", sc->sc_name, error));
	error = g_access(cp, 1, 0, 0);
	KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error));
	g_topology_unlock();
	sx_xlock(&sc->sc_lock);

	G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name,
	    g_mirror_get_diskname(disk));
	if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOFAILSYNC) == 0)
		disk->d_flags |= G_MIRROR_DISK_FLAG_DIRTY;
	KASSERT(disk->d_sync.ds_consumer == NULL,
	    ("Sync consumer already exists (device=%s, disk=%s).",
	    sc->sc_name, g_mirror_get_diskname(disk)));

	disk->d_sync.ds_consumer = cp;
	disk->d_sync.ds_consumer->private = disk;
	disk->d_sync.ds_consumer->index = 0;
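
	/*
	 * g_mirror_syncreqs (the kern.geom.mirror.sync_requests tunable)
	 * read/write pairs are kept in flight per synchronizing disk.
	 */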
1962 */ 1963 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_mirror_syncreqs, 1964 M_MIRROR, M_WAITOK); 1965 for (i = 0; i < g_mirror_syncreqs; i++) { 1966 bp = g_alloc_bio(); 1967 disk->d_sync.ds_bios[i] = bp; 1968 bp->bio_parent = NULL; 1969 bp->bio_cmd = BIO_READ; 1970 bp->bio_data = malloc(MAXPHYS, M_MIRROR, M_WAITOK); 1971 bp->bio_cflags = 0; 1972 bp->bio_offset = disk->d_sync.ds_offset; 1973 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 1974 disk->d_sync.ds_offset += bp->bio_length; 1975 bp->bio_done = g_mirror_sync_done; 1976 bp->bio_from = disk->d_sync.ds_consumer; 1977 bp->bio_to = sc->sc_provider; 1978 bp->bio_caller1 = (void *)(uintptr_t)i; 1979 } 1980 1981 /* Increase the number of disks in SYNCHRONIZING state. */ 1982 sc->sc_sync.ds_ndisks++; 1983 /* Set the number of in-flight synchronization requests. */ 1984 disk->d_sync.ds_inflight = g_mirror_syncreqs; 1985 1986 /* 1987 * Fire off first synchronization requests. 1988 */ 1989 for (i = 0; i < g_mirror_syncreqs; i++) { 1990 bp = disk->d_sync.ds_bios[i]; 1991 G_MIRROR_LOGREQ(3, bp, "Sending synchronization request."); 1992 disk->d_sync.ds_consumer->index++; 1993 /* 1994 * Delay the request if it is colliding with a regular request. 1995 */ 1996 if (g_mirror_regular_collision(sc, bp)) 1997 g_mirror_sync_delay(sc, bp); 1998 else 1999 g_io_request(bp, disk->d_sync.ds_consumer); 2000 } 2001 } 2002 2003 /* 2004 * Stop synchronization process. 2005 * type: 0 - synchronization finished 2006 * 1 - synchronization stopped 2007 */ 2008 static void 2009 g_mirror_sync_stop(struct g_mirror_disk *disk, int type) 2010 { 2011 struct g_mirror_softc *sc; 2012 struct g_consumer *cp; 2013 2014 g_topology_assert_not(); 2015 sc = disk->d_softc; 2016 sx_assert(&sc->sc_lock, SX_LOCKED); 2017 2018 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2019 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2020 g_mirror_disk_state2str(disk->d_state))); 2021 if (disk->d_sync.ds_consumer == NULL) 2022 return; 2023 2024 if (type == 0) { 2025 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2026 sc->sc_name, g_mirror_get_diskname(disk)); 2027 } else /* if (type == 1) */ { 2028 G_MIRROR_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2029 sc->sc_name, g_mirror_get_diskname(disk)); 2030 } 2031 free(disk->d_sync.ds_bios, M_MIRROR); 2032 disk->d_sync.ds_bios = NULL; 2033 cp = disk->d_sync.ds_consumer; 2034 disk->d_sync.ds_consumer = NULL; 2035 disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY; 2036 sc->sc_sync.ds_ndisks--; 2037 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. 
*/ 2038 g_topology_lock(); 2039 g_mirror_kill_consumer(sc, cp); 2040 g_topology_unlock(); 2041 sx_xlock(&sc->sc_lock); 2042 } 2043 2044 static void 2045 g_mirror_launch_provider(struct g_mirror_softc *sc) 2046 { 2047 struct g_mirror_disk *disk; 2048 struct g_provider *pp, *dp; 2049 2050 sx_assert(&sc->sc_lock, SX_LOCKED); 2051 2052 g_topology_lock(); 2053 pp = g_new_providerf(sc->sc_geom, "mirror/%s", sc->sc_name); 2054 pp->flags |= G_PF_DIRECT_RECEIVE; 2055 pp->mediasize = sc->sc_mediasize; 2056 pp->sectorsize = sc->sc_sectorsize; 2057 pp->stripesize = 0; 2058 pp->stripeoffset = 0; 2059 2060 /* Splitting of unmapped BIO's could work but isn't implemented now */ 2061 if (sc->sc_balance != G_MIRROR_BALANCE_SPLIT) 2062 pp->flags |= G_PF_ACCEPT_UNMAPPED; 2063 2064 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2065 if (disk->d_consumer && disk->d_consumer->provider) { 2066 dp = disk->d_consumer->provider; 2067 if (dp->stripesize > pp->stripesize) { 2068 pp->stripesize = dp->stripesize; 2069 pp->stripeoffset = dp->stripeoffset; 2070 } 2071 /* A provider underneath us doesn't support unmapped */ 2072 if ((dp->flags & G_PF_ACCEPT_UNMAPPED) == 0) { 2073 G_MIRROR_DEBUG(0, "Cancelling unmapped " 2074 "because of %s.", dp->name); 2075 pp->flags &= ~G_PF_ACCEPT_UNMAPPED; 2076 } 2077 } 2078 } 2079 sc->sc_provider = pp; 2080 g_error_provider(pp, 0); 2081 g_topology_unlock(); 2082 G_MIRROR_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2083 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE), sc->sc_ndisks); 2084 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2085 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2086 g_mirror_sync_start(disk); 2087 } 2088 } 2089 2090 static void 2091 g_mirror_destroy_provider(struct g_mirror_softc *sc) 2092 { 2093 struct g_mirror_disk *disk; 2094 struct bio *bp; 2095 2096 g_topology_assert_not(); 2097 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2098 sc->sc_name)); 2099 2100 g_topology_lock(); 2101 g_error_provider(sc->sc_provider, ENXIO); 2102 mtx_lock(&sc->sc_queue_mtx); 2103 while ((bp = bioq_takefirst(&sc->sc_queue)) != NULL) 2104 g_io_deliver(bp, ENXIO); 2105 mtx_unlock(&sc->sc_queue_mtx); 2106 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2107 sc->sc_provider->name); 2108 sc->sc_provider->flags |= G_PF_WITHER; 2109 g_orphan_provider(sc->sc_provider, ENXIO); 2110 g_topology_unlock(); 2111 sc->sc_provider = NULL; 2112 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2113 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) 2114 g_mirror_sync_stop(disk, 1); 2115 } 2116 } 2117 2118 static void 2119 g_mirror_go(void *arg) 2120 { 2121 struct g_mirror_softc *sc; 2122 2123 sc = arg; 2124 G_MIRROR_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2125 g_mirror_event_send(sc, 0, 2126 G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); 2127 } 2128 2129 static u_int 2130 g_mirror_determine_state(struct g_mirror_disk *disk) 2131 { 2132 struct g_mirror_softc *sc; 2133 u_int state; 2134 2135 sc = disk->d_softc; 2136 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2137 if ((disk->d_flags & 2138 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2139 /* Disk does not need synchronization. */ 2140 state = G_MIRROR_DISK_STATE_ACTIVE; 2141 } else { 2142 if ((sc->sc_flags & 2143 G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2144 (disk->d_flags & 2145 G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2146 /* 2147 * We can start synchronization from 2148 * the stored offset. 
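				 * (That is the offset saved in the metadata
				 * by a previous, interrupted synchronization,
				 * so the rebuild resumes there instead of
				 * restarting from zero.)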
2149 */ 2150 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2151 } else { 2152 state = G_MIRROR_DISK_STATE_STALE; 2153 } 2154 } 2155 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2156 /* 2157 * Reset all synchronization data for this disk, 2158 * because if it even was synchronized, it was 2159 * synchronized to disks with different syncid. 2160 */ 2161 disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2162 disk->d_sync.ds_offset = 0; 2163 disk->d_sync.ds_offset_done = 0; 2164 disk->d_sync.ds_syncid = sc->sc_syncid; 2165 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2166 (disk->d_flags & G_MIRROR_DISK_FLAG_FORCE_SYNC) != 0) { 2167 state = G_MIRROR_DISK_STATE_SYNCHRONIZING; 2168 } else { 2169 state = G_MIRROR_DISK_STATE_STALE; 2170 } 2171 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2172 /* 2173 * Not good, NOT GOOD! 2174 * It means that mirror was started on stale disks 2175 * and more fresh disk just arrive. 2176 * If there were writes, mirror is broken, sorry. 2177 * I think the best choice here is don't touch 2178 * this disk and inform the user loudly. 2179 */ 2180 G_MIRROR_DEBUG(0, "Device %s was started before the freshest " 2181 "disk (%s) arrives!! It will not be connected to the " 2182 "running device.", sc->sc_name, 2183 g_mirror_get_diskname(disk)); 2184 g_mirror_destroy_disk(disk); 2185 state = G_MIRROR_DISK_STATE_NONE; 2186 /* Return immediately, because disk was destroyed. */ 2187 return (state); 2188 } 2189 G_MIRROR_DEBUG(3, "State for %s disk: %s.", 2190 g_mirror_get_diskname(disk), g_mirror_disk_state2str(state)); 2191 return (state); 2192 } 2193 2194 /* 2195 * Update device state. 2196 */ 2197 static void 2198 g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force) 2199 { 2200 struct g_mirror_disk *disk; 2201 u_int state; 2202 2203 sx_assert(&sc->sc_lock, SX_XLOCKED); 2204 2205 switch (sc->sc_state) { 2206 case G_MIRROR_DEVICE_STATE_STARTING: 2207 { 2208 struct g_mirror_disk *pdisk, *tdisk; 2209 u_int dirty, ndisks, genid, syncid; 2210 2211 KASSERT(sc->sc_provider == NULL, 2212 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2213 /* 2214 * Are we ready? We are, if all disks are connected or 2215 * if we have any disks and 'force' is true. 2216 */ 2217 ndisks = g_mirror_ndisks(sc, -1); 2218 if (sc->sc_ndisks == ndisks || (force && ndisks > 0)) { 2219 ; 2220 } else if (ndisks == 0) { 2221 /* 2222 * Disks went down in starting phase, so destroy 2223 * device. 2224 */ 2225 callout_drain(&sc->sc_callout); 2226 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2227 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2228 sc->sc_rootmount); 2229 root_mount_rel(sc->sc_rootmount); 2230 sc->sc_rootmount = NULL; 2231 return; 2232 } else { 2233 return; 2234 } 2235 2236 /* 2237 * Activate all disks with the biggest syncid. 2238 */ 2239 if (force) { 2240 /* 2241 * If 'force' is true, we have been called due to 2242 * timeout, so don't bother canceling timeout. 2243 */ 2244 ndisks = 0; 2245 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2246 if ((disk->d_flags & 2247 G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { 2248 ndisks++; 2249 } 2250 } 2251 if (ndisks == 0) { 2252 /* No valid disks found, destroy device. */ 2253 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 2254 G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p", 2255 __LINE__, sc->sc_rootmount); 2256 root_mount_rel(sc->sc_rootmount); 2257 sc->sc_rootmount = NULL; 2258 return; 2259 } 2260 } else { 2261 /* Cancel timeout. 
*/ 2262 callout_drain(&sc->sc_callout); 2263 } 2264 2265 /* 2266 * Find the biggest genid. 2267 */ 2268 genid = 0; 2269 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2270 if (disk->d_genid > genid) 2271 genid = disk->d_genid; 2272 } 2273 sc->sc_genid = genid; 2274 /* 2275 * Remove all disks without the biggest genid. 2276 */ 2277 LIST_FOREACH_SAFE(disk, &sc->sc_disks, d_next, tdisk) { 2278 if (disk->d_genid < genid) { 2279 G_MIRROR_DEBUG(0, 2280 "Component %s (device %s) broken, skipping.", 2281 g_mirror_get_diskname(disk), sc->sc_name); 2282 g_mirror_destroy_disk(disk); 2283 } 2284 } 2285 2286 /* 2287 * Find the biggest syncid. 2288 */ 2289 syncid = 0; 2290 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2291 if (disk->d_sync.ds_syncid > syncid) 2292 syncid = disk->d_sync.ds_syncid; 2293 } 2294 2295 /* 2296 * Here we need to look for dirty disks and if all disks 2297 * with the biggest syncid are dirty, we have to choose 2298 * one with the biggest priority and rebuild the rest. 2299 */ 2300 /* 2301 * Find the number of dirty disks with the biggest syncid. 2302 * Find the number of disks with the biggest syncid. 2303 * While here, find a disk with the biggest priority. 2304 */ 2305 dirty = ndisks = 0; 2306 pdisk = NULL; 2307 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2308 if (disk->d_sync.ds_syncid != syncid) 2309 continue; 2310 if ((disk->d_flags & 2311 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2312 continue; 2313 } 2314 ndisks++; 2315 if ((disk->d_flags & G_MIRROR_DISK_FLAG_DIRTY) != 0) { 2316 dirty++; 2317 if (pdisk == NULL || 2318 pdisk->d_priority < disk->d_priority) { 2319 pdisk = disk; 2320 } 2321 } 2322 } 2323 if (dirty == 0) { 2324 /* No dirty disks at all, great. */ 2325 } else if (dirty == ndisks) { 2326 /* 2327 * Force synchronization for all dirty disks except one 2328 * with the biggest priority. 2329 */ 2330 KASSERT(pdisk != NULL, ("pdisk == NULL")); 2331 G_MIRROR_DEBUG(1, "Using disk %s (device %s) as a " 2332 "master disk for synchronization.", 2333 g_mirror_get_diskname(pdisk), sc->sc_name); 2334 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2335 if (disk->d_sync.ds_syncid != syncid) 2336 continue; 2337 if ((disk->d_flags & 2338 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2339 continue; 2340 } 2341 KASSERT((disk->d_flags & 2342 G_MIRROR_DISK_FLAG_DIRTY) != 0, 2343 ("Disk %s isn't marked as dirty.", 2344 g_mirror_get_diskname(disk))); 2345 /* Skip the disk with the biggest priority. */ 2346 if (disk == pdisk) 2347 continue; 2348 disk->d_sync.ds_syncid = 0; 2349 } 2350 } else if (dirty < ndisks) { 2351 /* 2352 * Force synchronization for all dirty disks. 2353 * We have some non-dirty disks. 2354 */ 2355 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2356 if (disk->d_sync.ds_syncid != syncid) 2357 continue; 2358 if ((disk->d_flags & 2359 G_MIRROR_DISK_FLAG_SYNCHRONIZING) != 0) { 2360 continue; 2361 } 2362 if ((disk->d_flags & 2363 G_MIRROR_DISK_FLAG_DIRTY) == 0) { 2364 continue; 2365 } 2366 disk->d_sync.ds_syncid = 0; 2367 } 2368 } 2369 2370 /* Reset hint. */ 2371 sc->sc_hint = NULL; 2372 sc->sc_syncid = syncid; 2373 if (force) { 2374 /* Remember to bump syncid on first write. 
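			 * Deferring the bump to the first write avoids a
			 * metadata update on a mirror that is never written
			 * to; once the syncid changes, components that are
			 * still missing will no longer match it and will be
			 * resynchronized when they reappear.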
			 */
2375 			sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2376 		}
2377 		state = G_MIRROR_DEVICE_STATE_RUNNING;
2378 		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
2379 		    sc->sc_name, g_mirror_device_state2str(sc->sc_state),
2380 		    g_mirror_device_state2str(state));
2381 		sc->sc_state = state;
2382 		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
2383 			state = g_mirror_determine_state(disk);
2384 			g_mirror_event_send(disk, state,
2385 			    G_MIRROR_EVENT_DONTWAIT);
2386 			if (state == G_MIRROR_DISK_STATE_STALE)
2387 				sc->sc_bump_id |= G_MIRROR_BUMP_SYNCID;
2388 		}
2389 		break;
2390 	    }
2391 	case G_MIRROR_DEVICE_STATE_RUNNING:
2392 		if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) == 0 &&
2393 		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2394 			/*
2395 			 * No active disks or no disks at all,
2396 			 * so destroy the device.
2397 			 */
2398 			if (sc->sc_provider != NULL)
2399 				g_mirror_destroy_provider(sc);
2400 			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
2401 			break;
2402 		} else if (g_mirror_ndisks(sc,
2403 		    G_MIRROR_DISK_STATE_ACTIVE) > 0 &&
2404 		    g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_NEW) == 0) {
2405 			/*
2406 			 * We have active disks, so launch the provider if it
2407 			 * doesn't exist yet.
2408 			 */
2409 			if (sc->sc_provider == NULL)
2410 				g_mirror_launch_provider(sc);
2411 			if (sc->sc_rootmount != NULL) {
2412 				G_MIRROR_DEBUG(1, "root_mount_rel[%u] %p",
2413 				    __LINE__, sc->sc_rootmount);
2414 				root_mount_rel(sc->sc_rootmount);
2415 				sc->sc_rootmount = NULL;
2416 			}
2417 		}
2418 		/*
2419 		 * Genid should be bumped immediately, so do it here.
2420 		 */
2421 		if ((sc->sc_bump_id & G_MIRROR_BUMP_GENID) != 0) {
2422 			sc->sc_bump_id &= ~G_MIRROR_BUMP_GENID;
2423 			g_mirror_bump_genid(sc);
2424 		}
2425 		break;
2426 	default:
2427 		KASSERT(1 == 0, ("Wrong device state (%s, %s).",
2428 		    sc->sc_name, g_mirror_device_state2str(sc->sc_state)));
2429 		break;
2430 	}
2431 }
2432 
2433 /*
2434  * Update disk state and device state if needed.
2435  */
2436 #define DISK_STATE_CHANGED()	G_MIRROR_DEBUG(1,			\
2437 	"Disk %s state changed from %s to %s (device %s).",		\
2438 	g_mirror_get_diskname(disk),					\
2439 	g_mirror_disk_state2str(disk->d_state),				\
2440 	g_mirror_disk_state2str(state), sc->sc_name)
2441 static int
2442 g_mirror_update_disk(struct g_mirror_disk *disk, u_int state)
2443 {
2444 	struct g_mirror_softc *sc;
2445 
2446 	sc = disk->d_softc;
2447 	sx_assert(&sc->sc_lock, SX_XLOCKED);
2448 
2449 again:
2450 	G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.",
2451 	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state),
2452 	    g_mirror_disk_state2str(state));
2453 	switch (state) {
2454 	case G_MIRROR_DISK_STATE_NEW:
2455 		/*
2456 		 * Possible scenarios:
2457 		 * 1. A new disk arrives.
2458 		 */
2459 		/* Previous state should be NONE.
*/ 2460 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, 2461 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2462 g_mirror_disk_state2str(disk->d_state))); 2463 DISK_STATE_CHANGED(); 2464 2465 disk->d_state = state; 2466 if (LIST_EMPTY(&sc->sc_disks)) 2467 LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); 2468 else { 2469 struct g_mirror_disk *dp; 2470 2471 LIST_FOREACH(dp, &sc->sc_disks, d_next) { 2472 if (disk->d_priority >= dp->d_priority) { 2473 LIST_INSERT_BEFORE(dp, disk, d_next); 2474 dp = NULL; 2475 break; 2476 } 2477 if (LIST_NEXT(dp, d_next) == NULL) 2478 break; 2479 } 2480 if (dp != NULL) 2481 LIST_INSERT_AFTER(dp, disk, d_next); 2482 } 2483 G_MIRROR_DEBUG(1, "Device %s: provider %s detected.", 2484 sc->sc_name, g_mirror_get_diskname(disk)); 2485 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 2486 break; 2487 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2488 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2489 g_mirror_device_state2str(sc->sc_state), 2490 g_mirror_get_diskname(disk), 2491 g_mirror_disk_state2str(disk->d_state))); 2492 state = g_mirror_determine_state(disk); 2493 if (state != G_MIRROR_DISK_STATE_NONE) 2494 goto again; 2495 break; 2496 case G_MIRROR_DISK_STATE_ACTIVE: 2497 /* 2498 * Possible scenarios: 2499 * 1. New disk does not need synchronization. 2500 * 2. Synchronization process finished successfully. 2501 */ 2502 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2503 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2504 g_mirror_device_state2str(sc->sc_state), 2505 g_mirror_get_diskname(disk), 2506 g_mirror_disk_state2str(disk->d_state))); 2507 /* Previous state should be NEW or SYNCHRONIZING. */ 2508 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || 2509 disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, 2510 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2511 g_mirror_disk_state2str(disk->d_state))); 2512 DISK_STATE_CHANGED(); 2513 2514 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2515 disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; 2516 disk->d_flags &= ~G_MIRROR_DISK_FLAG_FORCE_SYNC; 2517 g_mirror_sync_stop(disk, 0); 2518 } 2519 disk->d_state = state; 2520 disk->d_sync.ds_offset = 0; 2521 disk->d_sync.ds_offset_done = 0; 2522 g_mirror_update_idle(sc, disk); 2523 g_mirror_update_metadata(disk); 2524 G_MIRROR_DEBUG(1, "Device %s: provider %s activated.", 2525 sc->sc_name, g_mirror_get_diskname(disk)); 2526 break; 2527 case G_MIRROR_DISK_STATE_STALE: 2528 /* 2529 * Possible scenarios: 2530 * 1. Stale disk was connected. 2531 */ 2532 /* Previous state should be NEW. */ 2533 KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, 2534 ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), 2535 g_mirror_disk_state2str(disk->d_state))); 2536 KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, 2537 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2538 g_mirror_device_state2str(sc->sc_state), 2539 g_mirror_get_diskname(disk), 2540 g_mirror_disk_state2str(disk->d_state))); 2541 /* 2542 * STALE state is only possible if device is marked 2543 * NOAUTOSYNC. 
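		 * With autosync enabled, g_mirror_determine_state() sends
		 * such a disk straight to SYNCHRONIZING instead.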
2544 		 */
2545 		KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_NOAUTOSYNC) != 0,
2546 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2547 		    g_mirror_device_state2str(sc->sc_state),
2548 		    g_mirror_get_diskname(disk),
2549 		    g_mirror_disk_state2str(disk->d_state)));
2550 		DISK_STATE_CHANGED();
2551 
2552 		disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2553 		disk->d_state = state;
2554 		g_mirror_update_metadata(disk);
2555 		G_MIRROR_DEBUG(0, "Device %s: provider %s is stale.",
2556 		    sc->sc_name, g_mirror_get_diskname(disk));
2557 		break;
2558 	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
2559 		/*
2560 		 * Possible scenarios:
2561 		 * 1. A disk which needs synchronization was connected.
2562 		 */
2563 		/* Previous state should be NEW. */
2564 		KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2565 		    ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk),
2566 		    g_mirror_disk_state2str(disk->d_state)));
2567 		KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
2568 		    ("Wrong device state (%s, %s, %s, %s).", sc->sc_name,
2569 		    g_mirror_device_state2str(sc->sc_state),
2570 		    g_mirror_get_diskname(disk),
2571 		    g_mirror_disk_state2str(disk->d_state)));
2572 		DISK_STATE_CHANGED();
2573 
2574 		if (disk->d_state == G_MIRROR_DISK_STATE_NEW)
2575 			disk->d_flags &= ~G_MIRROR_DISK_FLAG_DIRTY;
2576 		disk->d_state = state;
2577 		if (sc->sc_provider != NULL) {
2578 			g_mirror_sync_start(disk);
2579 			g_mirror_update_metadata(disk);
2580 		}
2581 		break;
2582 	case G_MIRROR_DISK_STATE_DISCONNECTED:
2583 		/*
2584 		 * Possible scenarios:
2585 		 * 1. The device wasn't running yet, but a disk disappeared.
2586 		 * 2. A disk was active and disappeared.
2587 		 * 3. A disk disappeared during the synchronization process.
2588 		 */
2589 		if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) {
2590 			/*
2591 			 * Previous state should be ACTIVE, STALE or
2592 			 * SYNCHRONIZING.
2593 			 */
2594 			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
2595 			    disk->d_state == G_MIRROR_DISK_STATE_STALE ||
2596 			    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
2597 			    ("Wrong disk state (%s, %s).",
2598 			    g_mirror_get_diskname(disk),
2599 			    g_mirror_disk_state2str(disk->d_state)));
2600 		} else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) {
2601 			/* Previous state should be NEW. */
2602 			KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW,
2603 			    ("Wrong disk state (%s, %s).",
2604 			    g_mirror_get_diskname(disk),
2605 			    g_mirror_disk_state2str(disk->d_state)));
2606 			/*
2607 			 * Reset syncid bumping if a disk disappeared in the
2608 			 * STARTING state.
2609 */ 2610 if ((sc->sc_bump_id & G_MIRROR_BUMP_SYNCID) != 0) 2611 sc->sc_bump_id &= ~G_MIRROR_BUMP_SYNCID; 2612 #ifdef INVARIANTS 2613 } else { 2614 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2615 sc->sc_name, 2616 g_mirror_device_state2str(sc->sc_state), 2617 g_mirror_get_diskname(disk), 2618 g_mirror_disk_state2str(disk->d_state))); 2619 #endif 2620 } 2621 DISK_STATE_CHANGED(); 2622 G_MIRROR_DEBUG(0, "Device %s: provider %s disconnected.", 2623 sc->sc_name, g_mirror_get_diskname(disk)); 2624 2625 g_mirror_destroy_disk(disk); 2626 break; 2627 case G_MIRROR_DISK_STATE_DESTROY: 2628 { 2629 int error; 2630 2631 error = g_mirror_clear_metadata(disk); 2632 if (error != 0) 2633 return (error); 2634 DISK_STATE_CHANGED(); 2635 G_MIRROR_DEBUG(0, "Device %s: provider %s destroyed.", 2636 sc->sc_name, g_mirror_get_diskname(disk)); 2637 2638 g_mirror_destroy_disk(disk); 2639 sc->sc_ndisks--; 2640 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2641 g_mirror_update_metadata(disk); 2642 } 2643 break; 2644 } 2645 default: 2646 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2647 break; 2648 } 2649 return (0); 2650 } 2651 #undef DISK_STATE_CHANGED 2652 2653 int 2654 g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) 2655 { 2656 struct g_provider *pp; 2657 u_char *buf; 2658 int error; 2659 2660 g_topology_assert(); 2661 2662 error = g_access(cp, 1, 0, 0); 2663 if (error != 0) 2664 return (error); 2665 pp = cp->provider; 2666 g_topology_unlock(); 2667 /* Metadata are stored on last sector. */ 2668 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2669 &error); 2670 g_topology_lock(); 2671 g_access(cp, -1, 0, 0); 2672 if (buf == NULL) { 2673 G_MIRROR_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2674 cp->provider->name, error); 2675 return (error); 2676 } 2677 2678 /* Decode metadata. 
*/ 2679 error = mirror_metadata_decode(buf, md); 2680 g_free(buf); 2681 if (strcmp(md->md_magic, G_MIRROR_MAGIC) != 0) 2682 return (EINVAL); 2683 if (md->md_version > G_MIRROR_VERSION) { 2684 G_MIRROR_DEBUG(0, 2685 "Kernel module is too old to handle metadata from %s.", 2686 cp->provider->name); 2687 return (EINVAL); 2688 } 2689 if (error != 0) { 2690 G_MIRROR_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2691 cp->provider->name); 2692 return (error); 2693 } 2694 2695 return (0); 2696 } 2697 2698 static int 2699 g_mirror_check_metadata(struct g_mirror_softc *sc, struct g_provider *pp, 2700 struct g_mirror_metadata *md) 2701 { 2702 2703 if (g_mirror_id2disk(sc, md->md_did) != NULL) { 2704 G_MIRROR_DEBUG(1, "Disk %s (id=%u) already exists, skipping.", 2705 pp->name, md->md_did); 2706 return (EEXIST); 2707 } 2708 if (md->md_all != sc->sc_ndisks) { 2709 G_MIRROR_DEBUG(1, 2710 "Invalid '%s' field on disk %s (device %s), skipping.", 2711 "md_all", pp->name, sc->sc_name); 2712 return (EINVAL); 2713 } 2714 if (md->md_slice != sc->sc_slice) { 2715 G_MIRROR_DEBUG(1, 2716 "Invalid '%s' field on disk %s (device %s), skipping.", 2717 "md_slice", pp->name, sc->sc_name); 2718 return (EINVAL); 2719 } 2720 if (md->md_balance != sc->sc_balance) { 2721 G_MIRROR_DEBUG(1, 2722 "Invalid '%s' field on disk %s (device %s), skipping.", 2723 "md_balance", pp->name, sc->sc_name); 2724 return (EINVAL); 2725 } 2726 #if 0 2727 if (md->md_mediasize != sc->sc_mediasize) { 2728 G_MIRROR_DEBUG(1, 2729 "Invalid '%s' field on disk %s (device %s), skipping.", 2730 "md_mediasize", pp->name, sc->sc_name); 2731 return (EINVAL); 2732 } 2733 #endif 2734 if (sc->sc_mediasize > pp->mediasize) { 2735 G_MIRROR_DEBUG(1, 2736 "Invalid size of disk %s (device %s), skipping.", pp->name, 2737 sc->sc_name); 2738 return (EINVAL); 2739 } 2740 if (md->md_sectorsize != sc->sc_sectorsize) { 2741 G_MIRROR_DEBUG(1, 2742 "Invalid '%s' field on disk %s (device %s), skipping.", 2743 "md_sectorsize", pp->name, sc->sc_name); 2744 return (EINVAL); 2745 } 2746 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2747 G_MIRROR_DEBUG(1, 2748 "Invalid sector size of disk %s (device %s), skipping.", 2749 pp->name, sc->sc_name); 2750 return (EINVAL); 2751 } 2752 if ((md->md_mflags & ~G_MIRROR_DEVICE_FLAG_MASK) != 0) { 2753 G_MIRROR_DEBUG(1, 2754 "Invalid device flags on disk %s (device %s), skipping.", 2755 pp->name, sc->sc_name); 2756 return (EINVAL); 2757 } 2758 if ((md->md_dflags & ~G_MIRROR_DISK_FLAG_MASK) != 0) { 2759 G_MIRROR_DEBUG(1, 2760 "Invalid disk flags on disk %s (device %s), skipping.", 2761 pp->name, sc->sc_name); 2762 return (EINVAL); 2763 } 2764 return (0); 2765 } 2766 2767 int 2768 g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, 2769 struct g_mirror_metadata *md) 2770 { 2771 struct g_mirror_disk *disk; 2772 int error; 2773 2774 g_topology_assert_not(); 2775 G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); 2776 2777 error = g_mirror_check_metadata(sc, pp, md); 2778 if (error != 0) 2779 return (error); 2780 if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING && 2781 md->md_genid < sc->sc_genid) { 2782 G_MIRROR_DEBUG(0, "Component %s (device %s) broken, skipping.", 2783 pp->name, sc->sc_name); 2784 return (EINVAL); 2785 } 2786 disk = g_mirror_init_disk(sc, pp, md, &error); 2787 if (disk == NULL) 2788 return (error); 2789 error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, 2790 G_MIRROR_EVENT_WAIT); 2791 if (error != 0) 2792 return (error); 2793 if (md->md_version < G_MIRROR_VERSION) { 2794 G_MIRROR_DEBUG(0, 
"Upgrading metadata on %s (v%d->v%d).", 2795 pp->name, md->md_version, G_MIRROR_VERSION); 2796 g_mirror_update_metadata(disk); 2797 } 2798 return (0); 2799 } 2800 2801 static void 2802 g_mirror_destroy_delayed(void *arg, int flag) 2803 { 2804 struct g_mirror_softc *sc; 2805 int error; 2806 2807 if (flag == EV_CANCEL) { 2808 G_MIRROR_DEBUG(1, "Destroying canceled."); 2809 return; 2810 } 2811 sc = arg; 2812 g_topology_unlock(); 2813 sx_xlock(&sc->sc_lock); 2814 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) == 0, 2815 ("DESTROY flag set on %s.", sc->sc_name)); 2816 KASSERT((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0, 2817 ("DESTROYING flag not set on %s.", sc->sc_name)); 2818 G_MIRROR_DEBUG(1, "Destroying %s (delayed).", sc->sc_name); 2819 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_SOFT); 2820 if (error != 0) { 2821 G_MIRROR_DEBUG(0, "Cannot destroy %s (error=%d).", 2822 sc->sc_name, error); 2823 sx_xunlock(&sc->sc_lock); 2824 } 2825 g_topology_lock(); 2826 } 2827 2828 static int 2829 g_mirror_access(struct g_provider *pp, int acr, int acw, int ace) 2830 { 2831 struct g_mirror_softc *sc; 2832 int dcr, dcw, dce, error = 0; 2833 2834 g_topology_assert(); 2835 G_MIRROR_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 2836 acw, ace); 2837 2838 sc = pp->geom->softc; 2839 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 2840 return (0); 2841 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 2842 2843 dcr = pp->acr + acr; 2844 dcw = pp->acw + acw; 2845 dce = pp->ace + ace; 2846 2847 g_topology_unlock(); 2848 sx_xlock(&sc->sc_lock); 2849 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0 || 2850 LIST_EMPTY(&sc->sc_disks)) { 2851 if (acr > 0 || acw > 0 || ace > 0) 2852 error = ENXIO; 2853 goto end; 2854 } 2855 if (dcw == 0) 2856 g_mirror_idle(sc, dcw); 2857 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROYING) != 0) { 2858 if (acr > 0 || acw > 0 || ace > 0) { 2859 error = ENXIO; 2860 goto end; 2861 } 2862 if (dcr == 0 && dcw == 0 && dce == 0) { 2863 g_post_event(g_mirror_destroy_delayed, sc, M_WAITOK, 2864 sc, NULL); 2865 } 2866 } 2867 end: 2868 sx_xunlock(&sc->sc_lock); 2869 g_topology_lock(); 2870 return (error); 2871 } 2872 2873 static struct g_geom * 2874 g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) 2875 { 2876 struct g_mirror_softc *sc; 2877 struct g_geom *gp; 2878 int error, timeout; 2879 2880 g_topology_assert(); 2881 G_MIRROR_DEBUG(1, "Creating device %s (id=%u).", md->md_name, 2882 md->md_mid); 2883 2884 /* One disk is minimum. */ 2885 if (md->md_all < 1) 2886 return (NULL); 2887 /* 2888 * Action geom. 
2889 */ 2890 gp = g_new_geomf(mp, "%s", md->md_name); 2891 sc = malloc(sizeof(*sc), M_MIRROR, M_WAITOK | M_ZERO); 2892 gp->start = g_mirror_start; 2893 gp->orphan = g_mirror_orphan; 2894 gp->access = g_mirror_access; 2895 gp->dumpconf = g_mirror_dumpconf; 2896 2897 sc->sc_id = md->md_mid; 2898 sc->sc_slice = md->md_slice; 2899 sc->sc_balance = md->md_balance; 2900 sc->sc_mediasize = md->md_mediasize; 2901 sc->sc_sectorsize = md->md_sectorsize; 2902 sc->sc_ndisks = md->md_all; 2903 sc->sc_flags = md->md_mflags; 2904 sc->sc_bump_id = 0; 2905 sc->sc_idle = 1; 2906 sc->sc_last_write = time_uptime; 2907 sc->sc_writes = 0; 2908 sx_init(&sc->sc_lock, "gmirror:lock"); 2909 bioq_init(&sc->sc_queue); 2910 mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); 2911 bioq_init(&sc->sc_regular_delayed); 2912 bioq_init(&sc->sc_inflight); 2913 bioq_init(&sc->sc_sync_delayed); 2914 LIST_INIT(&sc->sc_disks); 2915 TAILQ_INIT(&sc->sc_events); 2916 mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); 2917 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 2918 mtx_init(&sc->sc_done_mtx, "gmirror:done", NULL, MTX_DEF); 2919 sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; 2920 gp->softc = sc; 2921 sc->sc_geom = gp; 2922 sc->sc_provider = NULL; 2923 /* 2924 * Synchronization geom. 2925 */ 2926 gp = g_new_geomf(mp, "%s.sync", md->md_name); 2927 gp->softc = sc; 2928 gp->orphan = g_mirror_orphan; 2929 sc->sc_sync.ds_geom = gp; 2930 sc->sc_sync.ds_ndisks = 0; 2931 error = kproc_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, 2932 "g_mirror %s", md->md_name); 2933 if (error != 0) { 2934 G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", 2935 sc->sc_name); 2936 g_destroy_geom(sc->sc_sync.ds_geom); 2937 mtx_destroy(&sc->sc_done_mtx); 2938 mtx_destroy(&sc->sc_events_mtx); 2939 mtx_destroy(&sc->sc_queue_mtx); 2940 sx_destroy(&sc->sc_lock); 2941 g_destroy_geom(sc->sc_geom); 2942 free(sc, M_MIRROR); 2943 return (NULL); 2944 } 2945 2946 G_MIRROR_DEBUG(1, "Device %s created (%u components, id=%u).", 2947 sc->sc_name, sc->sc_ndisks, sc->sc_id); 2948 2949 sc->sc_rootmount = root_mount_hold("GMIRROR"); 2950 G_MIRROR_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 2951 /* 2952 * Run timeout. 
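	 * Give the remaining components kern.geom.mirror.timeout seconds
	 * to show up; if they don't, g_mirror_go() will force-start the
	 * device with whatever has arrived.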
2953 */ 2954 timeout = g_mirror_timeout * hz; 2955 callout_reset(&sc->sc_callout, timeout, g_mirror_go, sc); 2956 return (sc->sc_geom); 2957 } 2958 2959 int 2960 g_mirror_destroy(struct g_mirror_softc *sc, int how) 2961 { 2962 struct g_mirror_disk *disk; 2963 struct g_provider *pp; 2964 2965 g_topology_assert_not(); 2966 if (sc == NULL) 2967 return (ENXIO); 2968 sx_assert(&sc->sc_lock, SX_XLOCKED); 2969 2970 pp = sc->sc_provider; 2971 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 2972 switch (how) { 2973 case G_MIRROR_DESTROY_SOFT: 2974 G_MIRROR_DEBUG(1, 2975 "Device %s is still open (r%dw%de%d).", pp->name, 2976 pp->acr, pp->acw, pp->ace); 2977 return (EBUSY); 2978 case G_MIRROR_DESTROY_DELAYED: 2979 G_MIRROR_DEBUG(1, 2980 "Device %s will be destroyed on last close.", 2981 pp->name); 2982 LIST_FOREACH(disk, &sc->sc_disks, d_next) { 2983 if (disk->d_state == 2984 G_MIRROR_DISK_STATE_SYNCHRONIZING) { 2985 g_mirror_sync_stop(disk, 1); 2986 } 2987 } 2988 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROYING; 2989 return (EBUSY); 2990 case G_MIRROR_DESTROY_HARD: 2991 G_MIRROR_DEBUG(1, "Device %s is still open, so it " 2992 "can't be definitely removed.", pp->name); 2993 } 2994 } 2995 2996 g_topology_lock(); 2997 if (sc->sc_geom->softc == NULL) { 2998 g_topology_unlock(); 2999 return (0); 3000 } 3001 sc->sc_geom->softc = NULL; 3002 sc->sc_sync.ds_geom->softc = NULL; 3003 g_topology_unlock(); 3004 3005 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; 3006 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; 3007 G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3008 sx_xunlock(&sc->sc_lock); 3009 mtx_lock(&sc->sc_queue_mtx); 3010 wakeup(sc); 3011 mtx_unlock(&sc->sc_queue_mtx); 3012 G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3013 while (sc->sc_worker != NULL) 3014 tsleep(&sc->sc_worker, PRIBIO, "m:destroy", hz / 5); 3015 G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3016 sx_xlock(&sc->sc_lock); 3017 g_mirror_destroy_device(sc); 3018 free(sc, M_MIRROR); 3019 return (0); 3020 } 3021 3022 static void 3023 g_mirror_taste_orphan(struct g_consumer *cp) 3024 { 3025 3026 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3027 cp->provider->name)); 3028 } 3029 3030 static struct g_geom * 3031 g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3032 { 3033 struct g_mirror_metadata md; 3034 struct g_mirror_softc *sc; 3035 struct g_consumer *cp; 3036 struct g_geom *gp; 3037 int error; 3038 3039 g_topology_assert(); 3040 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3041 G_MIRROR_DEBUG(2, "Tasting %s.", pp->name); 3042 3043 gp = g_new_geomf(mp, "mirror:taste"); 3044 /* 3045 * This orphan function should be never called. 3046 */ 3047 gp->orphan = g_mirror_taste_orphan; 3048 cp = g_new_consumer(gp); 3049 g_attach(cp, pp); 3050 error = g_mirror_read_metadata(cp, &md); 3051 g_detach(cp); 3052 g_destroy_consumer(cp); 3053 g_destroy_geom(gp); 3054 if (error != 0) 3055 return (NULL); 3056 gp = NULL; 3057 3058 if (md.md_provider[0] != '\0' && 3059 !g_compare_names(md.md_provider, pp->name)) 3060 return (NULL); 3061 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3062 return (NULL); 3063 if ((md.md_dflags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) { 3064 G_MIRROR_DEBUG(0, 3065 "Device %s: provider %s marked as inactive, skipping.", 3066 md.md_name, pp->name); 3067 return (NULL); 3068 } 3069 if (g_mirror_debug >= 2) 3070 mirror_metadata_dump(&md); 3071 3072 /* 3073 * Let's check if device already exists. 
3074 */ 3075 sc = NULL; 3076 LIST_FOREACH(gp, &mp->geom, geom) { 3077 sc = gp->softc; 3078 if (sc == NULL) 3079 continue; 3080 if (sc->sc_sync.ds_geom == gp) 3081 continue; 3082 if (strcmp(md.md_name, sc->sc_name) != 0) 3083 continue; 3084 if (md.md_mid != sc->sc_id) { 3085 G_MIRROR_DEBUG(0, "Device %s already configured.", 3086 sc->sc_name); 3087 return (NULL); 3088 } 3089 break; 3090 } 3091 if (gp == NULL) { 3092 gp = g_mirror_create(mp, &md); 3093 if (gp == NULL) { 3094 G_MIRROR_DEBUG(0, "Cannot create device %s.", 3095 md.md_name); 3096 return (NULL); 3097 } 3098 sc = gp->softc; 3099 } 3100 G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3101 g_topology_unlock(); 3102 sx_xlock(&sc->sc_lock); 3103 sc->sc_flags |= G_MIRROR_DEVICE_FLAG_TASTING; 3104 error = g_mirror_add_disk(sc, pp, &md); 3105 if (error != 0) { 3106 G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3107 pp->name, gp->name, error); 3108 if (LIST_EMPTY(&sc->sc_disks)) { 3109 g_cancel_event(sc); 3110 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3111 g_topology_lock(); 3112 return (NULL); 3113 } 3114 gp = NULL; 3115 } 3116 sc->sc_flags &= ~G_MIRROR_DEVICE_FLAG_TASTING; 3117 if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { 3118 g_mirror_destroy(sc, G_MIRROR_DESTROY_HARD); 3119 g_topology_lock(); 3120 return (NULL); 3121 } 3122 sx_xunlock(&sc->sc_lock); 3123 g_topology_lock(); 3124 return (gp); 3125 } 3126 3127 static void 3128 g_mirror_resize(struct g_consumer *cp) 3129 { 3130 struct g_mirror_disk *disk; 3131 3132 g_topology_assert(); 3133 g_trace(G_T_TOPOLOGY, "%s(%s)", __func__, cp->provider->name); 3134 3135 disk = cp->private; 3136 if (disk == NULL) 3137 return; 3138 g_topology_unlock(); 3139 g_mirror_update_metadata(disk); 3140 g_topology_lock(); 3141 } 3142 3143 static int 3144 g_mirror_destroy_geom(struct gctl_req *req __unused, 3145 struct g_class *mp __unused, struct g_geom *gp) 3146 { 3147 struct g_mirror_softc *sc; 3148 int error; 3149 3150 g_topology_unlock(); 3151 sc = gp->softc; 3152 sx_xlock(&sc->sc_lock); 3153 g_cancel_event(sc); 3154 error = g_mirror_destroy(gp->softc, G_MIRROR_DESTROY_SOFT); 3155 if (error != 0) 3156 sx_xunlock(&sc->sc_lock); 3157 g_topology_lock(); 3158 return (error); 3159 } 3160 3161 static void 3162 g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3163 struct g_consumer *cp, struct g_provider *pp) 3164 { 3165 struct g_mirror_softc *sc; 3166 3167 g_topology_assert(); 3168 3169 sc = gp->softc; 3170 if (sc == NULL) 3171 return; 3172 /* Skip synchronization geom. */ 3173 if (gp == sc->sc_sync.ds_geom) 3174 return; 3175 if (pp != NULL) { 3176 /* Nothing here. 
*/ 3177 } else if (cp != NULL) { 3178 struct g_mirror_disk *disk; 3179 3180 disk = cp->private; 3181 if (disk == NULL) 3182 return; 3183 g_topology_unlock(); 3184 sx_xlock(&sc->sc_lock); 3185 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)disk->d_id); 3186 if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { 3187 sbuf_printf(sb, "%s<Synchronized>", indent); 3188 if (disk->d_sync.ds_offset == 0) 3189 sbuf_printf(sb, "0%%"); 3190 else { 3191 sbuf_printf(sb, "%u%%", 3192 (u_int)((disk->d_sync.ds_offset * 100) / 3193 sc->sc_provider->mediasize)); 3194 } 3195 sbuf_printf(sb, "</Synchronized>\n"); 3196 if (disk->d_sync.ds_offset > 0) { 3197 sbuf_printf(sb, "%s<BytesSynced>%jd" 3198 "</BytesSynced>\n", indent, 3199 (intmax_t)disk->d_sync.ds_offset); 3200 } 3201 } 3202 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3203 disk->d_sync.ds_syncid); 3204 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, 3205 disk->d_genid); 3206 sbuf_printf(sb, "%s<Flags>", indent); 3207 if (disk->d_flags == 0) 3208 sbuf_printf(sb, "NONE"); 3209 else { 3210 int first = 1; 3211 3212 #define ADD_FLAG(flag, name) do { \ 3213 if ((disk->d_flags & (flag)) != 0) { \ 3214 if (!first) \ 3215 sbuf_printf(sb, ", "); \ 3216 else \ 3217 first = 0; \ 3218 sbuf_printf(sb, name); \ 3219 } \ 3220 } while (0) 3221 ADD_FLAG(G_MIRROR_DISK_FLAG_DIRTY, "DIRTY"); 3222 ADD_FLAG(G_MIRROR_DISK_FLAG_HARDCODED, "HARDCODED"); 3223 ADD_FLAG(G_MIRROR_DISK_FLAG_INACTIVE, "INACTIVE"); 3224 ADD_FLAG(G_MIRROR_DISK_FLAG_SYNCHRONIZING, 3225 "SYNCHRONIZING"); 3226 ADD_FLAG(G_MIRROR_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3227 ADD_FLAG(G_MIRROR_DISK_FLAG_BROKEN, "BROKEN"); 3228 #undef ADD_FLAG 3229 } 3230 sbuf_printf(sb, "</Flags>\n"); 3231 sbuf_printf(sb, "%s<Priority>%u</Priority>\n", indent, 3232 disk->d_priority); 3233 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3234 g_mirror_disk_state2str(disk->d_state)); 3235 sx_xunlock(&sc->sc_lock); 3236 g_topology_lock(); 3237 } else { 3238 g_topology_unlock(); 3239 sx_xlock(&sc->sc_lock); 3240 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3241 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3242 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3243 sbuf_printf(sb, "%s<Flags>", indent); 3244 if (sc->sc_flags == 0) 3245 sbuf_printf(sb, "NONE"); 3246 else { 3247 int first = 1; 3248 3249 #define ADD_FLAG(flag, name) do { \ 3250 if ((sc->sc_flags & (flag)) != 0) { \ 3251 if (!first) \ 3252 sbuf_printf(sb, ", "); \ 3253 else \ 3254 first = 0; \ 3255 sbuf_printf(sb, name); \ 3256 } \ 3257 } while (0) 3258 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3259 ADD_FLAG(G_MIRROR_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3260 #undef ADD_FLAG 3261 } 3262 sbuf_printf(sb, "</Flags>\n"); 3263 sbuf_printf(sb, "%s<Slice>%u</Slice>\n", indent, 3264 (u_int)sc->sc_slice); 3265 sbuf_printf(sb, "%s<Balance>%s</Balance>\n", indent, 3266 balance_name(sc->sc_balance)); 3267 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3268 sc->sc_ndisks); 3269 sbuf_printf(sb, "%s<State>", indent); 3270 if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) 3271 sbuf_printf(sb, "%s", "STARTING"); 3272 else if (sc->sc_ndisks == 3273 g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE)) 3274 sbuf_printf(sb, "%s", "COMPLETE"); 3275 else 3276 sbuf_printf(sb, "%s", "DEGRADED"); 3277 sbuf_printf(sb, "</State>\n"); 3278 sx_xunlock(&sc->sc_lock); 3279 g_topology_lock(); 3280 } 3281 } 3282 3283 static void 3284 g_mirror_shutdown_post_sync(void *arg, int howto) 3285 { 3286 struct g_class 
*mp; 3287 struct g_geom *gp, *gp2; 3288 struct g_mirror_softc *sc; 3289 int error; 3290 3291 mp = arg; 3292 DROP_GIANT(); 3293 g_topology_lock(); 3294 g_mirror_shutdown = 1; 3295 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3296 if ((sc = gp->softc) == NULL) 3297 continue; 3298 /* Skip synchronization geom. */ 3299 if (gp == sc->sc_sync.ds_geom) 3300 continue; 3301 g_topology_unlock(); 3302 sx_xlock(&sc->sc_lock); 3303 g_mirror_idle(sc, -1); 3304 g_cancel_event(sc); 3305 error = g_mirror_destroy(sc, G_MIRROR_DESTROY_DELAYED); 3306 if (error != 0) 3307 sx_xunlock(&sc->sc_lock); 3308 g_topology_lock(); 3309 } 3310 g_topology_unlock(); 3311 PICKUP_GIANT(); 3312 } 3313 3314 static void 3315 g_mirror_init(struct g_class *mp) 3316 { 3317 3318 g_mirror_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3319 g_mirror_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3320 if (g_mirror_post_sync == NULL) 3321 G_MIRROR_DEBUG(0, "Warning! Cannot register shutdown event."); 3322 } 3323 3324 static void 3325 g_mirror_fini(struct g_class *mp) 3326 { 3327 3328 if (g_mirror_post_sync != NULL) 3329 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_mirror_post_sync); 3330 } 3331 3332 DECLARE_GEOM_CLASS(g_mirror_class, g_mirror); 3333
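/*
 * For reference only (an illustrative sketch, not used by the kernel):
 * a mirror handled by this class is normally managed from userland with
 * gmirror(8), e.g.:
 *
 *	gmirror load			(or geom_mirror_load="YES"
 *					 in loader.conf)
 *	gmirror label -v -b round-robin gm0 /dev/da0 /dev/da1
 *	newfs /dev/mirror/gm0
 *	gmirror status
 *
 * The /dev/mirror/gm0 node corresponds to the "mirror/%s" provider
 * created in g_mirror_launch_provider() above.
 */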