1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/module.h> 36 #include <sys/limits.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/bio.h> 40 #include <sys/sbuf.h> 41 #include <sys/sysctl.h> 42 #include <sys/malloc.h> 43 #include <sys/eventhandler.h> 44 #include <vm/uma.h> 45 #include <geom/geom.h> 46 #include <geom/geom_dbg.h> 47 #include <sys/proc.h> 48 #include <sys/kthread.h> 49 #include <sys/sched.h> 50 #include <geom/raid3/g_raid3.h> 51 52 FEATURE(geom_raid3, "GEOM RAID-3 functionality"); 53 54 static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data"); 55 56 SYSCTL_DECL(_kern_geom); 57 static SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 58 "GEOM_RAID3 stuff"); 59 u_int g_raid3_debug = 0; 60 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RWTUN, &g_raid3_debug, 0, 61 "Debug level"); 62 static u_int g_raid3_timeout = 4; 63 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RWTUN, &g_raid3_timeout, 64 0, "Time to wait on all raid3 components"); 65 static u_int g_raid3_idletime = 5; 66 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RWTUN, 67 &g_raid3_idletime, 0, "Mark components as clean when idling"); 68 static u_int g_raid3_disconnect_on_failure = 1; 69 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RWTUN, 70 &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure."); 71 static u_int g_raid3_syncreqs = 2; 72 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN, 73 &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests."); 74 static u_int g_raid3_use_malloc = 0; 75 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN, 76 &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9)."); 77 78 static u_int g_raid3_n64k = 50; 79 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RDTUN, &g_raid3_n64k, 0, 80 "Maximum number of 64kB allocations"); 81 static u_int g_raid3_n16k = 200; 82 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RDTUN, &g_raid3_n16k, 0, 83 "Maximum number of 16kB allocations"); 84 static u_int g_raid3_n4k = 1200; 85 SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RDTUN, &g_raid3_n4k, 0, 86 "Maximum number of 4kB allocations"); 87 88 static SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, 89 CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 90 "GEOM_RAID3 statistics"); 91 static u_int g_raid3_parity_mismatch = 0; 92 SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD, 93 &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode"); 94 95 #define MSLEEP(ident, mtx, priority, wmesg, timeout) do { \ 96 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident)); \ 97 msleep((ident), (mtx), (priority), (wmesg), (timeout)); \ 98 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident)); \ 99 } while (0) 100 101 static eventhandler_tag g_raid3_post_sync = NULL; 102 static int g_raid3_shutdown = 0; 103 104 static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp, 105 struct g_geom *gp); 106 static g_taste_t g_raid3_taste; 107 static void g_raid3_init(struct g_class *mp); 108 static void g_raid3_fini(struct g_class *mp); 109 110 struct g_class g_raid3_class = { 111 .name = G_RAID3_CLASS_NAME, 112 .version = G_VERSION, 113 .ctlreq = g_raid3_config, 114 .taste = g_raid3_taste, 115 .destroy_geom = g_raid3_destroy_geom, 116 .init = g_raid3_init, 117 .fini = g_raid3_fini 118 }; 119 120 static void g_raid3_destroy_provider(struct g_raid3_softc *sc); 121 static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state); 122 static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force); 123 static void g_raid3_dumpconf(struct sbuf *sb, const char *indent, 124 struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp); 125 static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type); 126 static int g_raid3_register_request(struct bio *pbp); 127 static void g_raid3_sync_release(struct g_raid3_softc *sc); 128 static void g_raid3_timeout_drain(struct g_raid3_softc *sc); 129 130 static const char * 131 g_raid3_disk_state2str(int state) 132 { 133 134 switch (state) { 135 case G_RAID3_DISK_STATE_NODISK: 136 return ("NODISK"); 137 case G_RAID3_DISK_STATE_NONE: 138 return ("NONE"); 139 case G_RAID3_DISK_STATE_NEW: 140 return ("NEW"); 141 case G_RAID3_DISK_STATE_ACTIVE: 142 return ("ACTIVE"); 143 case G_RAID3_DISK_STATE_STALE: 144 return ("STALE"); 145 case G_RAID3_DISK_STATE_SYNCHRONIZING: 146 return ("SYNCHRONIZING"); 147 case G_RAID3_DISK_STATE_DISCONNECTED: 148 return ("DISCONNECTED"); 149 default: 150 return ("INVALID"); 151 } 152 } 153 154 static const char * 155 g_raid3_device_state2str(int state) 156 { 157 158 switch (state) { 159 case G_RAID3_DEVICE_STATE_STARTING: 160 return ("STARTING"); 161 case G_RAID3_DEVICE_STATE_DEGRADED: 162 return ("DEGRADED"); 163 case G_RAID3_DEVICE_STATE_COMPLETE: 164 return ("COMPLETE"); 165 default: 166 return ("INVALID"); 167 } 168 } 169 170 const char * 171 g_raid3_get_diskname(struct g_raid3_disk *disk) 172 { 173 174 if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) 175 return ("[unknown]"); 176 return (disk->d_name); 177 } 178 179 static void * 180 g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags) 181 { 182 void *ptr; 183 enum g_raid3_zones zone; 184 185 if (g_raid3_use_malloc || 186 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 187 ptr = malloc(size, M_RAID3, flags); 188 else { 189 ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone, 190 &sc->sc_zones[zone], flags); 191 sc->sc_zones[zone].sz_requested++; 192 if (ptr == NULL) 193 sc->sc_zones[zone].sz_failed++; 194 } 195 return (ptr); 196 } 197 198 static void 199 g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size) 200 { 201 enum g_raid3_zones zone; 202 203 if (g_raid3_use_malloc || 204 (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES) 205 free(ptr, M_RAID3); 206 else { 207 uma_zfree_arg(sc->sc_zones[zone].sz_zone, 208 ptr, &sc->sc_zones[zone]); 209 } 210 } 211 212 static int 213 g_raid3_uma_ctor(void *mem, int size, void *arg, int flags) 214 { 215 struct g_raid3_zone *sz = arg; 216 217 if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max) 218 return (ENOMEM); 219 sz->sz_inuse++; 220 return (0); 221 } 222 223 static void 224 g_raid3_uma_dtor(void *mem, int size, void *arg) 225 { 226 struct g_raid3_zone *sz = arg; 227 228 sz->sz_inuse--; 229 } 230 231 #define g_raid3_xor(src, dst, size) \ 232 _g_raid3_xor((uint64_t *)(src), \ 233 (uint64_t *)(dst), (size_t)size) 234 static void 235 _g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size) 236 { 237 238 KASSERT((size % 128) == 0, ("Invalid size: %zu.", size)); 239 for (; size > 0; size -= 128) { 240 *dst++ ^= (*src++); 241 *dst++ ^= (*src++); 242 *dst++ ^= (*src++); 243 *dst++ ^= (*src++); 244 *dst++ ^= (*src++); 245 *dst++ ^= (*src++); 246 *dst++ ^= (*src++); 247 *dst++ ^= (*src++); 248 *dst++ ^= (*src++); 249 *dst++ ^= (*src++); 250 *dst++ ^= (*src++); 251 *dst++ ^= (*src++); 252 *dst++ ^= (*src++); 253 *dst++ ^= (*src++); 254 *dst++ ^= (*src++); 255 *dst++ ^= (*src++); 256 } 257 } 258 259 static int 260 g_raid3_is_zero(struct bio *bp) 261 { 262 static const uint64_t zeros[] = { 263 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 264 }; 265 u_char *addr; 266 ssize_t size; 267 268 size = bp->bio_length; 269 addr = (u_char *)bp->bio_data; 270 for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) { 271 if (bcmp(addr, zeros, sizeof(zeros)) != 0) 272 return (0); 273 } 274 return (1); 275 } 276 277 /* 278 * --- Events handling functions --- 279 * Events in geom_raid3 are used to maintain disks and device status 280 * from one thread to simplify locking. 281 */ 282 static void 283 g_raid3_event_free(struct g_raid3_event *ep) 284 { 285 286 free(ep, M_RAID3); 287 } 288 289 static int 290 g_raid3_event_dispatch(struct g_raid3_event *ep, void *arg, int state, 291 int flags) 292 { 293 struct g_raid3_softc *sc; 294 struct g_raid3_disk *disk; 295 int error; 296 297 G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep); 298 if ((flags & G_RAID3_EVENT_DEVICE) != 0) { 299 disk = NULL; 300 sc = arg; 301 } else { 302 disk = arg; 303 sc = disk->d_softc; 304 } 305 ep->e_disk = disk; 306 ep->e_state = state; 307 ep->e_flags = flags; 308 ep->e_error = 0; 309 mtx_lock(&sc->sc_events_mtx); 310 TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); 311 mtx_unlock(&sc->sc_events_mtx); 312 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 313 mtx_lock(&sc->sc_queue_mtx); 314 wakeup(sc); 315 wakeup(&sc->sc_queue); 316 mtx_unlock(&sc->sc_queue_mtx); 317 if ((flags & G_RAID3_EVENT_DONTWAIT) != 0) 318 return (0); 319 sx_assert(&sc->sc_lock, SX_XLOCKED); 320 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep); 321 sx_xunlock(&sc->sc_lock); 322 while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) { 323 mtx_lock(&sc->sc_events_mtx); 324 MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event", 325 hz * 5); 326 } 327 error = ep->e_error; 328 g_raid3_event_free(ep); 329 sx_xlock(&sc->sc_lock); 330 return (error); 331 } 332 333 int 334 g_raid3_event_send(void *arg, int state, int flags) 335 { 336 struct g_raid3_event *ep; 337 338 ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK); 339 return (g_raid3_event_dispatch(ep, arg, state, flags)); 340 } 341 342 static struct g_raid3_event * 343 g_raid3_event_get(struct g_raid3_softc *sc) 344 { 345 struct g_raid3_event *ep; 346 347 mtx_lock(&sc->sc_events_mtx); 348 ep = TAILQ_FIRST(&sc->sc_events); 349 mtx_unlock(&sc->sc_events_mtx); 350 return (ep); 351 } 352 353 static void 354 g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep) 355 { 356 357 mtx_lock(&sc->sc_events_mtx); 358 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 359 mtx_unlock(&sc->sc_events_mtx); 360 } 361 362 static void 363 g_raid3_event_cancel(struct g_raid3_disk *disk) 364 { 365 struct g_raid3_softc *sc; 366 struct g_raid3_event *ep, *tmpep; 367 368 sc = disk->d_softc; 369 sx_assert(&sc->sc_lock, SX_XLOCKED); 370 371 mtx_lock(&sc->sc_events_mtx); 372 TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { 373 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) 374 continue; 375 if (ep->e_disk != disk) 376 continue; 377 TAILQ_REMOVE(&sc->sc_events, ep, e_next); 378 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 379 g_raid3_event_free(ep); 380 else { 381 ep->e_error = ECANCELED; 382 wakeup(ep); 383 } 384 } 385 mtx_unlock(&sc->sc_events_mtx); 386 } 387 388 /* 389 * Return the number of disks in the given state. 390 * If state is equal to -1, count all connected disks. 391 */ 392 u_int 393 g_raid3_ndisks(struct g_raid3_softc *sc, int state) 394 { 395 struct g_raid3_disk *disk; 396 u_int n, ndisks; 397 398 sx_assert(&sc->sc_lock, SX_LOCKED); 399 400 for (n = ndisks = 0; n < sc->sc_ndisks; n++) { 401 disk = &sc->sc_disks[n]; 402 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 403 continue; 404 if (state == -1 || disk->d_state == state) 405 ndisks++; 406 } 407 return (ndisks); 408 } 409 410 static u_int 411 g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp) 412 { 413 struct bio *bp; 414 u_int nreqs = 0; 415 416 mtx_lock(&sc->sc_queue_mtx); 417 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 418 if (bp->bio_from == cp) 419 nreqs++; 420 } 421 mtx_unlock(&sc->sc_queue_mtx); 422 return (nreqs); 423 } 424 425 static int 426 g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp) 427 { 428 429 if (cp->index > 0) { 430 G_RAID3_DEBUG(2, 431 "I/O requests for %s exist, can't destroy it now.", 432 cp->provider->name); 433 return (1); 434 } 435 if (g_raid3_nrequests(sc, cp) > 0) { 436 G_RAID3_DEBUG(2, 437 "I/O requests for %s in queue, can't destroy it now.", 438 cp->provider->name); 439 return (1); 440 } 441 return (0); 442 } 443 444 static void 445 g_raid3_destroy_consumer(void *arg, int flags __unused) 446 { 447 struct g_consumer *cp; 448 449 g_topology_assert(); 450 451 cp = arg; 452 G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name); 453 g_detach(cp); 454 g_destroy_consumer(cp); 455 } 456 457 static void 458 g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 459 { 460 struct g_provider *pp; 461 int retaste_wait; 462 463 g_topology_assert(); 464 465 cp->private = NULL; 466 if (g_raid3_is_busy(sc, cp)) 467 return; 468 G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name); 469 pp = cp->provider; 470 retaste_wait = 0; 471 if (cp->acw == 1) { 472 if ((pp->geom->flags & G_GEOM_WITHER) == 0) 473 retaste_wait = 1; 474 } 475 G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr, 476 -cp->acw, -cp->ace, 0); 477 if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0) 478 g_access(cp, -cp->acr, -cp->acw, -cp->ace); 479 if (retaste_wait) { 480 /* 481 * After retaste event was send (inside g_access()), we can send 482 * event to detach and destroy consumer. 483 * A class, which has consumer to the given provider connected 484 * will not receive retaste event for the provider. 485 * This is the way how I ignore retaste events when I close 486 * consumers opened for write: I detach and destroy consumer 487 * after retaste event is sent. 488 */ 489 g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL); 490 return; 491 } 492 G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name); 493 g_detach(cp); 494 g_destroy_consumer(cp); 495 } 496 497 static int 498 g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp) 499 { 500 struct g_consumer *cp; 501 int error; 502 503 g_topology_assert_not(); 504 KASSERT(disk->d_consumer == NULL, 505 ("Disk already connected (device %s).", disk->d_softc->sc_name)); 506 507 g_topology_lock(); 508 cp = g_new_consumer(disk->d_softc->sc_geom); 509 error = g_attach(cp, pp); 510 if (error != 0) { 511 g_destroy_consumer(cp); 512 g_topology_unlock(); 513 return (error); 514 } 515 error = g_access(cp, 1, 1, 1); 516 g_topology_unlock(); 517 if (error != 0) { 518 g_detach(cp); 519 g_destroy_consumer(cp); 520 G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).", 521 pp->name, error); 522 return (error); 523 } 524 disk->d_consumer = cp; 525 disk->d_consumer->private = disk; 526 disk->d_consumer->index = 0; 527 G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk)); 528 return (0); 529 } 530 531 static void 532 g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp) 533 { 534 535 g_topology_assert(); 536 537 if (cp == NULL) 538 return; 539 if (cp->provider != NULL) 540 g_raid3_kill_consumer(sc, cp); 541 else 542 g_destroy_consumer(cp); 543 } 544 545 /* 546 * Initialize disk. This means allocate memory, create consumer, attach it 547 * to the provider and open access (r1w1e1) to it. 548 */ 549 static struct g_raid3_disk * 550 g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp, 551 struct g_raid3_metadata *md, int *errorp) 552 { 553 struct g_raid3_disk *disk; 554 int error; 555 556 disk = &sc->sc_disks[md->md_no]; 557 error = g_raid3_connect_disk(disk, pp); 558 if (error != 0) { 559 if (errorp != NULL) 560 *errorp = error; 561 return (NULL); 562 } 563 disk->d_state = G_RAID3_DISK_STATE_NONE; 564 disk->d_flags = md->md_dflags; 565 if (md->md_provider[0] != '\0') 566 disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED; 567 disk->d_sync.ds_consumer = NULL; 568 disk->d_sync.ds_offset = md->md_sync_offset; 569 disk->d_sync.ds_offset_done = md->md_sync_offset; 570 disk->d_genid = md->md_genid; 571 disk->d_sync.ds_syncid = md->md_syncid; 572 if (errorp != NULL) 573 *errorp = 0; 574 return (disk); 575 } 576 577 static void 578 g_raid3_destroy_disk(struct g_raid3_disk *disk) 579 { 580 struct g_raid3_softc *sc; 581 582 g_topology_assert_not(); 583 sc = disk->d_softc; 584 sx_assert(&sc->sc_lock, SX_XLOCKED); 585 586 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 587 return; 588 g_raid3_event_cancel(disk); 589 switch (disk->d_state) { 590 case G_RAID3_DISK_STATE_SYNCHRONIZING: 591 if (sc->sc_syncdisk != NULL) 592 g_raid3_sync_stop(sc, 1); 593 /* FALLTHROUGH */ 594 case G_RAID3_DISK_STATE_NEW: 595 case G_RAID3_DISK_STATE_STALE: 596 case G_RAID3_DISK_STATE_ACTIVE: 597 g_topology_lock(); 598 g_raid3_disconnect_consumer(sc, disk->d_consumer); 599 g_topology_unlock(); 600 disk->d_consumer = NULL; 601 break; 602 default: 603 KASSERT(0 == 1, ("Wrong disk state (%s, %s).", 604 g_raid3_get_diskname(disk), 605 g_raid3_disk_state2str(disk->d_state))); 606 } 607 disk->d_state = G_RAID3_DISK_STATE_NODISK; 608 } 609 610 static void 611 g_raid3_destroy_device(struct g_raid3_softc *sc) 612 { 613 struct g_raid3_event *ep; 614 struct g_raid3_disk *disk; 615 struct g_geom *gp; 616 struct g_consumer *cp; 617 u_int n; 618 619 g_topology_assert_not(); 620 sx_assert(&sc->sc_lock, SX_XLOCKED); 621 622 gp = sc->sc_geom; 623 if (sc->sc_provider != NULL) 624 g_raid3_destroy_provider(sc); 625 for (n = 0; n < sc->sc_ndisks; n++) { 626 disk = &sc->sc_disks[n]; 627 if (disk->d_state != G_RAID3_DISK_STATE_NODISK) { 628 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 629 g_raid3_update_metadata(disk); 630 g_raid3_destroy_disk(disk); 631 } 632 } 633 while ((ep = g_raid3_event_get(sc)) != NULL) { 634 g_raid3_event_remove(sc, ep); 635 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) 636 g_raid3_event_free(ep); 637 else { 638 ep->e_error = ECANCELED; 639 ep->e_flags |= G_RAID3_EVENT_DONE; 640 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep); 641 mtx_lock(&sc->sc_events_mtx); 642 wakeup(ep); 643 mtx_unlock(&sc->sc_events_mtx); 644 } 645 } 646 g_raid3_timeout_drain(sc); 647 cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer); 648 g_topology_lock(); 649 if (cp != NULL) 650 g_raid3_disconnect_consumer(sc, cp); 651 g_wither_geom(sc->sc_sync.ds_geom, ENXIO); 652 G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name); 653 g_wither_geom(gp, ENXIO); 654 g_topology_unlock(); 655 if (!g_raid3_use_malloc) { 656 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 657 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 658 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 659 } 660 mtx_destroy(&sc->sc_queue_mtx); 661 mtx_destroy(&sc->sc_events_mtx); 662 sx_xunlock(&sc->sc_lock); 663 sx_destroy(&sc->sc_lock); 664 } 665 666 static void 667 g_raid3_orphan(struct g_consumer *cp) 668 { 669 struct g_raid3_disk *disk; 670 671 g_topology_assert(); 672 673 disk = cp->private; 674 if (disk == NULL) 675 return; 676 disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID; 677 g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED, 678 G_RAID3_EVENT_DONTWAIT); 679 } 680 681 static int 682 g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 683 { 684 struct g_raid3_softc *sc; 685 struct g_consumer *cp; 686 off_t offset, length; 687 u_char *sector; 688 int error = 0; 689 690 g_topology_assert_not(); 691 sc = disk->d_softc; 692 sx_assert(&sc->sc_lock, SX_LOCKED); 693 694 cp = disk->d_consumer; 695 KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name)); 696 KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name)); 697 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 698 ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr, 699 cp->acw, cp->ace)); 700 length = cp->provider->sectorsize; 701 offset = cp->provider->mediasize - length; 702 sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO); 703 if (md != NULL) 704 raid3_metadata_encode(md, sector); 705 error = g_write_data(cp, offset, sector, length); 706 free(sector, M_RAID3); 707 if (error != 0) { 708 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 709 G_RAID3_DEBUG(0, "Cannot write metadata on %s " 710 "(device=%s, error=%d).", 711 g_raid3_get_diskname(disk), sc->sc_name, error); 712 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 713 } else { 714 G_RAID3_DEBUG(1, "Cannot write metadata on %s " 715 "(device=%s, error=%d).", 716 g_raid3_get_diskname(disk), sc->sc_name, error); 717 } 718 if (g_raid3_disconnect_on_failure && 719 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 720 sc->sc_bump_id |= G_RAID3_BUMP_GENID; 721 g_raid3_event_send(disk, 722 G_RAID3_DISK_STATE_DISCONNECTED, 723 G_RAID3_EVENT_DONTWAIT); 724 } 725 } 726 return (error); 727 } 728 729 int 730 g_raid3_clear_metadata(struct g_raid3_disk *disk) 731 { 732 int error; 733 734 g_topology_assert_not(); 735 sx_assert(&disk->d_softc->sc_lock, SX_LOCKED); 736 737 error = g_raid3_write_metadata(disk, NULL); 738 if (error == 0) { 739 G_RAID3_DEBUG(2, "Metadata on %s cleared.", 740 g_raid3_get_diskname(disk)); 741 } else { 742 G_RAID3_DEBUG(0, 743 "Cannot clear metadata on disk %s (error=%d).", 744 g_raid3_get_diskname(disk), error); 745 } 746 return (error); 747 } 748 749 void 750 g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md) 751 { 752 struct g_raid3_softc *sc; 753 struct g_provider *pp; 754 755 bzero(md, sizeof(*md)); 756 sc = disk->d_softc; 757 strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic)); 758 md->md_version = G_RAID3_VERSION; 759 strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); 760 md->md_id = sc->sc_id; 761 md->md_all = sc->sc_ndisks; 762 md->md_genid = sc->sc_genid; 763 md->md_mediasize = sc->sc_mediasize; 764 md->md_sectorsize = sc->sc_sectorsize; 765 md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK); 766 md->md_no = disk->d_no; 767 md->md_syncid = disk->d_sync.ds_syncid; 768 md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK); 769 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 770 md->md_sync_offset = 771 disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1); 772 } 773 if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL) 774 pp = disk->d_consumer->provider; 775 else 776 pp = NULL; 777 if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL) 778 strlcpy(md->md_provider, pp->name, sizeof(md->md_provider)); 779 if (pp != NULL) 780 md->md_provsize = pp->mediasize; 781 } 782 783 void 784 g_raid3_update_metadata(struct g_raid3_disk *disk) 785 { 786 struct g_raid3_softc *sc __diagused; 787 struct g_raid3_metadata md; 788 int error; 789 790 g_topology_assert_not(); 791 sc = disk->d_softc; 792 sx_assert(&sc->sc_lock, SX_LOCKED); 793 794 g_raid3_fill_metadata(disk, &md); 795 error = g_raid3_write_metadata(disk, &md); 796 if (error == 0) { 797 G_RAID3_DEBUG(2, "Metadata on %s updated.", 798 g_raid3_get_diskname(disk)); 799 } else { 800 G_RAID3_DEBUG(0, 801 "Cannot update metadata on disk %s (error=%d).", 802 g_raid3_get_diskname(disk), error); 803 } 804 } 805 806 static void 807 g_raid3_bump_syncid(struct g_raid3_softc *sc) 808 { 809 struct g_raid3_disk *disk; 810 u_int n; 811 812 g_topology_assert_not(); 813 sx_assert(&sc->sc_lock, SX_XLOCKED); 814 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 815 ("%s called with no active disks (device=%s).", __func__, 816 sc->sc_name)); 817 818 sc->sc_syncid++; 819 G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name, 820 sc->sc_syncid); 821 for (n = 0; n < sc->sc_ndisks; n++) { 822 disk = &sc->sc_disks[n]; 823 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 824 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 825 disk->d_sync.ds_syncid = sc->sc_syncid; 826 g_raid3_update_metadata(disk); 827 } 828 } 829 } 830 831 static void 832 g_raid3_bump_genid(struct g_raid3_softc *sc) 833 { 834 struct g_raid3_disk *disk; 835 u_int n; 836 837 g_topology_assert_not(); 838 sx_assert(&sc->sc_lock, SX_XLOCKED); 839 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0, 840 ("%s called with no active disks (device=%s).", __func__, 841 sc->sc_name)); 842 843 sc->sc_genid++; 844 G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name, 845 sc->sc_genid); 846 for (n = 0; n < sc->sc_ndisks; n++) { 847 disk = &sc->sc_disks[n]; 848 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 849 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 850 disk->d_genid = sc->sc_genid; 851 g_raid3_update_metadata(disk); 852 } 853 } 854 } 855 856 static int 857 g_raid3_idle(struct g_raid3_softc *sc, int acw) 858 { 859 struct g_raid3_disk *disk; 860 u_int i; 861 int timeout; 862 863 g_topology_assert_not(); 864 sx_assert(&sc->sc_lock, SX_XLOCKED); 865 866 if (sc->sc_provider == NULL) 867 return (0); 868 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 869 return (0); 870 if (sc->sc_idle) 871 return (0); 872 if (sc->sc_writes > 0) 873 return (0); 874 if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) { 875 timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write); 876 if (!g_raid3_shutdown && timeout > 0) 877 return (timeout); 878 } 879 sc->sc_idle = 1; 880 for (i = 0; i < sc->sc_ndisks; i++) { 881 disk = &sc->sc_disks[i]; 882 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 883 continue; 884 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 885 g_raid3_get_diskname(disk), sc->sc_name); 886 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 887 g_raid3_update_metadata(disk); 888 } 889 return (0); 890 } 891 892 static void 893 g_raid3_unidle(struct g_raid3_softc *sc) 894 { 895 struct g_raid3_disk *disk; 896 u_int i; 897 898 g_topology_assert_not(); 899 sx_assert(&sc->sc_lock, SX_XLOCKED); 900 901 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 902 return; 903 sc->sc_idle = 0; 904 sc->sc_last_write = time_uptime; 905 for (i = 0; i < sc->sc_ndisks; i++) { 906 disk = &sc->sc_disks[i]; 907 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 908 continue; 909 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 910 g_raid3_get_diskname(disk), sc->sc_name); 911 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 912 g_raid3_update_metadata(disk); 913 } 914 } 915 916 /* 917 * Treat bio_driver1 field in parent bio as list head and field bio_caller1 918 * in child bio as pointer to the next element on the list. 919 */ 920 #define G_RAID3_HEAD_BIO(pbp) (pbp)->bio_driver1 921 922 #define G_RAID3_NEXT_BIO(cbp) (cbp)->bio_caller1 923 924 #define G_RAID3_FOREACH_BIO(pbp, bp) \ 925 for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL; \ 926 (bp) = G_RAID3_NEXT_BIO(bp)) 927 928 #define G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp) \ 929 for ((bp) = G_RAID3_HEAD_BIO(pbp); \ 930 (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1); \ 931 (bp) = (tmpbp)) 932 933 static void 934 g_raid3_init_bio(struct bio *pbp) 935 { 936 937 G_RAID3_HEAD_BIO(pbp) = NULL; 938 } 939 940 static void 941 g_raid3_remove_bio(struct bio *cbp) 942 { 943 struct bio *pbp, *bp; 944 945 pbp = cbp->bio_parent; 946 if (G_RAID3_HEAD_BIO(pbp) == cbp) 947 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 948 else { 949 G_RAID3_FOREACH_BIO(pbp, bp) { 950 if (G_RAID3_NEXT_BIO(bp) == cbp) { 951 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 952 break; 953 } 954 } 955 } 956 G_RAID3_NEXT_BIO(cbp) = NULL; 957 } 958 959 static void 960 g_raid3_replace_bio(struct bio *sbp, struct bio *dbp) 961 { 962 struct bio *pbp, *bp; 963 964 g_raid3_remove_bio(sbp); 965 pbp = dbp->bio_parent; 966 G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp); 967 if (G_RAID3_HEAD_BIO(pbp) == dbp) 968 G_RAID3_HEAD_BIO(pbp) = sbp; 969 else { 970 G_RAID3_FOREACH_BIO(pbp, bp) { 971 if (G_RAID3_NEXT_BIO(bp) == dbp) { 972 G_RAID3_NEXT_BIO(bp) = sbp; 973 break; 974 } 975 } 976 } 977 G_RAID3_NEXT_BIO(dbp) = NULL; 978 } 979 980 static void 981 g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp) 982 { 983 struct bio *bp, *pbp; 984 size_t size; 985 986 pbp = cbp->bio_parent; 987 pbp->bio_children--; 988 KASSERT(cbp->bio_data != NULL, ("NULL bio_data")); 989 size = pbp->bio_length / (sc->sc_ndisks - 1); 990 g_raid3_free(sc, cbp->bio_data, size); 991 if (G_RAID3_HEAD_BIO(pbp) == cbp) { 992 G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp); 993 G_RAID3_NEXT_BIO(cbp) = NULL; 994 g_destroy_bio(cbp); 995 } else { 996 G_RAID3_FOREACH_BIO(pbp, bp) { 997 if (G_RAID3_NEXT_BIO(bp) == cbp) 998 break; 999 } 1000 if (bp != NULL) { 1001 KASSERT(G_RAID3_NEXT_BIO(bp) != NULL, 1002 ("NULL bp->bio_driver1")); 1003 G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp); 1004 G_RAID3_NEXT_BIO(cbp) = NULL; 1005 } 1006 g_destroy_bio(cbp); 1007 } 1008 } 1009 1010 static struct bio * 1011 g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp) 1012 { 1013 struct bio *bp, *cbp; 1014 size_t size; 1015 int memflag; 1016 1017 cbp = g_clone_bio(pbp); 1018 if (cbp == NULL) 1019 return (NULL); 1020 size = pbp->bio_length / (sc->sc_ndisks - 1); 1021 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 1022 memflag = M_WAITOK; 1023 else 1024 memflag = M_NOWAIT; 1025 cbp->bio_data = g_raid3_alloc(sc, size, memflag); 1026 if (cbp->bio_data == NULL) { 1027 pbp->bio_children--; 1028 g_destroy_bio(cbp); 1029 return (NULL); 1030 } 1031 G_RAID3_NEXT_BIO(cbp) = NULL; 1032 if (G_RAID3_HEAD_BIO(pbp) == NULL) 1033 G_RAID3_HEAD_BIO(pbp) = cbp; 1034 else { 1035 G_RAID3_FOREACH_BIO(pbp, bp) { 1036 if (G_RAID3_NEXT_BIO(bp) == NULL) { 1037 G_RAID3_NEXT_BIO(bp) = cbp; 1038 break; 1039 } 1040 } 1041 } 1042 return (cbp); 1043 } 1044 1045 static void 1046 g_raid3_scatter(struct bio *pbp) 1047 { 1048 struct g_raid3_softc *sc; 1049 struct g_raid3_disk *disk; 1050 struct bio *bp, *cbp, *tmpbp; 1051 off_t atom, cadd, padd, left; 1052 int first; 1053 1054 sc = pbp->bio_to->geom->softc; 1055 bp = NULL; 1056 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 1057 /* 1058 * Find bio for which we should calculate data. 1059 */ 1060 G_RAID3_FOREACH_BIO(pbp, cbp) { 1061 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 1062 bp = cbp; 1063 break; 1064 } 1065 } 1066 KASSERT(bp != NULL, ("NULL parity bio.")); 1067 } 1068 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 1069 cadd = padd = 0; 1070 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 1071 G_RAID3_FOREACH_BIO(pbp, cbp) { 1072 if (cbp == bp) 1073 continue; 1074 bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom); 1075 padd += atom; 1076 } 1077 cadd += atom; 1078 } 1079 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) { 1080 /* 1081 * Calculate parity. 1082 */ 1083 first = 1; 1084 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 1085 if (cbp == bp) 1086 continue; 1087 if (first) { 1088 bcopy(cbp->bio_data, bp->bio_data, 1089 bp->bio_length); 1090 first = 0; 1091 } else { 1092 g_raid3_xor(cbp->bio_data, bp->bio_data, 1093 bp->bio_length); 1094 } 1095 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0) 1096 g_raid3_destroy_bio(sc, cbp); 1097 } 1098 } 1099 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 1100 struct g_consumer *cp; 1101 1102 disk = cbp->bio_caller2; 1103 cp = disk->d_consumer; 1104 cbp->bio_to = cp->provider; 1105 G_RAID3_LOGREQ(3, cbp, "Sending request."); 1106 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1107 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1108 cp->acr, cp->acw, cp->ace)); 1109 cp->index++; 1110 sc->sc_writes++; 1111 g_io_request(cbp, cp); 1112 } 1113 } 1114 1115 static void 1116 g_raid3_gather(struct bio *pbp) 1117 { 1118 struct g_raid3_softc *sc; 1119 struct g_raid3_disk *disk; 1120 struct bio *xbp, *fbp, *cbp; 1121 off_t atom, cadd, padd, left; 1122 1123 sc = pbp->bio_to->geom->softc; 1124 /* 1125 * Find bio for which we have to calculate data. 1126 * While going through this path, check if all requests 1127 * succeeded, if not, deny whole request. 1128 * If we're in COMPLETE mode, we allow one request to fail, 1129 * so if we find one, we're sending it to the parity consumer. 1130 * If there are more failed requests, we deny whole request. 1131 */ 1132 xbp = fbp = NULL; 1133 G_RAID3_FOREACH_BIO(pbp, cbp) { 1134 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) { 1135 KASSERT(xbp == NULL, ("More than one parity bio.")); 1136 xbp = cbp; 1137 } 1138 if (cbp->bio_error == 0) 1139 continue; 1140 /* 1141 * Found failed request. 1142 */ 1143 if (fbp == NULL) { 1144 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) { 1145 /* 1146 * We are already in degraded mode, so we can't 1147 * accept any failures. 1148 */ 1149 if (pbp->bio_error == 0) 1150 pbp->bio_error = cbp->bio_error; 1151 } else { 1152 fbp = cbp; 1153 } 1154 } else { 1155 /* 1156 * Next failed request, that's too many. 1157 */ 1158 if (pbp->bio_error == 0) 1159 pbp->bio_error = fbp->bio_error; 1160 } 1161 disk = cbp->bio_caller2; 1162 if (disk == NULL) 1163 continue; 1164 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 1165 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 1166 G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).", 1167 cbp->bio_error); 1168 } else { 1169 G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).", 1170 cbp->bio_error); 1171 } 1172 if (g_raid3_disconnect_on_failure && 1173 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1174 sc->sc_bump_id |= G_RAID3_BUMP_GENID; 1175 g_raid3_event_send(disk, 1176 G_RAID3_DISK_STATE_DISCONNECTED, 1177 G_RAID3_EVENT_DONTWAIT); 1178 } 1179 } 1180 if (pbp->bio_error != 0) 1181 goto finish; 1182 if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1183 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY; 1184 if (xbp != fbp) 1185 g_raid3_replace_bio(xbp, fbp); 1186 g_raid3_destroy_bio(sc, fbp); 1187 } else if (fbp != NULL) { 1188 struct g_consumer *cp; 1189 1190 /* 1191 * One request failed, so send the same request to 1192 * the parity consumer. 1193 */ 1194 disk = pbp->bio_driver2; 1195 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 1196 pbp->bio_error = fbp->bio_error; 1197 goto finish; 1198 } 1199 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1200 pbp->bio_inbed--; 1201 fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR); 1202 if (disk->d_no == sc->sc_ndisks - 1) 1203 fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1204 fbp->bio_error = 0; 1205 fbp->bio_completed = 0; 1206 fbp->bio_children = 0; 1207 fbp->bio_inbed = 0; 1208 cp = disk->d_consumer; 1209 fbp->bio_caller2 = disk; 1210 fbp->bio_to = cp->provider; 1211 G_RAID3_LOGREQ(3, fbp, "Sending request (recover)."); 1212 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1213 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1214 cp->acr, cp->acw, cp->ace)); 1215 cp->index++; 1216 g_io_request(fbp, cp); 1217 return; 1218 } 1219 if (xbp != NULL) { 1220 /* 1221 * Calculate parity. 1222 */ 1223 G_RAID3_FOREACH_BIO(pbp, cbp) { 1224 if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) 1225 continue; 1226 g_raid3_xor(cbp->bio_data, xbp->bio_data, 1227 xbp->bio_length); 1228 } 1229 xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY; 1230 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) { 1231 if (!g_raid3_is_zero(xbp)) { 1232 g_raid3_parity_mismatch++; 1233 pbp->bio_error = EIO; 1234 goto finish; 1235 } 1236 g_raid3_destroy_bio(sc, xbp); 1237 } 1238 } 1239 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 1240 cadd = padd = 0; 1241 for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) { 1242 G_RAID3_FOREACH_BIO(pbp, cbp) { 1243 bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom); 1244 pbp->bio_completed += atom; 1245 padd += atom; 1246 } 1247 cadd += atom; 1248 } 1249 finish: 1250 if (pbp->bio_error == 0) 1251 G_RAID3_LOGREQ(3, pbp, "Request finished."); 1252 else { 1253 if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) 1254 G_RAID3_LOGREQ(1, pbp, "Verification error."); 1255 else 1256 G_RAID3_LOGREQ(0, pbp, "Request failed."); 1257 } 1258 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK; 1259 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 1260 g_raid3_destroy_bio(sc, cbp); 1261 g_io_deliver(pbp, pbp->bio_error); 1262 } 1263 1264 static void 1265 g_raid3_done(struct bio *bp) 1266 { 1267 struct g_raid3_softc *sc; 1268 1269 sc = bp->bio_from->geom->softc; 1270 bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR; 1271 G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error); 1272 mtx_lock(&sc->sc_queue_mtx); 1273 bioq_insert_head(&sc->sc_queue, bp); 1274 mtx_unlock(&sc->sc_queue_mtx); 1275 wakeup(sc); 1276 wakeup(&sc->sc_queue); 1277 } 1278 1279 static void 1280 g_raid3_regular_request(struct bio *cbp) 1281 { 1282 struct g_raid3_softc *sc; 1283 struct g_raid3_disk *disk; 1284 struct bio *pbp; 1285 1286 g_topology_assert_not(); 1287 1288 pbp = cbp->bio_parent; 1289 sc = pbp->bio_to->geom->softc; 1290 cbp->bio_from->index--; 1291 if (cbp->bio_cmd == BIO_WRITE) 1292 sc->sc_writes--; 1293 disk = cbp->bio_from->private; 1294 if (disk == NULL) { 1295 g_topology_lock(); 1296 g_raid3_kill_consumer(sc, cbp->bio_from); 1297 g_topology_unlock(); 1298 } 1299 1300 G_RAID3_LOGREQ(3, cbp, "Request finished."); 1301 pbp->bio_inbed++; 1302 KASSERT(pbp->bio_inbed <= pbp->bio_children, 1303 ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed, 1304 pbp->bio_children)); 1305 if (pbp->bio_inbed != pbp->bio_children) 1306 return; 1307 switch (pbp->bio_cmd) { 1308 case BIO_READ: 1309 g_raid3_gather(pbp); 1310 break; 1311 case BIO_WRITE: 1312 case BIO_DELETE: 1313 { 1314 int error = 0; 1315 1316 pbp->bio_completed = pbp->bio_length; 1317 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) { 1318 if (cbp->bio_error == 0) { 1319 g_raid3_destroy_bio(sc, cbp); 1320 continue; 1321 } 1322 1323 if (error == 0) 1324 error = cbp->bio_error; 1325 else if (pbp->bio_error == 0) { 1326 /* 1327 * Next failed request, that's too many. 1328 */ 1329 pbp->bio_error = error; 1330 } 1331 1332 disk = cbp->bio_caller2; 1333 if (disk == NULL) { 1334 g_raid3_destroy_bio(sc, cbp); 1335 continue; 1336 } 1337 1338 if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) { 1339 disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN; 1340 G_RAID3_LOGREQ(0, cbp, 1341 "Request failed (error=%d).", 1342 cbp->bio_error); 1343 } else { 1344 G_RAID3_LOGREQ(1, cbp, 1345 "Request failed (error=%d).", 1346 cbp->bio_error); 1347 } 1348 if (g_raid3_disconnect_on_failure && 1349 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1350 sc->sc_bump_id |= G_RAID3_BUMP_GENID; 1351 g_raid3_event_send(disk, 1352 G_RAID3_DISK_STATE_DISCONNECTED, 1353 G_RAID3_EVENT_DONTWAIT); 1354 } 1355 g_raid3_destroy_bio(sc, cbp); 1356 } 1357 if (pbp->bio_error == 0) 1358 G_RAID3_LOGREQ(3, pbp, "Request finished."); 1359 else 1360 G_RAID3_LOGREQ(0, pbp, "Request failed."); 1361 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED; 1362 pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY; 1363 bioq_remove(&sc->sc_inflight, pbp); 1364 /* Release delayed sync requests if possible. */ 1365 g_raid3_sync_release(sc); 1366 g_io_deliver(pbp, pbp->bio_error); 1367 break; 1368 } 1369 } 1370 } 1371 1372 static void 1373 g_raid3_sync_done(struct bio *bp) 1374 { 1375 struct g_raid3_softc *sc; 1376 1377 G_RAID3_LOGREQ(3, bp, "Synchronization request delivered."); 1378 sc = bp->bio_from->geom->softc; 1379 bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC; 1380 mtx_lock(&sc->sc_queue_mtx); 1381 bioq_insert_head(&sc->sc_queue, bp); 1382 mtx_unlock(&sc->sc_queue_mtx); 1383 wakeup(sc); 1384 wakeup(&sc->sc_queue); 1385 } 1386 1387 static void 1388 g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp) 1389 { 1390 struct bio_queue_head queue; 1391 struct g_raid3_disk *disk; 1392 struct g_consumer *cp __diagused; 1393 struct bio *cbp; 1394 u_int i; 1395 1396 bioq_init(&queue); 1397 for (i = 0; i < sc->sc_ndisks; i++) { 1398 disk = &sc->sc_disks[i]; 1399 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) 1400 continue; 1401 cbp = g_clone_bio(bp); 1402 if (cbp == NULL) { 1403 for (cbp = bioq_first(&queue); cbp != NULL; 1404 cbp = bioq_first(&queue)) { 1405 bioq_remove(&queue, cbp); 1406 g_destroy_bio(cbp); 1407 } 1408 if (bp->bio_error == 0) 1409 bp->bio_error = ENOMEM; 1410 g_io_deliver(bp, bp->bio_error); 1411 return; 1412 } 1413 bioq_insert_tail(&queue, cbp); 1414 cbp->bio_done = g_std_done; 1415 cbp->bio_caller1 = disk; 1416 cbp->bio_to = disk->d_consumer->provider; 1417 } 1418 for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) { 1419 bioq_remove(&queue, cbp); 1420 G_RAID3_LOGREQ(3, cbp, "Sending request."); 1421 disk = cbp->bio_caller1; 1422 cbp->bio_caller1 = NULL; 1423 cp = disk->d_consumer; 1424 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1425 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1426 cp->acr, cp->acw, cp->ace)); 1427 g_io_request(cbp, disk->d_consumer); 1428 } 1429 } 1430 1431 static void 1432 g_raid3_start(struct bio *bp) 1433 { 1434 struct g_raid3_softc *sc; 1435 1436 sc = bp->bio_to->geom->softc; 1437 /* 1438 * If sc == NULL or there are no valid disks, provider's error 1439 * should be set and g_raid3_start() should not be called at all. 1440 */ 1441 KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 1442 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE), 1443 ("Provider's error should be set (error=%d)(device=%s).", 1444 bp->bio_to->error, bp->bio_to->name)); 1445 G_RAID3_LOGREQ(3, bp, "Request received."); 1446 1447 switch (bp->bio_cmd) { 1448 case BIO_READ: 1449 case BIO_WRITE: 1450 case BIO_DELETE: 1451 break; 1452 case BIO_SPEEDUP: 1453 case BIO_FLUSH: 1454 g_raid3_flush(sc, bp); 1455 return; 1456 case BIO_GETATTR: 1457 default: 1458 g_io_deliver(bp, EOPNOTSUPP); 1459 return; 1460 } 1461 mtx_lock(&sc->sc_queue_mtx); 1462 bioq_insert_tail(&sc->sc_queue, bp); 1463 mtx_unlock(&sc->sc_queue_mtx); 1464 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 1465 wakeup(sc); 1466 } 1467 1468 /* 1469 * Return TRUE if the given request is colliding with a in-progress 1470 * synchronization request. 1471 */ 1472 static int 1473 g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp) 1474 { 1475 struct g_raid3_disk *disk; 1476 struct bio *sbp; 1477 off_t rstart, rend, sstart, send; 1478 int i; 1479 1480 disk = sc->sc_syncdisk; 1481 if (disk == NULL) 1482 return (0); 1483 rstart = bp->bio_offset; 1484 rend = bp->bio_offset + bp->bio_length; 1485 for (i = 0; i < g_raid3_syncreqs; i++) { 1486 sbp = disk->d_sync.ds_bios[i]; 1487 if (sbp == NULL) 1488 continue; 1489 sstart = sbp->bio_offset; 1490 send = sbp->bio_length; 1491 if (sbp->bio_cmd == BIO_WRITE) { 1492 sstart *= sc->sc_ndisks - 1; 1493 send *= sc->sc_ndisks - 1; 1494 } 1495 send += sstart; 1496 if (rend > sstart && rstart < send) 1497 return (1); 1498 } 1499 return (0); 1500 } 1501 1502 /* 1503 * Return TRUE if the given sync request is colliding with a in-progress regular 1504 * request. 1505 */ 1506 static int 1507 g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp) 1508 { 1509 off_t rstart, rend, sstart, send; 1510 struct bio *bp; 1511 1512 if (sc->sc_syncdisk == NULL) 1513 return (0); 1514 sstart = sbp->bio_offset; 1515 send = sstart + sbp->bio_length; 1516 TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) { 1517 rstart = bp->bio_offset; 1518 rend = bp->bio_offset + bp->bio_length; 1519 if (rend > sstart && rstart < send) 1520 return (1); 1521 } 1522 return (0); 1523 } 1524 1525 /* 1526 * Puts request onto delayed queue. 1527 */ 1528 static void 1529 g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp) 1530 { 1531 1532 G_RAID3_LOGREQ(2, bp, "Delaying request."); 1533 bioq_insert_head(&sc->sc_regular_delayed, bp); 1534 } 1535 1536 /* 1537 * Puts synchronization request onto delayed queue. 1538 */ 1539 static void 1540 g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp) 1541 { 1542 1543 G_RAID3_LOGREQ(2, bp, "Delaying synchronization request."); 1544 bioq_insert_tail(&sc->sc_sync_delayed, bp); 1545 } 1546 1547 /* 1548 * Releases delayed regular requests which don't collide anymore with sync 1549 * requests. 1550 */ 1551 static void 1552 g_raid3_regular_release(struct g_raid3_softc *sc) 1553 { 1554 struct bio *bp, *bp2; 1555 1556 TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) { 1557 if (g_raid3_sync_collision(sc, bp)) 1558 continue; 1559 bioq_remove(&sc->sc_regular_delayed, bp); 1560 G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp); 1561 mtx_lock(&sc->sc_queue_mtx); 1562 bioq_insert_head(&sc->sc_queue, bp); 1563 #if 0 1564 /* 1565 * wakeup() is not needed, because this function is called from 1566 * the worker thread. 1567 */ 1568 wakeup(&sc->sc_queue); 1569 #endif 1570 mtx_unlock(&sc->sc_queue_mtx); 1571 } 1572 } 1573 1574 /* 1575 * Releases delayed sync requests which don't collide anymore with regular 1576 * requests. 1577 */ 1578 static void 1579 g_raid3_sync_release(struct g_raid3_softc *sc) 1580 { 1581 struct bio *bp, *bp2; 1582 1583 TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) { 1584 if (g_raid3_regular_collision(sc, bp)) 1585 continue; 1586 bioq_remove(&sc->sc_sync_delayed, bp); 1587 G_RAID3_LOGREQ(2, bp, 1588 "Releasing delayed synchronization request."); 1589 g_io_request(bp, bp->bio_from); 1590 } 1591 } 1592 1593 /* 1594 * Handle synchronization requests. 1595 * Every synchronization request is two-steps process: first, READ request is 1596 * send to active provider and then WRITE request (with read data) to the provider 1597 * being synchronized. When WRITE is finished, new synchronization request is 1598 * send. 1599 */ 1600 static void 1601 g_raid3_sync_request(struct bio *bp) 1602 { 1603 struct g_raid3_softc *sc; 1604 struct g_raid3_disk *disk; 1605 1606 bp->bio_from->index--; 1607 sc = bp->bio_from->geom->softc; 1608 disk = bp->bio_from->private; 1609 if (disk == NULL) { 1610 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 1611 g_topology_lock(); 1612 g_raid3_kill_consumer(sc, bp->bio_from); 1613 g_topology_unlock(); 1614 free(bp->bio_data, M_RAID3); 1615 g_destroy_bio(bp); 1616 sx_xlock(&sc->sc_lock); 1617 return; 1618 } 1619 1620 /* 1621 * Synchronization request. 1622 */ 1623 switch (bp->bio_cmd) { 1624 case BIO_READ: 1625 { 1626 struct g_consumer *cp; 1627 u_char *dst, *src; 1628 off_t left; 1629 u_int atom; 1630 1631 if (bp->bio_error != 0) { 1632 G_RAID3_LOGREQ(0, bp, 1633 "Synchronization request failed (error=%d).", 1634 bp->bio_error); 1635 g_destroy_bio(bp); 1636 return; 1637 } 1638 G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 1639 atom = sc->sc_sectorsize / (sc->sc_ndisks - 1); 1640 dst = src = bp->bio_data; 1641 if (disk->d_no == sc->sc_ndisks - 1) { 1642 u_int n; 1643 1644 /* Parity component. */ 1645 for (left = bp->bio_length; left > 0; 1646 left -= sc->sc_sectorsize) { 1647 bcopy(src, dst, atom); 1648 src += atom; 1649 for (n = 1; n < sc->sc_ndisks - 1; n++) { 1650 g_raid3_xor(src, dst, atom); 1651 src += atom; 1652 } 1653 dst += atom; 1654 } 1655 } else { 1656 /* Regular component. */ 1657 src += atom * disk->d_no; 1658 for (left = bp->bio_length; left > 0; 1659 left -= sc->sc_sectorsize) { 1660 bcopy(src, dst, atom); 1661 src += sc->sc_sectorsize; 1662 dst += atom; 1663 } 1664 } 1665 bp->bio_driver1 = bp->bio_driver2 = NULL; 1666 bp->bio_pflags = 0; 1667 bp->bio_offset /= sc->sc_ndisks - 1; 1668 bp->bio_length /= sc->sc_ndisks - 1; 1669 bp->bio_cmd = BIO_WRITE; 1670 bp->bio_cflags = 0; 1671 bp->bio_children = bp->bio_inbed = 0; 1672 cp = disk->d_consumer; 1673 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1674 ("Consumer %s not opened (r%dw%de%d).", cp->provider->name, 1675 cp->acr, cp->acw, cp->ace)); 1676 cp->index++; 1677 g_io_request(bp, cp); 1678 return; 1679 } 1680 case BIO_WRITE: 1681 { 1682 struct g_raid3_disk_sync *sync; 1683 off_t boffset, moffset; 1684 void *data; 1685 int i; 1686 1687 if (bp->bio_error != 0) { 1688 G_RAID3_LOGREQ(0, bp, 1689 "Synchronization request failed (error=%d).", 1690 bp->bio_error); 1691 g_destroy_bio(bp); 1692 sc->sc_bump_id |= G_RAID3_BUMP_GENID; 1693 g_raid3_event_send(disk, 1694 G_RAID3_DISK_STATE_DISCONNECTED, 1695 G_RAID3_EVENT_DONTWAIT); 1696 return; 1697 } 1698 G_RAID3_LOGREQ(3, bp, "Synchronization request finished."); 1699 sync = &disk->d_sync; 1700 if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) || 1701 sync->ds_consumer == NULL || 1702 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 1703 /* Don't send more synchronization requests. */ 1704 sync->ds_inflight--; 1705 if (sync->ds_bios != NULL) { 1706 i = (int)(uintptr_t)bp->bio_caller1; 1707 sync->ds_bios[i] = NULL; 1708 } 1709 free(bp->bio_data, M_RAID3); 1710 g_destroy_bio(bp); 1711 if (sync->ds_inflight > 0) 1712 return; 1713 if (sync->ds_consumer == NULL || 1714 (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 1715 return; 1716 } 1717 /* 1718 * Disk up-to-date, activate it. 1719 */ 1720 g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE, 1721 G_RAID3_EVENT_DONTWAIT); 1722 return; 1723 } 1724 1725 /* Send next synchronization request. */ 1726 data = bp->bio_data; 1727 g_reset_bio(bp); 1728 bp->bio_cmd = BIO_READ; 1729 bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1); 1730 bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 1731 sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 1732 bp->bio_done = g_raid3_sync_done; 1733 bp->bio_data = data; 1734 bp->bio_from = sync->ds_consumer; 1735 bp->bio_to = sc->sc_provider; 1736 G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 1737 sync->ds_consumer->index++; 1738 /* 1739 * Delay the request if it is colliding with a regular request. 1740 */ 1741 if (g_raid3_regular_collision(sc, bp)) 1742 g_raid3_sync_delay(sc, bp); 1743 else 1744 g_io_request(bp, sync->ds_consumer); 1745 1746 /* Release delayed requests if possible. */ 1747 g_raid3_regular_release(sc); 1748 1749 /* Find the smallest offset. */ 1750 moffset = sc->sc_mediasize; 1751 for (i = 0; i < g_raid3_syncreqs; i++) { 1752 bp = sync->ds_bios[i]; 1753 boffset = bp->bio_offset; 1754 if (bp->bio_cmd == BIO_WRITE) 1755 boffset *= sc->sc_ndisks - 1; 1756 if (boffset < moffset) 1757 moffset = boffset; 1758 } 1759 if (sync->ds_offset_done + maxphys * 100 < moffset) { 1760 /* Update offset_done on every 100 blocks. */ 1761 sync->ds_offset_done = moffset; 1762 g_raid3_update_metadata(disk); 1763 } 1764 return; 1765 } 1766 default: 1767 KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", 1768 bp->bio_cmd, sc->sc_name)); 1769 break; 1770 } 1771 } 1772 1773 static int 1774 g_raid3_register_request(struct bio *pbp) 1775 { 1776 struct g_raid3_softc *sc; 1777 struct g_raid3_disk *disk; 1778 struct g_consumer *cp; 1779 struct bio *cbp, *tmpbp; 1780 off_t offset, length; 1781 u_int n, ndisks; 1782 int round_robin, verify; 1783 1784 ndisks = 0; 1785 sc = pbp->bio_to->geom->softc; 1786 if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 && 1787 sc->sc_syncdisk == NULL) { 1788 g_io_deliver(pbp, EIO); 1789 return (0); 1790 } 1791 g_raid3_init_bio(pbp); 1792 length = pbp->bio_length / (sc->sc_ndisks - 1); 1793 offset = pbp->bio_offset / (sc->sc_ndisks - 1); 1794 round_robin = verify = 0; 1795 switch (pbp->bio_cmd) { 1796 case BIO_READ: 1797 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 1798 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1799 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY; 1800 verify = 1; 1801 ndisks = sc->sc_ndisks; 1802 } else { 1803 verify = 0; 1804 ndisks = sc->sc_ndisks - 1; 1805 } 1806 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 && 1807 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 1808 round_robin = 1; 1809 } else { 1810 round_robin = 0; 1811 } 1812 KASSERT(!round_robin || !verify, 1813 ("ROUND-ROBIN and VERIFY are mutually exclusive.")); 1814 pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1]; 1815 break; 1816 case BIO_WRITE: 1817 case BIO_DELETE: 1818 /* 1819 * Delay the request if it is colliding with a synchronization 1820 * request. 1821 */ 1822 if (g_raid3_sync_collision(sc, pbp)) { 1823 g_raid3_regular_delay(sc, pbp); 1824 return (0); 1825 } 1826 1827 if (sc->sc_idle) 1828 g_raid3_unidle(sc); 1829 else 1830 sc->sc_last_write = time_uptime; 1831 1832 ndisks = sc->sc_ndisks; 1833 break; 1834 } 1835 for (n = 0; n < ndisks; n++) { 1836 disk = &sc->sc_disks[n]; 1837 cbp = g_raid3_clone_bio(sc, pbp); 1838 if (cbp == NULL) { 1839 while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) 1840 g_raid3_destroy_bio(sc, cbp); 1841 /* 1842 * To prevent deadlock, we must run back up 1843 * with the ENOMEM for failed requests of any 1844 * of our consumers. Our own sync requests 1845 * can stick around, as they are finite. 1846 */ 1847 if ((pbp->bio_cflags & 1848 G_RAID3_BIO_CFLAG_REGULAR) != 0) { 1849 g_io_deliver(pbp, ENOMEM); 1850 return (0); 1851 } 1852 return (ENOMEM); 1853 } 1854 cbp->bio_offset = offset; 1855 cbp->bio_length = length; 1856 cbp->bio_done = g_raid3_done; 1857 switch (pbp->bio_cmd) { 1858 case BIO_READ: 1859 if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) { 1860 /* 1861 * Replace invalid component with the parity 1862 * component. 1863 */ 1864 disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1865 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1866 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1867 } else if (round_robin && 1868 disk->d_no == sc->sc_round_robin) { 1869 /* 1870 * In round-robin mode skip one data component 1871 * and use parity component when reading. 1872 */ 1873 pbp->bio_driver2 = disk; 1874 disk = &sc->sc_disks[sc->sc_ndisks - 1]; 1875 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1876 sc->sc_round_robin++; 1877 round_robin = 0; 1878 } else if (verify && disk->d_no == sc->sc_ndisks - 1) { 1879 cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY; 1880 } 1881 break; 1882 case BIO_WRITE: 1883 case BIO_DELETE: 1884 if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 1885 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 1886 if (n == ndisks - 1) { 1887 /* 1888 * Active parity component, mark it as such. 1889 */ 1890 cbp->bio_cflags |= 1891 G_RAID3_BIO_CFLAG_PARITY; 1892 } 1893 } else { 1894 pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED; 1895 if (n == ndisks - 1) { 1896 /* 1897 * Parity component is not connected, 1898 * so destroy its request. 1899 */ 1900 pbp->bio_pflags |= 1901 G_RAID3_BIO_PFLAG_NOPARITY; 1902 g_raid3_destroy_bio(sc, cbp); 1903 cbp = NULL; 1904 } else { 1905 cbp->bio_cflags |= 1906 G_RAID3_BIO_CFLAG_NODISK; 1907 disk = NULL; 1908 } 1909 } 1910 break; 1911 } 1912 if (cbp != NULL) 1913 cbp->bio_caller2 = disk; 1914 } 1915 switch (pbp->bio_cmd) { 1916 case BIO_READ: 1917 if (round_robin) { 1918 /* 1919 * If we are in round-robin mode and 'round_robin' is 1920 * still 1, it means, that we skipped parity component 1921 * for this read and must reset sc_round_robin field. 1922 */ 1923 sc->sc_round_robin = 0; 1924 } 1925 G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) { 1926 disk = cbp->bio_caller2; 1927 cp = disk->d_consumer; 1928 cbp->bio_to = cp->provider; 1929 G_RAID3_LOGREQ(3, cbp, "Sending request."); 1930 KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1, 1931 ("Consumer %s not opened (r%dw%de%d).", 1932 cp->provider->name, cp->acr, cp->acw, cp->ace)); 1933 cp->index++; 1934 g_io_request(cbp, cp); 1935 } 1936 break; 1937 case BIO_WRITE: 1938 case BIO_DELETE: 1939 /* 1940 * Put request onto inflight queue, so we can check if new 1941 * synchronization requests don't collide with it. 1942 */ 1943 bioq_insert_tail(&sc->sc_inflight, pbp); 1944 1945 /* 1946 * Bump syncid on first write. 1947 */ 1948 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) { 1949 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 1950 g_raid3_bump_syncid(sc); 1951 } 1952 g_raid3_scatter(pbp); 1953 break; 1954 } 1955 return (0); 1956 } 1957 1958 static int 1959 g_raid3_can_destroy(struct g_raid3_softc *sc) 1960 { 1961 struct g_geom *gp; 1962 struct g_consumer *cp; 1963 1964 g_topology_assert(); 1965 gp = sc->sc_geom; 1966 if (gp->softc == NULL) 1967 return (1); 1968 LIST_FOREACH(cp, &gp->consumer, consumer) { 1969 if (g_raid3_is_busy(sc, cp)) 1970 return (0); 1971 } 1972 gp = sc->sc_sync.ds_geom; 1973 LIST_FOREACH(cp, &gp->consumer, consumer) { 1974 if (g_raid3_is_busy(sc, cp)) 1975 return (0); 1976 } 1977 G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.", 1978 sc->sc_name); 1979 return (1); 1980 } 1981 1982 static int 1983 g_raid3_try_destroy(struct g_raid3_softc *sc) 1984 { 1985 1986 g_topology_assert_not(); 1987 sx_assert(&sc->sc_lock, SX_XLOCKED); 1988 1989 if (sc->sc_rootmount != NULL) { 1990 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 1991 sc->sc_rootmount); 1992 root_mount_rel(sc->sc_rootmount); 1993 sc->sc_rootmount = NULL; 1994 } 1995 1996 g_topology_lock(); 1997 if (!g_raid3_can_destroy(sc)) { 1998 g_topology_unlock(); 1999 return (0); 2000 } 2001 sc->sc_geom->softc = NULL; 2002 sc->sc_sync.ds_geom->softc = NULL; 2003 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) { 2004 g_topology_unlock(); 2005 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 2006 &sc->sc_worker); 2007 /* Unlock sc_lock here, as it can be destroyed after wakeup. */ 2008 sx_xunlock(&sc->sc_lock); 2009 wakeup(&sc->sc_worker); 2010 sc->sc_worker = NULL; 2011 } else { 2012 g_topology_unlock(); 2013 g_raid3_destroy_device(sc); 2014 free(sc->sc_disks, M_RAID3); 2015 free(sc, M_RAID3); 2016 } 2017 return (1); 2018 } 2019 2020 /* 2021 * Worker thread. 2022 */ 2023 static void 2024 g_raid3_worker(void *arg) 2025 { 2026 struct g_raid3_softc *sc; 2027 struct g_raid3_event *ep; 2028 struct bio *bp; 2029 int timeout; 2030 2031 sc = arg; 2032 thread_lock(curthread); 2033 sched_prio(curthread, PRIBIO); 2034 thread_unlock(curthread); 2035 2036 sx_xlock(&sc->sc_lock); 2037 for (;;) { 2038 G_RAID3_DEBUG(5, "%s: Let's see...", __func__); 2039 /* 2040 * First take a look at events. 2041 * This is important to handle events before any I/O requests. 2042 */ 2043 ep = g_raid3_event_get(sc); 2044 if (ep != NULL) { 2045 g_raid3_event_remove(sc, ep); 2046 if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) { 2047 /* Update only device status. */ 2048 G_RAID3_DEBUG(3, 2049 "Running event for device %s.", 2050 sc->sc_name); 2051 ep->e_error = 0; 2052 g_raid3_update_device(sc, 1); 2053 } else { 2054 /* Update disk status. */ 2055 G_RAID3_DEBUG(3, "Running event for disk %s.", 2056 g_raid3_get_diskname(ep->e_disk)); 2057 ep->e_error = g_raid3_update_disk(ep->e_disk, 2058 ep->e_state); 2059 if (ep->e_error == 0) 2060 g_raid3_update_device(sc, 0); 2061 } 2062 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) { 2063 KASSERT(ep->e_error == 0, 2064 ("Error cannot be handled.")); 2065 g_raid3_event_free(ep); 2066 } else { 2067 ep->e_flags |= G_RAID3_EVENT_DONE; 2068 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 2069 ep); 2070 mtx_lock(&sc->sc_events_mtx); 2071 wakeup(ep); 2072 mtx_unlock(&sc->sc_events_mtx); 2073 } 2074 if ((sc->sc_flags & 2075 G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 2076 if (g_raid3_try_destroy(sc)) { 2077 curthread->td_pflags &= ~TDP_GEOM; 2078 G_RAID3_DEBUG(1, "Thread exiting."); 2079 kproc_exit(0); 2080 } 2081 } 2082 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__); 2083 continue; 2084 } 2085 /* 2086 * Check if we can mark array as CLEAN and if we can't take 2087 * how much seconds should we wait. 2088 */ 2089 timeout = g_raid3_idle(sc, -1); 2090 /* 2091 * Now I/O requests. 2092 */ 2093 /* Get first request from the queue. */ 2094 mtx_lock(&sc->sc_queue_mtx); 2095 bp = bioq_first(&sc->sc_queue); 2096 if (bp == NULL) { 2097 if ((sc->sc_flags & 2098 G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 2099 mtx_unlock(&sc->sc_queue_mtx); 2100 if (g_raid3_try_destroy(sc)) { 2101 curthread->td_pflags &= ~TDP_GEOM; 2102 G_RAID3_DEBUG(1, "Thread exiting."); 2103 kproc_exit(0); 2104 } 2105 mtx_lock(&sc->sc_queue_mtx); 2106 } 2107 sx_xunlock(&sc->sc_lock); 2108 /* 2109 * XXX: We can miss an event here, because an event 2110 * can be added without sx-device-lock and without 2111 * mtx-queue-lock. Maybe I should just stop using 2112 * dedicated mutex for events synchronization and 2113 * stick with the queue lock? 2114 * The event will hang here until next I/O request 2115 * or next event is received. 2116 */ 2117 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 2118 timeout * hz); 2119 sx_xlock(&sc->sc_lock); 2120 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 2121 continue; 2122 } 2123 process: 2124 bioq_remove(&sc->sc_queue, bp); 2125 mtx_unlock(&sc->sc_queue_mtx); 2126 2127 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 2128 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 2129 g_raid3_sync_request(bp); /* READ */ 2130 } else if (bp->bio_to != sc->sc_provider) { 2131 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 2132 g_raid3_regular_request(bp); 2133 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 2134 g_raid3_sync_request(bp); /* WRITE */ 2135 else { 2136 KASSERT(0, 2137 ("Invalid request cflags=0x%hx to=%s.", 2138 bp->bio_cflags, bp->bio_to->name)); 2139 } 2140 } else if (g_raid3_register_request(bp) != 0) { 2141 mtx_lock(&sc->sc_queue_mtx); 2142 bioq_insert_head(&sc->sc_queue, bp); 2143 /* 2144 * We are short in memory, let see if there are finished 2145 * request we can free. 2146 */ 2147 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 2148 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 2149 goto process; 2150 } 2151 /* 2152 * No finished regular request, so at least keep 2153 * synchronization running. 2154 */ 2155 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 2156 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 2157 goto process; 2158 } 2159 sx_xunlock(&sc->sc_lock); 2160 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 2161 "r3:lowmem", hz / 10); 2162 sx_xlock(&sc->sc_lock); 2163 } 2164 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 2165 } 2166 } 2167 2168 static void 2169 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 2170 { 2171 2172 sx_assert(&sc->sc_lock, SX_LOCKED); 2173 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2174 return; 2175 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 2176 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 2177 g_raid3_get_diskname(disk), sc->sc_name); 2178 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 2179 } else if (sc->sc_idle && 2180 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 2181 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 2182 g_raid3_get_diskname(disk), sc->sc_name); 2183 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2184 } 2185 } 2186 2187 static void 2188 g_raid3_sync_start(struct g_raid3_softc *sc) 2189 { 2190 struct g_raid3_disk *disk; 2191 struct g_consumer *cp; 2192 struct bio *bp; 2193 int error __diagused; 2194 u_int n; 2195 2196 g_topology_assert_not(); 2197 sx_assert(&sc->sc_lock, SX_XLOCKED); 2198 2199 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 2200 ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 2201 sc->sc_state)); 2202 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 2203 sc->sc_name, sc->sc_state)); 2204 disk = NULL; 2205 for (n = 0; n < sc->sc_ndisks; n++) { 2206 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 2207 continue; 2208 disk = &sc->sc_disks[n]; 2209 break; 2210 } 2211 if (disk == NULL) 2212 return; 2213 2214 sx_xunlock(&sc->sc_lock); 2215 g_topology_lock(); 2216 cp = g_new_consumer(sc->sc_sync.ds_geom); 2217 error = g_attach(cp, sc->sc_provider); 2218 KASSERT(error == 0, 2219 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2220 error = g_access(cp, 1, 0, 0); 2221 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2222 g_topology_unlock(); 2223 sx_xlock(&sc->sc_lock); 2224 2225 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2226 g_raid3_get_diskname(disk)); 2227 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 2228 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 2229 KASSERT(disk->d_sync.ds_consumer == NULL, 2230 ("Sync consumer already exists (device=%s, disk=%s).", 2231 sc->sc_name, g_raid3_get_diskname(disk))); 2232 2233 disk->d_sync.ds_consumer = cp; 2234 disk->d_sync.ds_consumer->private = disk; 2235 disk->d_sync.ds_consumer->index = 0; 2236 sc->sc_syncdisk = disk; 2237 2238 /* 2239 * Allocate memory for synchronization bios and initialize them. 2240 */ 2241 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 2242 M_RAID3, M_WAITOK); 2243 for (n = 0; n < g_raid3_syncreqs; n++) { 2244 bp = g_alloc_bio(); 2245 disk->d_sync.ds_bios[n] = bp; 2246 bp->bio_parent = NULL; 2247 bp->bio_cmd = BIO_READ; 2248 bp->bio_data = malloc(maxphys, M_RAID3, M_WAITOK); 2249 bp->bio_cflags = 0; 2250 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 2251 bp->bio_length = MIN(maxphys, sc->sc_mediasize - bp->bio_offset); 2252 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 2253 bp->bio_done = g_raid3_sync_done; 2254 bp->bio_from = disk->d_sync.ds_consumer; 2255 bp->bio_to = sc->sc_provider; 2256 bp->bio_caller1 = (void *)(uintptr_t)n; 2257 } 2258 2259 /* Set the number of in-flight synchronization requests. */ 2260 disk->d_sync.ds_inflight = g_raid3_syncreqs; 2261 2262 /* 2263 * Fire off first synchronization requests. 2264 */ 2265 for (n = 0; n < g_raid3_syncreqs; n++) { 2266 bp = disk->d_sync.ds_bios[n]; 2267 G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 2268 disk->d_sync.ds_consumer->index++; 2269 /* 2270 * Delay the request if it is colliding with a regular request. 2271 */ 2272 if (g_raid3_regular_collision(sc, bp)) 2273 g_raid3_sync_delay(sc, bp); 2274 else 2275 g_io_request(bp, disk->d_sync.ds_consumer); 2276 } 2277 } 2278 2279 /* 2280 * Stop synchronization process. 2281 * type: 0 - synchronization finished 2282 * 1 - synchronization stopped 2283 */ 2284 static void 2285 g_raid3_sync_stop(struct g_raid3_softc *sc, int type) 2286 { 2287 struct g_raid3_disk *disk; 2288 struct g_consumer *cp; 2289 2290 g_topology_assert_not(); 2291 sx_assert(&sc->sc_lock, SX_LOCKED); 2292 2293 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 2294 ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 2295 sc->sc_state)); 2296 disk = sc->sc_syncdisk; 2297 sc->sc_syncdisk = NULL; 2298 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name)); 2299 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2300 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2301 g_raid3_disk_state2str(disk->d_state))); 2302 if (disk->d_sync.ds_consumer == NULL) 2303 return; 2304 2305 if (type == 0) { 2306 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2307 sc->sc_name, g_raid3_get_diskname(disk)); 2308 } else /* if (type == 1) */ { 2309 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2310 sc->sc_name, g_raid3_get_diskname(disk)); 2311 } 2312 free(disk->d_sync.ds_bios, M_RAID3); 2313 disk->d_sync.ds_bios = NULL; 2314 cp = disk->d_sync.ds_consumer; 2315 disk->d_sync.ds_consumer = NULL; 2316 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2317 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */ 2318 g_topology_lock(); 2319 g_raid3_kill_consumer(sc, cp); 2320 g_topology_unlock(); 2321 sx_xlock(&sc->sc_lock); 2322 } 2323 2324 static void 2325 g_raid3_launch_provider(struct g_raid3_softc *sc) 2326 { 2327 struct g_provider *pp; 2328 struct g_raid3_disk *disk; 2329 int n; 2330 2331 sx_assert(&sc->sc_lock, SX_LOCKED); 2332 2333 g_topology_lock(); 2334 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name); 2335 pp->mediasize = sc->sc_mediasize; 2336 pp->sectorsize = sc->sc_sectorsize; 2337 pp->stripesize = 0; 2338 pp->stripeoffset = 0; 2339 for (n = 0; n < sc->sc_ndisks; n++) { 2340 disk = &sc->sc_disks[n]; 2341 if (disk->d_consumer && disk->d_consumer->provider && 2342 disk->d_consumer->provider->stripesize > pp->stripesize) { 2343 pp->stripesize = disk->d_consumer->provider->stripesize; 2344 pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2345 } 2346 } 2347 pp->stripesize *= sc->sc_ndisks - 1; 2348 pp->stripeoffset *= sc->sc_ndisks - 1; 2349 sc->sc_provider = pp; 2350 g_error_provider(pp, 0); 2351 g_topology_unlock(); 2352 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2353 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks); 2354 2355 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED) 2356 g_raid3_sync_start(sc); 2357 } 2358 2359 static void 2360 g_raid3_destroy_provider(struct g_raid3_softc *sc) 2361 { 2362 struct bio *bp; 2363 2364 g_topology_assert_not(); 2365 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2366 sc->sc_name)); 2367 2368 g_topology_lock(); 2369 g_error_provider(sc->sc_provider, ENXIO); 2370 mtx_lock(&sc->sc_queue_mtx); 2371 while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 2372 bioq_remove(&sc->sc_queue, bp); 2373 g_io_deliver(bp, ENXIO); 2374 } 2375 mtx_unlock(&sc->sc_queue_mtx); 2376 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2377 sc->sc_provider->name); 2378 g_wither_provider(sc->sc_provider, ENXIO); 2379 g_topology_unlock(); 2380 sc->sc_provider = NULL; 2381 if (sc->sc_syncdisk != NULL) 2382 g_raid3_sync_stop(sc, 1); 2383 } 2384 2385 static void 2386 g_raid3_go(void *arg) 2387 { 2388 struct g_raid3_softc *sc; 2389 struct g_raid3_event *ep; 2390 2391 sc = arg; 2392 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2393 ep = sc->sc_timeout_event; 2394 sc->sc_timeout_event = NULL; 2395 g_raid3_event_dispatch(ep, sc, 0, 2396 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE); 2397 } 2398 2399 static void 2400 g_raid3_timeout_drain(struct g_raid3_softc *sc) 2401 { 2402 sx_assert(&sc->sc_lock, SX_XLOCKED); 2403 2404 callout_drain(&sc->sc_callout); 2405 g_raid3_event_free(sc->sc_timeout_event); 2406 sc->sc_timeout_event = NULL; 2407 } 2408 2409 static u_int 2410 g_raid3_determine_state(struct g_raid3_disk *disk) 2411 { 2412 struct g_raid3_softc *sc; 2413 u_int state; 2414 2415 sc = disk->d_softc; 2416 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2417 if ((disk->d_flags & 2418 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) { 2419 /* Disk does not need synchronization. */ 2420 state = G_RAID3_DISK_STATE_ACTIVE; 2421 } else { 2422 if ((sc->sc_flags & 2423 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2424 (disk->d_flags & 2425 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 2426 /* 2427 * We can start synchronization from 2428 * the stored offset. 2429 */ 2430 state = G_RAID3_DISK_STATE_SYNCHRONIZING; 2431 } else { 2432 state = G_RAID3_DISK_STATE_STALE; 2433 } 2434 } 2435 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2436 /* 2437 * Reset all synchronization data for this disk, 2438 * because if it even was synchronized, it was 2439 * synchronized to disks with different syncid. 2440 */ 2441 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 2442 disk->d_sync.ds_offset = 0; 2443 disk->d_sync.ds_offset_done = 0; 2444 disk->d_sync.ds_syncid = sc->sc_syncid; 2445 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2446 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 2447 state = G_RAID3_DISK_STATE_SYNCHRONIZING; 2448 } else { 2449 state = G_RAID3_DISK_STATE_STALE; 2450 } 2451 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2452 /* 2453 * Not good, NOT GOOD! 2454 * It means that device was started on stale disks 2455 * and more fresh disk just arrive. 2456 * If there were writes, device is broken, sorry. 2457 * I think the best choice here is don't touch 2458 * this disk and inform the user loudly. 2459 */ 2460 G_RAID3_DEBUG(0, "Device %s was started before the freshest " 2461 "disk (%s) arrives!! It will not be connected to the " 2462 "running device.", sc->sc_name, 2463 g_raid3_get_diskname(disk)); 2464 g_raid3_destroy_disk(disk); 2465 state = G_RAID3_DISK_STATE_NONE; 2466 /* Return immediately, because disk was destroyed. */ 2467 return (state); 2468 } 2469 G_RAID3_DEBUG(3, "State for %s disk: %s.", 2470 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state)); 2471 return (state); 2472 } 2473 2474 /* 2475 * Update device state. 2476 */ 2477 static void 2478 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force) 2479 { 2480 struct g_raid3_disk *disk; 2481 u_int state; 2482 2483 sx_assert(&sc->sc_lock, SX_XLOCKED); 2484 2485 switch (sc->sc_state) { 2486 case G_RAID3_DEVICE_STATE_STARTING: 2487 { 2488 u_int n, ndirty, ndisks, genid, syncid; 2489 2490 KASSERT(sc->sc_provider == NULL, 2491 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2492 /* 2493 * Are we ready? We are, if all disks are connected or 2494 * one disk is missing and 'force' is true. 2495 */ 2496 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) { 2497 if (!force) 2498 g_raid3_timeout_drain(sc); 2499 } else { 2500 if (force) { 2501 /* 2502 * Timeout expired, so destroy device. 2503 */ 2504 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2505 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", 2506 __LINE__, sc->sc_rootmount); 2507 root_mount_rel(sc->sc_rootmount); 2508 sc->sc_rootmount = NULL; 2509 } 2510 return; 2511 } 2512 2513 /* 2514 * Find the biggest genid. 2515 */ 2516 genid = 0; 2517 for (n = 0; n < sc->sc_ndisks; n++) { 2518 disk = &sc->sc_disks[n]; 2519 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2520 continue; 2521 if (disk->d_genid > genid) 2522 genid = disk->d_genid; 2523 } 2524 sc->sc_genid = genid; 2525 /* 2526 * Remove all disks without the biggest genid. 2527 */ 2528 for (n = 0; n < sc->sc_ndisks; n++) { 2529 disk = &sc->sc_disks[n]; 2530 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2531 continue; 2532 if (disk->d_genid < genid) { 2533 G_RAID3_DEBUG(0, 2534 "Component %s (device %s) broken, skipping.", 2535 g_raid3_get_diskname(disk), sc->sc_name); 2536 g_raid3_destroy_disk(disk); 2537 } 2538 } 2539 2540 /* 2541 * There must be at least 'sc->sc_ndisks - 1' components 2542 * with the same syncid and without SYNCHRONIZING flag. 2543 */ 2544 2545 /* 2546 * Find the biggest syncid, number of valid components and 2547 * number of dirty components. 2548 */ 2549 ndirty = ndisks = syncid = 0; 2550 for (n = 0; n < sc->sc_ndisks; n++) { 2551 disk = &sc->sc_disks[n]; 2552 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2553 continue; 2554 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) 2555 ndirty++; 2556 if (disk->d_sync.ds_syncid > syncid) { 2557 syncid = disk->d_sync.ds_syncid; 2558 ndisks = 0; 2559 } else if (disk->d_sync.ds_syncid < syncid) { 2560 continue; 2561 } 2562 if ((disk->d_flags & 2563 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) { 2564 continue; 2565 } 2566 ndisks++; 2567 } 2568 /* 2569 * Do we have enough valid components? 2570 */ 2571 if (ndisks + 1 < sc->sc_ndisks) { 2572 G_RAID3_DEBUG(0, 2573 "Device %s is broken, too few valid components.", 2574 sc->sc_name); 2575 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2576 return; 2577 } 2578 /* 2579 * If there is one DIRTY component and all disks are present, 2580 * mark it for synchronization. If there is more than one DIRTY 2581 * component, mark parity component for synchronization. 2582 */ 2583 if (ndisks == sc->sc_ndisks && ndirty == 1) { 2584 for (n = 0; n < sc->sc_ndisks; n++) { 2585 disk = &sc->sc_disks[n]; 2586 if ((disk->d_flags & 2587 G_RAID3_DISK_FLAG_DIRTY) == 0) { 2588 continue; 2589 } 2590 disk->d_flags |= 2591 G_RAID3_DISK_FLAG_SYNCHRONIZING; 2592 } 2593 } else if (ndisks == sc->sc_ndisks && ndirty > 1) { 2594 disk = &sc->sc_disks[sc->sc_ndisks - 1]; 2595 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 2596 } 2597 2598 sc->sc_syncid = syncid; 2599 if (force) { 2600 /* Remember to bump syncid on first write. */ 2601 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 2602 } 2603 if (ndisks == sc->sc_ndisks) 2604 state = G_RAID3_DEVICE_STATE_COMPLETE; 2605 else /* if (ndisks == sc->sc_ndisks - 1) */ 2606 state = G_RAID3_DEVICE_STATE_DEGRADED; 2607 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.", 2608 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2609 g_raid3_device_state2str(state)); 2610 sc->sc_state = state; 2611 for (n = 0; n < sc->sc_ndisks; n++) { 2612 disk = &sc->sc_disks[n]; 2613 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2614 continue; 2615 state = g_raid3_determine_state(disk); 2616 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT); 2617 if (state == G_RAID3_DISK_STATE_STALE) 2618 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 2619 } 2620 break; 2621 } 2622 case G_RAID3_DEVICE_STATE_DEGRADED: 2623 /* 2624 * Genid need to be bumped immediately, so do it here. 2625 */ 2626 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2627 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2628 g_raid3_bump_genid(sc); 2629 } 2630 2631 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 2632 return; 2633 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < 2634 sc->sc_ndisks - 1) { 2635 if (sc->sc_provider != NULL) 2636 g_raid3_destroy_provider(sc); 2637 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2638 return; 2639 } 2640 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 2641 sc->sc_ndisks) { 2642 state = G_RAID3_DEVICE_STATE_COMPLETE; 2643 G_RAID3_DEBUG(1, 2644 "Device %s state changed from %s to %s.", 2645 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2646 g_raid3_device_state2str(state)); 2647 sc->sc_state = state; 2648 } 2649 if (sc->sc_provider == NULL) 2650 g_raid3_launch_provider(sc); 2651 if (sc->sc_rootmount != NULL) { 2652 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2653 sc->sc_rootmount); 2654 root_mount_rel(sc->sc_rootmount); 2655 sc->sc_rootmount = NULL; 2656 } 2657 break; 2658 case G_RAID3_DEVICE_STATE_COMPLETE: 2659 /* 2660 * Genid need to be bumped immediately, so do it here. 2661 */ 2662 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2663 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2664 g_raid3_bump_genid(sc); 2665 } 2666 2667 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 2668 return; 2669 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >= 2670 sc->sc_ndisks - 1, 2671 ("Too few ACTIVE components in COMPLETE state (device %s).", 2672 sc->sc_name)); 2673 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 2674 sc->sc_ndisks - 1) { 2675 state = G_RAID3_DEVICE_STATE_DEGRADED; 2676 G_RAID3_DEBUG(1, 2677 "Device %s state changed from %s to %s.", 2678 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2679 g_raid3_device_state2str(state)); 2680 sc->sc_state = state; 2681 } 2682 if (sc->sc_provider == NULL) 2683 g_raid3_launch_provider(sc); 2684 if (sc->sc_rootmount != NULL) { 2685 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2686 sc->sc_rootmount); 2687 root_mount_rel(sc->sc_rootmount); 2688 sc->sc_rootmount = NULL; 2689 } 2690 break; 2691 default: 2692 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 2693 g_raid3_device_state2str(sc->sc_state))); 2694 break; 2695 } 2696 } 2697 2698 /* 2699 * Update disk state and device state if needed. 2700 */ 2701 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 2702 "Disk %s state changed from %s to %s (device %s).", \ 2703 g_raid3_get_diskname(disk), \ 2704 g_raid3_disk_state2str(disk->d_state), \ 2705 g_raid3_disk_state2str(state), sc->sc_name) 2706 static int 2707 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 2708 { 2709 struct g_raid3_softc *sc; 2710 2711 sc = disk->d_softc; 2712 sx_assert(&sc->sc_lock, SX_XLOCKED); 2713 2714 again: 2715 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 2716 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 2717 g_raid3_disk_state2str(state)); 2718 switch (state) { 2719 case G_RAID3_DISK_STATE_NEW: 2720 /* 2721 * Possible scenarios: 2722 * 1. New disk arrive. 2723 */ 2724 /* Previous state should be NONE. */ 2725 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 2726 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2727 g_raid3_disk_state2str(disk->d_state))); 2728 DISK_STATE_CHANGED(); 2729 2730 disk->d_state = state; 2731 G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 2732 sc->sc_name, g_raid3_get_diskname(disk)); 2733 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 2734 break; 2735 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2736 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2737 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2738 g_raid3_device_state2str(sc->sc_state), 2739 g_raid3_get_diskname(disk), 2740 g_raid3_disk_state2str(disk->d_state))); 2741 state = g_raid3_determine_state(disk); 2742 if (state != G_RAID3_DISK_STATE_NONE) 2743 goto again; 2744 break; 2745 case G_RAID3_DISK_STATE_ACTIVE: 2746 /* 2747 * Possible scenarios: 2748 * 1. New disk does not need synchronization. 2749 * 2. Synchronization process finished successfully. 2750 */ 2751 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2752 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2753 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2754 g_raid3_device_state2str(sc->sc_state), 2755 g_raid3_get_diskname(disk), 2756 g_raid3_disk_state2str(disk->d_state))); 2757 /* Previous state should be NEW or SYNCHRONIZING. */ 2758 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 2759 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2760 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2761 g_raid3_disk_state2str(disk->d_state))); 2762 DISK_STATE_CHANGED(); 2763 2764 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 2765 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 2766 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 2767 g_raid3_sync_stop(sc, 0); 2768 } 2769 disk->d_state = state; 2770 disk->d_sync.ds_offset = 0; 2771 disk->d_sync.ds_offset_done = 0; 2772 g_raid3_update_idle(sc, disk); 2773 g_raid3_update_metadata(disk); 2774 G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 2775 sc->sc_name, g_raid3_get_diskname(disk)); 2776 break; 2777 case G_RAID3_DISK_STATE_STALE: 2778 /* 2779 * Possible scenarios: 2780 * 1. Stale disk was connected. 2781 */ 2782 /* Previous state should be NEW. */ 2783 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2784 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2785 g_raid3_disk_state2str(disk->d_state))); 2786 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2787 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2788 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2789 g_raid3_device_state2str(sc->sc_state), 2790 g_raid3_get_diskname(disk), 2791 g_raid3_disk_state2str(disk->d_state))); 2792 /* 2793 * STALE state is only possible if device is marked 2794 * NOAUTOSYNC. 2795 */ 2796 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 2797 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2798 g_raid3_device_state2str(sc->sc_state), 2799 g_raid3_get_diskname(disk), 2800 g_raid3_disk_state2str(disk->d_state))); 2801 DISK_STATE_CHANGED(); 2802 2803 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2804 disk->d_state = state; 2805 g_raid3_update_metadata(disk); 2806 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 2807 sc->sc_name, g_raid3_get_diskname(disk)); 2808 break; 2809 case G_RAID3_DISK_STATE_SYNCHRONIZING: 2810 /* 2811 * Possible scenarios: 2812 * 1. Disk which needs synchronization was connected. 2813 */ 2814 /* Previous state should be NEW. */ 2815 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2816 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2817 g_raid3_disk_state2str(disk->d_state))); 2818 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2819 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2820 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2821 g_raid3_device_state2str(sc->sc_state), 2822 g_raid3_get_diskname(disk), 2823 g_raid3_disk_state2str(disk->d_state))); 2824 DISK_STATE_CHANGED(); 2825 2826 if (disk->d_state == G_RAID3_DISK_STATE_NEW) 2827 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2828 disk->d_state = state; 2829 if (sc->sc_provider != NULL) { 2830 g_raid3_sync_start(sc); 2831 g_raid3_update_metadata(disk); 2832 } 2833 break; 2834 case G_RAID3_DISK_STATE_DISCONNECTED: 2835 /* 2836 * Possible scenarios: 2837 * 1. Device wasn't running yet, but disk disappear. 2838 * 2. Disk was active and disapppear. 2839 * 3. Disk disappear during synchronization process. 2840 */ 2841 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2842 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 2843 /* 2844 * Previous state should be ACTIVE, STALE or 2845 * SYNCHRONIZING. 2846 */ 2847 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 2848 disk->d_state == G_RAID3_DISK_STATE_STALE || 2849 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2850 ("Wrong disk state (%s, %s).", 2851 g_raid3_get_diskname(disk), 2852 g_raid3_disk_state2str(disk->d_state))); 2853 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 2854 /* Previous state should be NEW. */ 2855 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2856 ("Wrong disk state (%s, %s).", 2857 g_raid3_get_diskname(disk), 2858 g_raid3_disk_state2str(disk->d_state))); 2859 /* 2860 * Reset bumping syncid if disk disappeared in STARTING 2861 * state. 2862 */ 2863 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2864 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 2865 #ifdef INVARIANTS 2866 } else { 2867 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2868 sc->sc_name, 2869 g_raid3_device_state2str(sc->sc_state), 2870 g_raid3_get_diskname(disk), 2871 g_raid3_disk_state2str(disk->d_state))); 2872 #endif 2873 } 2874 DISK_STATE_CHANGED(); 2875 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 2876 sc->sc_name, g_raid3_get_diskname(disk)); 2877 2878 g_raid3_destroy_disk(disk); 2879 break; 2880 default: 2881 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2882 break; 2883 } 2884 return (0); 2885 } 2886 #undef DISK_STATE_CHANGED 2887 2888 int 2889 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 2890 { 2891 struct g_provider *pp; 2892 u_char *buf; 2893 int error; 2894 2895 g_topology_assert(); 2896 2897 error = g_access(cp, 1, 0, 0); 2898 if (error != 0) 2899 return (error); 2900 pp = cp->provider; 2901 g_topology_unlock(); 2902 /* Metadata are stored on last sector. */ 2903 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2904 &error); 2905 g_topology_lock(); 2906 g_access(cp, -1, 0, 0); 2907 if (buf == NULL) { 2908 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2909 cp->provider->name, error); 2910 return (error); 2911 } 2912 2913 /* Decode metadata. */ 2914 error = raid3_metadata_decode(buf, md); 2915 g_free(buf); 2916 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 2917 return (EINVAL); 2918 if (md->md_version > G_RAID3_VERSION) { 2919 G_RAID3_DEBUG(0, 2920 "Kernel module is too old to handle metadata from %s.", 2921 cp->provider->name); 2922 return (EINVAL); 2923 } 2924 if (error != 0) { 2925 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2926 cp->provider->name); 2927 return (error); 2928 } 2929 if (md->md_sectorsize > maxphys) { 2930 G_RAID3_DEBUG(0, "The blocksize is too big."); 2931 return (EINVAL); 2932 } 2933 2934 return (0); 2935 } 2936 2937 static int 2938 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp, 2939 struct g_raid3_metadata *md) 2940 { 2941 2942 if (md->md_no >= sc->sc_ndisks) { 2943 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.", 2944 pp->name, md->md_no); 2945 return (EINVAL); 2946 } 2947 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) { 2948 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.", 2949 pp->name, md->md_no); 2950 return (EEXIST); 2951 } 2952 if (md->md_all != sc->sc_ndisks) { 2953 G_RAID3_DEBUG(1, 2954 "Invalid '%s' field on disk %s (device %s), skipping.", 2955 "md_all", pp->name, sc->sc_name); 2956 return (EINVAL); 2957 } 2958 if ((md->md_mediasize % md->md_sectorsize) != 0) { 2959 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != " 2960 "0) on disk %s (device %s), skipping.", pp->name, 2961 sc->sc_name); 2962 return (EINVAL); 2963 } 2964 if (md->md_mediasize != sc->sc_mediasize) { 2965 G_RAID3_DEBUG(1, 2966 "Invalid '%s' field on disk %s (device %s), skipping.", 2967 "md_mediasize", pp->name, sc->sc_name); 2968 return (EINVAL); 2969 } 2970 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) { 2971 G_RAID3_DEBUG(1, 2972 "Invalid '%s' field on disk %s (device %s), skipping.", 2973 "md_mediasize", pp->name, sc->sc_name); 2974 return (EINVAL); 2975 } 2976 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) { 2977 G_RAID3_DEBUG(1, 2978 "Invalid size of disk %s (device %s), skipping.", pp->name, 2979 sc->sc_name); 2980 return (EINVAL); 2981 } 2982 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) { 2983 G_RAID3_DEBUG(1, 2984 "Invalid '%s' field on disk %s (device %s), skipping.", 2985 "md_sectorsize", pp->name, sc->sc_name); 2986 return (EINVAL); 2987 } 2988 if (md->md_sectorsize != sc->sc_sectorsize) { 2989 G_RAID3_DEBUG(1, 2990 "Invalid '%s' field on disk %s (device %s), skipping.", 2991 "md_sectorsize", pp->name, sc->sc_name); 2992 return (EINVAL); 2993 } 2994 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2995 G_RAID3_DEBUG(1, 2996 "Invalid sector size of disk %s (device %s), skipping.", 2997 pp->name, sc->sc_name); 2998 return (EINVAL); 2999 } 3000 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 3001 G_RAID3_DEBUG(1, 3002 "Invalid device flags on disk %s (device %s), skipping.", 3003 pp->name, sc->sc_name); 3004 return (EINVAL); 3005 } 3006 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 3007 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 3008 /* 3009 * VERIFY and ROUND-ROBIN options are mutally exclusive. 3010 */ 3011 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 3012 "disk %s (device %s), skipping.", pp->name, sc->sc_name); 3013 return (EINVAL); 3014 } 3015 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 3016 G_RAID3_DEBUG(1, 3017 "Invalid disk flags on disk %s (device %s), skipping.", 3018 pp->name, sc->sc_name); 3019 return (EINVAL); 3020 } 3021 return (0); 3022 } 3023 3024 int 3025 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 3026 struct g_raid3_metadata *md) 3027 { 3028 struct g_raid3_disk *disk; 3029 int error; 3030 3031 g_topology_assert_not(); 3032 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 3033 3034 error = g_raid3_check_metadata(sc, pp, md); 3035 if (error != 0) 3036 return (error); 3037 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3038 md->md_genid < sc->sc_genid) { 3039 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3040 pp->name, sc->sc_name); 3041 return (EINVAL); 3042 } 3043 disk = g_raid3_init_disk(sc, pp, md, &error); 3044 if (disk == NULL) 3045 return (error); 3046 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 3047 G_RAID3_EVENT_WAIT); 3048 if (error != 0) 3049 return (error); 3050 if (md->md_version < G_RAID3_VERSION) { 3051 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3052 pp->name, md->md_version, G_RAID3_VERSION); 3053 g_raid3_update_metadata(disk); 3054 } 3055 return (0); 3056 } 3057 3058 static void 3059 g_raid3_destroy_delayed(void *arg, int flag) 3060 { 3061 struct g_raid3_softc *sc; 3062 int error; 3063 3064 if (flag == EV_CANCEL) { 3065 G_RAID3_DEBUG(1, "Destroying canceled."); 3066 return; 3067 } 3068 sc = arg; 3069 g_topology_unlock(); 3070 sx_xlock(&sc->sc_lock); 3071 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3072 ("DESTROY flag set on %s.", sc->sc_name)); 3073 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3074 ("DESTROYING flag not set on %s.", sc->sc_name)); 3075 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3076 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3077 if (error != 0) { 3078 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3079 sx_xunlock(&sc->sc_lock); 3080 } 3081 g_topology_lock(); 3082 } 3083 3084 static int 3085 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 3086 { 3087 struct g_raid3_softc *sc; 3088 int dcr, dcw, dce, error = 0; 3089 3090 g_topology_assert(); 3091 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 3092 acw, ace); 3093 3094 sc = pp->geom->softc; 3095 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 3096 return (0); 3097 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 3098 3099 dcr = pp->acr + acr; 3100 dcw = pp->acw + acw; 3101 dce = pp->ace + ace; 3102 3103 g_topology_unlock(); 3104 sx_xlock(&sc->sc_lock); 3105 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 3106 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 3107 if (acr > 0 || acw > 0 || ace > 0) 3108 error = ENXIO; 3109 goto end; 3110 } 3111 if (dcw == 0) 3112 g_raid3_idle(sc, dcw); 3113 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3114 if (acr > 0 || acw > 0 || ace > 0) { 3115 error = ENXIO; 3116 goto end; 3117 } 3118 if (dcr == 0 && dcw == 0 && dce == 0) { 3119 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3120 sc, NULL); 3121 } 3122 } 3123 end: 3124 sx_xunlock(&sc->sc_lock); 3125 g_topology_lock(); 3126 return (error); 3127 } 3128 3129 static struct g_geom * 3130 g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md) 3131 { 3132 struct g_raid3_softc *sc; 3133 struct g_geom *gp; 3134 int error, timeout; 3135 u_int n; 3136 3137 g_topology_assert(); 3138 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 3139 3140 /* One disk is minimum. */ 3141 if (md->md_all < 1) 3142 return (NULL); 3143 /* 3144 * Action geom. 3145 */ 3146 gp = g_new_geomf(mp, "%s", md->md_name); 3147 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO); 3148 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3, 3149 M_WAITOK | M_ZERO); 3150 gp->start = g_raid3_start; 3151 gp->orphan = g_raid3_orphan; 3152 gp->access = g_raid3_access; 3153 gp->dumpconf = g_raid3_dumpconf; 3154 3155 sc->sc_id = md->md_id; 3156 sc->sc_mediasize = md->md_mediasize; 3157 sc->sc_sectorsize = md->md_sectorsize; 3158 sc->sc_ndisks = md->md_all; 3159 sc->sc_round_robin = 0; 3160 sc->sc_flags = md->md_mflags; 3161 sc->sc_bump_id = 0; 3162 sc->sc_idle = 1; 3163 sc->sc_last_write = time_uptime; 3164 sc->sc_writes = 0; 3165 for (n = 0; n < sc->sc_ndisks; n++) { 3166 sc->sc_disks[n].d_softc = sc; 3167 sc->sc_disks[n].d_no = n; 3168 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK; 3169 } 3170 sx_init(&sc->sc_lock, "graid3:lock"); 3171 bioq_init(&sc->sc_queue); 3172 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF); 3173 bioq_init(&sc->sc_regular_delayed); 3174 bioq_init(&sc->sc_inflight); 3175 bioq_init(&sc->sc_sync_delayed); 3176 TAILQ_INIT(&sc->sc_events); 3177 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF); 3178 callout_init(&sc->sc_callout, 1); 3179 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING; 3180 gp->softc = sc; 3181 sc->sc_geom = gp; 3182 sc->sc_provider = NULL; 3183 /* 3184 * Synchronization geom. 3185 */ 3186 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3187 gp->softc = sc; 3188 gp->orphan = g_raid3_orphan; 3189 sc->sc_sync.ds_geom = gp; 3190 3191 if (!g_raid3_use_malloc) { 3192 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k", 3193 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3194 UMA_ALIGN_PTR, 0); 3195 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0; 3196 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k; 3197 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested = 3198 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0; 3199 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k", 3200 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3201 UMA_ALIGN_PTR, 0); 3202 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0; 3203 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k; 3204 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested = 3205 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0; 3206 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k", 3207 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3208 UMA_ALIGN_PTR, 0); 3209 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0; 3210 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k; 3211 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested = 3212 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0; 3213 } 3214 3215 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0, 3216 "g_raid3 %s", md->md_name); 3217 if (error != 0) { 3218 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.", 3219 sc->sc_name); 3220 if (!g_raid3_use_malloc) { 3221 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 3222 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 3223 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 3224 } 3225 g_destroy_geom(sc->sc_sync.ds_geom); 3226 mtx_destroy(&sc->sc_events_mtx); 3227 mtx_destroy(&sc->sc_queue_mtx); 3228 sx_destroy(&sc->sc_lock); 3229 g_destroy_geom(sc->sc_geom); 3230 free(sc->sc_disks, M_RAID3); 3231 free(sc, M_RAID3); 3232 return (NULL); 3233 } 3234 3235 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).", 3236 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3237 3238 sc->sc_rootmount = root_mount_hold("GRAID3"); 3239 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3240 3241 /* 3242 * Schedule startup timeout. 3243 */ 3244 timeout = atomic_load_acq_int(&g_raid3_timeout); 3245 sc->sc_timeout_event = malloc(sizeof(struct g_raid3_event), M_RAID3, 3246 M_WAITOK); 3247 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc); 3248 return (sc->sc_geom); 3249 } 3250 3251 int 3252 g_raid3_destroy(struct g_raid3_softc *sc, int how) 3253 { 3254 struct g_provider *pp; 3255 3256 g_topology_assert_not(); 3257 if (sc == NULL) 3258 return (ENXIO); 3259 sx_assert(&sc->sc_lock, SX_XLOCKED); 3260 3261 pp = sc->sc_provider; 3262 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 3263 switch (how) { 3264 case G_RAID3_DESTROY_SOFT: 3265 G_RAID3_DEBUG(1, 3266 "Device %s is still open (r%dw%de%d).", pp->name, 3267 pp->acr, pp->acw, pp->ace); 3268 return (EBUSY); 3269 case G_RAID3_DESTROY_DELAYED: 3270 G_RAID3_DEBUG(1, 3271 "Device %s will be destroyed on last close.", 3272 pp->name); 3273 if (sc->sc_syncdisk != NULL) 3274 g_raid3_sync_stop(sc, 1); 3275 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING; 3276 return (EBUSY); 3277 case G_RAID3_DESTROY_HARD: 3278 G_RAID3_DEBUG(1, "Device %s is still open, so it " 3279 "can't be definitely removed.", pp->name); 3280 break; 3281 } 3282 } 3283 3284 g_topology_lock(); 3285 if (sc->sc_geom->softc == NULL) { 3286 g_topology_unlock(); 3287 return (0); 3288 } 3289 sc->sc_geom->softc = NULL; 3290 sc->sc_sync.ds_geom->softc = NULL; 3291 g_topology_unlock(); 3292 3293 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 3294 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT; 3295 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3296 sx_xunlock(&sc->sc_lock); 3297 mtx_lock(&sc->sc_queue_mtx); 3298 wakeup(sc); 3299 wakeup(&sc->sc_queue); 3300 mtx_unlock(&sc->sc_queue_mtx); 3301 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3302 while (sc->sc_worker != NULL) 3303 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5); 3304 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3305 sx_xlock(&sc->sc_lock); 3306 g_raid3_destroy_device(sc); 3307 free(sc->sc_disks, M_RAID3); 3308 free(sc, M_RAID3); 3309 return (0); 3310 } 3311 3312 static void 3313 g_raid3_taste_orphan(struct g_consumer *cp) 3314 { 3315 3316 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3317 cp->provider->name)); 3318 } 3319 3320 static struct g_geom * 3321 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3322 { 3323 struct g_raid3_metadata md; 3324 struct g_raid3_softc *sc; 3325 struct g_consumer *cp; 3326 struct g_geom *gp; 3327 int error; 3328 3329 g_topology_assert(); 3330 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3331 G_RAID3_DEBUG(2, "Tasting %s.", pp->name); 3332 3333 gp = g_new_geomf(mp, "raid3:taste"); 3334 /* This orphan function should be never called. */ 3335 gp->orphan = g_raid3_taste_orphan; 3336 cp = g_new_consumer(gp); 3337 cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE; 3338 error = g_attach(cp, pp); 3339 if (error == 0) { 3340 error = g_raid3_read_metadata(cp, &md); 3341 g_detach(cp); 3342 } 3343 g_destroy_consumer(cp); 3344 g_destroy_geom(gp); 3345 if (error != 0) 3346 return (NULL); 3347 gp = NULL; 3348 3349 if (md.md_provider[0] != '\0' && 3350 !g_compare_names(md.md_provider, pp->name)) 3351 return (NULL); 3352 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3353 return (NULL); 3354 if (g_raid3_debug >= 2) 3355 raid3_metadata_dump(&md); 3356 3357 /* 3358 * Let's check if device already exists. 3359 */ 3360 sc = NULL; 3361 LIST_FOREACH(gp, &mp->geom, geom) { 3362 sc = gp->softc; 3363 if (sc == NULL) 3364 continue; 3365 if (sc->sc_sync.ds_geom == gp) 3366 continue; 3367 if (strcmp(md.md_name, sc->sc_name) != 0) 3368 continue; 3369 if (md.md_id != sc->sc_id) { 3370 G_RAID3_DEBUG(0, "Device %s already configured.", 3371 sc->sc_name); 3372 return (NULL); 3373 } 3374 break; 3375 } 3376 if (gp == NULL) { 3377 gp = g_raid3_create(mp, &md); 3378 if (gp == NULL) { 3379 G_RAID3_DEBUG(0, "Cannot create device %s.", 3380 md.md_name); 3381 return (NULL); 3382 } 3383 sc = gp->softc; 3384 } 3385 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3386 g_topology_unlock(); 3387 sx_xlock(&sc->sc_lock); 3388 error = g_raid3_add_disk(sc, pp, &md); 3389 if (error != 0) { 3390 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3391 pp->name, gp->name, error); 3392 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) == 3393 sc->sc_ndisks) { 3394 g_cancel_event(sc); 3395 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD); 3396 g_topology_lock(); 3397 return (NULL); 3398 } 3399 gp = NULL; 3400 } 3401 sx_xunlock(&sc->sc_lock); 3402 g_topology_lock(); 3403 return (gp); 3404 } 3405 3406 static int 3407 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 3408 struct g_geom *gp) 3409 { 3410 struct g_raid3_softc *sc; 3411 int error; 3412 3413 g_topology_unlock(); 3414 sc = gp->softc; 3415 sx_xlock(&sc->sc_lock); 3416 g_cancel_event(sc); 3417 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT); 3418 if (error != 0) 3419 sx_xunlock(&sc->sc_lock); 3420 g_topology_lock(); 3421 return (error); 3422 } 3423 3424 static void 3425 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3426 struct g_consumer *cp, struct g_provider *pp) 3427 { 3428 struct g_raid3_softc *sc; 3429 3430 g_topology_assert(); 3431 3432 sc = gp->softc; 3433 if (sc == NULL) 3434 return; 3435 /* Skip synchronization geom. */ 3436 if (gp == sc->sc_sync.ds_geom) 3437 return; 3438 if (pp != NULL) { 3439 /* Nothing here. */ 3440 } else if (cp != NULL) { 3441 struct g_raid3_disk *disk; 3442 3443 disk = cp->private; 3444 if (disk == NULL) 3445 return; 3446 g_topology_unlock(); 3447 sx_xlock(&sc->sc_lock); 3448 sbuf_printf(sb, "%s<Type>", indent); 3449 if (disk->d_no == sc->sc_ndisks - 1) 3450 sbuf_cat(sb, "PARITY"); 3451 else 3452 sbuf_cat(sb, "DATA"); 3453 sbuf_cat(sb, "</Type>\n"); 3454 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 3455 (u_int)disk->d_no); 3456 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 3457 sbuf_printf(sb, "%s<Synchronized>", indent); 3458 if (disk->d_sync.ds_offset == 0) 3459 sbuf_cat(sb, "0%"); 3460 else { 3461 sbuf_printf(sb, "%u%%", 3462 (u_int)((disk->d_sync.ds_offset * 100) / 3463 (sc->sc_mediasize / (sc->sc_ndisks - 1)))); 3464 } 3465 sbuf_cat(sb, "</Synchronized>\n"); 3466 if (disk->d_sync.ds_offset > 0) { 3467 sbuf_printf(sb, "%s<BytesSynced>%jd" 3468 "</BytesSynced>\n", indent, 3469 (intmax_t)disk->d_sync.ds_offset); 3470 } 3471 } 3472 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3473 disk->d_sync.ds_syncid); 3474 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid); 3475 sbuf_printf(sb, "%s<Flags>", indent); 3476 if (disk->d_flags == 0) 3477 sbuf_cat(sb, "NONE"); 3478 else { 3479 int first = 1; 3480 3481 #define ADD_FLAG(flag, name) do { \ 3482 if ((disk->d_flags & (flag)) != 0) { \ 3483 if (!first) \ 3484 sbuf_cat(sb, ", "); \ 3485 else \ 3486 first = 0; \ 3487 sbuf_cat(sb, name); \ 3488 } \ 3489 } while (0) 3490 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY"); 3491 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED"); 3492 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING, 3493 "SYNCHRONIZING"); 3494 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3495 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN"); 3496 #undef ADD_FLAG 3497 } 3498 sbuf_cat(sb, "</Flags>\n"); 3499 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3500 g_raid3_disk_state2str(disk->d_state)); 3501 sx_xunlock(&sc->sc_lock); 3502 g_topology_lock(); 3503 } else { 3504 g_topology_unlock(); 3505 sx_xlock(&sc->sc_lock); 3506 if (!g_raid3_use_malloc) { 3507 sbuf_printf(sb, 3508 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent, 3509 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested); 3510 sbuf_printf(sb, 3511 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent, 3512 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed); 3513 sbuf_printf(sb, 3514 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent, 3515 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested); 3516 sbuf_printf(sb, 3517 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent, 3518 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed); 3519 sbuf_printf(sb, 3520 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent, 3521 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested); 3522 sbuf_printf(sb, 3523 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent, 3524 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed); 3525 } 3526 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3527 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3528 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3529 sbuf_printf(sb, "%s<Flags>", indent); 3530 if (sc->sc_flags == 0) 3531 sbuf_cat(sb, "NONE"); 3532 else { 3533 int first = 1; 3534 3535 #define ADD_FLAG(flag, name) do { \ 3536 if ((sc->sc_flags & (flag)) != 0) { \ 3537 if (!first) \ 3538 sbuf_cat(sb, ", "); \ 3539 else \ 3540 first = 0; \ 3541 sbuf_cat(sb, name); \ 3542 } \ 3543 } while (0) 3544 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3545 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3546 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN, 3547 "ROUND-ROBIN"); 3548 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY"); 3549 #undef ADD_FLAG 3550 } 3551 sbuf_cat(sb, "</Flags>\n"); 3552 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3553 sc->sc_ndisks); 3554 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3555 g_raid3_device_state2str(sc->sc_state)); 3556 sx_xunlock(&sc->sc_lock); 3557 g_topology_lock(); 3558 } 3559 } 3560 3561 static void 3562 g_raid3_shutdown_post_sync(void *arg, int howto) 3563 { 3564 struct g_class *mp; 3565 struct g_geom *gp, *gp2; 3566 struct g_raid3_softc *sc; 3567 int error; 3568 3569 mp = arg; 3570 g_topology_lock(); 3571 g_raid3_shutdown = 1; 3572 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3573 if ((sc = gp->softc) == NULL) 3574 continue; 3575 /* Skip synchronization geom. */ 3576 if (gp == sc->sc_sync.ds_geom) 3577 continue; 3578 g_topology_unlock(); 3579 sx_xlock(&sc->sc_lock); 3580 g_raid3_idle(sc, -1); 3581 g_cancel_event(sc); 3582 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED); 3583 if (error != 0) 3584 sx_xunlock(&sc->sc_lock); 3585 g_topology_lock(); 3586 } 3587 g_topology_unlock(); 3588 } 3589 3590 static void 3591 g_raid3_init(struct g_class *mp) 3592 { 3593 3594 g_raid3_post_sync = EVENTHANDLER_REGISTER(shutdown_post_sync, 3595 g_raid3_shutdown_post_sync, mp, SHUTDOWN_PRI_FIRST); 3596 if (g_raid3_post_sync == NULL) 3597 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event."); 3598 } 3599 3600 static void 3601 g_raid3_fini(struct g_class *mp) 3602 { 3603 3604 if (g_raid3_post_sync != NULL) 3605 EVENTHANDLER_DEREGISTER(shutdown_post_sync, g_raid3_post_sync); 3606 } 3607 3608 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3); 3609 MODULE_VERSION(geom_raid3, 0); 3610