/*-
 * Copyright (c) 2004-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/eventhandler.h>
#include <vm/uma.h>
#include <geom/geom.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/sched.h>
#include <geom/raid3/g_raid3.h>

FEATURE(geom_raid3, "GEOM RAID-3 functionality");

static MALLOC_DEFINE(M_RAID3, "raid3_data", "GEOM_RAID3 Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, raid3, CTLFLAG_RW, 0, "GEOM_RAID3 stuff");
u_int g_raid3_debug = 0;
TUNABLE_INT("kern.geom.raid3.debug", &g_raid3_debug);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, debug, CTLFLAG_RW, &g_raid3_debug, 0,
    "Debug level");
static u_int g_raid3_timeout = 4;
TUNABLE_INT("kern.geom.raid3.timeout", &g_raid3_timeout);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, timeout, CTLFLAG_RW, &g_raid3_timeout,
    0, "Time to wait on all raid3 components");
static u_int g_raid3_idletime = 5;
TUNABLE_INT("kern.geom.raid3.idletime", &g_raid3_idletime);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, idletime, CTLFLAG_RW,
    &g_raid3_idletime, 0, "Mark components as clean when idling");
static u_int g_raid3_disconnect_on_failure = 1;
TUNABLE_INT("kern.geom.raid3.disconnect_on_failure",
    &g_raid3_disconnect_on_failure);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, disconnect_on_failure, CTLFLAG_RW,
    &g_raid3_disconnect_on_failure, 0, "Disconnect component on I/O failure.");
static u_int g_raid3_syncreqs = 2;
TUNABLE_INT("kern.geom.raid3.sync_requests", &g_raid3_syncreqs);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, sync_requests, CTLFLAG_RDTUN,
    &g_raid3_syncreqs, 0, "Parallel synchronization I/O requests.");
static u_int g_raid3_use_malloc = 0;
TUNABLE_INT("kern.geom.raid3.use_malloc", &g_raid3_use_malloc);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, use_malloc, CTLFLAG_RDTUN,
    &g_raid3_use_malloc, 0, "Use malloc(9) instead of uma(9).");

static u_int g_raid3_n64k = 50;
TUNABLE_INT("kern.geom.raid3.n64k", &g_raid3_n64k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n64k, CTLFLAG_RD, &g_raid3_n64k, 0,
    "Maximum number of 64kB allocations");
static u_int g_raid3_n16k = 200;
TUNABLE_INT("kern.geom.raid3.n16k", &g_raid3_n16k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n16k, CTLFLAG_RD, &g_raid3_n16k, 0,
    "Maximum number of 16kB allocations");
static u_int g_raid3_n4k = 1200;
TUNABLE_INT("kern.geom.raid3.n4k", &g_raid3_n4k);
SYSCTL_UINT(_kern_geom_raid3, OID_AUTO, n4k, CTLFLAG_RD, &g_raid3_n4k, 0,
    "Maximum number of 4kB allocations");

SYSCTL_NODE(_kern_geom_raid3, OID_AUTO, stat, CTLFLAG_RW, 0,
    "GEOM_RAID3 statistics");
static u_int g_raid3_parity_mismatch = 0;
SYSCTL_UINT(_kern_geom_raid3_stat, OID_AUTO, parity_mismatch, CTLFLAG_RD,
    &g_raid3_parity_mismatch, 0, "Number of failures in VERIFY mode");

#define	MSLEEP(ident, mtx, priority, wmesg, timeout)	do {		\
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, (ident));	\
	msleep((ident), (mtx), (priority), (wmesg), (timeout));		\
	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, (ident));	\
} while (0)

static eventhandler_tag g_raid3_pre_sync = NULL;

static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_raid3_taste;
static void g_raid3_init(struct g_class *mp);
static void g_raid3_fini(struct g_class *mp);

struct g_class g_raid3_class = {
	.name = G_RAID3_CLASS_NAME,
	.version = G_VERSION,
	.ctlreq = g_raid3_config,
	.taste = g_raid3_taste,
	.destroy_geom = g_raid3_destroy_geom,
	.init = g_raid3_init,
	.fini = g_raid3_fini
};

static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
static int g_raid3_update_disk(struct g_raid3_disk *disk, u_int state);
static void g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force);
static void g_raid3_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_raid3_sync_stop(struct g_raid3_softc *sc, int type);
static int g_raid3_register_request(struct bio *pbp);
static void g_raid3_sync_release(struct g_raid3_softc *sc);

static const char *
g_raid3_disk_state2str(int state)
{

	switch (state) {
	case G_RAID3_DISK_STATE_NODISK:
		return ("NODISK");
	case G_RAID3_DISK_STATE_NONE:
		return ("NONE");
	case G_RAID3_DISK_STATE_NEW:
		return ("NEW");
	case G_RAID3_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_RAID3_DISK_STATE_STALE:
		return ("STALE");
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_RAID3_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	default:
		return ("INVALID");
	}
}

static const char *
g_raid3_device_state2str(int state)
{

	switch (state) {
	case G_RAID3_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_RAID3_DEVICE_STATE_DEGRADED:
		return ("DEGRADED");
	case G_RAID3_DEVICE_STATE_COMPLETE:
		return ("COMPLETE");
	default:
		return ("INVALID");
	}
}

const char *
g_raid3_get_diskname(struct g_raid3_disk *disk)
{

	if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL)
		return ("[unknown]");
	return (disk->d_name);
}

static void *
g_raid3_alloc(struct g_raid3_softc *sc, size_t size, int flags)
{
	void *ptr;
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		ptr = malloc(size, M_RAID3, flags);
	else {
		ptr = uma_zalloc_arg(sc->sc_zones[zone].sz_zone,
		    &sc->sc_zones[zone], flags);
		sc->sc_zones[zone].sz_requested++;
		if (ptr == NULL)
			sc->sc_zones[zone].sz_failed++;
	}
	return (ptr);
}

static void
g_raid3_free(struct g_raid3_softc *sc, void *ptr, size_t size)
{
	enum g_raid3_zones zone;

	if (g_raid3_use_malloc ||
	    (zone = g_raid3_zone(size)) == G_RAID3_NUM_ZONES)
		free(ptr, M_RAID3);
	else {
		uma_zfree_arg(sc->sc_zones[zone].sz_zone,
		    ptr, &sc->sc_zones[zone]);
	}
}

static int
g_raid3_uma_ctor(void *mem, int size, void *arg, int flags)
{
	struct g_raid3_zone *sz = arg;

	if (sz->sz_max > 0 && sz->sz_inuse == sz->sz_max)
		return (ENOMEM);
	sz->sz_inuse++;
	return (0);
}

static void
g_raid3_uma_dtor(void *mem, int size, void *arg)
{
	struct g_raid3_zone *sz = arg;

	sz->sz_inuse--;
}

#define	g_raid3_xor(src, dst, size)					\
	_g_raid3_xor((uint64_t *)(src),					\
	    (uint64_t *)(dst), (size_t)size)
static void
_g_raid3_xor(uint64_t *src, uint64_t *dst, size_t size)
{

	KASSERT((size % 128) == 0, ("Invalid size: %zu.", size));
	for (; size > 0; size -= 128) {
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
		*dst++ ^= (*src++);
	}
}

static int
g_raid3_is_zero(struct bio *bp)
{
	static const uint64_t zeros[] = {
	    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	u_char *addr;
	ssize_t size;

	size = bp->bio_length;
	addr = (u_char *)bp->bio_data;
	for (; size > 0; size -= sizeof(zeros), addr += sizeof(zeros)) {
		if (bcmp(addr, zeros, sizeof(zeros)) != 0)
			return (0);
	}
	return (1);
}

/*
 * --- Events handling functions ---
 * Events in geom_raid3 are used to maintain disks and device status
 * from one thread to simplify locking.
 */
static void
g_raid3_event_free(struct g_raid3_event *ep)
{

	free(ep, M_RAID3);
}

int
g_raid3_event_send(void *arg, int state, int flags)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_raid3_event *ep;
	int error;

	ep = malloc(sizeof(*ep), M_RAID3, M_WAITOK);
	G_RAID3_DEBUG(4, "%s: Sending event %p.", __func__, ep);
	if ((flags & G_RAID3_EVENT_DEVICE) != 0) {
		disk = NULL;
		sc = arg;
	} else {
		disk = arg;
		sc = disk->d_softc;
	}
	ep->e_disk = disk;
	ep->e_state = state;
	ep->e_flags = flags;
	ep->e_error = 0;
	mtx_lock(&sc->sc_events_mtx);
	TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	mtx_lock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
	mtx_unlock(&sc->sc_queue_mtx);
	if ((flags & G_RAID3_EVENT_DONTWAIT) != 0)
		return (0);
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, ep);
	sx_xunlock(&sc->sc_lock);
	while ((ep->e_flags & G_RAID3_EVENT_DONE) == 0) {
		mtx_lock(&sc->sc_events_mtx);
		MSLEEP(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "r3:event",
		    hz * 5);
	}
	error = ep->e_error;
	g_raid3_event_free(ep);
	sx_xlock(&sc->sc_lock);
	return (error);
}

static struct g_raid3_event *
g_raid3_event_get(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;

	mtx_lock(&sc->sc_events_mtx);
	ep = TAILQ_FIRST(&sc->sc_events);
	mtx_unlock(&sc->sc_events_mtx);
	return (ep);
}

static void
g_raid3_event_remove(struct g_raid3_softc *sc, struct g_raid3_event *ep)
{

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_REMOVE(&sc->sc_events, ep, e_next);
	mtx_unlock(&sc->sc_events_mtx);
}

static void
g_raid3_event_cancel(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep, *tmpep;

	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	mtx_lock(&sc->sc_events_mtx);
	TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) {
		if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0)
			continue;
		if (ep->e_disk != disk)
			continue;
		TAILQ_REMOVE(&sc->sc_events, ep, e_next);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			wakeup(ep);
		}
	}
	mtx_unlock(&sc->sc_events_mtx);
}

/*
 * Return the number of disks in the given state.
 * If state is equal to -1, count all connected disks.
 */
u_int
g_raid3_ndisks(struct g_raid3_softc *sc, int state)
{
	struct g_raid3_disk *disk;
	u_int n, ndisks;

	sx_assert(&sc->sc_lock, SX_LOCKED);

	for (n = ndisks = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
			continue;
		if (state == -1 || disk->d_state == state)
			ndisks++;
	}
	return (ndisks);
}

static u_int
g_raid3_nrequests(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct bio *bp;
	u_int nreqs = 0;

	mtx_lock(&sc->sc_queue_mtx);
	TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
		if (bp->bio_from == cp)
			nreqs++;
	}
	mtx_unlock(&sc->sc_queue_mtx);
	return (nreqs);
}

static int
g_raid3_is_busy(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	if (cp->index > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s exist, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	if (g_raid3_nrequests(sc, cp) > 0) {
		G_RAID3_DEBUG(2,
		    "I/O requests for %s in queue, can't destroy it now.",
		    cp->provider->name);
		return (1);
	}
	return (0);
}

static void
g_raid3_destroy_consumer(void *arg, int flags __unused)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = arg;
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", cp->provider->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static void
g_raid3_kill_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{
	struct g_provider *pp;
	int retaste_wait;

	g_topology_assert();

	cp->private = NULL;
	if (g_raid3_is_busy(sc, cp))
		return;
	G_RAID3_DEBUG(2, "Consumer %s destroyed.", cp->provider->name);
	pp = cp->provider;
	retaste_wait = 0;
	if (cp->acw == 1) {
		if ((pp->geom->flags & G_GEOM_WITHER) == 0)
			retaste_wait = 1;
	}
	G_RAID3_DEBUG(2, "Access %s r%dw%de%d = %d", pp->name, -cp->acr,
	    -cp->acw, -cp->ace, 0);
	if (cp->acr > 0 || cp->acw > 0 || cp->ace > 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (retaste_wait) {
		/*
		 * After the retaste event was sent (inside g_access()), we can
		 * send the event to detach and destroy the consumer.
		 * A class which has a consumer attached to the given provider
		 * will not receive the retaste event for that provider.
		 * This is how we ignore retaste events when closing consumers
		 * opened for write: the consumer is detached and destroyed
		 * only after the retaste event has been sent.
		 */
		g_post_event(g_raid3_destroy_consumer, cp, M_WAITOK, NULL);
		return;
	}
	G_RAID3_DEBUG(1, "Consumer %s destroyed.", pp->name);
	g_detach(cp);
	g_destroy_consumer(cp);
}

static int
g_raid3_connect_disk(struct g_raid3_disk *disk, struct g_provider *pp)
{
	struct g_consumer *cp;
	int error;

	g_topology_assert_not();
	KASSERT(disk->d_consumer == NULL,
	    ("Disk already connected (device %s).", disk->d_softc->sc_name));

	g_topology_lock();
	cp = g_new_consumer(disk->d_softc->sc_geom);
	error = g_attach(cp, pp);
	if (error != 0) {
		g_destroy_consumer(cp);
		g_topology_unlock();
		return (error);
	}
	error = g_access(cp, 1, 1, 1);
	g_topology_unlock();
	if (error != 0) {
		g_detach(cp);
		g_destroy_consumer(cp);
		G_RAID3_DEBUG(0, "Cannot open consumer %s (error=%d).",
		    pp->name, error);
		return (error);
	}
	disk->d_consumer = cp;
	disk->d_consumer->private = disk;
	disk->d_consumer->index = 0;
	G_RAID3_DEBUG(2, "Disk %s connected.", g_raid3_get_diskname(disk));
	return (0);
}

static void
g_raid3_disconnect_consumer(struct g_raid3_softc *sc, struct g_consumer *cp)
{

	g_topology_assert();

	if (cp == NULL)
		return;
	if (cp->provider != NULL)
		g_raid3_kill_consumer(sc, cp);
	else
		g_destroy_consumer(cp);
}

/*
 * Initialize disk. This means allocate memory, create consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_raid3_disk *
g_raid3_init_disk(struct g_raid3_softc *sc, struct g_provider *pp,
    struct g_raid3_metadata *md, int *errorp)
{
	struct g_raid3_disk *disk;
	int error;

	disk = &sc->sc_disks[md->md_no];
	error = g_raid3_connect_disk(disk, pp);
	if (error != 0) {
		if (errorp != NULL)
			*errorp = error;
		return (NULL);
	}
	disk->d_state = G_RAID3_DISK_STATE_NONE;
	disk->d_flags = md->md_dflags;
	if (md->md_provider[0] != '\0')
		disk->d_flags |= G_RAID3_DISK_FLAG_HARDCODED;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_genid = md->md_genid;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
}

static void
g_raid3_destroy_disk(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (disk->d_state == G_RAID3_DISK_STATE_NODISK)
		return;
	g_raid3_event_cancel(disk);
	switch (disk->d_state) {
	case G_RAID3_DISK_STATE_SYNCHRONIZING:
		if (sc->sc_syncdisk != NULL)
			g_raid3_sync_stop(sc, 1);
		/* FALLTHROUGH */
	case G_RAID3_DISK_STATE_NEW:
	case G_RAID3_DISK_STATE_STALE:
	case G_RAID3_DISK_STATE_ACTIVE:
		g_topology_lock();
		g_raid3_disconnect_consumer(sc, disk->d_consumer);
		g_topology_unlock();
		disk->d_consumer = NULL;
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_raid3_get_diskname(disk),
		    g_raid3_disk_state2str(disk->d_state)));
	}
	disk->d_state = G_RAID3_DISK_STATE_NODISK;
}

static void
g_raid3_destroy_device(struct g_raid3_softc *sc)
{
	struct g_raid3_event *ep;
	struct g_raid3_disk *disk;
	struct g_geom *gp;
	struct g_consumer *cp;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock,
	    SX_XLOCKED);

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_raid3_destroy_provider(sc);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state != G_RAID3_DISK_STATE_NODISK) {
			disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
			g_raid3_update_metadata(disk);
			g_raid3_destroy_disk(disk);
		}
	}
	while ((ep = g_raid3_event_get(sc)) != NULL) {
		g_raid3_event_remove(sc, ep);
		if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0)
			g_raid3_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_RAID3_EVENT_DONE;
			G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	cp = LIST_FIRST(&sc->sc_sync.ds_geom->consumer);
	g_topology_lock();
	if (cp != NULL)
		g_raid3_disconnect_consumer(sc, cp);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
	g_topology_unlock();
	if (!g_raid3_use_malloc) {
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
	}
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	sx_xunlock(&sc->sc_lock);
	sx_destroy(&sc->sc_lock);
}

static void
g_raid3_orphan(struct g_consumer *cp)
{
	struct g_raid3_disk *disk;

	g_topology_assert();

	disk = cp->private;
	if (disk == NULL)
		return;
	disk->d_softc->sc_bump_id = G_RAID3_BUMP_SYNCID;
	g_raid3_event_send(disk, G_RAID3_DISK_STATE_DISCONNECTED,
	    G_RAID3_EVENT_DONTWAIT);
}

static int
g_raid3_write_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_consumer *cp;
	off_t offset, length;
	u_char *sector;
	int error = 0;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	cp = disk->d_consumer;
	KASSERT(cp != NULL, ("NULL consumer (%s).", sc->sc_name));
	KASSERT(cp->provider != NULL, ("NULL provider (%s).", sc->sc_name));
	KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
	    ("Consumer %s closed? (r%dw%de%d).", cp->provider->name, cp->acr,
	    cp->acw, cp->ace));
	length = cp->provider->sectorsize;
	offset = cp->provider->mediasize - length;
	sector = malloc((size_t)length, M_RAID3, M_WAITOK | M_ZERO);
	if (md != NULL)
		raid3_metadata_encode(md, sector);
	error = g_write_data(cp, offset, sector, length);
	free(sector, M_RAID3);
	if (error != 0) {
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			G_RAID3_DEBUG(0, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
		} else {
			G_RAID3_DEBUG(1, "Cannot write metadata on %s "
			    "(device=%s, error=%d).",
			    g_raid3_get_diskname(disk), sc->sc_name, error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	return (error);
}

int
g_raid3_clear_metadata(struct g_raid3_disk *disk)
{
	int error;

	g_topology_assert_not();
	sx_assert(&disk->d_softc->sc_lock, SX_LOCKED);

	error = g_raid3_write_metadata(disk, NULL);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s cleared.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot clear metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
	return (error);
}

void
g_raid3_fill_metadata(struct g_raid3_disk *disk, struct g_raid3_metadata *md)
{
	struct g_raid3_softc *sc;
	struct g_provider *pp;

	sc = disk->d_softc;
	strlcpy(md->md_magic, G_RAID3_MAGIC, sizeof(md->md_magic));
	md->md_version = G_RAID3_VERSION;
	strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name));
	md->md_id = sc->sc_id;
	md->md_all = sc->sc_ndisks;
	md->md_genid = sc->sc_genid;
	md->md_mediasize = sc->sc_mediasize;
	md->md_sectorsize = sc->sc_sectorsize;
	md->md_mflags = (sc->sc_flags & G_RAID3_DEVICE_FLAG_MASK);
	md->md_no = disk->d_no;
	md->md_syncid = disk->d_sync.ds_syncid;
	md->md_dflags = (disk->d_flags & G_RAID3_DISK_FLAG_MASK);
	if (disk->d_state != G_RAID3_DISK_STATE_SYNCHRONIZING)
		md->md_sync_offset = 0;
	else {
		md->md_sync_offset =
		    disk->d_sync.ds_offset_done / (sc->sc_ndisks - 1);
	}
	if (disk->d_consumer != NULL && disk->d_consumer->provider != NULL)
		pp = disk->d_consumer->provider;
	else
		pp = NULL;
	if ((disk->d_flags & G_RAID3_DISK_FLAG_HARDCODED) != 0 && pp != NULL)
		strlcpy(md->md_provider, pp->name, sizeof(md->md_provider));
	else
		bzero(md->md_provider, sizeof(md->md_provider));
	if (pp != NULL)
		md->md_provsize = pp->mediasize;
	else
		md->md_provsize = 0;
}

void
g_raid3_update_metadata(struct g_raid3_disk *disk)
{
	struct g_raid3_softc *sc;
	struct g_raid3_metadata md;
	int error;

	g_topology_assert_not();
	sc = disk->d_softc;
	sx_assert(&sc->sc_lock, SX_LOCKED);

	g_raid3_fill_metadata(disk, &md);
	error = g_raid3_write_metadata(disk, &md);
	if (error == 0) {
		G_RAID3_DEBUG(2, "Metadata on %s updated.",
		    g_raid3_get_diskname(disk));
	} else {
		G_RAID3_DEBUG(0,
		    "Cannot update metadata on disk %s (error=%d).",
		    g_raid3_get_diskname(disk), error);
	}
}

static void
g_raid3_bump_syncid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_syncid++;
	G_RAID3_DEBUG(1, "Device %s: syncid bumped to %u.", sc->sc_name,
	    sc->sc_syncid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_raid3_update_metadata(disk);
		}
	}
}

static void
g_raid3_bump_genid(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int n;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);
	KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_name));

	sc->sc_genid++;
	G_RAID3_DEBUG(1, "Device %s: genid bumped to %u.", sc->sc_name,
	    sc->sc_genid);
	for (n = 0; n < sc->sc_ndisks; n++) {
		disk = &sc->sc_disks[n];
		if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
		    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
			disk->d_genid = sc->sc_genid;
			g_raid3_update_metadata(disk);
		}
	}
}

static int
g_raid3_idle(struct g_raid3_softc *sc, int acw)
{
	struct g_raid3_disk *disk;
	u_int i;
	int timeout;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_provider == NULL)
		return (0);
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return (0);
	if (sc->sc_idle)
		return (0);
	if (sc->sc_writes > 0)
		return (0);
	if (acw > 0 || (acw == -1 && sc->sc_provider->acw > 0)) {
		timeout = g_raid3_idletime - (time_uptime - sc->sc_last_write);
		if (timeout > 0)
			return (timeout);
	}
	sc->sc_idle = 1;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
	return (0);
}

static void
g_raid3_unidle(struct g_raid3_softc *sc)
{
	struct g_raid3_disk *disk;
	u_int i;

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0)
		return;
	sc->sc_idle = 0;
	sc->sc_last_write = time_uptime;
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.",
		    g_raid3_get_diskname(disk), sc->sc_name);
		disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY;
		g_raid3_update_metadata(disk);
	}
}

/*
 * Treat bio_driver1 field in parent bio as list head and field bio_caller1
 * in child bio as pointer to the next element on the list.
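 *
 * Each parent bio therefore carries a simple singly-linked list of its
 * per-component child bios:
 *
 *	pbp->bio_driver1 -> cbp -> cbp -> ... -> NULL
 *	(each link is stored in the child's bio_caller1 field)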
 */
#define	G_RAID3_HEAD_BIO(pbp)	(pbp)->bio_driver1

#define	G_RAID3_NEXT_BIO(cbp)	(cbp)->bio_caller1

#define	G_RAID3_FOREACH_BIO(pbp, bp)					\
	for ((bp) = G_RAID3_HEAD_BIO(pbp); (bp) != NULL;		\
	    (bp) = G_RAID3_NEXT_BIO(bp))

#define	G_RAID3_FOREACH_SAFE_BIO(pbp, bp, tmpbp)			\
	for ((bp) = G_RAID3_HEAD_BIO(pbp);				\
	    (bp) != NULL && ((tmpbp) = G_RAID3_NEXT_BIO(bp), 1);	\
	    (bp) = (tmpbp))

static void
g_raid3_init_bio(struct bio *pbp)
{

	G_RAID3_HEAD_BIO(pbp) = NULL;
}

static void
g_raid3_remove_bio(struct bio *cbp)
{
	struct bio *pbp, *bp;

	pbp = cbp->bio_parent;
	if (G_RAID3_HEAD_BIO(pbp) == cbp)
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp) {
				G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
}

static void
g_raid3_replace_bio(struct bio *sbp, struct bio *dbp)
{
	struct bio *pbp, *bp;

	g_raid3_remove_bio(sbp);
	pbp = dbp->bio_parent;
	G_RAID3_NEXT_BIO(sbp) = G_RAID3_NEXT_BIO(dbp);
	if (G_RAID3_HEAD_BIO(pbp) == dbp)
		G_RAID3_HEAD_BIO(pbp) = sbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == dbp) {
				G_RAID3_NEXT_BIO(bp) = sbp;
				break;
			}
		}
	}
	G_RAID3_NEXT_BIO(dbp) = NULL;
}

static void
g_raid3_destroy_bio(struct g_raid3_softc *sc, struct bio *cbp)
{
	struct bio *bp, *pbp;
	size_t size;

	pbp = cbp->bio_parent;
	pbp->bio_children--;
	KASSERT(cbp->bio_data != NULL, ("NULL bio_data"));
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	g_raid3_free(sc, cbp->bio_data, size);
	if (G_RAID3_HEAD_BIO(pbp) == cbp) {
		G_RAID3_HEAD_BIO(pbp) = G_RAID3_NEXT_BIO(cbp);
		G_RAID3_NEXT_BIO(cbp) = NULL;
		g_destroy_bio(cbp);
	} else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == cbp)
				break;
		}
		if (bp != NULL) {
			KASSERT(G_RAID3_NEXT_BIO(bp) != NULL,
			    ("NULL bp->bio_driver1"));
			G_RAID3_NEXT_BIO(bp) = G_RAID3_NEXT_BIO(cbp);
			G_RAID3_NEXT_BIO(cbp) = NULL;
		}
		g_destroy_bio(cbp);
	}
}

static struct bio *
g_raid3_clone_bio(struct g_raid3_softc *sc, struct bio *pbp)
{
	struct bio *bp, *cbp;
	size_t size;
	int memflag;

	cbp = g_clone_bio(pbp);
	if (cbp == NULL)
		return (NULL);
	size = pbp->bio_length / (sc->sc_ndisks - 1);
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0)
		memflag = M_WAITOK;
	else
		memflag = M_NOWAIT;
	cbp->bio_data = g_raid3_alloc(sc, size, memflag);
	if (cbp->bio_data == NULL) {
		pbp->bio_children--;
		g_destroy_bio(cbp);
		return (NULL);
	}
	G_RAID3_NEXT_BIO(cbp) = NULL;
	if (G_RAID3_HEAD_BIO(pbp) == NULL)
		G_RAID3_HEAD_BIO(pbp) = cbp;
	else {
		G_RAID3_FOREACH_BIO(pbp, bp) {
			if (G_RAID3_NEXT_BIO(bp) == NULL) {
				G_RAID3_NEXT_BIO(bp) = cbp;
				break;
			}
		}
	}
	return (cbp);
}

static void
g_raid3_scatter(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *bp, *cbp, *tmpbp;
	off_t atom, cadd, padd, left;
	int first;

	sc = pbp->bio_to->geom->softc;
	bp = NULL;
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Find bio for which we should calculate data.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
				bp = cbp;
				break;
			}
		}
		KASSERT(bp != NULL, ("NULL parity bio."));
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if (cbp == bp)
				continue;
			bcopy(pbp->bio_data + padd, cbp->bio_data + cadd, atom);
			padd += atom;
		}
		cadd += atom;
	}
	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
		/*
		 * Calculate parity.
		 */
		first = 1;
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			if (cbp == bp)
				continue;
			if (first) {
				bcopy(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
				first = 0;
			} else {
				g_raid3_xor(cbp->bio_data, bp->bio_data,
				    bp->bio_length);
			}
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_NODISK) != 0)
				g_raid3_destroy_bio(sc, cbp);
		}
	}
	G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
		struct g_consumer *cp;

		disk = cbp->bio_caller2;
		cp = disk->d_consumer;
		cbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		sc->sc_writes++;
		g_io_request(cbp, cp);
	}
}

static void
g_raid3_gather(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *xbp, *fbp, *cbp;
	off_t atom, cadd, padd, left;

	sc = pbp->bio_to->geom->softc;
	/*
	 * Find the bio for which we have to calculate data.
	 * While going through this path, check if all requests succeeded;
	 * if not, deny the whole request.
	 * If we're in COMPLETE mode, we allow one request to fail, and if we
	 * find one, we resend it to the parity consumer.
	 * If there is more than one failed request, we deny the whole request.
	 */
	xbp = fbp = NULL;
	G_RAID3_FOREACH_BIO(pbp, cbp) {
		if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0) {
			KASSERT(xbp == NULL, ("More than one parity bio."));
			xbp = cbp;
		}
		if (cbp->bio_error == 0)
			continue;
		/*
		 * Found failed request.
		 */
		if (fbp == NULL) {
			if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_DEGRADED) != 0) {
				/*
				 * We are already in degraded mode, so we can't
				 * accept any failures.
				 */
				if (pbp->bio_error == 0)
					pbp->bio_error = cbp->bio_error;
			} else {
				fbp = cbp;
			}
		} else {
			/*
			 * Next failed request, that's too many.
			 */
			if (pbp->bio_error == 0)
				pbp->bio_error = fbp->bio_error;
		}
		disk = cbp->bio_caller2;
		if (disk == NULL)
			continue;
		if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
			disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
			G_RAID3_LOGREQ(0, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		} else {
			G_RAID3_LOGREQ(1, cbp, "Request failed (error=%d).",
			    cbp->bio_error);
		}
		if (g_raid3_disconnect_on_failure &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
		}
	}
	if (pbp->bio_error != 0)
		goto finish;
	if (fbp != NULL && (pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_VERIFY;
		if (xbp != fbp)
			g_raid3_replace_bio(xbp, fbp);
		g_raid3_destroy_bio(sc, fbp);
	} else if (fbp != NULL) {
		struct g_consumer *cp;

		/*
		 * One request failed, so send the same request to
		 * the parity consumer.
		 */
		disk = pbp->bio_driver2;
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
			pbp->bio_error = fbp->bio_error;
			goto finish;
		}
		pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_inbed--;
		fbp->bio_flags &= ~(BIO_DONE | BIO_ERROR);
		if (disk->d_no == sc->sc_ndisks - 1)
			fbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
		fbp->bio_error = 0;
		fbp->bio_completed = 0;
		fbp->bio_children = 0;
		fbp->bio_inbed = 0;
		cp = disk->d_consumer;
		fbp->bio_caller2 = disk;
		fbp->bio_to = cp->provider;
		G_RAID3_LOGREQ(3, fbp, "Sending request (recover).");
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(fbp, cp);
		return;
	}
	if (xbp != NULL) {
		/*
		 * Calculate parity.
		 */
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			if ((cbp->bio_cflags & G_RAID3_BIO_CFLAG_PARITY) != 0)
				continue;
			g_raid3_xor(cbp->bio_data, xbp->bio_data,
			    xbp->bio_length);
		}
		xbp->bio_cflags &= ~G_RAID3_BIO_CFLAG_PARITY;
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0) {
			if (!g_raid3_is_zero(xbp)) {
				g_raid3_parity_mismatch++;
				pbp->bio_error = EIO;
				goto finish;
			}
			g_raid3_destroy_bio(sc, xbp);
		}
	}
	atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
	cadd = padd = 0;
	for (left = pbp->bio_length; left > 0; left -= sc->sc_sectorsize) {
		G_RAID3_FOREACH_BIO(pbp, cbp) {
			bcopy(cbp->bio_data + cadd, pbp->bio_data + padd, atom);
			pbp->bio_completed += atom;
			padd += atom;
		}
		cadd += atom;
	}
finish:
	if (pbp->bio_error == 0)
		G_RAID3_LOGREQ(3, pbp, "Request finished.");
	else {
		if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_VERIFY) != 0)
			G_RAID3_LOGREQ(1, pbp, "Verification error.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
	}
	pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_MASK;
	while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
		g_raid3_destroy_bio(sc, cbp);
	g_io_deliver(pbp, pbp->bio_error);
}

static void
g_raid3_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_REGULAR;
	G_RAID3_LOGREQ(3, bp, "Regular request done (error=%d).", bp->bio_error);
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

static void
g_raid3_regular_request(struct bio *cbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = cbp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	cbp->bio_from->index--;
	if (cbp->bio_cmd == BIO_WRITE)
		sc->sc_writes--;
	disk = cbp->bio_from->private;
	if (disk == NULL) {
		g_topology_lock();
		g_raid3_kill_consumer(sc, cbp->bio_from);
		g_topology_unlock();
	}

	G_RAID3_LOGREQ(3, cbp, "Request finished.");
	pbp->bio_inbed++;
	KASSERT(pbp->bio_inbed <= pbp->bio_children,
	    ("bio_inbed (%u) is bigger than bio_children (%u).", pbp->bio_inbed,
	    pbp->bio_children));
	if (pbp->bio_inbed != pbp->bio_children)
		return;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_raid3_gather(pbp);
		break;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		int error = 0;

		pbp->bio_completed = pbp->bio_length;
		while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL) {
			if (cbp->bio_error == 0) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if (error == 0)
				error = cbp->bio_error;
			else if (pbp->bio_error == 0) {
				/*
				 * Next failed request, that's too many.
				 */
				pbp->bio_error = error;
			}

			disk = cbp->bio_caller2;
			if (disk == NULL) {
				g_raid3_destroy_bio(sc, cbp);
				continue;
			}

			if ((disk->d_flags & G_RAID3_DISK_FLAG_BROKEN) == 0) {
				disk->d_flags |= G_RAID3_DISK_FLAG_BROKEN;
				G_RAID3_LOGREQ(0, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			} else {
				G_RAID3_LOGREQ(1, cbp,
				    "Request failed (error=%d).",
				    cbp->bio_error);
			}
			if (g_raid3_disconnect_on_failure &&
			    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
				sc->sc_bump_id |= G_RAID3_BUMP_GENID;
				g_raid3_event_send(disk,
				    G_RAID3_DISK_STATE_DISCONNECTED,
				    G_RAID3_EVENT_DONTWAIT);
			}
			g_raid3_destroy_bio(sc, cbp);
		}
		if (pbp->bio_error == 0)
			G_RAID3_LOGREQ(3, pbp, "Request finished.");
		else
			G_RAID3_LOGREQ(0, pbp, "Request failed.");
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_DEGRADED;
		pbp->bio_pflags &= ~G_RAID3_BIO_PFLAG_NOPARITY;
		bioq_remove(&sc->sc_inflight, pbp);
		/* Release delayed sync requests if possible. */
		g_raid3_sync_release(sc);
		g_io_deliver(pbp, pbp->bio_error);
		break;
	    }
	}
}

static void
g_raid3_sync_done(struct bio *bp)
{
	struct g_raid3_softc *sc;

	G_RAID3_LOGREQ(3, bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_cflags |= G_RAID3_BIO_CFLAG_SYNC;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_head(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	wakeup(sc);
	wakeup(&sc->sc_queue);
}

static void
g_raid3_flush(struct g_raid3_softc *sc, struct bio *bp)
{
	struct bio_queue_head queue;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp;
	u_int i;

	bioq_init(&queue);
	for (i = 0; i < sc->sc_ndisks; i++) {
		disk = &sc->sc_disks[i];
		if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE)
			continue;
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			for (cbp = bioq_first(&queue); cbp != NULL;
			    cbp = bioq_first(&queue)) {
				bioq_remove(&queue, cbp);
				g_destroy_bio(cbp);
			}
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		bioq_insert_tail(&queue, cbp);
		cbp->bio_done = g_std_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
	}
	for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
		bioq_remove(&queue, cbp);
		G_RAID3_LOGREQ(3, cbp, "Sending request.");
		disk = cbp->bio_caller1;
		cbp->bio_caller1 = NULL;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		g_io_request(cbp, disk->d_consumer);
	}
}

static void
g_raid3_start(struct bio *bp)
{
	struct g_raid3_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_raid3_start() should not be called at all.
	 */
	KASSERT(sc != NULL && (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED ||
	    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE),
	    ("Provider's error should be set (error=%d)(device=%s).",
	    bp->bio_to->error, bp->bio_to->name));
	G_RAID3_LOGREQ(3, bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_FLUSH:
		g_raid3_flush(sc, bp);
		return;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_insert_tail(&sc->sc_queue, bp);
	mtx_unlock(&sc->sc_queue_mtx);
	G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
}

/*
 * Return TRUE if the given request is colliding with an in-progress
 * synchronization request.
 */
static int
g_raid3_sync_collision(struct g_raid3_softc *sc, struct bio *bp)
{
	struct g_raid3_disk *disk;
	struct bio *sbp;
	off_t rstart, rend, sstart, send;
	int i;

	disk = sc->sc_syncdisk;
	if (disk == NULL)
		return (0);
	rstart = bp->bio_offset;
	rend = bp->bio_offset + bp->bio_length;
	for (i = 0; i < g_raid3_syncreqs; i++) {
		sbp = disk->d_sync.ds_bios[i];
		if (sbp == NULL)
			continue;
		sstart = sbp->bio_offset;
		send = sbp->bio_length;
		if (sbp->bio_cmd == BIO_WRITE) {
			sstart *= sc->sc_ndisks - 1;
			send *= sc->sc_ndisks - 1;
		}
		send += sstart;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Return TRUE if the given sync request is colliding with an in-progress
 * regular request.
 */
static int
g_raid3_regular_collision(struct g_raid3_softc *sc, struct bio *sbp)
{
	off_t rstart, rend, sstart, send;
	struct bio *bp;

	if (sc->sc_syncdisk == NULL)
		return (0);
	sstart = sbp->bio_offset;
	send = sstart + sbp->bio_length;
	TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
		rstart = bp->bio_offset;
		rend = bp->bio_offset + bp->bio_length;
		if (rend > sstart && rstart < send)
			return (1);
	}
	return (0);
}

/*
 * Puts request onto delayed queue.
 */
static void
g_raid3_regular_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying request.");
	bioq_insert_head(&sc->sc_regular_delayed, bp);
}

/*
 * Puts synchronization request onto delayed queue.
 */
static void
g_raid3_sync_delay(struct g_raid3_softc *sc, struct bio *bp)
{

	G_RAID3_LOGREQ(2, bp, "Delaying synchronization request.");
	bioq_insert_tail(&sc->sc_sync_delayed, bp);
}

/*
 * Releases delayed regular requests which don't collide anymore with sync
 * requests.
 */
static void
g_raid3_regular_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
		if (g_raid3_sync_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_regular_delayed, bp);
		G_RAID3_LOGREQ(2, bp, "Releasing delayed request (%p).", bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_insert_head(&sc->sc_queue, bp);
#if 0
		/*
		 * wakeup() is not needed, because this function is called from
		 * the worker thread.
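		 * The request has just been put back at the head of sc_queue,
		 * so the worker picks it up on its next loop iteration anyway.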
		 */
		wakeup(&sc->sc_queue);
#endif
		mtx_unlock(&sc->sc_queue_mtx);
	}
}

/*
 * Releases delayed sync requests which don't collide anymore with regular
 * requests.
 */
static void
g_raid3_sync_release(struct g_raid3_softc *sc)
{
	struct bio *bp, *bp2;

	TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
		if (g_raid3_regular_collision(sc, bp))
			continue;
		bioq_remove(&sc->sc_sync_delayed, bp);
		G_RAID3_LOGREQ(2, bp,
		    "Releasing delayed synchronization request.");
		g_io_request(bp, bp->bio_from);
	}
}

/*
 * Handle synchronization requests.
 * Every synchronization request is a two-step process: first, a READ request
 * is sent to the active provider, and then a WRITE request (carrying the data
 * that was read) is sent to the provider being synchronized. When the WRITE
 * is finished, a new synchronization request is sent.
 */
static void
g_raid3_sync_request(struct bio *bp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;

	bp->bio_from->index--;
	sc = bp->bio_from->geom->softc;
	disk = bp->bio_from->private;
	if (disk == NULL) {
		sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. */
		g_topology_lock();
		g_raid3_kill_consumer(sc, bp->bio_from);
		g_topology_unlock();
		free(bp->bio_data, M_RAID3);
		g_destroy_bio(bp);
		sx_xlock(&sc->sc_lock);
		return;
	}

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
	    {
		struct g_consumer *cp;
		u_char *dst, *src;
		off_t left;
		u_int atom;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		atom = sc->sc_sectorsize / (sc->sc_ndisks - 1);
		dst = src = bp->bio_data;
		if (disk->d_no == sc->sc_ndisks - 1) {
			u_int n;

			/* Parity component. */
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += atom;
				for (n = 1; n < sc->sc_ndisks - 1; n++) {
					g_raid3_xor(src, dst, atom);
					src += atom;
				}
				dst += atom;
			}
		} else {
			/* Regular component. */
			src += atom * disk->d_no;
			for (left = bp->bio_length; left > 0;
			    left -= sc->sc_sectorsize) {
				bcopy(src, dst, atom);
				src += sc->sc_sectorsize;
				dst += atom;
			}
		}
		bp->bio_driver1 = bp->bio_driver2 = NULL;
		bp->bio_pflags = 0;
		bp->bio_offset /= sc->sc_ndisks - 1;
		bp->bio_length /= sc->sc_ndisks - 1;
		bp->bio_cmd = BIO_WRITE;
		bp->bio_cflags = 0;
		bp->bio_children = bp->bio_inbed = 0;
		cp = disk->d_consumer;
		KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
		    ("Consumer %s not opened (r%dw%de%d).", cp->provider->name,
		    cp->acr, cp->acw, cp->ace));
		cp->index++;
		g_io_request(bp, cp);
		return;
	    }
	case BIO_WRITE:
	    {
		struct g_raid3_disk_sync *sync;
		off_t boffset, moffset;
		void *data;
		int i;

		if (bp->bio_error != 0) {
			G_RAID3_LOGREQ(0, bp,
			    "Synchronization request failed (error=%d).",
			    bp->bio_error);
			g_destroy_bio(bp);
			sc->sc_bump_id |= G_RAID3_BUMP_GENID;
			g_raid3_event_send(disk,
			    G_RAID3_DISK_STATE_DISCONNECTED,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}
		G_RAID3_LOGREQ(3, bp, "Synchronization request finished.");
		sync = &disk->d_sync;
		if (sync->ds_offset == sc->sc_mediasize / (sc->sc_ndisks - 1) ||
		    sync->ds_consumer == NULL ||
		    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
			/* Don't send more synchronization requests. */
			sync->ds_inflight--;
			if (sync->ds_bios != NULL) {
				i = (int)(uintptr_t)bp->bio_caller1;
				sync->ds_bios[i] = NULL;
			}
			free(bp->bio_data, M_RAID3);
			g_destroy_bio(bp);
			if (sync->ds_inflight > 0)
				return;
			if (sync->ds_consumer == NULL ||
			    (sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0) {
				return;
			}
			/*
			 * Disk up-to-date, activate it.
			 */
			g_raid3_event_send(disk, G_RAID3_DISK_STATE_ACTIVE,
			    G_RAID3_EVENT_DONTWAIT);
			return;
		}

		/* Send next synchronization request. */
		data = bp->bio_data;
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = BIO_READ;
		bp->bio_offset = sync->ds_offset * (sc->sc_ndisks - 1);
		bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset);
		sync->ds_offset += bp->bio_length / (sc->sc_ndisks - 1);
		bp->bio_done = g_raid3_sync_done;
		bp->bio_data = data;
		bp->bio_from = sync->ds_consumer;
		bp->bio_to = sc->sc_provider;
		G_RAID3_LOGREQ(3, bp, "Sending synchronization request.");
		sync->ds_consumer->index++;
		/*
		 * Delay the request if it is colliding with a regular request.
		 */
		if (g_raid3_regular_collision(sc, bp))
			g_raid3_sync_delay(sc, bp);
		else
			g_io_request(bp, sync->ds_consumer);

		/* Release delayed requests if possible. */
		g_raid3_regular_release(sc);

		/* Find the smallest offset. */
		moffset = sc->sc_mediasize;
		for (i = 0; i < g_raid3_syncreqs; i++) {
			bp = sync->ds_bios[i];
			boffset = bp->bio_offset;
			if (bp->bio_cmd == BIO_WRITE)
				boffset *= sc->sc_ndisks - 1;
			if (boffset < moffset)
				moffset = boffset;
		}
		if (sync->ds_offset_done + (MAXPHYS * 100) < moffset) {
			/* Update offset_done on every 100 blocks. */
			sync->ds_offset_done = moffset;
			g_raid3_update_metadata(disk);
		}
		return;
	    }
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_name));
		break;
	}
}

static int
g_raid3_register_request(struct bio *pbp)
{
	struct g_raid3_softc *sc;
	struct g_raid3_disk *disk;
	struct g_consumer *cp;
	struct bio *cbp, *tmpbp;
	off_t offset, length;
	u_int n, ndisks;
	int round_robin, verify;

	ndisks = 0;
	sc = pbp->bio_to->geom->softc;
	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
	    sc->sc_syncdisk == NULL) {
		g_io_deliver(pbp, EIO);
		return (0);
	}
	g_raid3_init_bio(pbp);
	length = pbp->bio_length / (sc->sc_ndisks - 1);
	offset = pbp->bio_offset / (sc->sc_ndisks - 1);
	round_robin = verify = 0;
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			pbp->bio_pflags |= G_RAID3_BIO_PFLAG_VERIFY;
			verify = 1;
			ndisks = sc->sc_ndisks;
		} else {
			verify = 0;
			ndisks = sc->sc_ndisks - 1;
		}
		if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0 &&
		    sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) {
			round_robin = 1;
		} else {
			round_robin = 0;
		}
		KASSERT(!round_robin || !verify,
		    ("ROUND-ROBIN and VERIFY are mutually exclusive."));
		pbp->bio_driver2 = &sc->sc_disks[sc->sc_ndisks - 1];
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Delay the request if it is colliding with a synchronization
		 * request.
		 */
		if (g_raid3_sync_collision(sc, pbp)) {
			g_raid3_regular_delay(sc, pbp);
			return (0);
		}

		if (sc->sc_idle)
			g_raid3_unidle(sc);
		else
			sc->sc_last_write = time_uptime;

		ndisks = sc->sc_ndisks;
		break;
	}
	for (n = 0; n < ndisks; n++) {
		disk = &sc->sc_disks[n];
		cbp = g_raid3_clone_bio(sc, pbp);
		if (cbp == NULL) {
			while ((cbp = G_RAID3_HEAD_BIO(pbp)) != NULL)
				g_raid3_destroy_bio(sc, cbp);
			/*
			 * To prevent deadlock, we must run back up
			 * with the ENOMEM for failed requests of any
			 * of our consumers.  Our own sync requests
			 * can stick around, as they are finite.
			 */
			if ((pbp->bio_cflags &
			    G_RAID3_BIO_CFLAG_REGULAR) != 0) {
				g_io_deliver(pbp, ENOMEM);
				return (0);
			}
			return (ENOMEM);
		}
		cbp->bio_offset = offset;
		cbp->bio_length = length;
		cbp->bio_done = g_raid3_done;
		switch (pbp->bio_cmd) {
		case BIO_READ:
			if (disk->d_state != G_RAID3_DISK_STATE_ACTIVE) {
				/*
				 * Replace invalid component with the parity
				 * component.
				 */
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
			} else if (round_robin &&
			    disk->d_no == sc->sc_round_robin) {
				/*
				 * In round-robin mode skip one data component
				 * and use parity component when reading.
				 */
				pbp->bio_driver2 = disk;
				disk = &sc->sc_disks[sc->sc_ndisks - 1];
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
				sc->sc_round_robin++;
				round_robin = 0;
			} else if (verify && disk->d_no == sc->sc_ndisks - 1) {
				cbp->bio_cflags |= G_RAID3_BIO_CFLAG_PARITY;
			}
			break;
		case BIO_WRITE:
		case BIO_DELETE:
			if (disk->d_state == G_RAID3_DISK_STATE_ACTIVE ||
			    disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) {
				if (n == ndisks - 1) {
					/*
					 * Active parity component, mark it as
					 * such.
					 */
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_PARITY;
				}
			} else {
				pbp->bio_pflags |= G_RAID3_BIO_PFLAG_DEGRADED;
				if (n == ndisks - 1) {
					/*
					 * Parity component is not connected,
					 * so destroy its request.
					 */
					pbp->bio_pflags |=
					    G_RAID3_BIO_PFLAG_NOPARITY;
					g_raid3_destroy_bio(sc, cbp);
					cbp = NULL;
				} else {
					cbp->bio_cflags |=
					    G_RAID3_BIO_CFLAG_NODISK;
					disk = NULL;
				}
			}
			break;
		}
		if (cbp != NULL)
			cbp->bio_caller2 = disk;
	}
	switch (pbp->bio_cmd) {
	case BIO_READ:
		if (round_robin) {
			/*
			 * If we are in round-robin mode and 'round_robin' is
			 * still 1, it means that we skipped the parity
			 * component for this read and must reset the
			 * sc_round_robin field.
			 */
			sc->sc_round_robin = 0;
		}
		G_RAID3_FOREACH_SAFE_BIO(pbp, cbp, tmpbp) {
			disk = cbp->bio_caller2;
			cp = disk->d_consumer;
			cbp->bio_to = cp->provider;
			G_RAID3_LOGREQ(3, cbp, "Sending request.");
			KASSERT(cp->acr >= 1 && cp->acw >= 1 && cp->ace >= 1,
			    ("Consumer %s not opened (r%dw%de%d).",
			    cp->provider->name, cp->acr, cp->acw, cp->ace));
			cp->index++;
			g_io_request(cbp, cp);
		}
		break;
	case BIO_WRITE:
	case BIO_DELETE:
		/*
		 * Put request onto inflight queue, so we can check if new
		 * synchronization requests don't collide with it.
		 */
		bioq_insert_tail(&sc->sc_inflight, pbp);

		/*
		 * Bump syncid on first write.
		 */
		if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) {
			sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID;
			g_raid3_bump_syncid(sc);
		}
		g_raid3_scatter(pbp);
		break;
	}
	return (0);
}

static int
g_raid3_can_destroy(struct g_raid3_softc *sc)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	gp = sc->sc_geom;
	if (gp->softc == NULL)
		return (1);
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_raid3_is_busy(sc, cp))
			return (0);
	}
	gp = sc->sc_sync.ds_geom;
	LIST_FOREACH(cp, &gp->consumer, consumer) {
		if (g_raid3_is_busy(sc, cp))
			return (0);
	}
	G_RAID3_DEBUG(2, "No I/O requests for %s, it can be destroyed.",
	    sc->sc_name);
	return (1);
}

static int
g_raid3_try_destroy(struct g_raid3_softc *sc)
{

	g_topology_assert_not();
	sx_assert(&sc->sc_lock, SX_XLOCKED);

	if (sc->sc_rootmount != NULL) {
		G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__,
		    sc->sc_rootmount);
		root_mount_rel(sc->sc_rootmount);
		sc->sc_rootmount = NULL;
	}

	g_topology_lock();
	if (!g_raid3_can_destroy(sc)) {
		g_topology_unlock();
		return (0);
	}
	sc->sc_geom->softc = NULL;
	sc->sc_sync.ds_geom->softc = NULL;
	if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_WAIT) != 0) {
		g_topology_unlock();
		G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__,
		    &sc->sc_worker);
		/* Unlock sc_lock here, as it can be destroyed after wakeup. */
		sx_xunlock(&sc->sc_lock);
		wakeup(&sc->sc_worker);
		sc->sc_worker = NULL;
	} else {
		g_topology_unlock();
		g_raid3_destroy_device(sc);
		free(sc->sc_disks, M_RAID3);
		free(sc, M_RAID3);
	}
	return (1);
}

/*
 * Worker thread.
 */
static void
g_raid3_worker(void *arg)
{
	struct g_raid3_softc *sc;
	struct g_raid3_event *ep;
	struct bio *bp;
	int timeout;

	sc = arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	sx_xlock(&sc->sc_lock);
	for (;;) {
		G_RAID3_DEBUG(5, "%s: Let's see...", __func__);
		/*
		 * First take a look at events.
		 * This is important to handle events before any I/O requests.
		 */
		ep = g_raid3_event_get(sc);
		if (ep != NULL) {
			g_raid3_event_remove(sc, ep);
			if ((ep->e_flags & G_RAID3_EVENT_DEVICE) != 0) {
				/* Update only device status. */
				G_RAID3_DEBUG(3,
				    "Running event for device %s.",
				    sc->sc_name);
				ep->e_error = 0;
				g_raid3_update_device(sc, 1);
			} else {
				/* Update disk status. */
*/ 2055 G_RAID3_DEBUG(3, "Running event for disk %s.", 2056 g_raid3_get_diskname(ep->e_disk)); 2057 ep->e_error = g_raid3_update_disk(ep->e_disk, 2058 ep->e_state); 2059 if (ep->e_error == 0) 2060 g_raid3_update_device(sc, 0); 2061 } 2062 if ((ep->e_flags & G_RAID3_EVENT_DONTWAIT) != 0) { 2063 KASSERT(ep->e_error == 0, 2064 ("Error cannot be handled.")); 2065 g_raid3_event_free(ep); 2066 } else { 2067 ep->e_flags |= G_RAID3_EVENT_DONE; 2068 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, 2069 ep); 2070 mtx_lock(&sc->sc_events_mtx); 2071 wakeup(ep); 2072 mtx_unlock(&sc->sc_events_mtx); 2073 } 2074 if ((sc->sc_flags & 2075 G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 2076 if (g_raid3_try_destroy(sc)) { 2077 curthread->td_pflags &= ~TDP_GEOM; 2078 G_RAID3_DEBUG(1, "Thread exiting."); 2079 kproc_exit(0); 2080 } 2081 } 2082 G_RAID3_DEBUG(5, "%s: I'm here 1.", __func__); 2083 continue; 2084 } 2085 /* 2086 * Check if we can mark the array as CLEAN and, if we can't, 2087 * how many seconds we should wait. 2088 */ 2089 timeout = g_raid3_idle(sc, -1); 2090 /* 2091 * Now I/O requests. 2092 */ 2093 /* Get first request from the queue. */ 2094 mtx_lock(&sc->sc_queue_mtx); 2095 bp = bioq_first(&sc->sc_queue); 2096 if (bp == NULL) { 2097 if ((sc->sc_flags & 2098 G_RAID3_DEVICE_FLAG_DESTROY) != 0) { 2099 mtx_unlock(&sc->sc_queue_mtx); 2100 if (g_raid3_try_destroy(sc)) { 2101 curthread->td_pflags &= ~TDP_GEOM; 2102 G_RAID3_DEBUG(1, "Thread exiting."); 2103 kproc_exit(0); 2104 } 2105 mtx_lock(&sc->sc_queue_mtx); 2106 } 2107 sx_xunlock(&sc->sc_lock); 2108 /* 2109 * XXX: We can miss an event here, because an event 2110 * can be added without sx-device-lock and without 2111 * mtx-queue-lock. Maybe I should just stop using 2112 * dedicated mutex for events synchronization and 2113 * stick with the queue lock? 2114 * The event will hang here until the next I/O request 2115 * or the next event is received. 2116 */ 2117 MSLEEP(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "r3:w1", 2118 timeout * hz); 2119 sx_xlock(&sc->sc_lock); 2120 G_RAID3_DEBUG(5, "%s: I'm here 4.", __func__); 2121 continue; 2122 } 2123 process: 2124 bioq_remove(&sc->sc_queue, bp); 2125 mtx_unlock(&sc->sc_queue_mtx); 2126 2127 if (bp->bio_from->geom == sc->sc_sync.ds_geom && 2128 (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) { 2129 g_raid3_sync_request(bp); /* READ */ 2130 } else if (bp->bio_to != sc->sc_provider) { 2131 if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) != 0) 2132 g_raid3_regular_request(bp); 2133 else if ((bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) != 0) 2134 g_raid3_sync_request(bp); /* WRITE */ 2135 else { 2136 KASSERT(0, 2137 ("Invalid request cflags=0x%hhx to=%s.", 2138 bp->bio_cflags, bp->bio_to->name)); 2139 } 2140 } else if (g_raid3_register_request(bp) != 0) { 2141 mtx_lock(&sc->sc_queue_mtx); 2142 bioq_insert_head(&sc->sc_queue, bp); 2143 /* 2144 * We are short on memory, so let's see if there are finished 2145 * requests we can free. 2146 */ 2147 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 2148 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_REGULAR) 2149 goto process; 2150 } 2151 /* 2152 * No finished regular requests, so at least keep 2153 * synchronization running.
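 * If there are none of those either, drop sc_lock and nap briefly
 * (the "r3:lowmem" sleep below) before retrying the queue.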
2154 */ 2155 TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) { 2156 if (bp->bio_cflags & G_RAID3_BIO_CFLAG_SYNC) 2157 goto process; 2158 } 2159 sx_xunlock(&sc->sc_lock); 2160 MSLEEP(&sc->sc_queue, &sc->sc_queue_mtx, PRIBIO | PDROP, 2161 "r3:lowmem", hz / 10); 2162 sx_xlock(&sc->sc_lock); 2163 } 2164 G_RAID3_DEBUG(5, "%s: I'm here 9.", __func__); 2165 } 2166 } 2167 2168 static void 2169 g_raid3_update_idle(struct g_raid3_softc *sc, struct g_raid3_disk *disk) 2170 { 2171 2172 sx_assert(&sc->sc_lock, SX_LOCKED); 2173 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) != 0) 2174 return; 2175 if (!sc->sc_idle && (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) == 0) { 2176 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as dirty.", 2177 g_raid3_get_diskname(disk), sc->sc_name); 2178 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 2179 } else if (sc->sc_idle && 2180 (disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) { 2181 G_RAID3_DEBUG(1, "Disk %s (device %s) marked as clean.", 2182 g_raid3_get_diskname(disk), sc->sc_name); 2183 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2184 } 2185 } 2186 2187 static void 2188 g_raid3_sync_start(struct g_raid3_softc *sc) 2189 { 2190 struct g_raid3_disk *disk; 2191 struct g_consumer *cp; 2192 struct bio *bp; 2193 int error; 2194 u_int n; 2195 2196 g_topology_assert_not(); 2197 sx_assert(&sc->sc_lock, SX_XLOCKED); 2198 2199 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 2200 ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 2201 sc->sc_state)); 2202 KASSERT(sc->sc_syncdisk == NULL, ("Syncdisk is not NULL (%s, %u).", 2203 sc->sc_name, sc->sc_state)); 2204 disk = NULL; 2205 for (n = 0; n < sc->sc_ndisks; n++) { 2206 if (sc->sc_disks[n].d_state != G_RAID3_DISK_STATE_SYNCHRONIZING) 2207 continue; 2208 disk = &sc->sc_disks[n]; 2209 break; 2210 } 2211 if (disk == NULL) 2212 return; 2213 2214 sx_xunlock(&sc->sc_lock); 2215 g_topology_lock(); 2216 cp = g_new_consumer(sc->sc_sync.ds_geom); 2217 error = g_attach(cp, sc->sc_provider); 2218 KASSERT(error == 0, 2219 ("Cannot attach to %s (error=%d).", sc->sc_name, error)); 2220 error = g_access(cp, 1, 0, 0); 2221 KASSERT(error == 0, ("Cannot open %s (error=%d).", sc->sc_name, error)); 2222 g_topology_unlock(); 2223 sx_xlock(&sc->sc_lock); 2224 2225 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s.", sc->sc_name, 2226 g_raid3_get_diskname(disk)); 2227 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOFAILSYNC) == 0) 2228 disk->d_flags |= G_RAID3_DISK_FLAG_DIRTY; 2229 KASSERT(disk->d_sync.ds_consumer == NULL, 2230 ("Sync consumer already exists (device=%s, disk=%s).", 2231 sc->sc_name, g_raid3_get_diskname(disk))); 2232 2233 disk->d_sync.ds_consumer = cp; 2234 disk->d_sync.ds_consumer->private = disk; 2235 disk->d_sync.ds_consumer->index = 0; 2236 sc->sc_syncdisk = disk; 2237 2238 /* 2239 * Allocate memory for synchronization bios and initialize them. 
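 * Each bio reads up to MAXPHYS bytes from the raid3 provider; ds_offset
 * counts bytes per component, so provider offsets and lengths are scaled
 * by (sc_ndisks - 1).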
2240 */ 2241 disk->d_sync.ds_bios = malloc(sizeof(struct bio *) * g_raid3_syncreqs, 2242 M_RAID3, M_WAITOK); 2243 for (n = 0; n < g_raid3_syncreqs; n++) { 2244 bp = g_alloc_bio(); 2245 disk->d_sync.ds_bios[n] = bp; 2246 bp->bio_parent = NULL; 2247 bp->bio_cmd = BIO_READ; 2248 bp->bio_data = malloc(MAXPHYS, M_RAID3, M_WAITOK); 2249 bp->bio_cflags = 0; 2250 bp->bio_offset = disk->d_sync.ds_offset * (sc->sc_ndisks - 1); 2251 bp->bio_length = MIN(MAXPHYS, sc->sc_mediasize - bp->bio_offset); 2252 disk->d_sync.ds_offset += bp->bio_length / (sc->sc_ndisks - 1); 2253 bp->bio_done = g_raid3_sync_done; 2254 bp->bio_from = disk->d_sync.ds_consumer; 2255 bp->bio_to = sc->sc_provider; 2256 bp->bio_caller1 = (void *)(uintptr_t)n; 2257 } 2258 2259 /* Set the number of in-flight synchronization requests. */ 2260 disk->d_sync.ds_inflight = g_raid3_syncreqs; 2261 2262 /* 2263 * Fire off first synchronization requests. 2264 */ 2265 for (n = 0; n < g_raid3_syncreqs; n++) { 2266 bp = disk->d_sync.ds_bios[n]; 2267 G_RAID3_LOGREQ(3, bp, "Sending synchronization request."); 2268 disk->d_sync.ds_consumer->index++; 2269 /* 2270 * Delay the request if it is colliding with a regular request. 2271 */ 2272 if (g_raid3_regular_collision(sc, bp)) 2273 g_raid3_sync_delay(sc, bp); 2274 else 2275 g_io_request(bp, disk->d_sync.ds_consumer); 2276 } 2277 } 2278 2279 /* 2280 * Stop synchronization process. 2281 * type: 0 - synchronization finished 2282 * 1 - synchronization stopped 2283 */ 2284 static void 2285 g_raid3_sync_stop(struct g_raid3_softc *sc, int type) 2286 { 2287 struct g_raid3_disk *disk; 2288 struct g_consumer *cp; 2289 2290 g_topology_assert_not(); 2291 sx_assert(&sc->sc_lock, SX_LOCKED); 2292 2293 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED, 2294 ("Device not in DEGRADED state (%s, %u).", sc->sc_name, 2295 sc->sc_state)); 2296 disk = sc->sc_syncdisk; 2297 sc->sc_syncdisk = NULL; 2298 KASSERT(disk != NULL, ("No disk was synchronized (%s).", sc->sc_name)); 2299 KASSERT(disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2300 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2301 g_raid3_disk_state2str(disk->d_state))); 2302 if (disk->d_sync.ds_consumer == NULL) 2303 return; 2304 2305 if (type == 0) { 2306 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s finished.", 2307 sc->sc_name, g_raid3_get_diskname(disk)); 2308 } else /* if (type == 1) */ { 2309 G_RAID3_DEBUG(0, "Device %s: rebuilding provider %s stopped.", 2310 sc->sc_name, g_raid3_get_diskname(disk)); 2311 } 2312 free(disk->d_sync.ds_bios, M_RAID3); 2313 disk->d_sync.ds_bios = NULL; 2314 cp = disk->d_sync.ds_consumer; 2315 disk->d_sync.ds_consumer = NULL; 2316 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2317 sx_xunlock(&sc->sc_lock); /* Avoid recursion on sc_lock. 
*/ 2318 g_topology_lock(); 2319 g_raid3_kill_consumer(sc, cp); 2320 g_topology_unlock(); 2321 sx_xlock(&sc->sc_lock); 2322 } 2323 2324 static void 2325 g_raid3_launch_provider(struct g_raid3_softc *sc) 2326 { 2327 struct g_provider *pp; 2328 struct g_raid3_disk *disk; 2329 int n; 2330 2331 sx_assert(&sc->sc_lock, SX_LOCKED); 2332 2333 g_topology_lock(); 2334 pp = g_new_providerf(sc->sc_geom, "raid3/%s", sc->sc_name); 2335 pp->mediasize = sc->sc_mediasize; 2336 pp->sectorsize = sc->sc_sectorsize; 2337 pp->stripesize = 0; 2338 pp->stripeoffset = 0; 2339 for (n = 0; n < sc->sc_ndisks; n++) { 2340 disk = &sc->sc_disks[n]; 2341 if (disk->d_consumer && disk->d_consumer->provider && 2342 disk->d_consumer->provider->stripesize > pp->stripesize) { 2343 pp->stripesize = disk->d_consumer->provider->stripesize; 2344 pp->stripeoffset = disk->d_consumer->provider->stripeoffset; 2345 } 2346 } 2347 pp->stripesize *= sc->sc_ndisks - 1; 2348 pp->stripeoffset *= sc->sc_ndisks - 1; 2349 sc->sc_provider = pp; 2350 g_error_provider(pp, 0); 2351 g_topology_unlock(); 2352 G_RAID3_DEBUG(0, "Device %s launched (%u/%u).", pp->name, 2353 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE), sc->sc_ndisks); 2354 2355 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED) 2356 g_raid3_sync_start(sc); 2357 } 2358 2359 static void 2360 g_raid3_destroy_provider(struct g_raid3_softc *sc) 2361 { 2362 struct bio *bp; 2363 2364 g_topology_assert_not(); 2365 KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", 2366 sc->sc_name)); 2367 2368 g_topology_lock(); 2369 g_error_provider(sc->sc_provider, ENXIO); 2370 mtx_lock(&sc->sc_queue_mtx); 2371 while ((bp = bioq_first(&sc->sc_queue)) != NULL) { 2372 bioq_remove(&sc->sc_queue, bp); 2373 g_io_deliver(bp, ENXIO); 2374 } 2375 mtx_unlock(&sc->sc_queue_mtx); 2376 G_RAID3_DEBUG(0, "Device %s: provider %s destroyed.", sc->sc_name, 2377 sc->sc_provider->name); 2378 sc->sc_provider->flags |= G_PF_WITHER; 2379 g_orphan_provider(sc->sc_provider, ENXIO); 2380 g_topology_unlock(); 2381 sc->sc_provider = NULL; 2382 if (sc->sc_syncdisk != NULL) 2383 g_raid3_sync_stop(sc, 1); 2384 } 2385 2386 static void 2387 g_raid3_go(void *arg) 2388 { 2389 struct g_raid3_softc *sc; 2390 2391 sc = arg; 2392 G_RAID3_DEBUG(0, "Force device %s start due to timeout.", sc->sc_name); 2393 g_raid3_event_send(sc, 0, 2394 G_RAID3_EVENT_DONTWAIT | G_RAID3_EVENT_DEVICE); 2395 } 2396 2397 static u_int 2398 g_raid3_determine_state(struct g_raid3_disk *disk) 2399 { 2400 struct g_raid3_softc *sc; 2401 u_int state; 2402 2403 sc = disk->d_softc; 2404 if (sc->sc_syncid == disk->d_sync.ds_syncid) { 2405 if ((disk->d_flags & 2406 G_RAID3_DISK_FLAG_SYNCHRONIZING) == 0) { 2407 /* Disk does not need synchronization. */ 2408 state = G_RAID3_DISK_STATE_ACTIVE; 2409 } else { 2410 if ((sc->sc_flags & 2411 G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2412 (disk->d_flags & 2413 G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 2414 /* 2415 * We can start synchronization from 2416 * the stored offset. 2417 */ 2418 state = G_RAID3_DISK_STATE_SYNCHRONIZING; 2419 } else { 2420 state = G_RAID3_DISK_STATE_STALE; 2421 } 2422 } 2423 } else if (disk->d_sync.ds_syncid < sc->sc_syncid) { 2424 /* 2425 * Reset all synchronization data for this disk, 2426 * because if it even was synchronized, it was 2427 * synchronized to disks with different syncid. 
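 * The stored synchronization offset is zeroed below, so synchronization
 * (when it is allowed) will restart from the beginning.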
2428 */ 2429 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 2430 disk->d_sync.ds_offset = 0; 2431 disk->d_sync.ds_offset_done = 0; 2432 disk->d_sync.ds_syncid = sc->sc_syncid; 2433 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) == 0 || 2434 (disk->d_flags & G_RAID3_DISK_FLAG_FORCE_SYNC) != 0) { 2435 state = G_RAID3_DISK_STATE_SYNCHRONIZING; 2436 } else { 2437 state = G_RAID3_DISK_STATE_STALE; 2438 } 2439 } else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ { 2440 /* 2441 * Not good, NOT GOOD! 2442 * It means that the device was started on stale disks 2443 * and a fresher disk has just arrived. 2444 * If there were writes, the device is broken, sorry. 2445 * I think the best choice here is not to touch 2446 * this disk and to inform the user loudly. 2447 */ 2448 G_RAID3_DEBUG(0, "Device %s was started before the freshest " 2449 "disk (%s) arrived! It will not be connected to the " 2450 "running device.", sc->sc_name, 2451 g_raid3_get_diskname(disk)); 2452 g_raid3_destroy_disk(disk); 2453 state = G_RAID3_DISK_STATE_NONE; 2454 /* Return immediately, because disk was destroyed. */ 2455 return (state); 2456 } 2457 G_RAID3_DEBUG(3, "State for %s disk: %s.", 2458 g_raid3_get_diskname(disk), g_raid3_disk_state2str(state)); 2459 return (state); 2460 } 2461 2462 /* 2463 * Update device state. 2464 */ 2465 static void 2466 g_raid3_update_device(struct g_raid3_softc *sc, boolean_t force) 2467 { 2468 struct g_raid3_disk *disk; 2469 u_int state; 2470 2471 sx_assert(&sc->sc_lock, SX_XLOCKED); 2472 2473 switch (sc->sc_state) { 2474 case G_RAID3_DEVICE_STATE_STARTING: 2475 { 2476 u_int n, ndirty, ndisks, genid, syncid; 2477 2478 KASSERT(sc->sc_provider == NULL, 2479 ("Non-NULL provider in STARTING state (%s).", sc->sc_name)); 2480 /* 2481 * Are we ready? We are, if all disks are connected or 2482 * one disk is missing and 'force' is true. 2483 */ 2484 if (g_raid3_ndisks(sc, -1) + force == sc->sc_ndisks) { 2485 if (!force) 2486 callout_drain(&sc->sc_callout); 2487 } else { 2488 if (force) { 2489 /* 2490 * Timeout expired, so destroy device. 2491 */ 2492 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2493 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", 2494 __LINE__, sc->sc_rootmount); 2495 root_mount_rel(sc->sc_rootmount); 2496 sc->sc_rootmount = NULL; 2497 } 2498 return; 2499 } 2500 2501 /* 2502 * Find the biggest genid. 2503 */ 2504 genid = 0; 2505 for (n = 0; n < sc->sc_ndisks; n++) { 2506 disk = &sc->sc_disks[n]; 2507 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2508 continue; 2509 if (disk->d_genid > genid) 2510 genid = disk->d_genid; 2511 } 2512 sc->sc_genid = genid; 2513 /* 2514 * Remove all disks without the biggest genid. 2515 */ 2516 for (n = 0; n < sc->sc_ndisks; n++) { 2517 disk = &sc->sc_disks[n]; 2518 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2519 continue; 2520 if (disk->d_genid < genid) { 2521 G_RAID3_DEBUG(0, 2522 "Component %s (device %s) broken, skipping.", 2523 g_raid3_get_diskname(disk), sc->sc_name); 2524 g_raid3_destroy_disk(disk); 2525 } 2526 } 2527 2528 /* 2529 * There must be at least 'sc->sc_ndisks - 1' components 2530 * with the same syncid and without the SYNCHRONIZING flag. 2531 */ 2532 2533 /* 2534 * Find the biggest syncid, the number of valid components and 2535 * the number of dirty components.
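 * Components with a lower syncid, or still marked SYNCHRONIZING, are not
 * counted as valid.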
2536 */ 2537 ndirty = ndisks = syncid = 0; 2538 for (n = 0; n < sc->sc_ndisks; n++) { 2539 disk = &sc->sc_disks[n]; 2540 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2541 continue; 2542 if ((disk->d_flags & G_RAID3_DISK_FLAG_DIRTY) != 0) 2543 ndirty++; 2544 if (disk->d_sync.ds_syncid > syncid) { 2545 syncid = disk->d_sync.ds_syncid; 2546 ndisks = 0; 2547 } else if (disk->d_sync.ds_syncid < syncid) { 2548 continue; 2549 } 2550 if ((disk->d_flags & 2551 G_RAID3_DISK_FLAG_SYNCHRONIZING) != 0) { 2552 continue; 2553 } 2554 ndisks++; 2555 } 2556 /* 2557 * Do we have enough valid components? 2558 */ 2559 if (ndisks + 1 < sc->sc_ndisks) { 2560 G_RAID3_DEBUG(0, 2561 "Device %s is broken, too few valid components.", 2562 sc->sc_name); 2563 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2564 return; 2565 } 2566 /* 2567 * If there is one DIRTY component and all disks are present, 2568 * mark it for synchronization. If there is more than one DIRTY 2569 * component, mark parity component for synchronization. 2570 */ 2571 if (ndisks == sc->sc_ndisks && ndirty == 1) { 2572 for (n = 0; n < sc->sc_ndisks; n++) { 2573 disk = &sc->sc_disks[n]; 2574 if ((disk->d_flags & 2575 G_RAID3_DISK_FLAG_DIRTY) == 0) { 2576 continue; 2577 } 2578 disk->d_flags |= 2579 G_RAID3_DISK_FLAG_SYNCHRONIZING; 2580 } 2581 } else if (ndisks == sc->sc_ndisks && ndirty > 1) { 2582 disk = &sc->sc_disks[sc->sc_ndisks - 1]; 2583 disk->d_flags |= G_RAID3_DISK_FLAG_SYNCHRONIZING; 2584 } 2585 2586 sc->sc_syncid = syncid; 2587 if (force) { 2588 /* Remember to bump syncid on first write. */ 2589 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 2590 } 2591 if (ndisks == sc->sc_ndisks) 2592 state = G_RAID3_DEVICE_STATE_COMPLETE; 2593 else /* if (ndisks == sc->sc_ndisks - 1) */ 2594 state = G_RAID3_DEVICE_STATE_DEGRADED; 2595 G_RAID3_DEBUG(1, "Device %s state changed from %s to %s.", 2596 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2597 g_raid3_device_state2str(state)); 2598 sc->sc_state = state; 2599 for (n = 0; n < sc->sc_ndisks; n++) { 2600 disk = &sc->sc_disks[n]; 2601 if (disk->d_state == G_RAID3_DISK_STATE_NODISK) 2602 continue; 2603 state = g_raid3_determine_state(disk); 2604 g_raid3_event_send(disk, state, G_RAID3_EVENT_DONTWAIT); 2605 if (state == G_RAID3_DISK_STATE_STALE) 2606 sc->sc_bump_id |= G_RAID3_BUMP_SYNCID; 2607 } 2608 break; 2609 } 2610 case G_RAID3_DEVICE_STATE_DEGRADED: 2611 /* 2612 * Genid need to be bumped immediately, so do it here. 
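 * (Components whose metadata still carries an older genid are later
 * rejected as broken; see the genid check in g_raid3_add_disk().)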
2613 */ 2614 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2615 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2616 g_raid3_bump_genid(sc); 2617 } 2618 2619 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 2620 return; 2621 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < 2622 sc->sc_ndisks - 1) { 2623 if (sc->sc_provider != NULL) 2624 g_raid3_destroy_provider(sc); 2625 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 2626 return; 2627 } 2628 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 2629 sc->sc_ndisks) { 2630 state = G_RAID3_DEVICE_STATE_COMPLETE; 2631 G_RAID3_DEBUG(1, 2632 "Device %s state changed from %s to %s.", 2633 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2634 g_raid3_device_state2str(state)); 2635 sc->sc_state = state; 2636 } 2637 if (sc->sc_provider == NULL) 2638 g_raid3_launch_provider(sc); 2639 if (sc->sc_rootmount != NULL) { 2640 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2641 sc->sc_rootmount); 2642 root_mount_rel(sc->sc_rootmount); 2643 sc->sc_rootmount = NULL; 2644 } 2645 break; 2646 case G_RAID3_DEVICE_STATE_COMPLETE: 2647 /* 2648 * Genid need to be bumped immediately, so do it here. 2649 */ 2650 if ((sc->sc_bump_id & G_RAID3_BUMP_GENID) != 0) { 2651 sc->sc_bump_id &= ~G_RAID3_BUMP_GENID; 2652 g_raid3_bump_genid(sc); 2653 } 2654 2655 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NEW) > 0) 2656 return; 2657 KASSERT(g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) >= 2658 sc->sc_ndisks - 1, 2659 ("Too few ACTIVE components in COMPLETE state (device %s).", 2660 sc->sc_name)); 2661 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) == 2662 sc->sc_ndisks - 1) { 2663 state = G_RAID3_DEVICE_STATE_DEGRADED; 2664 G_RAID3_DEBUG(1, 2665 "Device %s state changed from %s to %s.", 2666 sc->sc_name, g_raid3_device_state2str(sc->sc_state), 2667 g_raid3_device_state2str(state)); 2668 sc->sc_state = state; 2669 } 2670 if (sc->sc_provider == NULL) 2671 g_raid3_launch_provider(sc); 2672 if (sc->sc_rootmount != NULL) { 2673 G_RAID3_DEBUG(1, "root_mount_rel[%u] %p", __LINE__, 2674 sc->sc_rootmount); 2675 root_mount_rel(sc->sc_rootmount); 2676 sc->sc_rootmount = NULL; 2677 } 2678 break; 2679 default: 2680 KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_name, 2681 g_raid3_device_state2str(sc->sc_state))); 2682 break; 2683 } 2684 } 2685 2686 /* 2687 * Update disk state and device state if needed. 2688 */ 2689 #define DISK_STATE_CHANGED() G_RAID3_DEBUG(1, \ 2690 "Disk %s state changed from %s to %s (device %s).", \ 2691 g_raid3_get_diskname(disk), \ 2692 g_raid3_disk_state2str(disk->d_state), \ 2693 g_raid3_disk_state2str(state), sc->sc_name) 2694 static int 2695 g_raid3_update_disk(struct g_raid3_disk *disk, u_int state) 2696 { 2697 struct g_raid3_softc *sc; 2698 2699 sc = disk->d_softc; 2700 sx_assert(&sc->sc_lock, SX_XLOCKED); 2701 2702 again: 2703 G_RAID3_DEBUG(3, "Changing disk %s state from %s to %s.", 2704 g_raid3_get_diskname(disk), g_raid3_disk_state2str(disk->d_state), 2705 g_raid3_disk_state2str(state)); 2706 switch (state) { 2707 case G_RAID3_DISK_STATE_NEW: 2708 /* 2709 * Possible scenarios: 2710 * 1. New disk arrive. 2711 */ 2712 /* Previous state should be NONE. 
*/ 2713 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NONE, 2714 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2715 g_raid3_disk_state2str(disk->d_state))); 2716 DISK_STATE_CHANGED(); 2717 2718 disk->d_state = state; 2719 G_RAID3_DEBUG(1, "Device %s: provider %s detected.", 2720 sc->sc_name, g_raid3_get_diskname(disk)); 2721 if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) 2722 break; 2723 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2724 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2725 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2726 g_raid3_device_state2str(sc->sc_state), 2727 g_raid3_get_diskname(disk), 2728 g_raid3_disk_state2str(disk->d_state))); 2729 state = g_raid3_determine_state(disk); 2730 if (state != G_RAID3_DISK_STATE_NONE) 2731 goto again; 2732 break; 2733 case G_RAID3_DISK_STATE_ACTIVE: 2734 /* 2735 * Possible scenarios: 2736 * 1. New disk does not need synchronization. 2737 * 2. Synchronization process finished successfully. 2738 */ 2739 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2740 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2741 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2742 g_raid3_device_state2str(sc->sc_state), 2743 g_raid3_get_diskname(disk), 2744 g_raid3_disk_state2str(disk->d_state))); 2745 /* Previous state should be NEW or SYNCHRONIZING. */ 2746 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW || 2747 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2748 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2749 g_raid3_disk_state2str(disk->d_state))); 2750 DISK_STATE_CHANGED(); 2751 2752 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 2753 disk->d_flags &= ~G_RAID3_DISK_FLAG_SYNCHRONIZING; 2754 disk->d_flags &= ~G_RAID3_DISK_FLAG_FORCE_SYNC; 2755 g_raid3_sync_stop(sc, 0); 2756 } 2757 disk->d_state = state; 2758 disk->d_sync.ds_offset = 0; 2759 disk->d_sync.ds_offset_done = 0; 2760 g_raid3_update_idle(sc, disk); 2761 g_raid3_update_metadata(disk); 2762 G_RAID3_DEBUG(1, "Device %s: provider %s activated.", 2763 sc->sc_name, g_raid3_get_diskname(disk)); 2764 break; 2765 case G_RAID3_DISK_STATE_STALE: 2766 /* 2767 * Possible scenarios: 2768 * 1. Stale disk was connected. 2769 */ 2770 /* Previous state should be NEW. */ 2771 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2772 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2773 g_raid3_disk_state2str(disk->d_state))); 2774 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2775 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2776 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2777 g_raid3_device_state2str(sc->sc_state), 2778 g_raid3_get_diskname(disk), 2779 g_raid3_disk_state2str(disk->d_state))); 2780 /* 2781 * STALE state is only possible if device is marked 2782 * NOAUTOSYNC. 2783 */ 2784 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_NOAUTOSYNC) != 0, 2785 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2786 g_raid3_device_state2str(sc->sc_state), 2787 g_raid3_get_diskname(disk), 2788 g_raid3_disk_state2str(disk->d_state))); 2789 DISK_STATE_CHANGED(); 2790 2791 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2792 disk->d_state = state; 2793 g_raid3_update_metadata(disk); 2794 G_RAID3_DEBUG(0, "Device %s: provider %s is stale.", 2795 sc->sc_name, g_raid3_get_diskname(disk)); 2796 break; 2797 case G_RAID3_DISK_STATE_SYNCHRONIZING: 2798 /* 2799 * Possible scenarios: 2800 * 1. Disk which needs synchronization was connected. 2801 */ 2802 /* Previous state should be NEW. 
*/ 2803 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2804 ("Wrong disk state (%s, %s).", g_raid3_get_diskname(disk), 2805 g_raid3_disk_state2str(disk->d_state))); 2806 KASSERT(sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2807 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE, 2808 ("Wrong device state (%s, %s, %s, %s).", sc->sc_name, 2809 g_raid3_device_state2str(sc->sc_state), 2810 g_raid3_get_diskname(disk), 2811 g_raid3_disk_state2str(disk->d_state))); 2812 DISK_STATE_CHANGED(); 2813 2814 if (disk->d_state == G_RAID3_DISK_STATE_NEW) 2815 disk->d_flags &= ~G_RAID3_DISK_FLAG_DIRTY; 2816 disk->d_state = state; 2817 if (sc->sc_provider != NULL) { 2818 g_raid3_sync_start(sc); 2819 g_raid3_update_metadata(disk); 2820 } 2821 break; 2822 case G_RAID3_DISK_STATE_DISCONNECTED: 2823 /* 2824 * Possible scenarios: 2825 * 1. Device wasn't running yet, but a disk disappeared. 2826 * 2. Disk was active and disappeared. 2827 * 3. Disk disappeared during the synchronization process. 2828 */ 2829 if (sc->sc_state == G_RAID3_DEVICE_STATE_DEGRADED || 2830 sc->sc_state == G_RAID3_DEVICE_STATE_COMPLETE) { 2831 /* 2832 * Previous state should be ACTIVE, STALE or 2833 * SYNCHRONIZING. 2834 */ 2835 KASSERT(disk->d_state == G_RAID3_DISK_STATE_ACTIVE || 2836 disk->d_state == G_RAID3_DISK_STATE_STALE || 2837 disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING, 2838 ("Wrong disk state (%s, %s).", 2839 g_raid3_get_diskname(disk), 2840 g_raid3_disk_state2str(disk->d_state))); 2841 } else if (sc->sc_state == G_RAID3_DEVICE_STATE_STARTING) { 2842 /* Previous state should be NEW. */ 2843 KASSERT(disk->d_state == G_RAID3_DISK_STATE_NEW, 2844 ("Wrong disk state (%s, %s).", 2845 g_raid3_get_diskname(disk), 2846 g_raid3_disk_state2str(disk->d_state))); 2847 /* 2848 * Reset bumping syncid if disk disappeared in STARTING 2849 * state. 2850 */ 2851 if ((sc->sc_bump_id & G_RAID3_BUMP_SYNCID) != 0) 2852 sc->sc_bump_id &= ~G_RAID3_BUMP_SYNCID; 2853 #ifdef INVARIANTS 2854 } else { 2855 KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", 2856 sc->sc_name, 2857 g_raid3_device_state2str(sc->sc_state), 2858 g_raid3_get_diskname(disk), 2859 g_raid3_disk_state2str(disk->d_state))); 2860 #endif 2861 } 2862 DISK_STATE_CHANGED(); 2863 G_RAID3_DEBUG(0, "Device %s: provider %s disconnected.", 2864 sc->sc_name, g_raid3_get_diskname(disk)); 2865 2866 g_raid3_destroy_disk(disk); 2867 break; 2868 default: 2869 KASSERT(1 == 0, ("Unknown state (%u).", state)); 2870 break; 2871 } 2872 return (0); 2873 } 2874 #undef DISK_STATE_CHANGED 2875 2876 int 2877 g_raid3_read_metadata(struct g_consumer *cp, struct g_raid3_metadata *md) 2878 { 2879 struct g_provider *pp; 2880 u_char *buf; 2881 int error; 2882 2883 g_topology_assert(); 2884 2885 error = g_access(cp, 1, 0, 0); 2886 if (error != 0) 2887 return (error); 2888 pp = cp->provider; 2889 g_topology_unlock(); 2890 /* Metadata is stored in the last sector. */ 2891 buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, 2892 &error); 2893 g_topology_lock(); 2894 g_access(cp, -1, 0, 0); 2895 if (buf == NULL) { 2896 G_RAID3_DEBUG(1, "Cannot read metadata from %s (error=%d).", 2897 cp->provider->name, error); 2898 return (error); 2899 } 2900 2901 /* Decode metadata.
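 * The magic string and version are checked before the decode error is
 * acted on, so providers without graid3 metadata are rejected without
 * logging a spurious MD5 mismatch.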
*/ 2902 error = raid3_metadata_decode(buf, md); 2903 g_free(buf); 2904 if (strcmp(md->md_magic, G_RAID3_MAGIC) != 0) 2905 return (EINVAL); 2906 if (md->md_version > G_RAID3_VERSION) { 2907 G_RAID3_DEBUG(0, 2908 "Kernel module is too old to handle metadata from %s.", 2909 cp->provider->name); 2910 return (EINVAL); 2911 } 2912 if (error != 0) { 2913 G_RAID3_DEBUG(1, "MD5 metadata hash mismatch for provider %s.", 2914 cp->provider->name); 2915 return (error); 2916 } 2917 if (md->md_sectorsize > MAXPHYS) { 2918 G_RAID3_DEBUG(0, "The sector size is too big."); 2919 return (EINVAL); 2920 } 2921 2922 return (0); 2923 } 2924 2925 static int 2926 g_raid3_check_metadata(struct g_raid3_softc *sc, struct g_provider *pp, 2927 struct g_raid3_metadata *md) 2928 { 2929 2930 if (md->md_no >= sc->sc_ndisks) { 2931 G_RAID3_DEBUG(1, "Invalid disk %s number (no=%u), skipping.", 2932 pp->name, md->md_no); 2933 return (EINVAL); 2934 } 2935 if (sc->sc_disks[md->md_no].d_state != G_RAID3_DISK_STATE_NODISK) { 2936 G_RAID3_DEBUG(1, "Disk %s (no=%u) already exists, skipping.", 2937 pp->name, md->md_no); 2938 return (EEXIST); 2939 } 2940 if (md->md_all != sc->sc_ndisks) { 2941 G_RAID3_DEBUG(1, 2942 "Invalid '%s' field on disk %s (device %s), skipping.", 2943 "md_all", pp->name, sc->sc_name); 2944 return (EINVAL); 2945 } 2946 if ((md->md_mediasize % md->md_sectorsize) != 0) { 2947 G_RAID3_DEBUG(1, "Invalid metadata (mediasize %% sectorsize != " 2948 "0) on disk %s (device %s), skipping.", pp->name, 2949 sc->sc_name); 2950 return (EINVAL); 2951 } 2952 if (md->md_mediasize != sc->sc_mediasize) { 2953 G_RAID3_DEBUG(1, 2954 "Invalid '%s' field on disk %s (device %s), skipping.", 2955 "md_mediasize", pp->name, sc->sc_name); 2956 return (EINVAL); 2957 } 2958 if ((md->md_mediasize % (sc->sc_ndisks - 1)) != 0) { 2959 G_RAID3_DEBUG(1, 2960 "Invalid '%s' field on disk %s (device %s), skipping.", 2961 "md_mediasize", pp->name, sc->sc_name); 2962 return (EINVAL); 2963 } 2964 if ((sc->sc_mediasize / (sc->sc_ndisks - 1)) > pp->mediasize) { 2965 G_RAID3_DEBUG(1, 2966 "Invalid size of disk %s (device %s), skipping.", pp->name, 2967 sc->sc_name); 2968 return (EINVAL); 2969 } 2970 if ((md->md_sectorsize / pp->sectorsize) < sc->sc_ndisks - 1) { 2971 G_RAID3_DEBUG(1, 2972 "Invalid '%s' field on disk %s (device %s), skipping.", 2973 "md_sectorsize", pp->name, sc->sc_name); 2974 return (EINVAL); 2975 } 2976 if (md->md_sectorsize != sc->sc_sectorsize) { 2977 G_RAID3_DEBUG(1, 2978 "Invalid '%s' field on disk %s (device %s), skipping.", 2979 "md_sectorsize", pp->name, sc->sc_name); 2980 return (EINVAL); 2981 } 2982 if ((sc->sc_sectorsize % pp->sectorsize) != 0) { 2983 G_RAID3_DEBUG(1, 2984 "Invalid sector size of disk %s (device %s), skipping.", 2985 pp->name, sc->sc_name); 2986 return (EINVAL); 2987 } 2988 if ((md->md_mflags & ~G_RAID3_DEVICE_FLAG_MASK) != 0) { 2989 G_RAID3_DEBUG(1, 2990 "Invalid device flags on disk %s (device %s), skipping.", 2991 pp->name, sc->sc_name); 2992 return (EINVAL); 2993 } 2994 if ((md->md_mflags & G_RAID3_DEVICE_FLAG_VERIFY) != 0 && 2995 (md->md_mflags & G_RAID3_DEVICE_FLAG_ROUND_ROBIN) != 0) { 2996 /* 2997 * VERIFY and ROUND-ROBIN options are mutually exclusive.
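 * (VERIFY wants every read to include the parity component so it can be
 * checked, while ROUND-ROBIN deliberately skips parity when balancing
 * reads, so the two cannot be honored at the same time.)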
2998 */ 2999 G_RAID3_DEBUG(1, "Both VERIFY and ROUND-ROBIN flags exist on " 3000 "disk %s (device %s), skipping.", pp->name, sc->sc_name); 3001 return (EINVAL); 3002 } 3003 if ((md->md_dflags & ~G_RAID3_DISK_FLAG_MASK) != 0) { 3004 G_RAID3_DEBUG(1, 3005 "Invalid disk flags on disk %s (device %s), skipping.", 3006 pp->name, sc->sc_name); 3007 return (EINVAL); 3008 } 3009 return (0); 3010 } 3011 3012 int 3013 g_raid3_add_disk(struct g_raid3_softc *sc, struct g_provider *pp, 3014 struct g_raid3_metadata *md) 3015 { 3016 struct g_raid3_disk *disk; 3017 int error; 3018 3019 g_topology_assert_not(); 3020 G_RAID3_DEBUG(2, "Adding disk %s.", pp->name); 3021 3022 error = g_raid3_check_metadata(sc, pp, md); 3023 if (error != 0) 3024 return (error); 3025 if (sc->sc_state != G_RAID3_DEVICE_STATE_STARTING && 3026 md->md_genid < sc->sc_genid) { 3027 G_RAID3_DEBUG(0, "Component %s (device %s) broken, skipping.", 3028 pp->name, sc->sc_name); 3029 return (EINVAL); 3030 } 3031 disk = g_raid3_init_disk(sc, pp, md, &error); 3032 if (disk == NULL) 3033 return (error); 3034 error = g_raid3_event_send(disk, G_RAID3_DISK_STATE_NEW, 3035 G_RAID3_EVENT_WAIT); 3036 if (error != 0) 3037 return (error); 3038 if (md->md_version < G_RAID3_VERSION) { 3039 G_RAID3_DEBUG(0, "Upgrading metadata on %s (v%d->v%d).", 3040 pp->name, md->md_version, G_RAID3_VERSION); 3041 g_raid3_update_metadata(disk); 3042 } 3043 return (0); 3044 } 3045 3046 static void 3047 g_raid3_destroy_delayed(void *arg, int flag) 3048 { 3049 struct g_raid3_softc *sc; 3050 int error; 3051 3052 if (flag == EV_CANCEL) { 3053 G_RAID3_DEBUG(1, "Destroying canceled."); 3054 return; 3055 } 3056 sc = arg; 3057 g_topology_unlock(); 3058 sx_xlock(&sc->sc_lock); 3059 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) == 0, 3060 ("DESTROY flag set on %s.", sc->sc_name)); 3061 KASSERT((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0, 3062 ("DESTROYING flag not set on %s.", sc->sc_name)); 3063 G_RAID3_DEBUG(0, "Destroying %s (delayed).", sc->sc_name); 3064 error = g_raid3_destroy(sc, G_RAID3_DESTROY_SOFT); 3065 if (error != 0) { 3066 G_RAID3_DEBUG(0, "Cannot destroy %s.", sc->sc_name); 3067 sx_xunlock(&sc->sc_lock); 3068 } 3069 g_topology_lock(); 3070 } 3071 3072 static int 3073 g_raid3_access(struct g_provider *pp, int acr, int acw, int ace) 3074 { 3075 struct g_raid3_softc *sc; 3076 int dcr, dcw, dce, error = 0; 3077 3078 g_topology_assert(); 3079 G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr, 3080 acw, ace); 3081 3082 sc = pp->geom->softc; 3083 if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0) 3084 return (0); 3085 KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name)); 3086 3087 dcr = pp->acr + acr; 3088 dcw = pp->acw + acw; 3089 dce = pp->ace + ace; 3090 3091 g_topology_unlock(); 3092 sx_xlock(&sc->sc_lock); 3093 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROY) != 0 || 3094 g_raid3_ndisks(sc, G_RAID3_DISK_STATE_ACTIVE) < sc->sc_ndisks - 1) { 3095 if (acr > 0 || acw > 0 || ace > 0) 3096 error = ENXIO; 3097 goto end; 3098 } 3099 if (dcw == 0 && !sc->sc_idle) 3100 g_raid3_idle(sc, dcw); 3101 if ((sc->sc_flags & G_RAID3_DEVICE_FLAG_DESTROYING) != 0) { 3102 if (acr > 0 || acw > 0 || ace > 0) { 3103 error = ENXIO; 3104 goto end; 3105 } 3106 if (dcr == 0 && dcw == 0 && dce == 0) { 3107 g_post_event(g_raid3_destroy_delayed, sc, M_WAITOK, 3108 sc, NULL); 3109 } 3110 } 3111 end: 3112 sx_xunlock(&sc->sc_lock); 3113 g_topology_lock(); 3114 return (error); 3115 } 3116 3117 static struct g_geom * 3118 g_raid3_create(struct g_class *mp, const 
struct g_raid3_metadata *md) 3119 { 3120 struct g_raid3_softc *sc; 3121 struct g_geom *gp; 3122 int error, timeout; 3123 u_int n; 3124 3125 g_topology_assert(); 3126 G_RAID3_DEBUG(1, "Creating device %s (id=%u).", md->md_name, md->md_id); 3127 3128 /* One disk is minimum. */ 3129 if (md->md_all < 1) 3130 return (NULL); 3131 /* 3132 * Action geom. 3133 */ 3134 gp = g_new_geomf(mp, "%s", md->md_name); 3135 sc = malloc(sizeof(*sc), M_RAID3, M_WAITOK | M_ZERO); 3136 sc->sc_disks = malloc(sizeof(struct g_raid3_disk) * md->md_all, M_RAID3, 3137 M_WAITOK | M_ZERO); 3138 gp->start = g_raid3_start; 3139 gp->orphan = g_raid3_orphan; 3140 gp->access = g_raid3_access; 3141 gp->dumpconf = g_raid3_dumpconf; 3142 3143 sc->sc_id = md->md_id; 3144 sc->sc_mediasize = md->md_mediasize; 3145 sc->sc_sectorsize = md->md_sectorsize; 3146 sc->sc_ndisks = md->md_all; 3147 sc->sc_round_robin = 0; 3148 sc->sc_flags = md->md_mflags; 3149 sc->sc_bump_id = 0; 3150 sc->sc_idle = 1; 3151 sc->sc_last_write = time_uptime; 3152 sc->sc_writes = 0; 3153 for (n = 0; n < sc->sc_ndisks; n++) { 3154 sc->sc_disks[n].d_softc = sc; 3155 sc->sc_disks[n].d_no = n; 3156 sc->sc_disks[n].d_state = G_RAID3_DISK_STATE_NODISK; 3157 } 3158 sx_init(&sc->sc_lock, "graid3:lock"); 3159 bioq_init(&sc->sc_queue); 3160 mtx_init(&sc->sc_queue_mtx, "graid3:queue", NULL, MTX_DEF); 3161 bioq_init(&sc->sc_regular_delayed); 3162 bioq_init(&sc->sc_inflight); 3163 bioq_init(&sc->sc_sync_delayed); 3164 TAILQ_INIT(&sc->sc_events); 3165 mtx_init(&sc->sc_events_mtx, "graid3:events", NULL, MTX_DEF); 3166 callout_init(&sc->sc_callout, CALLOUT_MPSAFE); 3167 sc->sc_state = G_RAID3_DEVICE_STATE_STARTING; 3168 gp->softc = sc; 3169 sc->sc_geom = gp; 3170 sc->sc_provider = NULL; 3171 /* 3172 * Synchronization geom. 3173 */ 3174 gp = g_new_geomf(mp, "%s.sync", md->md_name); 3175 gp->softc = sc; 3176 gp->orphan = g_raid3_orphan; 3177 sc->sc_sync.ds_geom = gp; 3178 3179 if (!g_raid3_use_malloc) { 3180 sc->sc_zones[G_RAID3_ZONE_64K].sz_zone = uma_zcreate("gr3:64k", 3181 65536, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3182 UMA_ALIGN_PTR, 0); 3183 sc->sc_zones[G_RAID3_ZONE_64K].sz_inuse = 0; 3184 sc->sc_zones[G_RAID3_ZONE_64K].sz_max = g_raid3_n64k; 3185 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested = 3186 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed = 0; 3187 sc->sc_zones[G_RAID3_ZONE_16K].sz_zone = uma_zcreate("gr3:16k", 3188 16384, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3189 UMA_ALIGN_PTR, 0); 3190 sc->sc_zones[G_RAID3_ZONE_16K].sz_inuse = 0; 3191 sc->sc_zones[G_RAID3_ZONE_16K].sz_max = g_raid3_n16k; 3192 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested = 3193 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed = 0; 3194 sc->sc_zones[G_RAID3_ZONE_4K].sz_zone = uma_zcreate("gr3:4k", 3195 4096, g_raid3_uma_ctor, g_raid3_uma_dtor, NULL, NULL, 3196 UMA_ALIGN_PTR, 0); 3197 sc->sc_zones[G_RAID3_ZONE_4K].sz_inuse = 0; 3198 sc->sc_zones[G_RAID3_ZONE_4K].sz_max = g_raid3_n4k; 3199 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested = 3200 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed = 0; 3201 } 3202 3203 error = kproc_create(g_raid3_worker, sc, &sc->sc_worker, 0, 0, 3204 "g_raid3 %s", md->md_name); 3205 if (error != 0) { 3206 G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.", 3207 sc->sc_name); 3208 if (!g_raid3_use_malloc) { 3209 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone); 3210 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone); 3211 uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone); 3212 } 3213 g_destroy_geom(sc->sc_sync.ds_geom); 3214 mtx_destroy(&sc->sc_events_mtx); 3215 
mtx_destroy(&sc->sc_queue_mtx); 3216 sx_destroy(&sc->sc_lock); 3217 g_destroy_geom(sc->sc_geom); 3218 free(sc->sc_disks, M_RAID3); 3219 free(sc, M_RAID3); 3220 return (NULL); 3221 } 3222 3223 G_RAID3_DEBUG(1, "Device %s created (%u components, id=%u).", 3224 sc->sc_name, sc->sc_ndisks, sc->sc_id); 3225 3226 sc->sc_rootmount = root_mount_hold("GRAID3"); 3227 G_RAID3_DEBUG(1, "root_mount_hold %p", sc->sc_rootmount); 3228 3229 /* 3230 * Run timeout. 3231 */ 3232 timeout = atomic_load_acq_int(&g_raid3_timeout); 3233 callout_reset(&sc->sc_callout, timeout * hz, g_raid3_go, sc); 3234 return (sc->sc_geom); 3235 } 3236 3237 int 3238 g_raid3_destroy(struct g_raid3_softc *sc, int how) 3239 { 3240 struct g_provider *pp; 3241 3242 g_topology_assert_not(); 3243 if (sc == NULL) 3244 return (ENXIO); 3245 sx_assert(&sc->sc_lock, SX_XLOCKED); 3246 3247 pp = sc->sc_provider; 3248 if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { 3249 switch (how) { 3250 case G_RAID3_DESTROY_SOFT: 3251 G_RAID3_DEBUG(1, 3252 "Device %s is still open (r%dw%de%d).", pp->name, 3253 pp->acr, pp->acw, pp->ace); 3254 return (EBUSY); 3255 case G_RAID3_DESTROY_DELAYED: 3256 G_RAID3_DEBUG(1, 3257 "Device %s will be destroyed on last close.", 3258 pp->name); 3259 if (sc->sc_syncdisk != NULL) 3260 g_raid3_sync_stop(sc, 1); 3261 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROYING; 3262 return (EBUSY); 3263 case G_RAID3_DESTROY_HARD: 3264 G_RAID3_DEBUG(1, "Device %s is still open, so it " 3265 "can't be definitely removed.", pp->name); 3266 break; 3267 } 3268 } 3269 3270 g_topology_lock(); 3271 if (sc->sc_geom->softc == NULL) { 3272 g_topology_unlock(); 3273 return (0); 3274 } 3275 sc->sc_geom->softc = NULL; 3276 sc->sc_sync.ds_geom->softc = NULL; 3277 g_topology_unlock(); 3278 3279 sc->sc_flags |= G_RAID3_DEVICE_FLAG_DESTROY; 3280 sc->sc_flags |= G_RAID3_DEVICE_FLAG_WAIT; 3281 G_RAID3_DEBUG(4, "%s: Waking up %p.", __func__, sc); 3282 sx_xunlock(&sc->sc_lock); 3283 mtx_lock(&sc->sc_queue_mtx); 3284 wakeup(sc); 3285 wakeup(&sc->sc_queue); 3286 mtx_unlock(&sc->sc_queue_mtx); 3287 G_RAID3_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); 3288 while (sc->sc_worker != NULL) 3289 tsleep(&sc->sc_worker, PRIBIO, "r3:destroy", hz / 5); 3290 G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); 3291 sx_xlock(&sc->sc_lock); 3292 g_raid3_destroy_device(sc); 3293 free(sc->sc_disks, M_RAID3); 3294 free(sc, M_RAID3); 3295 return (0); 3296 } 3297 3298 static void 3299 g_raid3_taste_orphan(struct g_consumer *cp) 3300 { 3301 3302 KASSERT(1 == 0, ("%s called while tasting %s.", __func__, 3303 cp->provider->name)); 3304 } 3305 3306 static struct g_geom * 3307 g_raid3_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) 3308 { 3309 struct g_raid3_metadata md; 3310 struct g_raid3_softc *sc; 3311 struct g_consumer *cp; 3312 struct g_geom *gp; 3313 int error; 3314 3315 g_topology_assert(); 3316 g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); 3317 G_RAID3_DEBUG(2, "Tasting %s.", pp->name); 3318 3319 gp = g_new_geomf(mp, "raid3:taste"); 3320 /* This orphan function should be never called. 
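 * The consumer created below exists only for the duration of the metadata
 * read and is detached and destroyed immediately afterwards.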
*/ 3321 gp->orphan = g_raid3_taste_orphan; 3322 cp = g_new_consumer(gp); 3323 g_attach(cp, pp); 3324 error = g_raid3_read_metadata(cp, &md); 3325 g_detach(cp); 3326 g_destroy_consumer(cp); 3327 g_destroy_geom(gp); 3328 if (error != 0) 3329 return (NULL); 3330 gp = NULL; 3331 3332 if (md.md_provider[0] != '\0' && 3333 !g_compare_names(md.md_provider, pp->name)) 3334 return (NULL); 3335 if (md.md_provsize != 0 && md.md_provsize != pp->mediasize) 3336 return (NULL); 3337 if (g_raid3_debug >= 2) 3338 raid3_metadata_dump(&md); 3339 3340 /* 3341 * Let's check if device already exists. 3342 */ 3343 sc = NULL; 3344 LIST_FOREACH(gp, &mp->geom, geom) { 3345 sc = gp->softc; 3346 if (sc == NULL) 3347 continue; 3348 if (sc->sc_sync.ds_geom == gp) 3349 continue; 3350 if (strcmp(md.md_name, sc->sc_name) != 0) 3351 continue; 3352 if (md.md_id != sc->sc_id) { 3353 G_RAID3_DEBUG(0, "Device %s already configured.", 3354 sc->sc_name); 3355 return (NULL); 3356 } 3357 break; 3358 } 3359 if (gp == NULL) { 3360 gp = g_raid3_create(mp, &md); 3361 if (gp == NULL) { 3362 G_RAID3_DEBUG(0, "Cannot create device %s.", 3363 md.md_name); 3364 return (NULL); 3365 } 3366 sc = gp->softc; 3367 } 3368 G_RAID3_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name); 3369 g_topology_unlock(); 3370 sx_xlock(&sc->sc_lock); 3371 error = g_raid3_add_disk(sc, pp, &md); 3372 if (error != 0) { 3373 G_RAID3_DEBUG(0, "Cannot add disk %s to %s (error=%d).", 3374 pp->name, gp->name, error); 3375 if (g_raid3_ndisks(sc, G_RAID3_DISK_STATE_NODISK) == 3376 sc->sc_ndisks) { 3377 g_cancel_event(sc); 3378 g_raid3_destroy(sc, G_RAID3_DESTROY_HARD); 3379 g_topology_lock(); 3380 return (NULL); 3381 } 3382 gp = NULL; 3383 } 3384 sx_xunlock(&sc->sc_lock); 3385 g_topology_lock(); 3386 return (gp); 3387 } 3388 3389 static int 3390 g_raid3_destroy_geom(struct gctl_req *req __unused, struct g_class *mp __unused, 3391 struct g_geom *gp) 3392 { 3393 struct g_raid3_softc *sc; 3394 int error; 3395 3396 g_topology_unlock(); 3397 sc = gp->softc; 3398 sx_xlock(&sc->sc_lock); 3399 g_cancel_event(sc); 3400 error = g_raid3_destroy(gp->softc, G_RAID3_DESTROY_SOFT); 3401 if (error != 0) 3402 sx_xunlock(&sc->sc_lock); 3403 g_topology_lock(); 3404 return (error); 3405 } 3406 3407 static void 3408 g_raid3_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp, 3409 struct g_consumer *cp, struct g_provider *pp) 3410 { 3411 struct g_raid3_softc *sc; 3412 3413 g_topology_assert(); 3414 3415 sc = gp->softc; 3416 if (sc == NULL) 3417 return; 3418 /* Skip synchronization geom. */ 3419 if (gp == sc->sc_sync.ds_geom) 3420 return; 3421 if (pp != NULL) { 3422 /* Nothing here. 
*/ 3423 } else if (cp != NULL) { 3424 struct g_raid3_disk *disk; 3425 3426 disk = cp->private; 3427 if (disk == NULL) 3428 return; 3429 g_topology_unlock(); 3430 sx_xlock(&sc->sc_lock); 3431 sbuf_printf(sb, "%s<Type>", indent); 3432 if (disk->d_no == sc->sc_ndisks - 1) 3433 sbuf_printf(sb, "PARITY"); 3434 else 3435 sbuf_printf(sb, "DATA"); 3436 sbuf_printf(sb, "</Type>\n"); 3437 sbuf_printf(sb, "%s<Number>%u</Number>\n", indent, 3438 (u_int)disk->d_no); 3439 if (disk->d_state == G_RAID3_DISK_STATE_SYNCHRONIZING) { 3440 sbuf_printf(sb, "%s<Synchronized>", indent); 3441 if (disk->d_sync.ds_offset == 0) 3442 sbuf_printf(sb, "0%%"); 3443 else { 3444 sbuf_printf(sb, "%u%%", 3445 (u_int)((disk->d_sync.ds_offset * 100) / 3446 (sc->sc_mediasize / (sc->sc_ndisks - 1)))); 3447 } 3448 sbuf_printf(sb, "</Synchronized>\n"); 3449 } 3450 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, 3451 disk->d_sync.ds_syncid); 3452 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, disk->d_genid); 3453 sbuf_printf(sb, "%s<Flags>", indent); 3454 if (disk->d_flags == 0) 3455 sbuf_printf(sb, "NONE"); 3456 else { 3457 int first = 1; 3458 3459 #define ADD_FLAG(flag, name) do { \ 3460 if ((disk->d_flags & (flag)) != 0) { \ 3461 if (!first) \ 3462 sbuf_printf(sb, ", "); \ 3463 else \ 3464 first = 0; \ 3465 sbuf_printf(sb, name); \ 3466 } \ 3467 } while (0) 3468 ADD_FLAG(G_RAID3_DISK_FLAG_DIRTY, "DIRTY"); 3469 ADD_FLAG(G_RAID3_DISK_FLAG_HARDCODED, "HARDCODED"); 3470 ADD_FLAG(G_RAID3_DISK_FLAG_SYNCHRONIZING, 3471 "SYNCHRONIZING"); 3472 ADD_FLAG(G_RAID3_DISK_FLAG_FORCE_SYNC, "FORCE_SYNC"); 3473 ADD_FLAG(G_RAID3_DISK_FLAG_BROKEN, "BROKEN"); 3474 #undef ADD_FLAG 3475 } 3476 sbuf_printf(sb, "</Flags>\n"); 3477 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3478 g_raid3_disk_state2str(disk->d_state)); 3479 sx_xunlock(&sc->sc_lock); 3480 g_topology_lock(); 3481 } else { 3482 g_topology_unlock(); 3483 sx_xlock(&sc->sc_lock); 3484 if (!g_raid3_use_malloc) { 3485 sbuf_printf(sb, 3486 "%s<Zone4kRequested>%u</Zone4kRequested>\n", indent, 3487 sc->sc_zones[G_RAID3_ZONE_4K].sz_requested); 3488 sbuf_printf(sb, 3489 "%s<Zone4kFailed>%u</Zone4kFailed>\n", indent, 3490 sc->sc_zones[G_RAID3_ZONE_4K].sz_failed); 3491 sbuf_printf(sb, 3492 "%s<Zone16kRequested>%u</Zone16kRequested>\n", indent, 3493 sc->sc_zones[G_RAID3_ZONE_16K].sz_requested); 3494 sbuf_printf(sb, 3495 "%s<Zone16kFailed>%u</Zone16kFailed>\n", indent, 3496 sc->sc_zones[G_RAID3_ZONE_16K].sz_failed); 3497 sbuf_printf(sb, 3498 "%s<Zone64kRequested>%u</Zone64kRequested>\n", indent, 3499 sc->sc_zones[G_RAID3_ZONE_64K].sz_requested); 3500 sbuf_printf(sb, 3501 "%s<Zone64kFailed>%u</Zone64kFailed>\n", indent, 3502 sc->sc_zones[G_RAID3_ZONE_64K].sz_failed); 3503 } 3504 sbuf_printf(sb, "%s<ID>%u</ID>\n", indent, (u_int)sc->sc_id); 3505 sbuf_printf(sb, "%s<SyncID>%u</SyncID>\n", indent, sc->sc_syncid); 3506 sbuf_printf(sb, "%s<GenID>%u</GenID>\n", indent, sc->sc_genid); 3507 sbuf_printf(sb, "%s<Flags>", indent); 3508 if (sc->sc_flags == 0) 3509 sbuf_printf(sb, "NONE"); 3510 else { 3511 int first = 1; 3512 3513 #define ADD_FLAG(flag, name) do { \ 3514 if ((sc->sc_flags & (flag)) != 0) { \ 3515 if (!first) \ 3516 sbuf_printf(sb, ", "); \ 3517 else \ 3518 first = 0; \ 3519 sbuf_printf(sb, name); \ 3520 } \ 3521 } while (0) 3522 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOFAILSYNC, "NOFAILSYNC"); 3523 ADD_FLAG(G_RAID3_DEVICE_FLAG_NOAUTOSYNC, "NOAUTOSYNC"); 3524 ADD_FLAG(G_RAID3_DEVICE_FLAG_ROUND_ROBIN, 3525 "ROUND-ROBIN"); 3526 ADD_FLAG(G_RAID3_DEVICE_FLAG_VERIFY, "VERIFY"); 3527 #undef ADD_FLAG 3528 } 
3529 sbuf_printf(sb, "</Flags>\n"); 3530 sbuf_printf(sb, "%s<Components>%u</Components>\n", indent, 3531 sc->sc_ndisks); 3532 sbuf_printf(sb, "%s<State>%s</State>\n", indent, 3533 g_raid3_device_state2str(sc->sc_state)); 3534 sx_xunlock(&sc->sc_lock); 3535 g_topology_lock(); 3536 } 3537 } 3538 3539 static void 3540 g_raid3_shutdown_pre_sync(void *arg, int howto) 3541 { 3542 struct g_class *mp; 3543 struct g_geom *gp, *gp2; 3544 struct g_raid3_softc *sc; 3545 int error; 3546 3547 mp = arg; 3548 DROP_GIANT(); 3549 g_topology_lock(); 3550 LIST_FOREACH_SAFE(gp, &mp->geom, geom, gp2) { 3551 if ((sc = gp->softc) == NULL) 3552 continue; 3553 /* Skip synchronization geom. */ 3554 if (gp == sc->sc_sync.ds_geom) 3555 continue; 3556 g_topology_unlock(); 3557 sx_xlock(&sc->sc_lock); 3558 g_cancel_event(sc); 3559 error = g_raid3_destroy(sc, G_RAID3_DESTROY_DELAYED); 3560 if (error != 0) 3561 sx_xunlock(&sc->sc_lock); 3562 g_topology_lock(); 3563 } 3564 g_topology_unlock(); 3565 PICKUP_GIANT(); 3566 } 3567 3568 static void 3569 g_raid3_init(struct g_class *mp) 3570 { 3571 3572 g_raid3_pre_sync = EVENTHANDLER_REGISTER(shutdown_pre_sync, 3573 g_raid3_shutdown_pre_sync, mp, SHUTDOWN_PRI_FIRST); 3574 if (g_raid3_pre_sync == NULL) 3575 G_RAID3_DEBUG(0, "Warning! Cannot register shutdown event."); 3576 } 3577 3578 static void 3579 g_raid3_fini(struct g_class *mp) 3580 { 3581 3582 if (g_raid3_pre_sync != NULL) 3583 EVENTHANDLER_DEREGISTER(shutdown_pre_sync, g_raid3_pre_sync); 3584 } 3585 3586 DECLARE_GEOM_CLASS(g_raid3_class, g_raid3); 3587
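/*
 * Usage note: the class registered above is typically compiled into the
 * kernel with "options GEOM_RAID3" or loaded at run time with
 * "kldload geom_raid3".  An illustrative userland session (see graid3(8)
 * for the authoritative syntax):
 *
 *	graid3 label -v data da0 da1 da2	# label a 3-component array
 *	newfs /dev/raid3/data			# provider is named raid3/<name>
 *
 * The provider name follows the "raid3/%s" format used by
 * g_raid3_launch_provider() above.
 */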